gdb/gdbserver/linux-x86-low.c
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

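/* Template for a 5-byte x86 relative jump: opcode 0xe9 followed by a
   32-bit little-endian displacement, which is patched in when the
   jump pad is wired up.  */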
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that the code below doesn't have to care whether it is i386 or
   amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};
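
/* A -1 entry above marks a GDB register (the x87/SSE registers) that
   is not part of this general-purpose regset; such registers are
   transferred through another regset instead.  */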

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

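    /* PTRACE_GET_THREAD_AREA fills in a struct user_desc; its second
       32-bit word is the segment base address, which is what we
       report.  */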
    *(int *) base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* The bottom three bits of the GS
				      selector are RPL and TI; shift
				      them off to get the GDT entry
				      index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp), (void *) (long) idx,
		(unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update target_regsets accordingly, maybe by moving
   target_regsets to linux_target_ops and setting the right one there,
   rather than having to modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
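/* The single-byte int3 trap instruction, used as the software
   breakpoint.  */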
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later, just before resuming the lwp;
	 here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum < DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the inferior's DR6 debug status register.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint
	(&proc->private->arch_private->debug_reg_state, type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint
	(&proc->private->arch_private->debug_reg_state, type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint
    (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

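  /* x86 leaves DR6 set after a watchpoint hit; clear it here so a
     stale status bit is not mistaken for a new hit on a later stop.  */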
  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo layout
   of the inferior's architecture.  Returns true if any conversion was
   done; false otherwise.  If DIRECTION is 1, copy from INF to NATIVE.
   If DIRECTION is 0, copy from NATIVE to INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
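/* Non-zero if GDB has advertised, via the "xmlRegisters=" qSupported
   feature, that it understands XML target descriptions.  */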
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		{
		  regset->size = 0;
		  break;
		}
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else
	    init_registers_amd64_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}

/* Process the "xmlRegisters=" part of the qSupported query.  Update
   the buffer size for PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* If GDB doesn't support XML, leave use_xml at zero.  If GDB sends
     "xmlRegisters=" with "i386" in the qSupported query, it supports
     x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* OK, we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

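/* Write LEN bytes from BUF into the inferior at *TO, then advance *TO
   past the bytes written.  */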
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

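/* Parse OP, a string of space-separated hex byte values such as
   "48 89 e6", into raw bytes stored at BUF.  Return the number of
   bytes written.  */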
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction into JJUMP_PAD_INSN; the
   caller is responsible for writing it at the tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* mov <tpaddr>,%rdi (64-bit immediate) */
  buf[i++] = 0xbf;
  *((unsigned long *) (buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* Spin-lock.  Try to install the collecting_t pointer (our %rsp) in
     *LOCKADDR with a compare-and-swap against zero, looping until the
     lock is observed free.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* mov <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* mov <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, living in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction into JJUMP_PAD_INSN; the
   caller is responsible for writing it at the tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *) (buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* Spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386.  If we cared about that, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
						%esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end);
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
	       "\t" "start_" #NAME ":" \
	       "\t" INSNS "\n" \
	       "\t" "end_" #NAME ":"); \
    } while (0)
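
/* For example, EMIT_ASM (pad_nop, "nop") copies the single NOP byte
   bracketed by the start_pad_nop/end_pad_nop labels into the jump pad
   at current_insn_ptr (the name "pad_nop" is only illustrative).  */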

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
	       "\t" "jmp end_" #NAME "\n" \
	       "\t" "start_" #NAME ":\n" \
	       "\t" INSNS "\n" \
	       "\t" "end_" #NAME ":\n" \
	       ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
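
/* EMIT_ASM32 brackets the template with .code32/.code64 so that a
   64-bit gdbserver can assemble 32-bit instruction sequences for a
   32-bit inferior; in a 32-bit build it is just EMIT_ASM.  */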

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  *((LONGEST *) (&buf[i])) = num;
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, living in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr>; this is the opcode byte that
			  the offset computation above accounts for.  */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

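/* The 32-bit emitters keep the 64-bit top-of-stack value in the
   %ebx:%eax register pair (%ebx holds the high word), which is why
   the arithmetic below pairs instructions such as add/adc and
   sub/sbb.  */
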
2041 static void
2042 i386_emit_epilogue (void)
2043 {
2044 EMIT_ASM32 (i386_epilogue,
2045 "mov 12(%ebp),%ecx\n\t"
2046 "mov %eax,(%ecx)\n\t"
2047 "mov %ebx,0x4(%ecx)\n\t"
2048 "xor %eax,%eax\n\t"
2049 "pop %ebp\n\t"
2050 "ret");
2051 }
2052
2053 static void
2054 i386_emit_add (void)
2055 {
2056 EMIT_ASM32 (i386_add,
2057 "add (%esp),%eax\n\t"
2058 "adc 0x4(%esp),%ebx\n\t"
2059 "lea 0x8(%esp),%esp");
2060 }
2061
2062 static void
2063 i386_emit_sub (void)
2064 {
2065 EMIT_ASM32 (i386_sub,
2066 "subl %eax,(%esp)\n\t"
2067 "sbbl %ebx,4(%esp)\n\t"
2068 "pop %eax\n\t"
2069 "pop %ebx\n\t");
2070 }
2071
2072 static void
2073 i386_emit_mul (void)
2074 {
2075 emit_error = 1;
2076 }
2077
2078 static void
2079 i386_emit_lsh (void)
2080 {
2081 emit_error = 1;
2082 }
2083
2084 static void
2085 i386_emit_rsh_signed (void)
2086 {
2087 emit_error = 1;
2088 }
2089
2090 static void
2091 i386_emit_rsh_unsigned (void)
2092 {
2093 emit_error = 1;
2094 }
2095
2096 static void
2097 i386_emit_ext (int arg)
2098 {
2099 switch (arg)
2100 {
2101 case 8:
2102 EMIT_ASM32 (i386_ext_8,
2103 "cbtw\n\t"
2104 "cwtl\n\t"
2105 "movl %eax,%ebx\n\t"
2106 "sarl $31,%ebx");
2107 break;
2108 case 16:
2109 EMIT_ASM32 (i386_ext_16,
2110 "cwtl\n\t"
2111 "movl %eax,%ebx\n\t"
2112 "sarl $31,%ebx");
2113 break;
2114 case 32:
2115 EMIT_ASM32 (i386_ext_32,
2116 "movl %eax,%ebx\n\t"
2117 "sarl $31,%ebx");
2118 break;
2119 default:
2120 emit_error = 1;
2121 }
2122 }
2123
2124 static void
2125 i386_emit_log_not (void)
2126 {
2127 EMIT_ASM32 (i386_log_not,
2128 "or %ebx,%eax\n\t"
2129 "test %eax,%eax\n\t"
2130 "sete %cl\n\t"
2131 "xor %ebx,%ebx\n\t"
2132 "movzbl %cl,%eax");
2133 }
2134
2135 static void
2136 i386_emit_bit_and (void)
2137 {
2138 EMIT_ASM32 (i386_and,
2139 "and (%esp),%eax\n\t"
2140 "and 0x4(%esp),%ebx\n\t"
2141 "lea 0x8(%esp),%esp");
2142 }
2143
2144 static void
2145 i386_emit_bit_or (void)
2146 {
2147 EMIT_ASM32 (i386_or,
2148 "or (%esp),%eax\n\t"
2149 "or 0x4(%esp),%ebx\n\t"
2150 "lea 0x8(%esp),%esp");
2151 }
2152
2153 static void
2154 i386_emit_bit_xor (void)
2155 {
2156 EMIT_ASM32 (i386_xor,
2157 "xor (%esp),%eax\n\t"
2158 "xor 0x4(%esp),%ebx\n\t"
2159 "lea 0x8(%esp),%esp");
2160 }
2161
2162 static void
2163 i386_emit_bit_not (void)
2164 {
2165 EMIT_ASM32 (i386_bit_not,
2166 "xor $0xffffffff,%eax\n\t"
2167 "xor $0xffffffff,%ebx\n\t");
2168 }
2169
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* When the high words tie, the low words must be
		 compared unsigned.  */
	      "jb .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

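/* Replace the address on top of the stack with the SIZE-byte value
   it points to.  For sizes below 8 only the low part of %eax is
   written; callers are expected to follow a ref with an ext or
   zero_ext operation to clean up the rest of the pair.  */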
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

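/* Consume the top of stack and branch if its 64-bit value was
   non-zero.  The jne is emitted by hand with a zeroed rel32
   displacement for i386_write_goto_address to patch later;
   *OFFSET_P and *SIZE_P describe where that displacement lives.  */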
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump.  */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* The jne's displacement starts 11 bytes in:
		       mov (2) + or (2) + pop (1) + pop (1) + cmpl (3)
		       plus the two-byte 0x0f 0x85 opcode.  Keep this
		       in sync with the sequence above.  */
  if (size_p)
    *size_p = 4;
}

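/* Unconditional jump, emitted as a raw jmp rel32 with a zero
   displacement to be patched later.  */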
static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump.  */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;	/* The rel32 field follows the single 0xe9
			   opcode byte.  */
  if (size_p)
    *size_p = 4;
}

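/* Patch the displacement field of a previously emitted jump: FROM is
   the address of the SIZE-byte field, and the value stored is
   relative to the end of that field, i.e. TO - (FROM + SIZE).  */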
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

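/* Load the 64-bit constant NUM into the %ebx:%eax pair, using the
   shorter xor idiom for the high half when it is zero.  */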
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = (num & 0xffffffff);
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      *((int *) (&buf[i])) = hi;
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

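/* Emit a call to FN as a call rel32; the displacement is relative to
   the end of the 5-byte instruction.  */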
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

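/* Push the value of raw register REG: call the get_raw_reg helper
   with the raw-register block (stashed at 8(%ebp) by the prologue)
   and REG as its two stack arguments, leaving the result in %eax
   with %ebx cleared.  */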
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

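/* Drop the cached top of stack by reloading %ebx:%eax from the next
   entry on the CPU stack.  */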
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

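/* Spill the cached %ebx:%eax pair onto the CPU stack, e.g. ahead of
   loading a new top of stack.  */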
static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

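/* Zero-extend the low ARG bits of the top of stack: mask %eax as
   needed and clear the high half in %ebx.  */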
static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

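/* Exchange the cached top of stack with the entry below it.  */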
static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

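/* Discard N entries (N * 8 bytes) from the CPU stack.  The lea
   encoding used below (0x8d 0x64 0x24 disp8) carries a single signed
   byte, so this assumes N * 8 fits in 8 bits.  */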
static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  Call FN with ARG1 as its
   argument and leave the 64-bit result as the new top of stack in
   %ebx:%eax.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      /* The 64-bit result comes back in %edx:%eax; move the
		 high half into %ebx where the stack convention wants
		 it.  */
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Call FN with ARG1 and
   the current top of stack as its arguments; the value stack is left
   unchanged.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume the function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2
  };

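/* Return the emit_ops suited to the inferior: the amd64 flavor when
   the 64-bit register set is in use (register_size (0) == 8), the
   i386 flavor otherwise.  */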
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
  {
    x86_arch_setup,
    -1,			/* num_regs; corrected by x86_arch_setup.  */
    NULL,		/* regmap; likewise set up by x86_arch_setup.  */
    NULL,		/* cannot_fetch_register */
    NULL,		/* cannot_store_register */
    x86_get_pc,
    x86_set_pc,
    x86_breakpoint,
    x86_breakpoint_len,
    NULL,		/* breakpoint_reinsert_addr */
    1,			/* decr_pc_after_break */
    x86_breakpoint_at,
    x86_insert_point,
    x86_remove_point,
    x86_stopped_by_watchpoint,
    x86_stopped_data_address,
    /* collect_ptrace_register/supply_ptrace_register are not needed in the
       native i386 case (no registers smaller than an xfer unit), and are not
       used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
    NULL,
    NULL,
    /* Need to fix up i386 siginfo if host is amd64.  */
    x86_siginfo_fixup,
    x86_linux_new_process,
    x86_linux_new_thread,
    x86_linux_prepare_to_resume,
    x86_linux_process_qsupported,
    x86_supports_tracepoints,
    x86_get_thread_area,
    x86_install_fast_tracepoint_jump_pad,
    x86_emit_ops
  };