gdb/gdbserver/
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include <stddef.h>
22 #include <signal.h>
23 #include "server.h"
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "i386-low.h"
27 #include "i386-xstate.h"
28 #include "elf/common.h"
29
30 #include "gdb_proc_service.h"
31
32 /* Defined in auto-generated file i386-linux.c. */
33 void init_registers_i386_linux (void);
34 /* Defined in auto-generated file amd64-linux.c. */
35 void init_registers_amd64_linux (void);
36 /* Defined in auto-generated file i386-avx-linux.c. */
37 void init_registers_i386_avx_linux (void);
38 /* Defined in auto-generated file amd64-avx-linux.c. */
39 void init_registers_amd64_avx_linux (void);
40 /* Defined in auto-generated file i386-mmx-linux.c. */
41 void init_registers_i386_mmx_linux (void);
42
/* Template for a 5-byte `jmp rel32' instruction (opcode 0xe9); the
   32-bit displacement is patched in before the instruction is used.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };

/* Backward compatibility for gdb without XML support.  These canned
   target descriptions are reported when gdb did not announce XML
   register support in qSupported.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
58
59 #include <sys/reg.h>
60 #include <sys/procfs.h>
61 #include <sys/ptrace.h>
62 #include <sys/uio.h>
63
64 #ifndef PTRACE_GETREGSET
65 #define PTRACE_GETREGSET 0x4204
66 #endif
67
68 #ifndef PTRACE_SETREGSET
69 #define PTRACE_SETREGSET 0x4205
70 #endif
71
72
73 #ifndef PTRACE_GET_THREAD_AREA
74 #define PTRACE_GET_THREAD_AREA 25
75 #endif
76
77 /* This definition comes from prctl.h, but some kernels may not have it. */
78 #ifndef PTRACE_ARCH_PRCTL
79 #define PTRACE_ARCH_PRCTL 30
80 #endif
81
82 /* The following definitions come from prctl.h, but may be absent
83 for certain configurations. */
84 #ifndef ARCH_GET_FS
85 #define ARCH_SET_GS 0x1001
86 #define ARCH_SET_FS 0x1002
87 #define ARCH_GET_FS 0x1003
88 #define ARCH_GET_GS 0x1004
89 #endif
90
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Process-wide mirror of the debug registers; the hardware registers
     of each thread are synced from this lazily (see
     x86_linux_prepare_to_resume).  */
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
105
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

/* Byte offsets into `struct user' for each register in GDB's amd64
   order; -1 marks registers with no `struct user' slot (these are
   transferred through other regsets instead).  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
156 \f
157 /* Called by libthread_db. */
158
159 ps_err_e
160 ps_get_thread_area (const struct ps_prochandle *ph,
161 lwpid_t lwpid, int idx, void **base)
162 {
163 #ifdef __x86_64__
164 int use_64bit = register_size (0) == 8;
165
166 if (use_64bit)
167 {
168 switch (idx)
169 {
170 case FS:
171 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
172 return PS_OK;
173 break;
174 case GS:
175 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
176 return PS_OK;
177 break;
178 default:
179 return PS_BADADDR;
180 }
181 return PS_ERR;
182 }
183 #endif
184
185 {
186 unsigned int desc[4];
187
188 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
189 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
190 return PS_ERR;
191
192 *(int *)base = desc[1];
193 return PS_OK;
194 }
195 }
196
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      /* 64-bit inferior: the TLS base lives in the FS segment base.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  /* 32-bit inferior: derive the GDT index from the GS selector and
     ask the kernel for that descriptor.  */
  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    /* Strip the selector's low 3 bits (RPL/TI) to get the GDT index.  */
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] holds the descriptor's base address.  */
    *addr = desc[1];
    return 0;
  }
}
241
242
243 \f
/* Registers at or beyond I386_NUM_REGS have no slot in the
   `struct user' regmap, so they cannot be written individually.  */

static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

/* Likewise for reading registers individually.  */

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}
255
/* Fill BUF (an elf_gregset_t image) with the general-purpose
   registers from REGCACHE.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  /* 64-bit inferior: use the amd64 map; -1 entries have no slot in
     this regset and are skipped.  */
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not part of the regmap; locate it by name.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}

/* Inverse of x86_fill_gregset: load the general-purpose registers
   from BUF into REGCACHE.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
299
/* Fill BUF with the FP registers from REGCACHE, in the layout of
   elf_fpregset_t: fxsave format on amd64, fsave format on i386.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

/* Inverse of x86_fill_fpregset: load REGCACHE from BUF.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

/* FPX (fxsave-format) regset handlers, only built for 32-bit hosts.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

/* XSAVE-format extended state, transferred with
   PTRACE_GETREGSET/NT_X86_XSTATE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
347
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

/* Table of register sets and the ptrace requests that transfer them.
   A size of 0 disables an entry at runtime; the XSTATE entry's size
   is filled in by x86_linux_update_xmltarget once XCR0 is known.  */
struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
378
379 static CORE_ADDR
380 x86_get_pc (struct regcache *regcache)
381 {
382 int use_64bit = register_size (0) == 8;
383
384 if (use_64bit)
385 {
386 unsigned long pc;
387 collect_register_by_name (regcache, "rip", &pc);
388 return (CORE_ADDR) pc;
389 }
390 else
391 {
392 unsigned int pc;
393 collect_register_by_name (regcache, "eip", &pc);
394 return (CORE_ADDR) pc;
395 }
396 }
397
398 static void
399 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
400 {
401 int use_64bit = register_size (0) == 8;
402
403 if (use_64bit)
404 {
405 unsigned long newpc = pc;
406 supply_register_by_name (regcache, "rip", &newpc);
407 }
408 else
409 {
410 unsigned int newpc = pc;
411 supply_register_by_name (regcache, "eip", &newpc);
412 }
413 }
414 \f
415 static const unsigned char x86_breakpoint[] = { 0xCC };
416 #define x86_breakpoint_len 1
417
418 static int
419 x86_breakpoint_at (CORE_ADDR pc)
420 {
421 unsigned char c;
422
423 (*the_target->read_memory) (pc, &c, 1);
424 if (c == 0xCC)
425 return 1;
426
427 return 0;
428 }
429 \f
/* Support for debug registers.  */

/* Read debug register REGNUM of the lwp in PTID via PTRACE_PEEKUSER
   from the `struct user' u_debugreg array.  Calls error on failure.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  /* PEEKUSER returns the value directly, so errno must be cleared
     beforehand to distinguish a failure from a legitimate -1.  */
  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

/* Write VALUE to debug register REGNUM of the lwp in PTID via
   PTRACE_POKEUSER.  Calls error on failure.  */

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
462
463 /* Update the inferior's debug register REGNUM from STATE. */
464
465 void
466 i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
467 {
468 struct inferior_list_entry *lp;
469 CORE_ADDR addr;
470 /* Only need to update the threads of this process. */
471 int pid = pid_of (get_thread_lwp (current_inferior));
472
473 if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
474 fatal ("Invalid debug register %d", regnum);
475
476 addr = state->dr_mirror[regnum];
477
478 for (lp = all_lwps.head; lp; lp = lp->next)
479 {
480 struct lwp_info *lwp = (struct lwp_info *) lp;
481
482 /* The actual update is done later, we just mark that the register
483 needs updating. */
484 if (pid_of (lwp) == pid)
485 lwp->arch_private->debug_registers_changed = 1;
486 }
487 }
488
489 /* Update the inferior's DR7 debug control register from STATE. */
490
491 void
492 i386_dr_low_set_control (const struct i386_debug_reg_state *state)
493 {
494 struct inferior_list_entry *lp;
495 /* Only need to update the threads of this process. */
496 int pid = pid_of (get_thread_lwp (current_inferior));
497
498 for (lp = all_lwps.head; lp; lp = lp->next)
499 {
500 struct lwp_info *lwp = (struct lwp_info *) lp;
501
502 /* The actual update is done later, we just mark that the register
503 needs updating. */
504 if (pid_of (lwp) == pid)
505 lwp->arch_private->debug_registers_changed = 1;
506 }
507 }
508
509 /* Get the value of the DR6 debug status register from the inferior
510 and record it in STATE. */
511
512 void
513 i386_dr_low_get_status (struct i386_debug_reg_state *state)
514 {
515 struct lwp_info *lwp = get_thread_lwp (current_inferior);
516 ptid_t ptid = ptid_of (lwp);
517
518 state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
519 }
520 \f
521 /* Watchpoint support. */
522
523 static int
524 x86_insert_point (char type, CORE_ADDR addr, int len)
525 {
526 struct process_info *proc = current_process ();
527 switch (type)
528 {
529 case '0':
530 return set_gdb_breakpoint_at (addr);
531 case '2':
532 case '3':
533 case '4':
534 return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
535 type, addr, len);
536 default:
537 /* Unsupported. */
538 return 1;
539 }
540 }
541
542 static int
543 x86_remove_point (char type, CORE_ADDR addr, int len)
544 {
545 struct process_info *proc = current_process ();
546 switch (type)
547 {
548 case '0':
549 return delete_gdb_breakpoint_at (addr);
550 case '2':
551 case '3':
552 case '4':
553 return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
554 type, addr, len);
555 default:
556 /* Unsupported. */
557 return 1;
558 }
559 }
560
561 static int
562 x86_stopped_by_watchpoint (void)
563 {
564 struct process_info *proc = current_process ();
565 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
566 }
567
568 static CORE_ADDR
569 x86_stopped_data_address (void)
570 {
571 struct process_info *proc = current_process ();
572 CORE_ADDR addr;
573 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
574 &addr))
575 return addr;
576 return 0;
577 }
578 \f
579 /* Called when a new process is created. */
580
581 static struct arch_process_info *
582 x86_linux_new_process (void)
583 {
584 struct arch_process_info *info = xcalloc (1, sizeof (*info));
585
586 i386_low_init_dregs (&info->debug_reg_state);
587
588 return info;
589 }
590
591 /* Called when a new thread is detected. */
592
593 static struct arch_lwp_info *
594 x86_linux_new_thread (void)
595 {
596 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
597
598 info->debug_registers_changed = 1;
599
600 return info;
601 }
602
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      /* The authoritative values live in the process-wide mirror.  */
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      /* Write the address registers DR0..DR3 first, then DR7, so the
	 addresses are in place before the control bits enable them.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  /* Clear DR6 so a stale watchpoint hit is not reported again.  */
  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
629 \f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

/* 32-bit counterparts of the basic types appearing inside siginfo.  */
typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

/* Layout-compatible image of the 32-bit userspace siginfo_t.  Only
   one member of _sifields is live at a time, selected by
   si_code/si_signo.  */
typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* Pad the union to the fixed 128-byte siginfo size.  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* Accessors for the compat layout, mirroring the glibc si_*
   convenience macros.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
739
/* Convert the native 64-bit siginfo FROM into the 32-bit compat
   layout TO.  Which union member is copied depends on
   si_code/si_signo.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  /* Negative si_code: queued signal; only the value pointer is
     meaningful.  */
  if (to->si_code < 0)
    {
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      /* Otherwise the live member is selected by the signal number.  */
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* Pointer narrowed to 32 bits for the compat layout.  */
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
793
/* Inverse of compat_siginfo_from_siginfo: convert the 32-bit compat
   siginfo FROM into the native 64-bit layout TO.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  /* Negative si_code: queued signal; only the value pointer is
     meaningful.  */
  if (to->si_code < 0)
    {
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      /* Otherwise the live member is selected by the signal number.  */
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* 32-bit address zero-extended back into a pointer.  */
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
847
848 #endif /* __x86_64__ */
849
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      /* The conversion is in-place, so both layouts must occupy
	 exactly the same number of bytes.  */
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  /* 64-bit inferior (or 32-bit build): layouts already match.  */
  return 0;
}
877 \f
878 static int use_xml;
879
/* Update gdbserver_xmltarget.  Select the register description to
   report, based on whether gdb announced XML support, whether the
   kernel supports PTRACE_GETFPXREGS / PTRACE_GETREGSET, and the
   inferior's XCR0 feature mask.  Also adjusts target_regsets sizes
   to match the chosen description.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  /* Probe results are cached across calls in these statics.  */
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  /* 8 xmm registers means a 32-bit inferior under a 64-bit server.  */
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    /* Probe once whether the kernel supports PTRACE_GETFPXREGS.  */
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		{
		  regset->size = 0;
		  break;
		}
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	/* No FXSAVE support: fall back to the x87/MMX-only
	   description.  */
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      /* Without XML, gdb cannot be told about AVX state; limit the
	 reported feature mask to SSE.  */
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  Size the XSTATE
	 entry from XCR0 and disable the redundant FP/FPX regsets.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else
	    init_registers_amd64_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}
1009
1010 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1011 PTRACE_GETREGSET. */
1012
1013 static void
1014 x86_linux_process_qsupported (const char *query)
1015 {
1016 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1017 with "i386" in qSupported query, it supports x86 XML target
1018 descriptions. */
1019 use_xml = 0;
1020 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1021 {
1022 char *copy = xstrdup (query + 13);
1023 char *p;
1024
1025 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1026 {
1027 if (strcmp (p, "i386") == 0)
1028 {
1029 use_xml = 1;
1030 break;
1031 }
1032 }
1033
1034 free (copy);
1035 }
1036
1037 x86_linux_update_xmltarget ();
1038 }
1039
/* Initialize gdbserver for the architecture of the inferior.
   Decides 32- vs 64-bit by inspecting the inferior's executable and
   installs the matching register tables in the_low_target.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
1086
/* Tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1092
/* Write the LEN bytes in BUF into the inferior at *TO, then advance
   *TO past them, so successive calls emit code sequentially.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1099
/* Decode OP, a string of space-separated hexadecimal byte values
   (e.g. "48 89 e6"), into raw bytes stored at BUF.  Returns the
   number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;
  char *next;

  for (;;)
    {
      unsigned long byte = strtoul (op, &next, 16);

      /* No more digits consumed: the string is exhausted.  */
      if (next == op)
	break;

      *out++ = byte;
      op = next;
    }

  return out - buf;
}
1119
1120 #ifdef __x86_64__
1121
1122 /* Build a jump pad that saves registers and calls a collection
1123 function. Writes a jump instruction to the jump pad to
1124 JJUMPAD_INSN. The caller is responsible to write it in at the
1125 tracepoint address. */
1126
1127 static int
1128 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1129 CORE_ADDR collector,
1130 CORE_ADDR lockaddr,
1131 ULONGEST orig_size,
1132 CORE_ADDR *jump_entry,
1133 unsigned char *jjump_pad_insn,
1134 ULONGEST *jjump_pad_insn_size,
1135 CORE_ADDR *adjusted_insn_addr,
1136 CORE_ADDR *adjusted_insn_addr_end)
1137 {
1138 unsigned char buf[40];
1139 int i, offset;
1140 CORE_ADDR buildaddr = *jump_entry;
1141
1142 /* Build the jump pad. */
1143
1144 /* First, do tracepoint data collection. Save registers. */
1145 i = 0;
1146 /* Need to ensure stack pointer saved first. */
1147 buf[i++] = 0x54; /* push %rsp */
1148 buf[i++] = 0x55; /* push %rbp */
1149 buf[i++] = 0x57; /* push %rdi */
1150 buf[i++] = 0x56; /* push %rsi */
1151 buf[i++] = 0x52; /* push %rdx */
1152 buf[i++] = 0x51; /* push %rcx */
1153 buf[i++] = 0x53; /* push %rbx */
1154 buf[i++] = 0x50; /* push %rax */
1155 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1156 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1157 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1158 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1159 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1160 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1161 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1162 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1163 buf[i++] = 0x9c; /* pushfq */
1164 buf[i++] = 0x48; /* movl <addr>,%rdi */
1165 buf[i++] = 0xbf;
1166 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1167 i += sizeof (unsigned long);
1168 buf[i++] = 0x57; /* push %rdi */
1169 append_insns (&buildaddr, i, buf);
1170
1171 /* Stack space for the collecting_t object. */
1172 i = 0;
1173 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1174 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1175 memcpy (buf + i, &tpoint, 8);
1176 i += 8;
1177 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1178 i += push_opcode (&buf[i],
1179 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1180 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1181 append_insns (&buildaddr, i, buf);
1182
1183 /* spin-lock. */
1184 i = 0;
1185 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1186 memcpy (&buf[i], (void *) &lockaddr, 8);
1187 i += 8;
1188 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1189 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1190 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1191 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1192 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1193 append_insns (&buildaddr, i, buf);
1194
1195 /* Set up the gdb_collect call. */
1196 /* At this point, (stack pointer + 0x18) is the base of our saved
1197 register block. */
1198
1199 i = 0;
1200 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1201 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1202
1203 /* tpoint address may be 64-bit wide. */
1204 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1205 memcpy (buf + i, &tpoint, 8);
1206 i += 8;
1207 append_insns (&buildaddr, i, buf);
1208
1209 /* The collector function being in the shared library, may be
1210 >31-bits away off the jump pad. */
1211 i = 0;
1212 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1213 memcpy (buf + i, &collector, 8);
1214 i += 8;
1215 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1216 append_insns (&buildaddr, i, buf);
1217
1218 /* Clear the spin-lock. */
1219 i = 0;
1220 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1221 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1222 memcpy (buf + i, &lockaddr, 8);
1223 i += 8;
1224 append_insns (&buildaddr, i, buf);
1225
1226 /* Remove stack that had been used for the collect_t object. */
1227 i = 0;
1228 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1229 append_insns (&buildaddr, i, buf);
1230
1231 /* Restore register state. */
1232 i = 0;
1233 buf[i++] = 0x48; /* add $0x8,%rsp */
1234 buf[i++] = 0x83;
1235 buf[i++] = 0xc4;
1236 buf[i++] = 0x08;
1237 buf[i++] = 0x9d; /* popfq */
1238 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1239 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1240 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1241 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1242 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1243 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1244 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1245 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1246 buf[i++] = 0x58; /* pop %rax */
1247 buf[i++] = 0x5b; /* pop %rbx */
1248 buf[i++] = 0x59; /* pop %rcx */
1249 buf[i++] = 0x5a; /* pop %rdx */
1250 buf[i++] = 0x5e; /* pop %rsi */
1251 buf[i++] = 0x5f; /* pop %rdi */
1252 buf[i++] = 0x5d; /* pop %rbp */
1253 buf[i++] = 0x5c; /* pop %rsp */
1254 append_insns (&buildaddr, i, buf);
1255
1256 /* Now, adjust the original instruction to execute in the jump
1257 pad. */
1258 *adjusted_insn_addr = buildaddr;
1259 relocate_instruction (&buildaddr, tpaddr);
1260 *adjusted_insn_addr_end = buildaddr;
1261
1262 /* Finally, write a jump back to the program. */
1263 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1264 memcpy (buf, jump_insn, sizeof (jump_insn));
1265 memcpy (buf + 1, &offset, 4);
1266 append_insns (&buildaddr, sizeof (jump_insn), buf);
1267
1268 /* The jump pad is now built. Wire in a jump to our jump pad. This
1269 is always done last (by our caller actually), so that we can
1270 install fast tracepoints with threads running. This relies on
1271 the agent's atomic write support. */
1272 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1273 memcpy (buf, jump_insn, sizeof (jump_insn));
1274 memcpy (buf + 1, &offset, 4);
1275 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1276 *jjump_pad_insn_size = sizeof (jump_insn);
1277
1278 /* Return the end address of our pad. */
1279 *jump_entry = buildaddr;
1280
1281 return 0;
1282 }
1283
1284 #endif /* __x86_64__ */
1285
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the address of the tracepoint object passed to the
   collector; TPADDR is the address the tracepoint is set at (pushed
   as the saved $pc).  COLLECTOR is the address of the collection
   function the pad calls; LOCKADDR is the address of the word used as
   a spin-lock around collection.  ORIG_SIZE is the size of the
   instruction being displaced (the pad jumps back to
   TPADDR + ORIG_SIZE).  On entry *JUMP_ENTRY is where the pad is
   built; on exit it holds the end address of the pad.
   *ADJUSTED_INSN_ADDR / *ADJUSTED_INSN_ADDR_END bracket the relocated
   copy of the original instruction inside the pad.  Returns 0.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object: tracepoint pointer at (%esp), thread area base
     (read from %gs:0x0) at 0x4(%esp).  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  /* Loop until the lock word was 0 (cmpxchg stored %esp and left
     %eax == 0); -0xe (0xf2) jumps back to the xor above.  */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call: pass the address of the
     register save block (current %esp + 8, skipping the collecting_t
     object) as the second argument.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8;		/* call <reladdr> */
  /* sizeof (jump_insn) is 5, which happens to also be the length of
     this e8 call instruction, so the relative offset is computed from
     the correct end-of-instruction address.  */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83;		/* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the save sequence above in
     reverse.  */
  i = 0;
  buf[i++] = 0x83;	/* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17;	/* pop %ss */
  buf[i++] = 0x0f;	/* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f;	/* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07;	/* pop %es */
  buf[i++] = 0x1f;	/* pop %ds */
  buf[i++] = 0x9d;	/* popf */
  buf[i++] = 0x83;	/* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61;	/* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1441
1442 static int
1443 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1444 CORE_ADDR collector,
1445 CORE_ADDR lockaddr,
1446 ULONGEST orig_size,
1447 CORE_ADDR *jump_entry,
1448 unsigned char *jjump_pad_insn,
1449 ULONGEST *jjump_pad_insn_size,
1450 CORE_ADDR *adjusted_insn_addr,
1451 CORE_ADDR *adjusted_insn_addr_end)
1452 {
1453 #ifdef __x86_64__
1454 if (register_size (0) == 8)
1455 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1456 collector, lockaddr,
1457 orig_size, jump_entry,
1458 jjump_pad_insn,
1459 jjump_pad_insn_size,
1460 adjusted_insn_addr,
1461 adjusted_insn_addr_end);
1462 #endif
1463
1464 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1465 collector, lockaddr,
1466 orig_size, jump_entry,
1467 jjump_pad_insn,
1468 jjump_pad_insn_size,
1469 adjusted_insn_addr,
1470 adjusted_insn_addr_end);
1471 }
1472
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.

   NOTE(review): the per-slot comments below are positional guesses
   keyed on the initializer values; confirm against the field order of
   struct linux_target_ops in linux-low.h.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,			/* placeholder (likely register count) — fixed up
			   by x86_arch_setup per the comment above */
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,	/* breakpoint instruction bytes */
  x86_breakpoint_len,	/* length of the breakpoint instruction */
  NULL,
  1,			/* presumably PC decrement after a break — the
			   x86 int3 leaves PC one past the trap */
  x86_breakpoint_at,
  x86_insert_point,	/* hardware break-/watchpoint insertion */
  x86_remove_point,	/* hardware break-/watchpoint removal */
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad
};
This page took 0.061892 seconds and 4 git commands to generate.