1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2019 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "common/agent.h"
24 #include "common/rsp-low.h"
25 #include "common/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "common/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
34 #include <sys/ioctl.h>
37 #include <sys/syscall.h>
41 #include <sys/types.h>
46 #include "common/filestuff.h"
47 #include "tracepoint.h"
50 #include "common/common-inferior.h"
51 #include "nat/fork-inferior.h"
52 #include "common/environ.h"
53 #include "common/scoped_restore.h"
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
61 #include "nat/linux-namespaces.h"
64 #define SPUFS_MAGIC 0x23c9b64e
67 #ifdef HAVE_PERSONALITY
68 # include <sys/personality.h>
69 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
70 # define ADDR_NO_RANDOMIZE 0x0040000
82 /* Some targets did not define these ptrace constants from the start,
83 so gdbserver defines them locally here. In the future, these may
84 be removed after they are added to asm/ptrace.h. */
85 #if !(defined(PT_TEXT_ADDR) \
86 || defined(PT_DATA_ADDR) \
87 || defined(PT_TEXT_END_ADDR))
88 #if defined(__mcoldfire__)
89 /* These are still undefined in 3.10 kernels. */
90 #define PT_TEXT_ADDR 49*4
91 #define PT_DATA_ADDR 50*4
92 #define PT_TEXT_END_ADDR 51*4
93 /* BFIN already defines these since at least 2.6.32 kernels. */
95 #define PT_TEXT_ADDR 220
96 #define PT_TEXT_END_ADDR 224
97 #define PT_DATA_ADDR 228
98 /* These are still undefined in 3.10 kernels. */
99 #elif defined(__TMS320C6X__)
100 #define PT_TEXT_ADDR (0x10000*4)
101 #define PT_DATA_ADDR (0x10004*4)
102 #define PT_TEXT_END_ADDR (0x10008*4)
106 #ifdef HAVE_LINUX_BTRACE
107 # include "nat/linux-btrace.h"
108 # include "common/btrace-common.h"
111 #ifndef HAVE_ELF32_AUXV_T
112 /* Copied from glibc's elf.h. */
115 uint32_t a_type
; /* Entry type */
118 uint32_t a_val
; /* Integer value */
119 /* We used to have pointer elements added here.  We cannot do that,
120 though, since it does not work when using 32-bit definitions
121 on 64-bit platforms and vice versa. */
126 #ifndef HAVE_ELF64_AUXV_T
127 /* Copied from glibc's elf.h. */
130 uint64_t a_type
; /* Entry type */
133 uint64_t a_val
; /* Integer value */
134 /* We used to have pointer elements added here.  We cannot do that,
135 though, since it does not work when using 32-bit definitions
136 on 64-bit platforms and vice versa. */
141 /* Does the current host support PTRACE_GETREGSET? */
142 int have_ptrace_getregset
= -1;
146 /* See nat/linux-nat.h. */
149 ptid_of_lwp (struct lwp_info
*lwp
)
151 return ptid_of (get_lwp_thread (lwp
));
154 /* See nat/linux-nat.h. */
157 lwp_set_arch_private_info (struct lwp_info
*lwp
,
158 struct arch_lwp_info
*info
)
160 lwp
->arch_private
= info
;
163 /* See nat/linux-nat.h. */
165 struct arch_lwp_info
*
166 lwp_arch_private_info (struct lwp_info
*lwp
)
168 return lwp
->arch_private
;
171 /* See nat/linux-nat.h. */
174 lwp_is_stopped (struct lwp_info
*lwp
)
179 /* See nat/linux-nat.h. */
181 enum target_stop_reason
182 lwp_stop_reason (struct lwp_info
*lwp
)
184 return lwp
->stop_reason
;
187 /* See nat/linux-nat.h. */
190 lwp_is_stepping (struct lwp_info
*lwp
)
192 return lwp
->stepping
;
195 /* A list of all unknown processes which receive stop signals. Some
196 other process will presumably claim each of these as forked
197 children momentarily. */
199 struct simple_pid_list
201 /* The process ID. */
204 /* The status as reported by waitpid. */
208 struct simple_pid_list
*next
;
210 struct simple_pid_list
*stopped_pids
;
212 /* Trivial list manipulation functions to keep track of a list of new
213 stopped processes. */
216 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
218 struct simple_pid_list
*new_pid
= XNEW (struct simple_pid_list
);
221 new_pid
->status
= status
;
222 new_pid
->next
= *listp
;
227 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
229 struct simple_pid_list
**p
;
231 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
232 if ((*p
)->pid
== pid
)
234 struct simple_pid_list
*next
= (*p
)->next
;
236 *statusp
= (*p
)->status
;
244 enum stopping_threads_kind
246 /* Not stopping threads presently. */
247 NOT_STOPPING_THREADS
,
249 /* Stopping threads. */
252 /* Stopping and suspending threads. */
253 STOPPING_AND_SUSPENDING_THREADS
256 /* This is set while stop_all_lwps is in effect. */
257 enum stopping_threads_kind stopping_threads
= NOT_STOPPING_THREADS
;
259 /* FIXME make into a target method? */
260 int using_threads
= 1;
262 /* True if we're presently stabilizing threads (moving them out of
264 static int stabilizing_threads
;
266 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
267 int step
, int signal
, siginfo_t
*info
);
268 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
269 static void stop_all_lwps (int suspend
, struct lwp_info
*except
);
270 static void unstop_all_lwps (int unsuspend
, struct lwp_info
*except
);
271 static void unsuspend_all_lwps (struct lwp_info
*except
);
272 static int linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
273 int *wstat
, int options
);
274 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
275 static struct lwp_info
*add_lwp (ptid_t ptid
);
276 static void linux_mourn (struct process_info
*process
);
277 static int linux_stopped_by_watchpoint (void);
278 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
279 static int lwp_is_marked_dead (struct lwp_info
*lwp
);
280 static void proceed_all_lwps (void);
281 static int finish_step_over (struct lwp_info
*lwp
);
282 static int kill_lwp (unsigned long lwpid
, int signo
);
283 static void enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
);
284 static void complete_ongoing_step_over (void);
285 static int linux_low_ptrace_options (int attached
);
286 static int check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
);
287 static void proceed_one_lwp (thread_info
*thread
, lwp_info
*except
);
289 /* When the event-loop is doing a step-over, this points at the thread
291 ptid_t step_over_bkpt
;
293 /* True if the low target can hardware single-step. */
296 can_hardware_single_step (void)
298 if (the_low_target
.supports_hardware_single_step
!= NULL
)
299 return the_low_target
.supports_hardware_single_step ();
304 /* True if the low target can software single-step. Such targets
305 implement the GET_NEXT_PCS callback. */
308 can_software_single_step (void)
310 return (the_low_target
.get_next_pcs
!= NULL
);
313 /* True if the low target supports memory breakpoints. If so, we'll
314 have a GET_PC implementation. */
317 supports_breakpoints (void)
319 return (the_low_target
.get_pc
!= NULL
);
322 /* Returns true if this target can support fast tracepoints. This
323 does not mean that the in-process agent has been loaded in the
327 supports_fast_tracepoints (void)
329 return the_low_target
.install_fast_tracepoint_jump_pad
!= NULL
;
332 /* True if LWP is stopped in its stepping range. */
335 lwp_in_step_range (struct lwp_info
*lwp
)
337 CORE_ADDR pc
= lwp
->stop_pc
;
339 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
342 struct pending_signals
346 struct pending_signals
*prev
;
349 /* The read/write ends of the pipe registered as waitable file in the
351 static int linux_event_pipe
[2] = { -1, -1 };
353 /* True if we're currently in async mode. */
354 #define target_is_async_p() (linux_event_pipe[0] != -1)
356 static void send_sigstop (struct lwp_info
*lwp
);
357 static void wait_for_sigstop (void);
/* Return non-zero if HEADER is a 64-bit ELF file.  On a valid ELF
   header, store the ELF machine number in *MACHINE and return whether
   the class is ELFCLASS64.  If HEADER is not an ELF header at all,
   store EM_NONE in *MACHINE and return -1, instead of falling off the
   end of the function with no return value.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  /* The magic bytes don't match: not an ELF file.  */
  *machine = EM_NONE;
  return -1;
}
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A file shorter than an ELF header cannot be a valid ELF file.
     Close the descriptor on every exit path so it doesn't leak.  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return -1;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
401 /* Accepts an integer PID; Returns true if the executable PID is
402 running is a 64-bit ELF file.. */
405 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
409 sprintf (file
, "/proc/%d/exe", pid
);
410 return elf_64_file_p (file
, machine
);
414 delete_lwp (struct lwp_info
*lwp
)
416 struct thread_info
*thr
= get_lwp_thread (lwp
);
419 debug_printf ("deleting %ld\n", lwpid_of (thr
));
423 if (the_low_target
.delete_thread
!= NULL
)
424 the_low_target
.delete_thread (lwp
->arch_private
);
426 gdb_assert (lwp
->arch_private
== NULL
);
431 /* Add a process to the common process list, and set its private
434 static struct process_info
*
435 linux_add_process (int pid
, int attached
)
437 struct process_info
*proc
;
439 proc
= add_process (pid
, attached
);
440 proc
->priv
= XCNEW (struct process_info_private
);
442 if (the_low_target
.new_process
!= NULL
)
443 proc
->priv
->arch_private
= the_low_target
.new_process ();
448 static CORE_ADDR
get_pc (struct lwp_info
*lwp
);
450 /* Call the target arch_setup function on the current thread. */
453 linux_arch_setup (void)
455 the_low_target
.arch_setup ();
458 /* Call the target arch_setup function on THREAD. */
461 linux_arch_setup_thread (struct thread_info
*thread
)
463 struct thread_info
*saved_thread
;
465 saved_thread
= current_thread
;
466 current_thread
= thread
;
470 current_thread
= saved_thread
;
473 /* Handle a GNU/Linux extended wait response. If we see a clone,
474 fork, or vfork event, we need to add the new LWP to our list
475 (and return 0 so as not to report the trap to higher layers).
476 If we see an exec event, we will modify ORIG_EVENT_LWP to point
477 to a new LWP representing the new program. */
480 handle_extended_wait (struct lwp_info
**orig_event_lwp
, int wstat
)
482 client_state
&cs
= get_client_state ();
483 struct lwp_info
*event_lwp
= *orig_event_lwp
;
484 int event
= linux_ptrace_get_extended_event (wstat
);
485 struct thread_info
*event_thr
= get_lwp_thread (event_lwp
);
486 struct lwp_info
*new_lwp
;
488 gdb_assert (event_lwp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
);
490 /* All extended events we currently use are mid-syscall. Only
491 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
492 you have to be using PTRACE_SEIZE to get that. */
493 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
495 if ((event
== PTRACE_EVENT_FORK
) || (event
== PTRACE_EVENT_VFORK
)
496 || (event
== PTRACE_EVENT_CLONE
))
499 unsigned long new_pid
;
502 /* Get the pid of the new lwp. */
503 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
506 /* If we haven't already seen the new PID stop, wait for it now. */
507 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
509 /* The new child has a pending SIGSTOP. We can't affect it until it
510 hits the SIGSTOP, but we're already attached. */
512 ret
= my_waitpid (new_pid
, &status
, __WALL
);
515 perror_with_name ("waiting for new child");
516 else if (ret
!= new_pid
)
517 warning ("wait returned unexpected PID %d", ret
);
518 else if (!WIFSTOPPED (status
))
519 warning ("wait returned unexpected status 0x%x", status
);
522 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
524 struct process_info
*parent_proc
;
525 struct process_info
*child_proc
;
526 struct lwp_info
*child_lwp
;
527 struct thread_info
*child_thr
;
528 struct target_desc
*tdesc
;
530 ptid
= ptid_t (new_pid
, new_pid
, 0);
534 debug_printf ("HEW: Got fork event from LWP %ld, "
536 ptid_of (event_thr
).lwp (),
540 /* Add the new process to the tables and clone the breakpoint
541 lists of the parent. We need to do this even if the new process
542 will be detached, since we will need the process object and the
543 breakpoints to remove any breakpoints from memory when we
544 detach, and the client side will access registers. */
545 child_proc
= linux_add_process (new_pid
, 0);
546 gdb_assert (child_proc
!= NULL
);
547 child_lwp
= add_lwp (ptid
);
548 gdb_assert (child_lwp
!= NULL
);
549 child_lwp
->stopped
= 1;
550 child_lwp
->must_set_ptrace_flags
= 1;
551 child_lwp
->status_pending_p
= 0;
552 child_thr
= get_lwp_thread (child_lwp
);
553 child_thr
->last_resume_kind
= resume_stop
;
554 child_thr
->last_status
.kind
= TARGET_WAITKIND_STOPPED
;
556 /* If we're suspending all threads, leave this one suspended
557 too. If the fork/clone parent is stepping over a breakpoint,
558 all other threads have been suspended already. Leave the
559 child suspended too. */
560 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
561 || event_lwp
->bp_reinsert
!= 0)
564 debug_printf ("HEW: leaving child suspended\n");
565 child_lwp
->suspended
= 1;
568 parent_proc
= get_thread_process (event_thr
);
569 child_proc
->attached
= parent_proc
->attached
;
571 if (event_lwp
->bp_reinsert
!= 0
572 && can_software_single_step ()
573 && event
== PTRACE_EVENT_VFORK
)
575 /* If we leave single-step breakpoints there, child will
576 hit it, so uninsert single-step breakpoints from parent
577 (and child). Once vfork child is done, reinsert
578 them back to parent. */
579 uninsert_single_step_breakpoints (event_thr
);
582 clone_all_breakpoints (child_thr
, event_thr
);
584 tdesc
= allocate_target_description ();
585 copy_target_description (tdesc
, parent_proc
->tdesc
);
586 child_proc
->tdesc
= tdesc
;
588 /* Clone arch-specific process data. */
589 if (the_low_target
.new_fork
!= NULL
)
590 the_low_target
.new_fork (parent_proc
, child_proc
);
592 /* Save fork info in the parent thread. */
593 if (event
== PTRACE_EVENT_FORK
)
594 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_FORKED
;
595 else if (event
== PTRACE_EVENT_VFORK
)
596 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORKED
;
598 event_lwp
->waitstatus
.value
.related_pid
= ptid
;
600 /* The status_pending field contains bits denoting the
601 extended event, so when the pending event is handled,
602 the handler will look at lwp->waitstatus. */
603 event_lwp
->status_pending_p
= 1;
604 event_lwp
->status_pending
= wstat
;
606 /* Link the threads until the parent event is passed on to
608 event_lwp
->fork_relative
= child_lwp
;
609 child_lwp
->fork_relative
= event_lwp
;
611 /* If the parent thread is doing step-over with single-step
612 breakpoints, the list of single-step breakpoints are cloned
613 from the parent's. Remove them from the child process.
614 In case of vfork, we'll reinsert them back once vforked
616 if (event_lwp
->bp_reinsert
!= 0
617 && can_software_single_step ())
619 /* The child process is forked and stopped, so it is safe
620 to access its memory without stopping all other threads
621 from other processes. */
622 delete_single_step_breakpoints (child_thr
);
624 gdb_assert (has_single_step_breakpoints (event_thr
));
625 gdb_assert (!has_single_step_breakpoints (child_thr
));
628 /* Report the event. */
633 debug_printf ("HEW: Got clone event "
634 "from LWP %ld, new child is LWP %ld\n",
635 lwpid_of (event_thr
), new_pid
);
637 ptid
= ptid_t (pid_of (event_thr
), new_pid
, 0);
638 new_lwp
= add_lwp (ptid
);
640 /* Either we're going to immediately resume the new thread
641 or leave it stopped. linux_resume_one_lwp is a nop if it
642 thinks the thread is currently running, so set this first
643 before calling linux_resume_one_lwp. */
644 new_lwp
->stopped
= 1;
646 /* If we're suspending all threads, leave this one suspended
647 too. If the fork/clone parent is stepping over a breakpoint,
648 all other threads have been suspended already. Leave the
649 child suspended too. */
650 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
651 || event_lwp
->bp_reinsert
!= 0)
652 new_lwp
->suspended
= 1;
654 /* Normally we will get the pending SIGSTOP. But in some cases
655 we might get another signal delivered to the group first.
656 If we do get another signal, be sure not to lose it. */
657 if (WSTOPSIG (status
) != SIGSTOP
)
659 new_lwp
->stop_expected
= 1;
660 new_lwp
->status_pending_p
= 1;
661 new_lwp
->status_pending
= status
;
663 else if (cs
.report_thread_events
)
665 new_lwp
->waitstatus
.kind
= TARGET_WAITKIND_THREAD_CREATED
;
666 new_lwp
->status_pending_p
= 1;
667 new_lwp
->status_pending
= status
;
671 thread_db_notice_clone (event_thr
, ptid
);
674 /* Don't report the event. */
677 else if (event
== PTRACE_EVENT_VFORK_DONE
)
679 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
681 if (event_lwp
->bp_reinsert
!= 0 && can_software_single_step ())
683 reinsert_single_step_breakpoints (event_thr
);
685 gdb_assert (has_single_step_breakpoints (event_thr
));
688 /* Report the event. */
691 else if (event
== PTRACE_EVENT_EXEC
&& cs
.report_exec_events
)
693 struct process_info
*proc
;
694 std::vector
<int> syscalls_to_catch
;
700 debug_printf ("HEW: Got exec event from LWP %ld\n",
701 lwpid_of (event_thr
));
704 /* Get the event ptid. */
705 event_ptid
= ptid_of (event_thr
);
706 event_pid
= event_ptid
.pid ();
708 /* Save the syscall list from the execing process. */
709 proc
= get_thread_process (event_thr
);
710 syscalls_to_catch
= std::move (proc
->syscalls_to_catch
);
712 /* Delete the execing process and all its threads. */
714 current_thread
= NULL
;
716 /* Create a new process/lwp/thread. */
717 proc
= linux_add_process (event_pid
, 0);
718 event_lwp
= add_lwp (event_ptid
);
719 event_thr
= get_lwp_thread (event_lwp
);
720 gdb_assert (current_thread
== event_thr
);
721 linux_arch_setup_thread (event_thr
);
723 /* Set the event status. */
724 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXECD
;
725 event_lwp
->waitstatus
.value
.execd_pathname
726 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr
)));
728 /* Mark the exec status as pending. */
729 event_lwp
->stopped
= 1;
730 event_lwp
->status_pending_p
= 1;
731 event_lwp
->status_pending
= wstat
;
732 event_thr
->last_resume_kind
= resume_continue
;
733 event_thr
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
735 /* Update syscall state in the new lwp, effectively mid-syscall too. */
736 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
738 /* Restore the list to catch. Don't rely on the client, which is free
739 to avoid sending a new list when the architecture doesn't change.
740 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
741 proc
->syscalls_to_catch
= std::move (syscalls_to_catch
);
743 /* Report the event. */
744 *orig_event_lwp
= event_lwp
;
748 internal_error (__FILE__
, __LINE__
, _("unknown ptrace event %d"), event
);
751 /* Return the PC as read from the regcache of LWP, without any
755 get_pc (struct lwp_info
*lwp
)
757 struct thread_info
*saved_thread
;
758 struct regcache
*regcache
;
761 if (the_low_target
.get_pc
== NULL
)
764 saved_thread
= current_thread
;
765 current_thread
= get_lwp_thread (lwp
);
767 regcache
= get_thread_regcache (current_thread
, 1);
768 pc
= (*the_low_target
.get_pc
) (regcache
);
771 debug_printf ("pc is 0x%lx\n", (long) pc
);
773 current_thread
= saved_thread
;
777 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
778 Fill *SYSNO with the syscall nr trapped. */
781 get_syscall_trapinfo (struct lwp_info
*lwp
, int *sysno
)
783 struct thread_info
*saved_thread
;
784 struct regcache
*regcache
;
786 if (the_low_target
.get_syscall_trapinfo
== NULL
)
788 /* If we cannot get the syscall trapinfo, report an unknown
789 system call number. */
790 *sysno
= UNKNOWN_SYSCALL
;
794 saved_thread
= current_thread
;
795 current_thread
= get_lwp_thread (lwp
);
797 regcache
= get_thread_regcache (current_thread
, 1);
798 (*the_low_target
.get_syscall_trapinfo
) (regcache
, sysno
);
801 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno
);
803 current_thread
= saved_thread
;
806 static int check_stopped_by_watchpoint (struct lwp_info
*child
);
808 /* Called when the LWP stopped for a signal/trap. If it stopped for a
809 trap check what caused it (breakpoint, watchpoint, trace, etc.),
810 and save the result in the LWP's stop_reason field. If it stopped
811 for a breakpoint, decrement the PC if necessary on the lwp's
812 architecture. Returns true if we now have the LWP's stop PC. */
815 save_stop_reason (struct lwp_info
*lwp
)
818 CORE_ADDR sw_breakpoint_pc
;
819 struct thread_info
*saved_thread
;
820 #if USE_SIGTRAP_SIGINFO
824 if (the_low_target
.get_pc
== NULL
)
828 sw_breakpoint_pc
= pc
- the_low_target
.decr_pc_after_break
;
830 /* breakpoint_at reads from the current thread. */
831 saved_thread
= current_thread
;
832 current_thread
= get_lwp_thread (lwp
);
834 #if USE_SIGTRAP_SIGINFO
835 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
836 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
838 if (siginfo
.si_signo
== SIGTRAP
)
840 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
)
841 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
843 /* The si_code is ambiguous on this arch -- check debug
845 if (!check_stopped_by_watchpoint (lwp
))
846 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
848 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
))
850 /* If we determine the LWP stopped for a SW breakpoint,
851 trust it. Particularly don't check watchpoint
852 registers, because at least on s390, we'd find
853 stopped-by-watchpoint as long as there's a watchpoint
855 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
857 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
859 /* This can indicate either a hardware breakpoint or
860 hardware watchpoint. Check debug registers. */
861 if (!check_stopped_by_watchpoint (lwp
))
862 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
864 else if (siginfo
.si_code
== TRAP_TRACE
)
866 /* We may have single stepped an instruction that
867 triggered a watchpoint. In that case, on some
868 architectures (such as x86), instead of TRAP_HWBKPT,
869 si_code indicates TRAP_TRACE, and we need to check
870 the debug registers separately. */
871 if (!check_stopped_by_watchpoint (lwp
))
872 lwp
->stop_reason
= TARGET_STOPPED_BY_SINGLE_STEP
;
877 /* We may have just stepped a breakpoint instruction. E.g., in
878 non-stop mode, GDB first tells the thread A to step a range, and
879 then the user inserts a breakpoint inside the range. In that
880 case we need to report the breakpoint PC. */
881 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
882 && (*the_low_target
.breakpoint_at
) (sw_breakpoint_pc
))
883 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
885 if (hardware_breakpoint_inserted_here (pc
))
886 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
888 if (lwp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
)
889 check_stopped_by_watchpoint (lwp
);
892 if (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
896 struct thread_info
*thr
= get_lwp_thread (lwp
);
898 debug_printf ("CSBB: %s stopped by software breakpoint\n",
899 target_pid_to_str (ptid_of (thr
)));
902 /* Back up the PC if necessary. */
903 if (pc
!= sw_breakpoint_pc
)
905 struct regcache
*regcache
906 = get_thread_regcache (current_thread
, 1);
907 (*the_low_target
.set_pc
) (regcache
, sw_breakpoint_pc
);
910 /* Update this so we record the correct stop PC below. */
911 pc
= sw_breakpoint_pc
;
913 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
917 struct thread_info
*thr
= get_lwp_thread (lwp
);
919 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
920 target_pid_to_str (ptid_of (thr
)));
923 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
927 struct thread_info
*thr
= get_lwp_thread (lwp
);
929 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
930 target_pid_to_str (ptid_of (thr
)));
933 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
)
937 struct thread_info
*thr
= get_lwp_thread (lwp
);
939 debug_printf ("CSBB: %s stopped by trace\n",
940 target_pid_to_str (ptid_of (thr
)));
945 current_thread
= saved_thread
;
949 static struct lwp_info
*
950 add_lwp (ptid_t ptid
)
952 struct lwp_info
*lwp
;
954 lwp
= XCNEW (struct lwp_info
);
956 lwp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
958 lwp
->thread
= add_thread (ptid
, lwp
);
960 if (the_low_target
.new_thread
!= NULL
)
961 the_low_target
.new_thread (lwp
);
966 /* Callback to be used when calling fork_inferior, responsible for
967 actually initiating the tracing of the inferior. */
972 if (ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0,
973 (PTRACE_TYPE_ARG4
) 0) < 0)
974 trace_start_error_with_name ("ptrace");
976 if (setpgid (0, 0) < 0)
977 trace_start_error_with_name ("setpgid");
979 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
980 stdout to stderr so that inferior i/o doesn't corrupt the connection.
981 Also, redirect stdin to /dev/null. */
982 if (remote_connection_is_stdio ())
985 trace_start_error_with_name ("close");
986 if (open ("/dev/null", O_RDONLY
) < 0)
987 trace_start_error_with_name ("open");
989 trace_start_error_with_name ("dup2");
990 if (write (2, "stdin/stdout redirected\n",
991 sizeof ("stdin/stdout redirected\n") - 1) < 0)
993 /* Errors ignored. */;
998 /* Start an inferior process and returns its pid.
999 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
1000 are its arguments. */
1003 linux_create_inferior (const char *program
,
1004 const std::vector
<char *> &program_args
)
1006 client_state
&cs
= get_client_state ();
1007 struct lwp_info
*new_lwp
;
1012 maybe_disable_address_space_randomization restore_personality
1013 (cs
.disable_randomization
);
1014 std::string str_program_args
= stringify_argv (program_args
);
1016 pid
= fork_inferior (program
,
1017 str_program_args
.c_str (),
1018 get_environ ()->envp (), linux_ptrace_fun
,
1019 NULL
, NULL
, NULL
, NULL
);
1022 linux_add_process (pid
, 0);
1024 ptid
= ptid_t (pid
, pid
, 0);
1025 new_lwp
= add_lwp (ptid
);
1026 new_lwp
->must_set_ptrace_flags
= 1;
1028 post_fork_inferior (pid
, program
);
1033 /* Implement the post_create_inferior target_ops method. */
1036 linux_post_create_inferior (void)
1038 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
1040 linux_arch_setup ();
1042 if (lwp
->must_set_ptrace_flags
)
1044 struct process_info
*proc
= current_process ();
1045 int options
= linux_low_ptrace_options (proc
->attached
);
1047 linux_enable_event_reporting (lwpid_of (current_thread
), options
);
1048 lwp
->must_set_ptrace_flags
= 0;
1052 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1056 linux_attach_lwp (ptid_t ptid
)
1058 struct lwp_info
*new_lwp
;
1059 int lwpid
= ptid
.lwp ();
1061 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
1065 new_lwp
= add_lwp (ptid
);
1067 /* We need to wait for SIGSTOP before being able to make the next
1068 ptrace call on this LWP. */
1069 new_lwp
->must_set_ptrace_flags
= 1;
1071 if (linux_proc_pid_is_stopped (lwpid
))
1074 debug_printf ("Attached to a stopped process\n");
1076 /* The process is definitely stopped. It is in a job control
1077 stop, unless the kernel predates the TASK_STOPPED /
1078 TASK_TRACED distinction, in which case it might be in a
1079 ptrace stop. Make sure it is in a ptrace stop; from there we
1080 can kill it, signal it, et cetera.
1082 First make sure there is a pending SIGSTOP. Since we are
1083 already attached, the process can not transition from stopped
1084 to running without a PTRACE_CONT; so we know this signal will
1085 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1086 probably already in the queue (unless this kernel is old
1087 enough to use TASK_STOPPED for ptrace stops); but since
1088 SIGSTOP is not an RT signal, it can only be queued once. */
1089 kill_lwp (lwpid
, SIGSTOP
);
1091 /* Finally, resume the stopped process. This will deliver the
1092 SIGSTOP (or a higher priority signal, just like normal
1093 PTRACE_ATTACH), which we'll catch later on. */
1094 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1097 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1098 brings it to a halt.
1100 There are several cases to consider here:
1102 1) gdbserver has already attached to the process and is being notified
1103 of a new thread that is being created.
1104 In this case we should ignore that SIGSTOP and resume the
1105 process. This is handled below by setting stop_expected = 1,
1106 and the fact that add_thread sets last_resume_kind ==
1109 2) This is the first thread (the process thread), and we're attaching
1110 to it via attach_inferior.
1111 In this case we want the process thread to stop.
1112 This is handled by having linux_attach set last_resume_kind ==
1113 resume_stop after we return.
1115 If the pid we are attaching to is also the tgid, we attach to and
1116 stop all the existing threads. Otherwise, we attach to pid and
1117 ignore any other threads in the same group as this pid.
1119 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1121 In this case we want the thread to stop.
1122 FIXME: This case is currently not properly handled.
1123 We should wait for the SIGSTOP but don't. Things work apparently
1124 because enough time passes between when we ptrace (ATTACH) and when
1125 gdb makes the next ptrace call on the thread.
1127 On the other hand, if we are currently trying to stop all threads, we
1128 should treat the new thread as if we had sent it a SIGSTOP. This works
1129 because we are guaranteed that the add_lwp call above added us to the
1130 end of the list, and so the new thread has not yet reached
1131 wait_for_sigstop (but will). */
1132 new_lwp
->stop_expected
= 1;
1137 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1138 already attached. Returns true if a new LWP is found, false
1142 attach_proc_task_lwp_callback (ptid_t ptid
)
1144 /* Is this a new thread? */
1145 if (find_thread_ptid (ptid
) == NULL
)
1147 int lwpid
= ptid
.lwp ();
1151 debug_printf ("Found new lwp %d\n", lwpid
);
1153 err
= linux_attach_lwp (ptid
);
1155 /* Be quiet if we simply raced with the thread exiting. EPERM
1156 is returned if the thread's task still exists, and is marked
1157 as exited or zombie, as well as other conditions, so in that
1158 case, confirm the status in /proc/PID/status. */
1160 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
1164 debug_printf ("Cannot attach to lwp %d: "
1165 "thread is gone (%d: %s)\n",
1166 lwpid
, err
, strerror (err
));
1172 = linux_ptrace_attach_fail_reason_string (ptid
, err
);
1174 warning (_("Cannot attach to lwp %d: %s"), lwpid
, reason
.c_str ());
1182 static void async_file_mark (void);
1184 /* Attach to PID. If PID is the tgid, attach to it and all
1188 linux_attach (unsigned long pid
)
1190 struct process_info
*proc
;
1191 struct thread_info
*initial_thread
;
1192 ptid_t ptid
= ptid_t (pid
, pid
, 0);
1195 proc
= linux_add_process (pid
, 1);
1197 /* Attach to PID. We will check for other threads
1199 err
= linux_attach_lwp (ptid
);
1202 remove_process (proc
);
1204 std::string reason
= linux_ptrace_attach_fail_reason_string (ptid
, err
);
1205 error ("Cannot attach to process %ld: %s", pid
, reason
.c_str ());
1208 /* Don't ignore the initial SIGSTOP if we just attached to this
1209 process. It will be collected by wait shortly. */
1210 initial_thread
= find_thread_ptid (ptid_t (pid
, pid
, 0));
1211 initial_thread
->last_resume_kind
= resume_stop
;
1213 /* We must attach to every LWP. If /proc is mounted, use that to
1214 find them now. On the one hand, the inferior may be using raw
1215 clone instead of using pthreads. On the other hand, even if it
1216 is using pthreads, GDB may not be connected yet (thread_db needs
1217 to do symbol lookups, through qSymbol). Also, thread_db walks
1218 structures in the inferior's address space to find the list of
1219 threads/LWPs, and those structures may well be corrupted. Note
1220 that once thread_db is loaded, we'll still use it to list threads
1221 and associate pthread info with each LWP. */
1222 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
1224 /* GDB will shortly read the xml target description for this
1225 process, to figure out the process' architecture. But the target
1226 description is only filled in when the first process/thread in
1227 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1228 that now, otherwise, if GDB is fast enough, it could read the
1229 target description _before_ that initial stop. */
1232 struct lwp_info
*lwp
;
1234 ptid_t pid_ptid
= ptid_t (pid
);
1236 lwpid
= linux_wait_for_event_filtered (pid_ptid
, pid_ptid
,
1238 gdb_assert (lwpid
> 0);
1240 lwp
= find_lwp_pid (ptid_t (lwpid
));
1242 if (!WIFSTOPPED (wstat
) || WSTOPSIG (wstat
) != SIGSTOP
)
1244 lwp
->status_pending_p
= 1;
1245 lwp
->status_pending
= wstat
;
1248 initial_thread
->last_resume_kind
= resume_continue
;
1252 gdb_assert (proc
->tdesc
!= NULL
);
1259 last_thread_of_process_p (int pid
)
1261 bool seen_one
= false;
1263 thread_info
*thread
= find_thread (pid
, [&] (thread_info
*thr_arg
)
1267 /* This is the first thread of this process we see. */
1273 /* This is the second thread of this process we see. */
1278 return thread
== NULL
;
1284 linux_kill_one_lwp (struct lwp_info
*lwp
)
1286 struct thread_info
*thr
= get_lwp_thread (lwp
);
1287 int pid
= lwpid_of (thr
);
1289 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1290 there is no signal context, and ptrace(PTRACE_KILL) (or
1291 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1292 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1293 alternative is to kill with SIGKILL. We only need one SIGKILL
1294 per process, not one for each thread. But since we still support
1295 support debugging programs using raw clone without CLONE_THREAD,
1296 we send one for each thread. For years, we used PTRACE_KILL
1297 only, so we're being a bit paranoid about some old kernels where
1298 PTRACE_KILL might work better (dubious if there are any such, but
1299 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1300 second, and so we're fine everywhere. */
1303 kill_lwp (pid
, SIGKILL
);
1306 int save_errno
= errno
;
1308 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1309 target_pid_to_str (ptid_of (thr
)),
1310 save_errno
? strerror (save_errno
) : "OK");
1314 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1317 int save_errno
= errno
;
1319 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1320 target_pid_to_str (ptid_of (thr
)),
1321 save_errno
? strerror (save_errno
) : "OK");
1325 /* Kill LWP and wait for it to die. */
1328 kill_wait_lwp (struct lwp_info
*lwp
)
1330 struct thread_info
*thr
= get_lwp_thread (lwp
);
1331 int pid
= ptid_of (thr
).pid ();
1332 int lwpid
= ptid_of (thr
).lwp ();
1337 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
1341 linux_kill_one_lwp (lwp
);
1343 /* Make sure it died. Notes:
1345 - The loop is most likely unnecessary.
1347 - We don't use linux_wait_for_event as that could delete lwps
1348 while we're iterating over them. We're not interested in
1349 any pending status at this point, only in making sure all
1350 wait status on the kernel side are collected until the
1353 - We don't use __WALL here as the __WALL emulation relies on
1354 SIGCHLD, and killing a stopped process doesn't generate
1355 one, nor an exit status.
1357 res
= my_waitpid (lwpid
, &wstat
, 0);
1358 if (res
== -1 && errno
== ECHILD
)
1359 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
1360 } while (res
> 0 && WIFSTOPPED (wstat
));
1362 /* Even if it was stopped, the child may have already disappeared.
1363 E.g., if it was killed by SIGKILL. */
1364 if (res
< 0 && errno
!= ECHILD
)
1365 perror_with_name ("kill_wait_lwp");
1368 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1369 except the leader. */
1372 kill_one_lwp_callback (thread_info
*thread
, int pid
)
1374 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1376 /* We avoid killing the first thread here, because of a Linux kernel (at
1377 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1378 the children get a chance to be reaped, it will remain a zombie
1381 if (lwpid_of (thread
) == pid
)
1384 debug_printf ("lkop: is last of process %s\n",
1385 target_pid_to_str (thread
->id
));
1389 kill_wait_lwp (lwp
);
1393 linux_kill (process_info
*process
)
1395 int pid
= process
->pid
;
1397 /* If we're killing a running inferior, make sure it is stopped
1398 first, as PTRACE_KILL will not work otherwise. */
1399 stop_all_lwps (0, NULL
);
1401 for_each_thread (pid
, [&] (thread_info
*thread
)
1403 kill_one_lwp_callback (thread
, pid
);
1406 /* See the comment in linux_kill_one_lwp. We did not kill the first
1407 thread in the list, so do so now. */
1408 lwp_info
*lwp
= find_lwp_pid (ptid_t (pid
));
1413 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1417 kill_wait_lwp (lwp
);
1419 the_target
->mourn (process
);
1421 /* Since we presently can only stop all lwps of all processes, we
1422 need to unstop lwps of other processes. */
1423 unstop_all_lwps (0, NULL
);
1427 /* Get pending signal of THREAD, for detaching purposes. This is the
1428 signal the thread last stopped for, which we need to deliver to the
1429 thread when detaching, otherwise, it'd be suppressed/lost. */
1432 get_detach_signal (struct thread_info
*thread
)
1434 client_state
&cs
= get_client_state ();
1435 enum gdb_signal signo
= GDB_SIGNAL_0
;
1437 struct lwp_info
*lp
= get_thread_lwp (thread
);
1439 if (lp
->status_pending_p
)
1440 status
= lp
->status_pending
;
1443 /* If the thread had been suspended by gdbserver, and it stopped
1444 cleanly, then it'll have stopped with SIGSTOP. But we don't
1445 want to deliver that SIGSTOP. */
1446 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1447 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1450 /* Otherwise, we may need to deliver the signal we
1452 status
= lp
->last_status
;
1455 if (!WIFSTOPPED (status
))
1458 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1459 target_pid_to_str (ptid_of (thread
)));
1463 /* Extended wait statuses aren't real SIGTRAPs. */
1464 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1467 debug_printf ("GPS: lwp %s had stopped with extended "
1468 "status: no pending signal\n",
1469 target_pid_to_str (ptid_of (thread
)));
1473 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1475 if (cs
.program_signals_p
&& !cs
.program_signals
[signo
])
1478 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1479 target_pid_to_str (ptid_of (thread
)),
1480 gdb_signal_to_string (signo
));
1483 else if (!cs
.program_signals_p
1484 /* If we have no way to know which signals GDB does not
1485 want to have passed to the program, assume
1486 SIGTRAP/SIGINT, which is GDB's default. */
1487 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1490 debug_printf ("GPS: lwp %s had signal %s, "
1491 "but we don't know if we should pass it. "
1492 "Default to not.\n",
1493 target_pid_to_str (ptid_of (thread
)),
1494 gdb_signal_to_string (signo
));
1500 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1501 target_pid_to_str (ptid_of (thread
)),
1502 gdb_signal_to_string (signo
));
1504 return WSTOPSIG (status
);
1508 /* Detach from LWP. */
1511 linux_detach_one_lwp (struct lwp_info
*lwp
)
1513 struct thread_info
*thread
= get_lwp_thread (lwp
);
1517 /* If there is a pending SIGSTOP, get rid of it. */
1518 if (lwp
->stop_expected
)
1521 debug_printf ("Sending SIGCONT to %s\n",
1522 target_pid_to_str (ptid_of (thread
)));
1524 kill_lwp (lwpid_of (thread
), SIGCONT
);
1525 lwp
->stop_expected
= 0;
1528 /* Pass on any pending signal for this thread. */
1529 sig
= get_detach_signal (thread
);
1531 /* Preparing to resume may try to write registers, and fail if the
1532 lwp is zombie. If that happens, ignore the error. We'll handle
1533 it below, when detach fails with ESRCH. */
1536 /* Flush any pending changes to the process's registers. */
1537 regcache_invalidate_thread (thread
);
1539 /* Finally, let it resume. */
1540 if (the_low_target
.prepare_to_resume
!= NULL
)
1541 the_low_target
.prepare_to_resume (lwp
);
1543 catch (const gdb_exception_error
&ex
)
1545 if (!check_ptrace_stopped_lwp_gone (lwp
))
1549 lwpid
= lwpid_of (thread
);
1550 if (ptrace (PTRACE_DETACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0,
1551 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1553 int save_errno
= errno
;
1555 /* We know the thread exists, so ESRCH must mean the lwp is
1556 zombie. This can happen if one of the already-detached
1557 threads exits the whole thread group. In that case we're
1558 still attached, and must reap the lwp. */
1559 if (save_errno
== ESRCH
)
1563 ret
= my_waitpid (lwpid
, &status
, __WALL
);
1566 warning (_("Couldn't reap LWP %d while detaching: %s"),
1567 lwpid
, strerror (errno
));
1569 else if (!WIFEXITED (status
) && !WIFSIGNALED (status
))
1571 warning (_("Reaping LWP %d while detaching "
1572 "returned unexpected status 0x%x"),
1578 error (_("Can't detach %s: %s"),
1579 target_pid_to_str (ptid_of (thread
)),
1580 strerror (save_errno
));
1583 else if (debug_threads
)
1585 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1586 target_pid_to_str (ptid_of (thread
)),
1593 /* Callback for for_each_thread. Detaches from non-leader threads of a
1597 linux_detach_lwp_callback (thread_info
*thread
)
1599 /* We don't actually detach from the thread group leader just yet.
1600 If the thread group exits, we must reap the zombie clone lwps
1601 before we're able to reap the leader. */
1602 if (thread
->id
.pid () == thread
->id
.lwp ())
1605 lwp_info
*lwp
= get_thread_lwp (thread
);
1606 linux_detach_one_lwp (lwp
);
1610 linux_detach (process_info
*process
)
1612 struct lwp_info
*main_lwp
;
1614 /* As there's a step over already in progress, let it finish first,
1615 otherwise nesting a stabilize_threads operation on top gets real
1617 complete_ongoing_step_over ();
1619 /* Stop all threads before detaching. First, ptrace requires that
1620 the thread is stopped to sucessfully detach. Second, thread_db
1621 may need to uninstall thread event breakpoints from memory, which
1622 only works with a stopped process anyway. */
1623 stop_all_lwps (0, NULL
);
1625 #ifdef USE_THREAD_DB
1626 thread_db_detach (process
);
1629 /* Stabilize threads (move out of jump pads). */
1630 stabilize_threads ();
1632 /* Detach from the clone lwps first. If the thread group exits just
1633 while we're detaching, we must reap the clone lwps before we're
1634 able to reap the leader. */
1635 for_each_thread (process
->pid
, linux_detach_lwp_callback
);
1637 main_lwp
= find_lwp_pid (ptid_t (process
->pid
));
1638 linux_detach_one_lwp (main_lwp
);
1640 the_target
->mourn (process
);
1642 /* Since we presently can only stop all lwps of all processes, we
1643 need to unstop lwps of other processes. */
1644 unstop_all_lwps (0, NULL
);
1648 /* Remove all LWPs that belong to process PROC from the lwp list. */
1651 linux_mourn (struct process_info
*process
)
1653 struct process_info_private
*priv
;
1655 #ifdef USE_THREAD_DB
1656 thread_db_mourn (process
);
1659 for_each_thread (process
->pid
, [] (thread_info
*thread
)
1661 delete_lwp (get_thread_lwp (thread
));
1664 /* Freeing all private data. */
1665 priv
= process
->priv
;
1666 if (the_low_target
.delete_process
!= NULL
)
1667 the_low_target
.delete_process (priv
->arch_private
);
1669 gdb_assert (priv
->arch_private
== NULL
);
1671 process
->priv
= NULL
;
1673 remove_process (process
);
1677 linux_join (int pid
)
1682 ret
= my_waitpid (pid
, &status
, 0);
1683 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1685 } while (ret
!= -1 || errno
!= ECHILD
);
1688 /* Return nonzero if the given thread is still alive. */
1690 linux_thread_alive (ptid_t ptid
)
1692 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1694 /* We assume we always know if a thread exits. If a whole process
1695 exited but we still haven't been able to report it to GDB, we'll
1696 hold on to the last lwp of the dead process. */
1698 return !lwp_is_marked_dead (lwp
);
1703 /* Return 1 if this lwp still has an interesting status pending. If
1704 not (e.g., it had stopped for a breakpoint that is gone), return
1708 thread_still_has_status_pending_p (struct thread_info
*thread
)
1710 struct lwp_info
*lp
= get_thread_lwp (thread
);
1712 if (!lp
->status_pending_p
)
1715 if (thread
->last_resume_kind
!= resume_stop
1716 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1717 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1719 struct thread_info
*saved_thread
;
1723 gdb_assert (lp
->last_status
!= 0);
1727 saved_thread
= current_thread
;
1728 current_thread
= thread
;
1730 if (pc
!= lp
->stop_pc
)
1733 debug_printf ("PC of %ld changed\n",
1738 #if !USE_SIGTRAP_SIGINFO
1739 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1740 && !(*the_low_target
.breakpoint_at
) (pc
))
1743 debug_printf ("previous SW breakpoint of %ld gone\n",
1747 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1748 && !hardware_breakpoint_inserted_here (pc
))
1751 debug_printf ("previous HW breakpoint of %ld gone\n",
1757 current_thread
= saved_thread
;
1762 debug_printf ("discarding pending breakpoint status\n");
1763 lp
->status_pending_p
= 0;
1771 /* Returns true if LWP is resumed from the client's perspective. */
1774 lwp_resumed (struct lwp_info
*lwp
)
1776 struct thread_info
*thread
= get_lwp_thread (lwp
);
1778 if (thread
->last_resume_kind
!= resume_stop
)
1781 /* Did gdb send us a `vCont;t', but we haven't reported the
1782 corresponding stop to gdb yet? If so, the thread is still
1783 resumed/running from gdb's perspective. */
1784 if (thread
->last_resume_kind
== resume_stop
1785 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
1791 /* Return true if this lwp has an interesting status pending. */
1793 status_pending_p_callback (thread_info
*thread
, ptid_t ptid
)
1795 struct lwp_info
*lp
= get_thread_lwp (thread
);
1797 /* Check if we're only interested in events from a specific process
1798 or a specific LWP. */
1799 if (!thread
->id
.matches (ptid
))
1802 if (!lwp_resumed (lp
))
1805 if (lp
->status_pending_p
1806 && !thread_still_has_status_pending_p (thread
))
1808 linux_resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1812 return lp
->status_pending_p
;
1816 find_lwp_pid (ptid_t ptid
)
1818 thread_info
*thread
= find_thread ([&] (thread_info
*thr_arg
)
1820 int lwp
= ptid
.lwp () != 0 ? ptid
.lwp () : ptid
.pid ();
1821 return thr_arg
->id
.lwp () == lwp
;
1827 return get_thread_lwp (thread
);
1830 /* Return the number of known LWPs in the tgid given by PID. */
1837 for_each_thread (pid
, [&] (thread_info
*thread
)
1845 /* See nat/linux-nat.h. */
1848 iterate_over_lwps (ptid_t filter
,
1849 gdb::function_view
<iterate_over_lwps_ftype
> callback
)
1851 thread_info
*thread
= find_thread (filter
, [&] (thread_info
*thr_arg
)
1853 lwp_info
*lwp
= get_thread_lwp (thr_arg
);
1855 return callback (lwp
);
1861 return get_thread_lwp (thread
);
1864 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1865 their exits until all other threads in the group have exited. */
1868 check_zombie_leaders (void)
1870 for_each_process ([] (process_info
*proc
) {
1871 pid_t leader_pid
= pid_of (proc
);
1872 struct lwp_info
*leader_lp
;
1874 leader_lp
= find_lwp_pid (ptid_t (leader_pid
));
1877 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1878 "num_lwps=%d, zombie=%d\n",
1879 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1880 linux_proc_pid_is_zombie (leader_pid
));
1882 if (leader_lp
!= NULL
&& !leader_lp
->stopped
1883 /* Check if there are other threads in the group, as we may
1884 have raced with the inferior simply exiting. */
1885 && !last_thread_of_process_p (leader_pid
)
1886 && linux_proc_pid_is_zombie (leader_pid
))
1888 /* A leader zombie can mean one of two things:
1890 - It exited, and there's an exit status pending
1891 available, or only the leader exited (not the whole
1892 program). In the latter case, we can't waitpid the
1893 leader's exit status until all other threads are gone.
1895 - There are 3 or more threads in the group, and a thread
1896 other than the leader exec'd. On an exec, the Linux
1897 kernel destroys all other threads (except the execing
1898 one) in the thread group, and resets the execing thread's
1899 tid to the tgid. No exit notification is sent for the
1900 execing thread -- from the ptracer's perspective, it
1901 appears as though the execing thread just vanishes.
1902 Until we reap all other threads except the leader and the
1903 execing thread, the leader will be zombie, and the
1904 execing thread will be in `D (disc sleep)'. As soon as
1905 all other threads are reaped, the execing thread changes
1906 it's tid to the tgid, and the previous (zombie) leader
1907 vanishes, giving place to the "new" leader. We could try
1908 distinguishing the exit and exec cases, by waiting once
1909 more, and seeing if something comes out, but it doesn't
1910 sound useful. The previous leader _does_ go away, and
1911 we'll re-add the new one once we see the exec event
1912 (which is just the same as what would happen if the
1913 previous leader did exit voluntarily before some other
1917 debug_printf ("CZL: Thread group leader %d zombie "
1918 "(it exited, or another thread execd).\n",
1921 delete_lwp (leader_lp
);
1926 /* Callback for `find_thread'. Returns the first LWP that is not
1930 not_stopped_callback (thread_info
*thread
, ptid_t filter
)
1932 if (!thread
->id
.matches (filter
))
1935 lwp_info
*lwp
= get_thread_lwp (thread
);
1937 return !lwp
->stopped
;
1940 /* Increment LWP's suspend count. */
1943 lwp_suspended_inc (struct lwp_info
*lwp
)
1947 if (debug_threads
&& lwp
->suspended
> 4)
1949 struct thread_info
*thread
= get_lwp_thread (lwp
);
1951 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1952 " suspended=%d\n", lwpid_of (thread
), lwp
->suspended
);
1956 /* Decrement LWP's suspend count. */
1959 lwp_suspended_decr (struct lwp_info
*lwp
)
1963 if (lwp
->suspended
< 0)
1965 struct thread_info
*thread
= get_lwp_thread (lwp
);
1967 internal_error (__FILE__
, __LINE__
,
1968 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread
),
1973 /* This function should only be called if the LWP got a SIGTRAP.
1975 Handle any tracepoint steps or hits. Return true if a tracepoint
1976 event was handled, 0 otherwise. */
1979 handle_tracepoints (struct lwp_info
*lwp
)
1981 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1982 int tpoint_related_event
= 0;
1984 gdb_assert (lwp
->suspended
== 0);
1986 /* If this tracepoint hit causes a tracing stop, we'll immediately
1987 uninsert tracepoints. To do this, we temporarily pause all
1988 threads, unpatch away, and then unpause threads. We need to make
1989 sure the unpausing doesn't resume LWP too. */
1990 lwp_suspended_inc (lwp
);
1992 /* And we need to be sure that any all-threads-stopping doesn't try
1993 to move threads out of the jump pads, as it could deadlock the
1994 inferior (LWP could be in the jump pad, maybe even holding the
1997 /* Do any necessary step collect actions. */
1998 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
2000 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
2002 /* See if we just hit a tracepoint and do its main collect
2004 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
2006 lwp_suspended_decr (lwp
);
2008 gdb_assert (lwp
->suspended
== 0);
2009 gdb_assert (!stabilizing_threads
2010 || (lwp
->collecting_fast_tracepoint
2011 != fast_tpoint_collect_result::not_collecting
));
2013 if (tpoint_related_event
)
2016 debug_printf ("got a tracepoint event\n");
2023 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2024 collection status. */
2026 static fast_tpoint_collect_result
2027 linux_fast_tracepoint_collecting (struct lwp_info
*lwp
,
2028 struct fast_tpoint_collect_status
*status
)
2030 CORE_ADDR thread_area
;
2031 struct thread_info
*thread
= get_lwp_thread (lwp
);
2033 if (the_low_target
.get_thread_area
== NULL
)
2034 return fast_tpoint_collect_result::not_collecting
;
2036 /* Get the thread area address. This is used to recognize which
2037 thread is which when tracing with the in-process agent library.
2038 We don't read anything from the address, and treat it as opaque;
2039 it's the address itself that we assume is unique per-thread. */
2040 if ((*the_low_target
.get_thread_area
) (lwpid_of (thread
), &thread_area
) == -1)
2041 return fast_tpoint_collect_result::not_collecting
;
2043 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
2046 /* The reason we resume in the caller, is because we want to be able
2047 to pass lwp->status_pending as WSTAT, and we need to clear
2048 status_pending_p before resuming, otherwise, linux_resume_one_lwp
2049 refuses to resume. */
2052 maybe_move_out_of_jump_pad (struct lwp_info
*lwp
, int *wstat
)
2054 struct thread_info
*saved_thread
;
2056 saved_thread
= current_thread
;
2057 current_thread
= get_lwp_thread (lwp
);
2060 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
2061 && supports_fast_tracepoints ()
2062 && agent_loaded_p ())
2064 struct fast_tpoint_collect_status status
;
2067 debug_printf ("Checking whether LWP %ld needs to move out of the "
2069 lwpid_of (current_thread
));
2071 fast_tpoint_collect_result r
2072 = linux_fast_tracepoint_collecting (lwp
, &status
);
2075 || (WSTOPSIG (*wstat
) != SIGILL
2076 && WSTOPSIG (*wstat
) != SIGFPE
2077 && WSTOPSIG (*wstat
) != SIGSEGV
2078 && WSTOPSIG (*wstat
) != SIGBUS
))
2080 lwp
->collecting_fast_tracepoint
= r
;
2082 if (r
!= fast_tpoint_collect_result::not_collecting
)
2084 if (r
== fast_tpoint_collect_result::before_insn
2085 && lwp
->exit_jump_pad_bkpt
== NULL
)
2087 /* Haven't executed the original instruction yet.
2088 Set breakpoint there, and wait till it's hit,
2089 then single-step until exiting the jump pad. */
2090 lwp
->exit_jump_pad_bkpt
2091 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
2095 debug_printf ("Checking whether LWP %ld needs to move out of "
2096 "the jump pad...it does\n",
2097 lwpid_of (current_thread
));
2098 current_thread
= saved_thread
;
2105 /* If we get a synchronous signal while collecting, *and*
2106 while executing the (relocated) original instruction,
2107 reset the PC to point at the tpoint address, before
2108 reporting to GDB. Otherwise, it's an IPA lib bug: just
2109 report the signal to GDB, and pray for the best. */
2111 lwp
->collecting_fast_tracepoint
2112 = fast_tpoint_collect_result::not_collecting
;
2114 if (r
!= fast_tpoint_collect_result::not_collecting
2115 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
2116 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
2119 struct regcache
*regcache
;
2121 /* The si_addr on a few signals references the address
2122 of the faulting instruction. Adjust that as
2124 if ((WSTOPSIG (*wstat
) == SIGILL
2125 || WSTOPSIG (*wstat
) == SIGFPE
2126 || WSTOPSIG (*wstat
) == SIGBUS
2127 || WSTOPSIG (*wstat
) == SIGSEGV
)
2128 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
2129 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
2130 /* Final check just to make sure we don't clobber
2131 the siginfo of non-kernel-sent signals. */
2132 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
2134 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
2135 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
2136 (PTRACE_TYPE_ARG3
) 0, &info
);
2139 regcache
= get_thread_regcache (current_thread
, 1);
2140 (*the_low_target
.set_pc
) (regcache
, status
.tpoint_addr
);
2141 lwp
->stop_pc
= status
.tpoint_addr
;
2143 /* Cancel any fast tracepoint lock this thread was
2145 force_unlock_trace_buffer ();
2148 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
2151 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2152 "stopping all threads momentarily.\n");
2154 stop_all_lwps (1, lwp
);
2156 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
2157 lwp
->exit_jump_pad_bkpt
= NULL
;
2159 unstop_all_lwps (1, lwp
);
2161 gdb_assert (lwp
->suspended
>= 0);
2167 debug_printf ("Checking whether LWP %ld needs to move out of the "
2169 lwpid_of (current_thread
));
2171 current_thread
= saved_thread
;
2175 /* Enqueue one signal in the "signals to report later when out of the
2179 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2181 struct pending_signals
*p_sig
;
2182 struct thread_info
*thread
= get_lwp_thread (lwp
);
2185 debug_printf ("Deferring signal %d for LWP %ld.\n",
2186 WSTOPSIG (*wstat
), lwpid_of (thread
));
2190 struct pending_signals
*sig
;
2192 for (sig
= lwp
->pending_signals_to_report
;
2195 debug_printf (" Already queued %d\n",
2198 debug_printf (" (no more currently queued signals)\n");
2201 /* Don't enqueue non-RT signals if they are already in the deferred
2202 queue. (SIGSTOP being the easiest signal to see ending up here
2204 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
2206 struct pending_signals
*sig
;
2208 for (sig
= lwp
->pending_signals_to_report
;
2212 if (sig
->signal
== WSTOPSIG (*wstat
))
2215 debug_printf ("Not requeuing already queued non-RT signal %d"
2224 p_sig
= XCNEW (struct pending_signals
);
2225 p_sig
->prev
= lwp
->pending_signals_to_report
;
2226 p_sig
->signal
= WSTOPSIG (*wstat
);
2228 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2231 lwp
->pending_signals_to_report
= p_sig
;
2234 /* Dequeue one signal from the "signals to report later when out of
2235 the jump pad" list. */
2238 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2240 struct thread_info
*thread
= get_lwp_thread (lwp
);
2242 if (lwp
->pending_signals_to_report
!= NULL
)
2244 struct pending_signals
**p_sig
;
2246 p_sig
= &lwp
->pending_signals_to_report
;
2247 while ((*p_sig
)->prev
!= NULL
)
2248 p_sig
= &(*p_sig
)->prev
;
2250 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
2251 if ((*p_sig
)->info
.si_signo
!= 0)
2252 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2258 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2259 WSTOPSIG (*wstat
), lwpid_of (thread
));
2263 struct pending_signals
*sig
;
2265 for (sig
= lwp
->pending_signals_to_report
;
2268 debug_printf (" Still queued %d\n",
2271 debug_printf (" (no more queued signals)\n");
2280 /* Fetch the possibly triggered data watchpoint info and store it in
2283 On some archs, like x86, that use debug registers to set
2284 watchpoints, it's possible that the way to know which watched
2285 address trapped, is to check the register that is used to select
2286 which address to watch. Problem is, between setting the watchpoint
2287 and reading back which data address trapped, the user may change
2288 the set of watchpoints, and, as a consequence, GDB changes the
2289 debug registers in the inferior. To avoid reading back a stale
2290 stopped-data-address when that happens, we cache in LP the fact
2291 that a watchpoint trapped, and the corresponding data address, as
2292 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2293 registers meanwhile, we have the cached data we can rely on. */
2296 check_stopped_by_watchpoint (struct lwp_info
*child
)
2298 if (the_low_target
.stopped_by_watchpoint
!= NULL
)
2300 struct thread_info
*saved_thread
;
2302 saved_thread
= current_thread
;
2303 current_thread
= get_lwp_thread (child
);
2305 if (the_low_target
.stopped_by_watchpoint ())
2307 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2309 if (the_low_target
.stopped_data_address
!= NULL
)
2310 child
->stopped_data_address
2311 = the_low_target
.stopped_data_address ();
2313 child
->stopped_data_address
= 0;
2316 current_thread
= saved_thread
;
2319 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2322 /* Return the ptrace options that we want to try to enable. */
2325 linux_low_ptrace_options (int attached
)
2327 client_state
&cs
= get_client_state ();
2331 options
|= PTRACE_O_EXITKILL
;
2333 if (cs
.report_fork_events
)
2334 options
|= PTRACE_O_TRACEFORK
;
2336 if (cs
.report_vfork_events
)
2337 options
|= (PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEVFORKDONE
);
2339 if (cs
.report_exec_events
)
2340 options
|= PTRACE_O_TRACEEXEC
;
2342 options
|= PTRACE_O_TRACESYSGOOD
;
2347 /* Do low-level handling of the event, and check if we should go on
2348 and pass it to caller code. Return the affected lwp if we are, or
2351 static struct lwp_info
*
2352 linux_low_filter_event (int lwpid
, int wstat
)
2354 client_state
&cs
= get_client_state ();
2355 struct lwp_info
*child
;
2356 struct thread_info
*thread
;
2357 int have_stop_pc
= 0;
2359 child
= find_lwp_pid (ptid_t (lwpid
));
2361 /* Check for stop events reported by a process we didn't already
2362 know about - anything not already in our LWP list.
2364 If we're expecting to receive stopped processes after
2365 fork, vfork, and clone events, then we'll just add the
2366 new one to our list and go back to waiting for the event
2367 to be reported - the stopped process might be returned
2368 from waitpid before or after the event is.
2370 But note the case of a non-leader thread exec'ing after the
2371 leader having exited, and gone from our lists (because
2372 check_zombie_leaders deleted it). The non-leader thread
2373 changes its tid to the tgid. */
2375 if (WIFSTOPPED (wstat
) && child
== NULL
&& WSTOPSIG (wstat
) == SIGTRAP
2376 && linux_ptrace_get_extended_event (wstat
) == PTRACE_EVENT_EXEC
)
2380 /* A multi-thread exec after we had seen the leader exiting. */
2383 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2384 "after exec.\n", lwpid
);
2387 child_ptid
= ptid_t (lwpid
, lwpid
, 0);
2388 child
= add_lwp (child_ptid
);
2390 current_thread
= child
->thread
;
2393 /* If we didn't find a process, one of two things presumably happened:
2394 - A process we started and then detached from has exited. Ignore it.
2395 - A process we are controlling has forked and the new child's stop
2396 was reported to us by the kernel. Save its PID. */
2397 if (child
== NULL
&& WIFSTOPPED (wstat
))
2399 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
2402 else if (child
== NULL
)
2405 thread
= get_lwp_thread (child
);
2409 child
->last_status
= wstat
;
2411 /* Check if the thread has exited. */
2412 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
2415 debug_printf ("LLFE: %d exited.\n", lwpid
);
2417 if (finish_step_over (child
))
2419 /* Unsuspend all other LWPs, and set them back running again. */
2420 unsuspend_all_lwps (child
);
2423 /* If there is at least one more LWP, then the exit signal was
2424 not the end of the debugged application and should be
2425 ignored, unless GDB wants to hear about thread exits. */
2426 if (cs
.report_thread_events
2427 || last_thread_of_process_p (pid_of (thread
)))
2429 /* Since events are serialized to GDB core, and we can't
2430 report this one right now. Leave the status pending for
2431 the next time we're able to report it. */
2432 mark_lwp_dead (child
, wstat
);
2442 gdb_assert (WIFSTOPPED (wstat
));
2444 if (WIFSTOPPED (wstat
))
2446 struct process_info
*proc
;
2448 /* Architecture-specific setup after inferior is running. */
2449 proc
= find_process_pid (pid_of (thread
));
2450 if (proc
->tdesc
== NULL
)
2454 /* This needs to happen after we have attached to the
2455 inferior and it is stopped for the first time, but
2456 before we access any inferior registers. */
2457 linux_arch_setup_thread (thread
);
2461 /* The process is started, but GDBserver will do
2462 architecture-specific setup after the program stops at
2463 the first instruction. */
2464 child
->status_pending_p
= 1;
2465 child
->status_pending
= wstat
;
2471 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
2473 struct process_info
*proc
= find_process_pid (pid_of (thread
));
2474 int options
= linux_low_ptrace_options (proc
->attached
);
2476 linux_enable_event_reporting (lwpid
, options
);
2477 child
->must_set_ptrace_flags
= 0;
2480 /* Always update syscall_state, even if it will be filtered later. */
2481 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SYSCALL_SIGTRAP
)
2483 child
->syscall_state
2484 = (child
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2485 ? TARGET_WAITKIND_SYSCALL_RETURN
2486 : TARGET_WAITKIND_SYSCALL_ENTRY
);
2490 /* Almost all other ptrace-stops are known to be outside of system
2491 calls, with further exceptions in handle_extended_wait. */
2492 child
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2495 /* Be careful to not overwrite stop_pc until save_stop_reason is
2497 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2498 && linux_is_extended_waitstatus (wstat
))
2500 child
->stop_pc
= get_pc (child
);
2501 if (handle_extended_wait (&child
, wstat
))
2503 /* The event has been handled, so just return without
2509 if (linux_wstatus_maybe_breakpoint (wstat
))
2511 if (save_stop_reason (child
))
2516 child
->stop_pc
= get_pc (child
);
2518 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2519 && child
->stop_expected
)
2522 debug_printf ("Expected stop.\n");
2523 child
->stop_expected
= 0;
2525 if (thread
->last_resume_kind
== resume_stop
)
2527 /* We want to report the stop to the core. Treat the
2528 SIGSTOP as a normal event. */
2530 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2531 target_pid_to_str (ptid_of (thread
)));
2533 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2535 /* Stopping threads. We don't want this SIGSTOP to end up
2538 debug_printf ("LLW: SIGSTOP caught for %s "
2539 "while stopping threads.\n",
2540 target_pid_to_str (ptid_of (thread
)));
2545 /* This is a delayed SIGSTOP. Filter out the event. */
2547 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2548 child
->stepping
? "step" : "continue",
2549 target_pid_to_str (ptid_of (thread
)));
2551 linux_resume_one_lwp (child
, child
->stepping
, 0, NULL
);
2556 child
->status_pending_p
= 1;
2557 child
->status_pending
= wstat
;
2561 /* Return true if THREAD is doing hardware single step. */
2564 maybe_hw_step (struct thread_info
*thread
)
2566 if (can_hardware_single_step ())
2570 /* GDBserver must insert single-step breakpoint for software
2572 gdb_assert (has_single_step_breakpoints (thread
));
2577 /* Resume LWPs that are currently stopped without any pending status
2578 to report, but are resumed from the core's perspective. */
2581 resume_stopped_resumed_lwps (thread_info
*thread
)
2583 struct lwp_info
*lp
= get_thread_lwp (thread
);
2587 && !lp
->status_pending_p
2588 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
2592 if (thread
->last_resume_kind
== resume_step
)
2593 step
= maybe_hw_step (thread
);
2596 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2597 target_pid_to_str (ptid_of (thread
)),
2598 paddress (lp
->stop_pc
),
2601 linux_resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
2605 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2606 match FILTER_PTID (leaving others pending). The PTIDs can be:
2607 minus_one_ptid, to specify any child; a pid PTID, specifying all
2608 lwps of a thread group; or a PTID representing a single lwp. Store
2609 the stop status through the status pointer WSTAT. OPTIONS is
2610 passed to the waitpid call. Return 0 if no event was found and
2611 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2612 was found. Return the PID of the stopped child otherwise. */
2615 linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
2616 int *wstatp
, int options
)
2618 struct thread_info
*event_thread
;
2619 struct lwp_info
*event_child
, *requested_child
;
2620 sigset_t block_mask
, prev_mask
;
2623 /* N.B. event_thread points to the thread_info struct that contains
2624 event_child. Keep them in sync. */
2625 event_thread
= NULL
;
2627 requested_child
= NULL
;
2629 /* Check for a lwp with a pending status. */
2631 if (filter_ptid
== minus_one_ptid
|| filter_ptid
.is_pid ())
2633 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2635 return status_pending_p_callback (thread
, filter_ptid
);
2638 if (event_thread
!= NULL
)
2639 event_child
= get_thread_lwp (event_thread
);
2640 if (debug_threads
&& event_thread
)
2641 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
2643 else if (filter_ptid
!= null_ptid
)
2645 requested_child
= find_lwp_pid (filter_ptid
);
2647 if (stopping_threads
== NOT_STOPPING_THREADS
2648 && requested_child
->status_pending_p
2649 && (requested_child
->collecting_fast_tracepoint
2650 != fast_tpoint_collect_result::not_collecting
))
2652 enqueue_one_deferred_signal (requested_child
,
2653 &requested_child
->status_pending
);
2654 requested_child
->status_pending_p
= 0;
2655 requested_child
->status_pending
= 0;
2656 linux_resume_one_lwp (requested_child
, 0, 0, NULL
);
2659 if (requested_child
->suspended
2660 && requested_child
->status_pending_p
)
2662 internal_error (__FILE__
, __LINE__
,
2663 "requesting an event out of a"
2664 " suspended child?");
2667 if (requested_child
->status_pending_p
)
2669 event_child
= requested_child
;
2670 event_thread
= get_lwp_thread (event_child
);
2674 if (event_child
!= NULL
)
2677 debug_printf ("Got an event from pending child %ld (%04x)\n",
2678 lwpid_of (event_thread
), event_child
->status_pending
);
2679 *wstatp
= event_child
->status_pending
;
2680 event_child
->status_pending_p
= 0;
2681 event_child
->status_pending
= 0;
2682 current_thread
= event_thread
;
2683 return lwpid_of (event_thread
);
2686 /* But if we don't find a pending event, we'll have to wait.
2688 We only enter this loop if no process has a pending wait status.
2689 Thus any action taken in response to a wait status inside this
2690 loop is responding as soon as we detect the status, not after any
2693 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2694 all signals while here. */
2695 sigfillset (&block_mask
);
2696 sigprocmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2698 /* Always pull all events out of the kernel. We'll randomly select
2699 an event LWP out of all that have events, to prevent
2701 while (event_child
== NULL
)
2705 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2708 - If the thread group leader exits while other threads in the
2709 thread group still exist, waitpid(TGID, ...) hangs. That
2710 waitpid won't return an exit status until the other threads
2711 in the group are reaped.
2713 - When a non-leader thread execs, that thread just vanishes
2714 without reporting an exit (so we'd hang if we waited for it
2715 explicitly in that case). The exec event is reported to
2718 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2721 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2722 ret
, errno
? strerror (errno
) : "ERRNO-OK");
2728 debug_printf ("LLW: waitpid %ld received %s\n",
2729 (long) ret
, status_to_str (*wstatp
));
2732 /* Filter all events. IOW, leave all events pending. We'll
2733 randomly select an event LWP out of all that have events
2735 linux_low_filter_event (ret
, *wstatp
);
2736 /* Retry until nothing comes out of waitpid. A single
2737 SIGCHLD can indicate more than one child stopped. */
2741 /* Now that we've pulled all events out of the kernel, resume
2742 LWPs that don't have an interesting event to report. */
2743 if (stopping_threads
== NOT_STOPPING_THREADS
)
2744 for_each_thread (resume_stopped_resumed_lwps
);
2746 /* ... and find an LWP with a status to report to the core, if
2748 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2750 return status_pending_p_callback (thread
, filter_ptid
);
2753 if (event_thread
!= NULL
)
2755 event_child
= get_thread_lwp (event_thread
);
2756 *wstatp
= event_child
->status_pending
;
2757 event_child
->status_pending_p
= 0;
2758 event_child
->status_pending
= 0;
2762 /* Check for zombie thread group leaders. Those can't be reaped
2763 until all other threads in the thread group are. */
2764 check_zombie_leaders ();
2766 auto not_stopped
= [&] (thread_info
*thread
)
2768 return not_stopped_callback (thread
, wait_ptid
);
2771 /* If there are no resumed children left in the set of LWPs we
2772 want to wait for, bail. We can't just block in
2773 waitpid/sigsuspend, because lwps might have been left stopped
2774 in trace-stop state, and we'd be stuck forever waiting for
2775 their status to change (which would only happen if we resumed
2776 them). Even if WNOHANG is set, this return code is preferred
2777 over 0 (below), as it is more detailed. */
2778 if (find_thread (not_stopped
) == NULL
)
2781 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2782 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2786 /* No interesting event to report to the caller. */
2787 if ((options
& WNOHANG
))
2790 debug_printf ("WNOHANG set, no event found\n");
2792 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2796 /* Block until we get an event reported with SIGCHLD. */
2798 debug_printf ("sigsuspend'ing\n");
2800 sigsuspend (&prev_mask
);
2801 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2805 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2807 current_thread
= event_thread
;
2809 return lwpid_of (event_thread
);
2812 /* Wait for an event from child(ren) PTID. PTIDs can be:
2813 minus_one_ptid, to specify any child; a pid PTID, specifying all
2814 lwps of a thread group; or a PTID representing a single lwp. Store
2815 the stop status through the status pointer WSTAT. OPTIONS is
2816 passed to the waitpid call. Return 0 if no event was found and
2817 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2818 was found. Return the PID of the stopped child otherwise. */
2821 linux_wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2823 return linux_wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
2826 /* Select one LWP out of those that have events pending. */
2829 select_event_lwp (struct lwp_info
**orig_lp
)
2831 int random_selector
;
2832 struct thread_info
*event_thread
= NULL
;
2834 /* In all-stop, give preference to the LWP that is being
2835 single-stepped. There will be at most one, and it's the LWP that
2836 the core is most interested in. If we didn't do this, then we'd
2837 have to handle pending step SIGTRAPs somehow in case the core
2838 later continues the previously-stepped thread, otherwise we'd
2839 report the pending SIGTRAP, and the core, not having stepped the
2840 thread, wouldn't understand what the trap was for, and therefore
2841 would report it to the user as a random signal. */
2844 event_thread
= find_thread ([] (thread_info
*thread
)
2846 lwp_info
*lp
= get_thread_lwp (thread
);
2848 return (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2849 && thread
->last_resume_kind
== resume_step
2850 && lp
->status_pending_p
);
2853 if (event_thread
!= NULL
)
2856 debug_printf ("SEL: Select single-step %s\n",
2857 target_pid_to_str (ptid_of (event_thread
)));
2860 if (event_thread
== NULL
)
2862 /* No single-stepping LWP. Select one at random, out of those
2863 which have had events. */
2865 /* First see how many events we have. */
2867 for_each_thread ([&] (thread_info
*thread
)
2869 lwp_info
*lp
= get_thread_lwp (thread
);
2871 /* Count only resumed LWPs that have an event pending. */
2872 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2873 && lp
->status_pending_p
)
2876 gdb_assert (num_events
> 0);
2878 /* Now randomly pick a LWP out of those that have had
2880 random_selector
= (int)
2881 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2883 if (debug_threads
&& num_events
> 1)
2884 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2885 num_events
, random_selector
);
2887 event_thread
= find_thread ([&] (thread_info
*thread
)
2889 lwp_info
*lp
= get_thread_lwp (thread
);
2891 /* Select only resumed LWPs that have an event pending. */
2892 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2893 && lp
->status_pending_p
)
2894 if (random_selector
-- == 0)
2901 if (event_thread
!= NULL
)
2903 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2905 /* Switch the event LWP. */
2906 *orig_lp
= event_lp
;
2910 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2914 unsuspend_all_lwps (struct lwp_info
*except
)
2916 for_each_thread ([&] (thread_info
*thread
)
2918 lwp_info
*lwp
= get_thread_lwp (thread
);
2921 lwp_suspended_decr (lwp
);
2925 static void move_out_of_jump_pad_callback (thread_info
*thread
);
2926 static bool stuck_in_jump_pad_callback (thread_info
*thread
);
2927 static bool lwp_running (thread_info
*thread
);
2928 static ptid_t
linux_wait_1 (ptid_t ptid
,
2929 struct target_waitstatus
*ourstatus
,
2930 int target_options
);
2932 /* Stabilize threads (move out of jump pads).
2934 If a thread is midway collecting a fast tracepoint, we need to
2935 finish the collection and move it out of the jump pad before
2936 reporting the signal.
2938 This avoids recursion while collecting (when a signal arrives
2939 midway, and the signal handler itself collects), which would trash
2940 the trace buffer. In case the user set a breakpoint in a signal
2941 handler, this avoids the backtrace showing the jump pad, etc..
2942 Most importantly, there are certain things we can't do safely if
2943 threads are stopped in a jump pad (or in its callee's). For
2946 - starting a new trace run. A thread still collecting the
2947 previous run, could trash the trace buffer when resumed. The trace
2948 buffer control structures would have been reset but the thread had
2949 no way to tell. The thread could even midway memcpy'ing to the
2950 buffer, which would mean that when resumed, it would clobber the
2951 trace buffer that had been set for a new run.
2953 - we can't rewrite/reuse the jump pads for new tracepoints
2954 safely. Say you do tstart while a thread is stopped midway while
2955 collecting. When the thread is later resumed, it finishes the
2956 collection, and returns to the jump pad, to execute the original
2957 instruction that was under the tracepoint jump at the time the
2958 older run had been started. If the jump pad had been rewritten
2959 since for something else in the new run, the thread would now
2960 execute the wrong / random instructions. */
2963 linux_stabilize_threads (void)
2965 thread_info
*thread_stuck
= find_thread (stuck_in_jump_pad_callback
);
2967 if (thread_stuck
!= NULL
)
2970 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2971 lwpid_of (thread_stuck
));
2975 thread_info
*saved_thread
= current_thread
;
2977 stabilizing_threads
= 1;
2980 for_each_thread (move_out_of_jump_pad_callback
);
2982 /* Loop until all are stopped out of the jump pads. */
2983 while (find_thread (lwp_running
) != NULL
)
2985 struct target_waitstatus ourstatus
;
2986 struct lwp_info
*lwp
;
2989 /* Note that we go through the full wait even loop. While
2990 moving threads out of jump pad, we need to be able to step
2991 over internal breakpoints and such. */
2992 linux_wait_1 (minus_one_ptid
, &ourstatus
, 0);
2994 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2996 lwp
= get_thread_lwp (current_thread
);
2999 lwp_suspended_inc (lwp
);
3001 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
3002 || current_thread
->last_resume_kind
== resume_stop
)
3004 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
3005 enqueue_one_deferred_signal (lwp
, &wstat
);
3010 unsuspend_all_lwps (NULL
);
3012 stabilizing_threads
= 0;
3014 current_thread
= saved_thread
;
3018 thread_stuck
= find_thread (stuck_in_jump_pad_callback
);
3020 if (thread_stuck
!= NULL
)
3021 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3022 lwpid_of (thread_stuck
));
3026 /* Convenience function that is called when the kernel reports an
3027 event that is not passed out to GDB. */
3030 ignore_event (struct target_waitstatus
*ourstatus
)
3032 /* If we got an event, there may still be others, as a single
3033 SIGCHLD can indicate more than one child stopped. This forces
3034 another target_wait call. */
3037 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3041 /* Convenience function that is called when the kernel reports an exit
3042 event. This decides whether to report the event to GDB as a
3043 process exit event, a thread exit event, or to suppress the
3047 filter_exit_event (struct lwp_info
*event_child
,
3048 struct target_waitstatus
*ourstatus
)
3050 client_state
&cs
= get_client_state ();
3051 struct thread_info
*thread
= get_lwp_thread (event_child
);
3052 ptid_t ptid
= ptid_of (thread
);
3054 if (!last_thread_of_process_p (pid_of (thread
)))
3056 if (cs
.report_thread_events
)
3057 ourstatus
->kind
= TARGET_WAITKIND_THREAD_EXITED
;
3059 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3061 delete_lwp (event_child
);
3066 /* Returns 1 if GDB is interested in any event_child syscalls. */
3069 gdb_catching_syscalls_p (struct lwp_info
*event_child
)
3071 struct thread_info
*thread
= get_lwp_thread (event_child
);
3072 struct process_info
*proc
= get_thread_process (thread
);
3074 return !proc
->syscalls_to_catch
.empty ();
3077 /* Returns 1 if GDB is interested in the event_child syscall.
3078 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3081 gdb_catch_this_syscall_p (struct lwp_info
*event_child
)
3084 struct thread_info
*thread
= get_lwp_thread (event_child
);
3085 struct process_info
*proc
= get_thread_process (thread
);
3087 if (proc
->syscalls_to_catch
.empty ())
3090 if (proc
->syscalls_to_catch
[0] == ANY_SYSCALL
)
3093 get_syscall_trapinfo (event_child
, &sysno
);
3095 for (int iter
: proc
->syscalls_to_catch
)
3102 /* Wait for process, returns status. */
3105 linux_wait_1 (ptid_t ptid
,
3106 struct target_waitstatus
*ourstatus
, int target_options
)
3108 client_state
&cs
= get_client_state ();
3110 struct lwp_info
*event_child
;
3113 int step_over_finished
;
3114 int bp_explains_trap
;
3115 int maybe_internal_trap
;
3124 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid
));
3127 /* Translate generic target options into linux options. */
3129 if (target_options
& TARGET_WNOHANG
)
3132 bp_explains_trap
= 0;
3135 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3137 auto status_pending_p_any
= [&] (thread_info
*thread
)
3139 return status_pending_p_callback (thread
, minus_one_ptid
);
3142 auto not_stopped
= [&] (thread_info
*thread
)
3144 return not_stopped_callback (thread
, minus_one_ptid
);
3147 /* Find a resumed LWP, if any. */
3148 if (find_thread (status_pending_p_any
) != NULL
)
3150 else if (find_thread (not_stopped
) != NULL
)
3155 if (step_over_bkpt
== null_ptid
)
3156 pid
= linux_wait_for_event (ptid
, &w
, options
);
3160 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3161 target_pid_to_str (step_over_bkpt
));
3162 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
3165 if (pid
== 0 || (pid
== -1 && !any_resumed
))
3167 gdb_assert (target_options
& TARGET_WNOHANG
);
3171 debug_printf ("linux_wait_1 ret = null_ptid, "
3172 "TARGET_WAITKIND_IGNORE\n");
3176 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3183 debug_printf ("linux_wait_1 ret = null_ptid, "
3184 "TARGET_WAITKIND_NO_RESUMED\n");
3188 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
3192 event_child
= get_thread_lwp (current_thread
);
3194 /* linux_wait_for_event only returns an exit status for the last
3195 child of a process. Report it. */
3196 if (WIFEXITED (w
) || WIFSIGNALED (w
))
3200 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
3201 ourstatus
->value
.integer
= WEXITSTATUS (w
);
3205 debug_printf ("linux_wait_1 ret = %s, exited with "
3207 target_pid_to_str (ptid_of (current_thread
)),
3214 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
3215 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
3219 debug_printf ("linux_wait_1 ret = %s, terminated with "
3221 target_pid_to_str (ptid_of (current_thread
)),
3227 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3228 return filter_exit_event (event_child
, ourstatus
);
3230 return ptid_of (current_thread
);
3233 /* If step-over executes a breakpoint instruction, in the case of a
3234 hardware single step it means a gdb/gdbserver breakpoint had been
3235 planted on top of a permanent breakpoint, in the case of a software
3236 single step it may just mean that gdbserver hit the reinsert breakpoint.
3237 The PC has been adjusted by save_stop_reason to point at
3238 the breakpoint address.
3239 So in the case of the hardware single step advance the PC manually
3240 past the breakpoint and in the case of software single step advance only
3241 if it's not the single_step_breakpoint we are hitting.
3242 This avoids that a program would keep trapping a permanent breakpoint
3244 if (step_over_bkpt
!= null_ptid
3245 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3246 && (event_child
->stepping
3247 || !single_step_breakpoint_inserted_here (event_child
->stop_pc
)))
3249 int increment_pc
= 0;
3250 int breakpoint_kind
= 0;
3251 CORE_ADDR stop_pc
= event_child
->stop_pc
;
3254 the_target
->breakpoint_kind_from_current_state (&stop_pc
);
3255 the_target
->sw_breakpoint_from_kind (breakpoint_kind
, &increment_pc
);
3259 debug_printf ("step-over for %s executed software breakpoint\n",
3260 target_pid_to_str (ptid_of (current_thread
)));
3263 if (increment_pc
!= 0)
3265 struct regcache
*regcache
3266 = get_thread_regcache (current_thread
, 1);
3268 event_child
->stop_pc
+= increment_pc
;
3269 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
3271 if (!(*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))
3272 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3276 /* If this event was not handled before, and is not a SIGTRAP, we
3277 report it. SIGILL and SIGSEGV are also treated as traps in case
3278 a breakpoint is inserted at the current PC. If this target does
3279 not support internal breakpoints at all, we also report the
3280 SIGTRAP without further processing; it's of no concern to us. */
3282 = (supports_breakpoints ()
3283 && (WSTOPSIG (w
) == SIGTRAP
3284 || ((WSTOPSIG (w
) == SIGILL
3285 || WSTOPSIG (w
) == SIGSEGV
)
3286 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
3288 if (maybe_internal_trap
)
3290 /* Handle anything that requires bookkeeping before deciding to
3291 report the event or continue waiting. */
3293 /* First check if we can explain the SIGTRAP with an internal
3294 breakpoint, or if we should possibly report the event to GDB.
3295 Do this before anything that may remove or insert a
3297 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
3299 /* We have a SIGTRAP, possibly a step-over dance has just
3300 finished. If so, tweak the state machine accordingly,
3301 reinsert breakpoints and delete any single-step
3303 step_over_finished
= finish_step_over (event_child
);
3305 /* Now invoke the callbacks of any internal breakpoints there. */
3306 check_breakpoints (event_child
->stop_pc
);
3308 /* Handle tracepoint data collecting. This may overflow the
3309 trace buffer, and cause a tracing stop, removing
3311 trace_event
= handle_tracepoints (event_child
);
3313 if (bp_explains_trap
)
3316 debug_printf ("Hit a gdbserver breakpoint.\n");
3321 /* We have some other signal, possibly a step-over dance was in
3322 progress, and it should be cancelled too. */
3323 step_over_finished
= finish_step_over (event_child
);
3326 /* We have all the data we need. Either report the event to GDB, or
3327 resume threads and keep waiting for more. */
3329 /* If we're collecting a fast tracepoint, finish the collection and
3330 move out of the jump pad before delivering a signal. See
3331 linux_stabilize_threads. */
3334 && WSTOPSIG (w
) != SIGTRAP
3335 && supports_fast_tracepoints ()
3336 && agent_loaded_p ())
3339 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3340 "to defer or adjust it.\n",
3341 WSTOPSIG (w
), lwpid_of (current_thread
));
3343 /* Allow debugging the jump pad itself. */
3344 if (current_thread
->last_resume_kind
!= resume_step
3345 && maybe_move_out_of_jump_pad (event_child
, &w
))
3347 enqueue_one_deferred_signal (event_child
, &w
);
3350 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3351 WSTOPSIG (w
), lwpid_of (current_thread
));
3353 linux_resume_one_lwp (event_child
, 0, 0, NULL
);
3357 return ignore_event (ourstatus
);
3361 if (event_child
->collecting_fast_tracepoint
3362 != fast_tpoint_collect_result::not_collecting
)
3365 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3366 "Check if we're already there.\n",
3367 lwpid_of (current_thread
),
3368 (int) event_child
->collecting_fast_tracepoint
);
3372 event_child
->collecting_fast_tracepoint
3373 = linux_fast_tracepoint_collecting (event_child
, NULL
);
3375 if (event_child
->collecting_fast_tracepoint
3376 != fast_tpoint_collect_result::before_insn
)
3378 /* No longer need this breakpoint. */
3379 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
3382 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3383 "stopping all threads momentarily.\n");
3385 /* Other running threads could hit this breakpoint.
3386 We don't handle moribund locations like GDB does,
3387 instead we always pause all threads when removing
3388 breakpoints, so that any step-over or
3389 decr_pc_after_break adjustment is always taken
3390 care of while the breakpoint is still
3392 stop_all_lwps (1, event_child
);
3394 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
3395 event_child
->exit_jump_pad_bkpt
= NULL
;
3397 unstop_all_lwps (1, event_child
);
3399 gdb_assert (event_child
->suspended
>= 0);
3403 if (event_child
->collecting_fast_tracepoint
3404 == fast_tpoint_collect_result::not_collecting
)
3407 debug_printf ("fast tracepoint finished "
3408 "collecting successfully.\n");
3410 /* We may have a deferred signal to report. */
3411 if (dequeue_one_deferred_signal (event_child
, &w
))
3414 debug_printf ("dequeued one signal.\n");
3419 debug_printf ("no deferred signals.\n");
3421 if (stabilizing_threads
)
3423 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3424 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3428 debug_printf ("linux_wait_1 ret = %s, stopped "
3429 "while stabilizing threads\n",
3430 target_pid_to_str (ptid_of (current_thread
)));
3434 return ptid_of (current_thread
);
3440 /* Check whether GDB would be interested in this event. */
3442 /* Check if GDB is interested in this syscall. */
3444 && WSTOPSIG (w
) == SYSCALL_SIGTRAP
3445 && !gdb_catch_this_syscall_p (event_child
))
3449 debug_printf ("Ignored syscall for LWP %ld.\n",
3450 lwpid_of (current_thread
));
3453 linux_resume_one_lwp (event_child
, event_child
->stepping
,
3458 return ignore_event (ourstatus
);
3461 /* If GDB is not interested in this signal, don't stop other
3462 threads, and don't report it to GDB. Just resume the inferior
3463 right away. We do this for threading-related signals as well as
3464 any that GDB specifically requested we ignore. But never ignore
3465 SIGSTOP if we sent it ourselves, and do not ignore signals when
3466 stepping - they may require special handling to skip the signal
3467 handler. Also never ignore signals that could be caused by a
3470 && current_thread
->last_resume_kind
!= resume_step
3472 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3473 (current_process ()->priv
->thread_db
!= NULL
3474 && (WSTOPSIG (w
) == __SIGRTMIN
3475 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
3478 (cs
.pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
3479 && !(WSTOPSIG (w
) == SIGSTOP
3480 && current_thread
->last_resume_kind
== resume_stop
)
3481 && !linux_wstatus_maybe_breakpoint (w
))))
3483 siginfo_t info
, *info_p
;
3486 debug_printf ("Ignored signal %d for LWP %ld.\n",
3487 WSTOPSIG (w
), lwpid_of (current_thread
));
3489 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
3490 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
3495 if (step_over_finished
)
3497 /* We cancelled this thread's step-over above. We still
3498 need to unsuspend all other LWPs, and set them back
3499 running again while the signal handler runs. */
3500 unsuspend_all_lwps (event_child
);
3502 /* Enqueue the pending signal info so that proceed_all_lwps
3504 enqueue_pending_signal (event_child
, WSTOPSIG (w
), info_p
);
3506 proceed_all_lwps ();
3510 linux_resume_one_lwp (event_child
, event_child
->stepping
,
3511 WSTOPSIG (w
), info_p
);
3517 return ignore_event (ourstatus
);
3520 /* Note that all addresses are always "out of the step range" when
3521 there's no range to begin with. */
3522 in_step_range
= lwp_in_step_range (event_child
);
3524 /* If GDB wanted this thread to single step, and the thread is out
3525 of the step range, we always want to report the SIGTRAP, and let
3526 GDB handle it. Watchpoints should always be reported. So should
3527 signals we can't explain. A SIGTRAP we can't explain could be a
3528 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3529 do, we're be able to handle GDB breakpoints on top of internal
3530 breakpoints, by handling the internal breakpoint and still
3531 reporting the event to GDB. If we don't, we're out of luck, GDB
3532 won't see the breakpoint hit. If we see a single-step event but
3533 the thread should be continuing, don't pass the trap to gdb.
3534 That indicates that we had previously finished a single-step but
3535 left the single-step pending -- see
3536 complete_ongoing_step_over. */
3537 report_to_gdb
= (!maybe_internal_trap
3538 || (current_thread
->last_resume_kind
== resume_step
3540 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3542 && !bp_explains_trap
3544 && !step_over_finished
3545 && !(current_thread
->last_resume_kind
== resume_continue
3546 && event_child
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
))
3547 || (gdb_breakpoint_here (event_child
->stop_pc
)
3548 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
3549 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
))
3550 || event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
);
3552 run_breakpoint_commands (event_child
->stop_pc
);
3554 /* We found no reason GDB would want us to stop. We either hit one
3555 of our own breakpoints, or finished an internal step GDB
3556 shouldn't know about. */
3561 if (bp_explains_trap
)
3562 debug_printf ("Hit a gdbserver breakpoint.\n");
3563 if (step_over_finished
)
3564 debug_printf ("Step-over finished.\n");
3566 debug_printf ("Tracepoint event.\n");
3567 if (lwp_in_step_range (event_child
))
3568 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3569 paddress (event_child
->stop_pc
),
3570 paddress (event_child
->step_range_start
),
3571 paddress (event_child
->step_range_end
));
3574 /* We're not reporting this breakpoint to GDB, so apply the
3575 decr_pc_after_break adjustment to the inferior's regcache
3578 if (the_low_target
.set_pc
!= NULL
)
3580 struct regcache
*regcache
3581 = get_thread_regcache (current_thread
, 1);
3582 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
3585 if (step_over_finished
)
3587 /* If we have finished stepping over a breakpoint, we've
3588 stopped and suspended all LWPs momentarily except the
3589 stepping one. This is where we resume them all again.
3590 We're going to keep waiting, so use proceed, which
3591 handles stepping over the next breakpoint. */
3592 unsuspend_all_lwps (event_child
);
3596 /* Remove the single-step breakpoints if any. Note that
3597 there isn't single-step breakpoint if we finished stepping
3599 if (can_software_single_step ()
3600 && has_single_step_breakpoints (current_thread
))
3602 stop_all_lwps (0, event_child
);
3603 delete_single_step_breakpoints (current_thread
);
3604 unstop_all_lwps (0, event_child
);
3609 debug_printf ("proceeding all threads.\n");
3610 proceed_all_lwps ();
3615 return ignore_event (ourstatus
);
3620 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3623 = target_waitstatus_to_string (&event_child
->waitstatus
);
3625 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3626 lwpid_of (get_lwp_thread (event_child
)), str
.c_str ());
3628 if (current_thread
->last_resume_kind
== resume_step
)
3630 if (event_child
->step_range_start
== event_child
->step_range_end
)
3631 debug_printf ("GDB wanted to single-step, reporting event.\n");
3632 else if (!lwp_in_step_range (event_child
))
3633 debug_printf ("Out of step range, reporting event.\n");
3635 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3636 debug_printf ("Stopped by watchpoint.\n");
3637 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3638 debug_printf ("Stopped by GDB breakpoint.\n");
3640 debug_printf ("Hit a non-gdbserver trap event.\n");
3643 /* Alright, we're going to report a stop. */
3645 /* Remove single-step breakpoints. */
3646 if (can_software_single_step ())
3648 /* Remove single-step breakpoints or not. If it is true, stop all
3649 lwps, so that other threads won't hit the breakpoint in the
3651 int remove_single_step_breakpoints_p
= 0;
3655 remove_single_step_breakpoints_p
3656 = has_single_step_breakpoints (current_thread
);
3660 /* In all-stop, a stop reply cancels all previous resume
3661 requests. Delete all single-step breakpoints. */
3663 find_thread ([&] (thread_info
*thread
) {
3664 if (has_single_step_breakpoints (thread
))
3666 remove_single_step_breakpoints_p
= 1;
3674 if (remove_single_step_breakpoints_p
)
3676 /* If we remove single-step breakpoints from memory, stop all lwps,
3677 so that other threads won't hit the breakpoint in the staled
3679 stop_all_lwps (0, event_child
);
3683 gdb_assert (has_single_step_breakpoints (current_thread
));
3684 delete_single_step_breakpoints (current_thread
);
3688 for_each_thread ([] (thread_info
*thread
){
3689 if (has_single_step_breakpoints (thread
))
3690 delete_single_step_breakpoints (thread
);
3694 unstop_all_lwps (0, event_child
);
3698 if (!stabilizing_threads
)
3700 /* In all-stop, stop all threads. */
3702 stop_all_lwps (0, NULL
);
3704 if (step_over_finished
)
3708 /* If we were doing a step-over, all other threads but
3709 the stepping one had been paused in start_step_over,
3710 with their suspend counts incremented. We don't want
3711 to do a full unstop/unpause, because we're in
3712 all-stop mode (so we want threads stopped), but we
3713 still need to unsuspend the other threads, to
3714 decrement their `suspended' count back. */
3715 unsuspend_all_lwps (event_child
);
3719 /* If we just finished a step-over, then all threads had
3720 been momentarily paused. In all-stop, that's fine,
3721 we want threads stopped by now anyway. In non-stop,
3722 we need to re-resume threads that GDB wanted to be
3724 unstop_all_lwps (1, event_child
);
3728 /* If we're not waiting for a specific LWP, choose an event LWP
3729 from among those that have had events. Giving equal priority
3730 to all LWPs that have had events helps prevent
3732 if (ptid
== minus_one_ptid
)
3734 event_child
->status_pending_p
= 1;
3735 event_child
->status_pending
= w
;
3737 select_event_lwp (&event_child
);
3739 /* current_thread and event_child must stay in sync. */
3740 current_thread
= get_lwp_thread (event_child
);
3742 event_child
->status_pending_p
= 0;
3743 w
= event_child
->status_pending
;
3747 /* Stabilize threads (move out of jump pads). */
3749 stabilize_threads ();
3753 /* If we just finished a step-over, then all threads had been
3754 momentarily paused. In all-stop, that's fine, we want
3755 threads stopped by now anyway. In non-stop, we need to
3756 re-resume threads that GDB wanted to be running. */
3757 if (step_over_finished
)
3758 unstop_all_lwps (1, event_child
);
3761 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3763 /* If the reported event is an exit, fork, vfork or exec, let
3766 /* Break the unreported fork relationship chain. */
3767 if (event_child
->waitstatus
.kind
== TARGET_WAITKIND_FORKED
3768 || event_child
->waitstatus
.kind
== TARGET_WAITKIND_VFORKED
)
3770 event_child
->fork_relative
->fork_relative
= NULL
;
3771 event_child
->fork_relative
= NULL
;
3774 *ourstatus
= event_child
->waitstatus
;
3775 /* Clear the event lwp's waitstatus since we handled it already. */
3776 event_child
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3779 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3781 /* Now that we've selected our final event LWP, un-adjust its PC if
3782 it was a software breakpoint, and the client doesn't know we can
3783 adjust the breakpoint ourselves. */
3784 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3785 && !cs
.swbreak_feature
)
3787 int decr_pc
= the_low_target
.decr_pc_after_break
;
3791 struct regcache
*regcache
3792 = get_thread_regcache (current_thread
, 1);
3793 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
+ decr_pc
);
3797 if (WSTOPSIG (w
) == SYSCALL_SIGTRAP
)
3799 get_syscall_trapinfo (event_child
,
3800 &ourstatus
->value
.syscall_number
);
3801 ourstatus
->kind
= event_child
->syscall_state
;
3803 else if (current_thread
->last_resume_kind
== resume_stop
3804 && WSTOPSIG (w
) == SIGSTOP
)
3806 /* A thread that has been requested to stop by GDB with vCont;t,
3807 and it stopped cleanly, so report as SIG0. The use of
3808 SIGSTOP is an implementation detail. */
3809 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3811 else if (current_thread
->last_resume_kind
== resume_stop
3812 && WSTOPSIG (w
) != SIGSTOP
)
3814 /* A thread that has been requested to stop by GDB with vCont;t,
3815 but, it stopped for other reasons. */
3816 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3818 else if (ourstatus
->kind
== TARGET_WAITKIND_STOPPED
)
3820 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3823 gdb_assert (step_over_bkpt
== null_ptid
);
3827 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3828 target_pid_to_str (ptid_of (current_thread
)),
3829 ourstatus
->kind
, ourstatus
->value
.sig
);
3833 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3834 return filter_exit_event (event_child
, ourstatus
);
3836 return ptid_of (current_thread
);
3839 /* Get rid of any pending event in the pipe. */
3841 async_file_flush (void)
3847 ret
= read (linux_event_pipe
[0], &buf
, 1);
3848 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3851 /* Put something in the pipe, so the event loop wakes up. */
3853 async_file_mark (void)
3857 async_file_flush ();
3860 ret
= write (linux_event_pipe
[1], "+", 1);
3861 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3863 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3864 be awakened anyway. */
3868 linux_wait (ptid_t ptid
,
3869 struct target_waitstatus
*ourstatus
, int target_options
)
3873 /* Flush the async file first. */
3874 if (target_is_async_p ())
3875 async_file_flush ();
3879 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
3881 while ((target_options
& TARGET_WNOHANG
) == 0
3882 && event_ptid
== null_ptid
3883 && ourstatus
->kind
== TARGET_WAITKIND_IGNORE
);
3885 /* If at least one stop was reported, there may be more. A single
3886 SIGCHLD can signal more than one child stop. */
3887 if (target_is_async_p ()
3888 && (target_options
& TARGET_WNOHANG
) != 0
3889 && event_ptid
!= null_ptid
)
3895 /* Send a signal to an LWP. */
3898 kill_lwp (unsigned long lwpid
, int signo
)
3903 ret
= syscall (__NR_tkill
, lwpid
, signo
);
3904 if (errno
== ENOSYS
)
3906 /* If tkill fails, then we are not using nptl threads, a
3907 configuration we no longer support. */
3908 perror_with_name (("tkill"));
/* Public entry point for stopping a single LWP: queue a SIGSTOP for
   LWP unless one is already pending (see send_sigstop).  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3920 send_sigstop (struct lwp_info
*lwp
)
3924 pid
= lwpid_of (get_lwp_thread (lwp
));
3926 /* If we already have a pending stop signal for this process, don't
3928 if (lwp
->stop_expected
)
3931 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3937 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3939 lwp
->stop_expected
= 1;
3940 kill_lwp (pid
, SIGSTOP
);
3944 send_sigstop (thread_info
*thread
, lwp_info
*except
)
3946 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3948 /* Ignore EXCEPT. */
3958 /* Increment the suspend count of an LWP, and stop it, if not stopped
3961 suspend_and_send_sigstop (thread_info
*thread
, lwp_info
*except
)
3963 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3965 /* Ignore EXCEPT. */
3969 lwp_suspended_inc (lwp
);
3971 send_sigstop (thread
, except
);
3975 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3977 /* Store the exit status for later. */
3978 lwp
->status_pending_p
= 1;
3979 lwp
->status_pending
= wstat
;
3981 /* Store in waitstatus as well, as there's nothing else to process
3983 if (WIFEXITED (wstat
))
3985 lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXITED
;
3986 lwp
->waitstatus
.value
.integer
= WEXITSTATUS (wstat
);
3988 else if (WIFSIGNALED (wstat
))
3990 lwp
->waitstatus
.kind
= TARGET_WAITKIND_SIGNALLED
;
3991 lwp
->waitstatus
.value
.sig
= gdb_signal_from_host (WTERMSIG (wstat
));
3994 /* Prevent trying to stop it. */
3997 /* No further stops are expected from a dead lwp. */
3998 lwp
->stop_expected
= 0;
4001 /* Return true if LWP has exited already, and has a pending exit event
4002 to report to GDB. */
4005 lwp_is_marked_dead (struct lwp_info
*lwp
)
4007 return (lwp
->status_pending_p
4008 && (WIFEXITED (lwp
->status_pending
)
4009 || WIFSIGNALED (lwp
->status_pending
)));
/* Wait for all children to stop for the SIGSTOPs we just queued.
   Events are pulled and left pending on each LWP; on return there are
   no unwaited-for children left.  May clear CURRENT_THREAD if the
   previously-selected thread dies while waiting.  */

static void
wait_for_sigstop (void)
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember the selected thread; the wait below may reap it.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
				       &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || linux_thread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      current_thread = NULL;
    }
}
4052 /* Returns true if THREAD is stopped in a jump pad, and we can't
4053 move it out, because we need to report the stop event to GDB. For
4054 example, if the user puts a breakpoint in the jump pad, it's
4055 because she wants to debug it. */
4058 stuck_in_jump_pad_callback (thread_info
*thread
)
4060 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4062 if (lwp
->suspended
!= 0)
4064 internal_error (__FILE__
, __LINE__
,
4065 "LWP %ld is suspended, suspended=%d\n",
4066 lwpid_of (thread
), lwp
->suspended
);
4068 gdb_assert (lwp
->stopped
);
4070 /* Allow debugging the jump pad, gdb_collect, etc.. */
4071 return (supports_fast_tracepoints ()
4072 && agent_loaded_p ()
4073 && (gdb_breakpoint_here (lwp
->stop_pc
)
4074 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
4075 || thread
->last_resume_kind
== resume_step
)
4076 && (linux_fast_tracepoint_collecting (lwp
, NULL
)
4077 != fast_tpoint_collect_result::not_collecting
));
/* for_each_thread callback run while all LWPs are stopped: if
   THREAD's LWP is inside a fast-tracepoint jump pad and its stop need
   not be reported to GDB, defer any pending signal and resume it so
   it can finish collecting and exit the pad; otherwise suspend it in
   place.  Temporarily switches CURRENT_THREAD.  */

static void
move_out_of_jump_pad_callback (thread_info *thread)
{
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* Defer the pending signal; it will be re-delivered once
	     the LWP is out of the jump pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}
4131 lwp_running (thread_info
*thread
)
4133 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4135 if (lwp_is_marked_dead (lwp
))
4138 return !lwp
->stopped
;
/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
   If SUSPEND, then also increase the suspend count of every LWP,
   except EXCEPT.  Blocks until every LWP has reported its stop.  */

static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  /* Flag globally that a stop-all is in progress while the SIGSTOPs
     are pending.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	 send_sigstop (thread, except);
      });

  /* Wait until every LWP has actually stopped.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}
4187 /* Enqueue one signal in the chain of signals which need to be
4188 delivered to this process on next resume. */
4191 enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
)
4193 struct pending_signals
*p_sig
= XNEW (struct pending_signals
);
4195 p_sig
->prev
= lwp
->pending_signals
;
4196 p_sig
->signal
= signal
;
4198 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
4200 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
4201 lwp
->pending_signals
= p_sig
;
/* Install breakpoints for software single stepping: ask the low
   target for every possible next PC of LWP and plant a single-step
   breakpoint at each.  */

static void
install_software_single_step_breakpoints (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  /* Restores current_thread on scope exit, even on error paths.  */
  scoped_restore save_current_thread = make_scoped_restore (&current_thread);

  current_thread = thread;
  std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
4221 /* Single step via hardware or software single step.
4222 Return 1 if hardware single stepping, 0 if software single stepping
4223 or can't single step. */
4226 single_step (struct lwp_info
* lwp
)
4230 if (can_hardware_single_step ())
4234 else if (can_software_single_step ())
4236 install_software_single_step_breakpoints (lwp
);
4242 debug_printf ("stepping is not implemented on this target");
4248 /* The signal can be delivered to the inferior if we are not trying to
4249 finish a fast tracepoint collect. Since signal can be delivered in
4250 the step-over, the program may go to signal handler and trap again
4251 after return from the signal handler. We can live with the spurious
4255 lwp_signal_can_be_delivered (struct lwp_info
*lwp
)
4257 return (lwp
->collecting_fast_tracepoint
4258 == fast_tpoint_collect_result::not_collecting
);
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, give it that signal.  Throws on ptrace failure
   (see linux_resume_one_lwp for the non-throwing wrapper).  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lwp,
			    int step, int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do for an LWP that is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from, when registers are
     available.  */
  if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = (*the_low_target.get_pc) (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest queued signal (tail of the list) so
	 signals are delivered in arrival order.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4470 /* Called when we try to resume a stopped LWP and that errors out. If
4471 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4472 or about to become), discard the error, clear any pending status
4473 the LWP may have, and return true (we'll collect the exit status
4474 soon enough). Otherwise, return false. */
4477 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
4479 struct thread_info
*thread
= get_lwp_thread (lp
);
4481 /* If we get an error after resuming the LWP successfully, we'd
4482 confuse !T state for the LWP being gone. */
4483 gdb_assert (lp
->stopped
);
4485 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4486 because even if ptrace failed with ESRCH, the tracee may be "not
4487 yet fully dead", but already refusing ptrace requests. In that
4488 case the tracee has 'R (Running)' state for a little bit
4489 (observed in Linux 3.18). See also the note on ESRCH in the
4490 ptrace(2) man page. Instead, check whether the LWP has any state
4491 other than ptrace-stopped. */
4493 /* Don't assume anything if /proc/PID/status can't be read. */
4494 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
4496 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4497 lp
->status_pending_p
= 0;
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  Any other error is
   re-thrown to the caller.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  try
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Swallow the error only when the LWP turned out to be gone;
	 otherwise propagate it.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }
}
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* Ignore redundant stop requests for an already-stopping
	     thread.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_resume_kind
			       == resume_step)
			      ? "stepping"
			      : "continuing",
			      lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork children that GDB
	     does not yet know are new fork children.  */
	  if (lwp->fork_relative != NULL)
	    {
	      struct lwp_info *rel = lwp->fork_relative;

	      if (rel->status_pending_p
		  && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
		      || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
		{
		  if (debug_threads)
		    debug_printf ("not resuming LWP %ld: has queued stop reply\n",
				  lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      if (debug_threads)
		debug_printf ("not resuming LWP %ld: has queued stop reply\n",
			      lwpid_of (thread));
	      continue;
	    }

	  /* Found the applicable request; record it on the LWP.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4633 /* find_thread callback for linux_resume. Return true if this lwp has an
4634 interesting status pending. */
4637 resume_status_pending_p (thread_info
*thread
)
4639 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4641 /* LWPs which will not be resumed are not interesting, because
4642 we might not wait for them next time through linux_wait. */
4643 if (lwp
->resume
== NULL
)
4646 return thread_still_has_status_pending_p (thread
);
/* Return true if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Temporarily switches CURRENT_THREAD for the
   breakpoint queries.  */

static bool
need_step_over_p (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (can_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));
      return false;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's
	 side and it evaluate to false, step over this breakpoint as
	 well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped by either hardware or software while the breakpoint
   is temporarily uninserted from the inferior.  When the single-step
   finishes, we reinsert the breakpoint, and let all threads that are
   supposed to be running, run again.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Stop and suspend everything else while we single-step LWP.  */
  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to reinsert the breakpoint, then lift it (and any
     fast tracepoint jump) out of the way.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
  return 1;
}
4843 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4844 start_step_over, if still there, and delete any single-step
4845 breakpoints we've set, on non hardware single-step targets. */
4848 finish_step_over (struct lwp_info
*lwp
)
4850 if (lwp
->bp_reinsert
!= 0)
4852 struct thread_info
*saved_thread
= current_thread
;
4855 debug_printf ("Finished step over.\n");
4857 current_thread
= get_lwp_thread (lwp
);
4859 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4860 may be no breakpoint to reinsert there by now. */
4861 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4862 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4864 lwp
->bp_reinsert
= 0;
4866 /* Delete any single-step breakpoints. No longer needed. We
4867 don't have to worry about other threads hitting this trap,
4868 and later not being able to explain it, because we were
4869 stepping over a breakpoint, and we hold all threads but
4870 LWP stopped while doing that. */
4871 if (!can_hardware_single_step ())
4873 gdb_assert (has_single_step_breakpoints (current_thread
));
4874 delete_single_step_breakpoints (current_thread
);
4877 step_over_bkpt
= null_ptid
;
4878 current_thread
= saved_thread
;
4885 /* If there's a step over in progress, wait until all threads stop
4886 (that is, until the stepping thread finishes its step), and
4887 unsuspend all lwps. The stepping thread ends with its status
4888 pending, which is processed later when we get back to processing
4892 complete_ongoing_step_over (void)
4894 if (step_over_bkpt
!= null_ptid
)
4896 struct lwp_info
*lwp
;
4901 debug_printf ("detach: step over in progress, finish it first\n");
4903 /* Passing NULL_PTID as filter indicates we want all events to
4904 be left pending. Eventually this returns when there are no
4905 unwaited-for children left. */
4906 ret
= linux_wait_for_event_filtered (minus_one_ptid
, null_ptid
,
4908 gdb_assert (ret
== -1);
4910 lwp
= find_lwp_pid (step_over_bkpt
);
4912 finish_step_over (lwp
);
4913 step_over_bkpt
= null_ptid
;
4914 unsuspend_all_lwps (lwp
);
4918 /* This function is called once per thread. We check the thread's resume
4919 request, which will tell us whether to resume, step, or leave the thread
4920 stopped; and what signal, if any, it should be sent.
4922 For threads which we aren't explicitly told otherwise, we preserve
4923 the stepping flag; this is used for stepping over gdbserver-placed
4926 If pending_flags was set in any thread, we queue any needed
4927 signals, since we won't actually resume. We already have a pending
4928 event to report, so we don't need to preserve any step requests;
4929 they should be re-issued if necessary. */
4932 linux_resume_one_thread (thread_info
*thread
, bool leave_all_stopped
)
4934 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4937 if (lwp
->resume
== NULL
)
4940 if (lwp
->resume
->kind
== resume_stop
)
4943 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4948 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4950 /* Stop the thread, and wait for the event asynchronously,
4951 through the event loop. */
4957 debug_printf ("already stopped LWP %ld\n",
4960 /* The LWP may have been stopped in an internal event that
4961 was not meant to be notified back to GDB (e.g., gdbserver
4962 breakpoint), so we should be reporting a stop event in
4965 /* If the thread already has a pending SIGSTOP, this is a
4966 no-op. Otherwise, something later will presumably resume
4967 the thread and this will cause it to cancel any pending
4968 operation, due to last_resume_kind == resume_stop. If
4969 the thread already has a pending status to report, we
4970 will still report it the next time we wait - see
4971 status_pending_p_callback. */
4973 /* If we already have a pending signal to report, then
4974 there's no need to queue a SIGSTOP, as this means we're
4975 midway through moving the LWP out of the jumppad, and we
4976 will report the pending signal as soon as that is
4978 if (lwp
->pending_signals_to_report
== NULL
)
4982 /* For stop requests, we're done. */
4984 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4988 /* If this thread which is about to be resumed has a pending status,
4989 then don't resume it - we can just report the pending status.
4990 Likewise if it is suspended, because e.g., another thread is
4991 stepping past a breakpoint. Make sure to queue any signals that
4992 would otherwise be sent. In all-stop mode, we do this decision
4993 based on if *any* thread has a pending status. If there's a
4994 thread that needs the step-over-breakpoint dance, then don't
4995 resume any other thread but that particular one. */
4996 leave_pending
= (lwp
->suspended
4997 || lwp
->status_pending_p
4998 || leave_all_stopped
);
5000 /* If we have a new signal, enqueue the signal. */
5001 if (lwp
->resume
->sig
!= 0)
5003 siginfo_t info
, *info_p
;
5005 /* If this is the same signal we were previously stopped by,
5006 make sure to queue its siginfo. */
5007 if (WIFSTOPPED (lwp
->last_status
)
5008 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
5009 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
),
5010 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
5015 enqueue_pending_signal (lwp
, lwp
->resume
->sig
, info_p
);
5021 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
5023 proceed_one_lwp (thread
, NULL
);
5028 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
5031 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
5036 linux_resume (struct thread_resume
*resume_info
, size_t n
)
5038 struct thread_info
*need_step_over
= NULL
;
5043 debug_printf ("linux_resume:\n");
5046 for_each_thread ([&] (thread_info
*thread
)
5048 linux_set_resume_request (thread
, resume_info
, n
);
5051 /* If there is a thread which would otherwise be resumed, which has
5052 a pending status, then don't resume any threads - we can just
5053 report the pending status. Make sure to queue any signals that
5054 would otherwise be sent. In non-stop mode, we'll apply this
5055 logic to each thread individually. We consume all pending events
5056 before considering to start a step-over (in all-stop). */
5057 bool any_pending
= false;
5059 any_pending
= find_thread (resume_status_pending_p
) != NULL
;
5061 /* If there is a thread which would otherwise be resumed, which is
5062 stopped at a breakpoint that needs stepping over, then don't
5063 resume any threads - have it step over the breakpoint with all
5064 other threads stopped, then resume all threads again. Make sure
5065 to queue any signals that would otherwise be delivered or
5067 if (!any_pending
&& supports_breakpoints ())
5068 need_step_over
= find_thread (need_step_over_p
);
5070 bool leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
5074 if (need_step_over
!= NULL
)
5075 debug_printf ("Not resuming all, need step over\n");
5076 else if (any_pending
)
5077 debug_printf ("Not resuming, all-stop and found "
5078 "an LWP with pending status\n");
5080 debug_printf ("Resuming, no pending status or step over needed\n");
5083 /* Even if we're leaving threads stopped, queue all signals we'd
5084 otherwise deliver. */
5085 for_each_thread ([&] (thread_info
*thread
)
5087 linux_resume_one_thread (thread
, leave_all_stopped
);
5091 start_step_over (get_thread_lwp (need_step_over
));
5095 debug_printf ("linux_resume done\n");
5099 /* We may have events that were pending that can/should be sent to
5100 the client now. Trigger a linux_wait call. */
5101 if (target_is_async_p ())
5105 /* This function is called once per thread. We check the thread's
5106 last resume request, which will tell us whether to resume, step, or
5107 leave the thread stopped. Any signal the client requested to be
5108 delivered has already been enqueued at this point.
5110 If any thread that GDB wants running is stopped at an internal
5111 breakpoint that needs stepping over, we start a step-over operation
5112 on that particular thread, and leave all others stopped. */
5115 proceed_one_lwp (thread_info
*thread
, lwp_info
*except
)
5117 struct lwp_info
*lwp
= get_thread_lwp (thread
);
5124 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
5129 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
5133 if (thread
->last_resume_kind
== resume_stop
5134 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
5137 debug_printf (" client wants LWP to remain %ld stopped\n",
5142 if (lwp
->status_pending_p
)
5145 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5150 gdb_assert (lwp
->suspended
>= 0);
5155 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
5159 if (thread
->last_resume_kind
== resume_stop
5160 && lwp
->pending_signals_to_report
== NULL
5161 && (lwp
->collecting_fast_tracepoint
5162 == fast_tpoint_collect_result::not_collecting
))
5164 /* We haven't reported this LWP as stopped yet (otherwise, the
5165 last_status.kind check above would catch it, and we wouldn't
5166 reach here. This LWP may have been momentarily paused by a
5167 stop_all_lwps call while handling for example, another LWP's
5168 step-over. In that case, the pending expected SIGSTOP signal
5169 that was queued at vCont;t handling time will have already
5170 been consumed by wait_for_sigstop, and so we need to requeue
5171 another one here. Note that if the LWP already has a SIGSTOP
5172 pending, this is a no-op. */
5175 debug_printf ("Client wants LWP %ld to stop. "
5176 "Making sure it has a SIGSTOP pending\n",
5182 if (thread
->last_resume_kind
== resume_step
)
5185 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5188 /* If resume_step is requested by GDB, install single-step
5189 breakpoints when the thread is about to be actually resumed if
5190 the single-step breakpoints weren't removed. */
5191 if (can_software_single_step ()
5192 && !has_single_step_breakpoints (thread
))
5193 install_software_single_step_breakpoints (lwp
);
5195 step
= maybe_hw_step (thread
);
5197 else if (lwp
->bp_reinsert
!= 0)
5200 debug_printf (" stepping LWP %ld, reinsert set\n",
5203 step
= maybe_hw_step (thread
);
5208 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
5212 unsuspend_and_proceed_one_lwp (thread_info
*thread
, lwp_info
*except
)
5214 struct lwp_info
*lwp
= get_thread_lwp (thread
);
5219 lwp_suspended_decr (lwp
);
5221 proceed_one_lwp (thread
, except
);
5224 /* When we finish a step-over, set threads running again. If there's
5225 another thread that may need a step-over, now's the time to start
5226 it. Eventually, we'll move all threads past their breakpoints. */
5229 proceed_all_lwps (void)
5231 struct thread_info
*need_step_over
;
5233 /* If there is a thread which would otherwise be resumed, which is
5234 stopped at a breakpoint that needs stepping over, then don't
5235 resume any threads - have it step over the breakpoint with all
5236 other threads stopped, then resume all threads again. */
5238 if (supports_breakpoints ())
5240 need_step_over
= find_thread (need_step_over_p
);
5242 if (need_step_over
!= NULL
)
5245 debug_printf ("proceed_all_lwps: found "
5246 "thread %ld needing a step-over\n",
5247 lwpid_of (need_step_over
));
5249 start_step_over (get_thread_lwp (need_step_over
));
5255 debug_printf ("Proceeding, no step-over needed\n");
5257 for_each_thread ([] (thread_info
*thread
)
5259 proceed_one_lwp (thread
, NULL
);
5263 /* Stopped LWPs that the client wanted to be running, that don't have
5264 pending statuses, are set to run again, except for EXCEPT, if not
5265 NULL. This undoes a stop_all_lwps call. */
5268 unstop_all_lwps (int unsuspend
, struct lwp_info
*except
)
5274 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5275 lwpid_of (get_lwp_thread (except
)));
5277 debug_printf ("unstopping all lwps\n");
5281 for_each_thread ([&] (thread_info
*thread
)
5283 unsuspend_and_proceed_one_lwp (thread
, except
);
5286 for_each_thread ([&] (thread_info
*thread
)
5288 proceed_one_lwp (thread
, except
);
5293 debug_printf ("unstop_all_lwps done\n");
5299 #ifdef HAVE_LINUX_REGSETS
5301 #define use_linux_regsets 1
5303 /* Returns true if REGSET has been disabled. */
5306 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
5308 return (info
->disabled_regsets
!= NULL
5309 && info
->disabled_regsets
[regset
- info
->regsets
]);
5312 /* Disable REGSET. */
5315 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
5319 dr_offset
= regset
- info
->regsets
;
5320 if (info
->disabled_regsets
== NULL
)
5321 info
->disabled_regsets
= (char *) xcalloc (1, info
->num_regsets
);
5322 info
->disabled_regsets
[dr_offset
] = 1;
5326 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
5327 struct regcache
*regcache
)
5329 struct regset_info
*regset
;
5330 int saw_general_regs
= 0;
5334 pid
= lwpid_of (current_thread
);
5335 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5340 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
5343 buf
= xmalloc (regset
->size
);
5345 nt_type
= regset
->nt_type
;
5349 iov
.iov_len
= regset
->size
;
5350 data
= (void *) &iov
;
5356 res
= ptrace (regset
->get_request
, pid
,
5357 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5359 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5364 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5366 /* If we get EIO on a regset, or an EINVAL and the regset is
5367 optional, do not try it again for this process mode. */
5368 disable_regset (regsets_info
, regset
);
5370 else if (errno
== ENODATA
)
5372 /* ENODATA may be returned if the regset is currently
5373 not "active". This can happen in normal operation,
5374 so suppress the warning in this case. */
5376 else if (errno
== ESRCH
)
5378 /* At this point, ESRCH should mean the process is
5379 already gone, in which case we simply ignore attempts
5380 to read its registers. */
5385 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5392 if (regset
->type
== GENERAL_REGS
)
5393 saw_general_regs
= 1;
5394 regset
->store_function (regcache
, buf
);
5398 if (saw_general_regs
)
5405 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
5406 struct regcache
*regcache
)
5408 struct regset_info
*regset
;
5409 int saw_general_regs
= 0;
5413 pid
= lwpid_of (current_thread
);
5414 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5419 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
5420 || regset
->fill_function
== NULL
)
5423 buf
= xmalloc (regset
->size
);
5425 /* First fill the buffer with the current register set contents,
5426 in case there are any items in the kernel's regset that are
5427 not in gdbserver's regcache. */
5429 nt_type
= regset
->nt_type
;
5433 iov
.iov_len
= regset
->size
;
5434 data
= (void *) &iov
;
5440 res
= ptrace (regset
->get_request
, pid
,
5441 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5443 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5448 /* Then overlay our cached registers on that. */
5449 regset
->fill_function (regcache
, buf
);
5451 /* Only now do we write the register set. */
5453 res
= ptrace (regset
->set_request
, pid
,
5454 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5456 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
5463 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5465 /* If we get EIO on a regset, or an EINVAL and the regset is
5466 optional, do not try it again for this process mode. */
5467 disable_regset (regsets_info
, regset
);
5469 else if (errno
== ESRCH
)
5471 /* At this point, ESRCH should mean the process is
5472 already gone, in which case we simply ignore attempts
5473 to change its registers. See also the related
5474 comment in linux_resume_one_lwp. */
5480 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5483 else if (regset
->type
== GENERAL_REGS
)
5484 saw_general_regs
= 1;
5487 if (saw_general_regs
)
5493 #else /* !HAVE_LINUX_REGSETS */
5495 #define use_linux_regsets 0
5496 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5497 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5501 /* Return 1 if register REGNO is supported by one of the regset ptrace
5502 calls or 0 if it has to be transferred individually. */
5505 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
5507 unsigned char mask
= 1 << (regno
% 8);
5508 size_t index
= regno
/ 8;
5510 return (use_linux_regsets
5511 && (regs_info
->regset_bitmap
== NULL
5512 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
5515 #ifdef HAVE_LINUX_USRREGS
5518 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
5522 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
5523 error ("Invalid register number %d.", regnum
);
5525 addr
= usrregs
->regmap
[regnum
];
5530 /* Fetch one register. */
5532 fetch_register (const struct usrregs_info
*usrregs
,
5533 struct regcache
*regcache
, int regno
)
5540 if (regno
>= usrregs
->num_regs
)
5542 if ((*the_low_target
.cannot_fetch_register
) (regno
))
5545 regaddr
= register_addr (usrregs
, regno
);
5549 size
= ((register_size (regcache
->tdesc
, regno
)
5550 + sizeof (PTRACE_XFER_TYPE
) - 1)
5551 & -sizeof (PTRACE_XFER_TYPE
));
5552 buf
= (char *) alloca (size
);
5554 pid
= lwpid_of (current_thread
);
5555 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5558 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
5559 ptrace (PTRACE_PEEKUSER
, pid
,
5560 /* Coerce to a uintptr_t first to avoid potential gcc warning
5561 of coercing an 8 byte integer to a 4 byte pointer. */
5562 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
5563 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5566 /* Mark register REGNO unavailable. */
5567 supply_register (regcache
, regno
, NULL
);
5572 if (the_low_target
.supply_ptrace_register
)
5573 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
5575 supply_register (regcache
, regno
, buf
);
5578 /* Store one register. */
5580 store_register (const struct usrregs_info
*usrregs
,
5581 struct regcache
*regcache
, int regno
)
5588 if (regno
>= usrregs
->num_regs
)
5590 if ((*the_low_target
.cannot_store_register
) (regno
))
5593 regaddr
= register_addr (usrregs
, regno
);
5597 size
= ((register_size (regcache
->tdesc
, regno
)
5598 + sizeof (PTRACE_XFER_TYPE
) - 1)
5599 & -sizeof (PTRACE_XFER_TYPE
));
5600 buf
= (char *) alloca (size
);
5601 memset (buf
, 0, size
);
5603 if (the_low_target
.collect_ptrace_register
)
5604 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
5606 collect_register (regcache
, regno
, buf
);
5608 pid
= lwpid_of (current_thread
);
5609 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5612 ptrace (PTRACE_POKEUSER
, pid
,
5613 /* Coerce to a uintptr_t first to avoid potential gcc warning
5614 about coercing an 8 byte integer to a 4 byte pointer. */
5615 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
5616 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
5619 /* At this point, ESRCH should mean the process is
5620 already gone, in which case we simply ignore attempts
5621 to change its registers. See also the related
5622 comment in linux_resume_one_lwp. */
5626 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
5627 error ("writing register %d: %s", regno
, strerror (errno
));
5629 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5633 /* Fetch all registers, or just one, from the child process.
5634 If REGNO is -1, do this for all registers, skipping any that are
5635 assumed to have been retrieved by regsets_fetch_inferior_registers,
5636 unless ALL is non-zero.
5637 Otherwise, REGNO specifies which register (so we can save time). */
5639 usr_fetch_inferior_registers (const struct regs_info
*regs_info
,
5640 struct regcache
*regcache
, int regno
, int all
)
5642 struct usrregs_info
*usr
= regs_info
->usrregs
;
5646 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5647 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5648 fetch_register (usr
, regcache
, regno
);
5651 fetch_register (usr
, regcache
, regno
);
5654 /* Store our register values back into the inferior.
5655 If REGNO is -1, do this for all registers, skipping any that are
5656 assumed to have been saved by regsets_store_inferior_registers,
5657 unless ALL is non-zero.
5658 Otherwise, REGNO specifies which register (so we can save time). */
5660 usr_store_inferior_registers (const struct regs_info
*regs_info
,
5661 struct regcache
*regcache
, int regno
, int all
)
5663 struct usrregs_info
*usr
= regs_info
->usrregs
;
5667 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5668 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5669 store_register (usr
, regcache
, regno
);
5672 store_register (usr
, regcache
, regno
);
5675 #else /* !HAVE_LINUX_USRREGS */
5677 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5678 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5684 linux_fetch_registers (struct regcache
*regcache
, int regno
)
5688 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
5692 if (the_low_target
.fetch_register
!= NULL
5693 && regs_info
->usrregs
!= NULL
)
5694 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
5695 (*the_low_target
.fetch_register
) (regcache
, regno
);
5697 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
5698 if (regs_info
->usrregs
!= NULL
)
5699 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
5703 if (the_low_target
.fetch_register
!= NULL
5704 && (*the_low_target
.fetch_register
) (regcache
, regno
))
5707 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5709 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
5711 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5712 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
5717 linux_store_registers (struct regcache
*regcache
, int regno
)
5721 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
5725 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5727 if (regs_info
->usrregs
!= NULL
)
5728 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
5732 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5734 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5736 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5737 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
5742 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5743 to debugger memory starting at MYADDR. */
5746 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
5748 int pid
= lwpid_of (current_thread
);
5749 PTRACE_XFER_TYPE
*buffer
;
5757 /* Try using /proc. Don't bother for one word. */
5758 if (len
>= 3 * sizeof (long))
5762 /* We could keep this file open and cache it - possibly one per
5763 thread. That requires some juggling, but is even faster. */
5764 sprintf (filename
, "/proc/%d/mem", pid
);
5765 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
5769 /* If pread64 is available, use it. It's faster if the kernel
5770 supports it (only one syscall), and it's 64-bit safe even on
5771 32-bit platforms (for instance, SPARC debugging a SPARC64
5774 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
5777 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
5778 bytes
= read (fd
, myaddr
, len
);
5785 /* Some data was read, we'll try to get the rest with ptrace. */
5795 /* Round starting address down to longword boundary. */
5796 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5797 /* Round ending address up; get number of longwords that makes. */
5798 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5799 / sizeof (PTRACE_XFER_TYPE
));
5800 /* Allocate buffer of that many longwords. */
5801 buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5803 /* Read all the longwords */
5805 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5807 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5808 about coercing an 8 byte integer to a 4 byte pointer. */
5809 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
5810 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5811 (PTRACE_TYPE_ARG4
) 0);
5817 /* Copy appropriate bytes out of the buffer. */
5820 i
*= sizeof (PTRACE_XFER_TYPE
);
5821 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
5823 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5830 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5831 memory at MEMADDR. On failure (cannot write to the inferior)
5832 returns the value of errno. Always succeeds if LEN is zero. */
5835 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
5838 /* Round starting address down to longword boundary. */
5839 CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5840 /* Round ending address up; get number of longwords that makes. */
5842 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5843 / sizeof (PTRACE_XFER_TYPE
);
5845 /* Allocate buffer of that many longwords. */
5846 PTRACE_XFER_TYPE
*buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5848 int pid
= lwpid_of (current_thread
);
5852 /* Zero length write always succeeds. */
5858 /* Dump up to four bytes. */
5859 char str
[4 * 2 + 1];
5861 int dump
= len
< 4 ? len
: 4;
5863 for (i
= 0; i
< dump
; i
++)
5865 sprintf (p
, "%02x", myaddr
[i
]);
5870 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5871 str
, (long) memaddr
, pid
);
5874 /* Fill start and end extra bytes of buffer with existing memory data. */
5877 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5878 about coercing an 8 byte integer to a 4 byte pointer. */
5879 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
5880 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5881 (PTRACE_TYPE_ARG4
) 0);
5889 = ptrace (PTRACE_PEEKTEXT
, pid
,
5890 /* Coerce to a uintptr_t first to avoid potential gcc warning
5891 about coercing an 8 byte integer to a 4 byte pointer. */
5892 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
5893 * sizeof (PTRACE_XFER_TYPE
)),
5894 (PTRACE_TYPE_ARG4
) 0);
5899 /* Copy data to be written over corresponding part of buffer. */
5901 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5904 /* Write the entire buffer. */
5906 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5909 ptrace (PTRACE_POKETEXT
, pid
,
5910 /* Coerce to a uintptr_t first to avoid potential gcc warning
5911 about coercing an 8 byte integer to a 4 byte pointer. */
5912 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5913 (PTRACE_TYPE_ARG4
) buffer
[i
]);
/* Hook called when symbols become available; initializes libthread_db
   support for the current process if not already done.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5935 linux_request_interrupt (void)
5937 /* Send a SIGINT to the process group. This acts just like the user
5938 typed a ^C on the controlling terminal. */
5939 kill (-signal_pid
, SIGINT
);
5942 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5943 to debugger memory starting at MYADDR. */
5946 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
5948 char filename
[PATH_MAX
];
5950 int pid
= lwpid_of (current_thread
);
5952 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5954 fd
= open (filename
, O_RDONLY
);
5958 if (offset
!= (CORE_ADDR
) 0
5959 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5962 n
= read (fd
, myaddr
, len
);
5969 /* These breakpoint and watchpoint related wrapper functions simply
5970 pass on the function call if the target has registered a
5971 corresponding function. */
5974 linux_supports_z_point_type (char z_type
)
5976 return (the_low_target
.supports_z_point_type
!= NULL
5977 && the_low_target
.supports_z_point_type (z_type
));
5981 linux_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5982 int size
, struct raw_breakpoint
*bp
)
5984 if (type
== raw_bkpt_type_sw
)
5985 return insert_memory_breakpoint (bp
);
5986 else if (the_low_target
.insert_point
!= NULL
)
5987 return the_low_target
.insert_point (type
, addr
, size
, bp
);
5989 /* Unsupported (see target.h). */
5994 linux_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5995 int size
, struct raw_breakpoint
*bp
)
5997 if (type
== raw_bkpt_type_sw
)
5998 return remove_memory_breakpoint (bp
);
5999 else if (the_low_target
.remove_point
!= NULL
)
6000 return the_low_target
.remove_point (type
, addr
, size
, bp
);
6002 /* Unsupported (see target.h). */
6006 /* Implement the to_stopped_by_sw_breakpoint target_ops
6010 linux_stopped_by_sw_breakpoint (void)
6012 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
6014 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
6017 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6021 linux_supports_stopped_by_sw_breakpoint (void)
6023 return USE_SIGTRAP_SIGINFO
;
6026 /* Implement the to_stopped_by_hw_breakpoint target_ops
6030 linux_stopped_by_hw_breakpoint (void)
6032 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
6034 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
6037 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6041 linux_supports_stopped_by_hw_breakpoint (void)
6043 return USE_SIGTRAP_SIGINFO
;
6046 /* Implement the supports_hardware_single_step target_ops method. */
6049 linux_supports_hardware_single_step (void)
6051 return can_hardware_single_step ();
6055 linux_supports_software_single_step (void)
6057 return can_software_single_step ();
6061 linux_stopped_by_watchpoint (void)
6063 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
6065 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
6069 linux_stopped_data_address (void)
6071 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
6073 return lwp
->stopped_data_address
;
6076 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6077 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6078 && defined(PT_TEXT_END_ADDR)
6080 /* This is only used for targets that define PT_TEXT_ADDR,
6081 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6082 the target has different ways of acquiring this information, like
6085 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6086 to tell gdb about. */
6089 linux_read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
6091 unsigned long text
, text_end
, data
;
6092 int pid
= lwpid_of (current_thread
);
6096 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
6097 (PTRACE_TYPE_ARG4
) 0);
6098 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
6099 (PTRACE_TYPE_ARG4
) 0);
6100 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
6101 (PTRACE_TYPE_ARG4
) 0);
6105 /* Both text and data offsets produced at compile-time (and so
6106 used by gdb) are relative to the beginning of the program,
6107 with the data segment immediately following the text segment.
6108 However, the actual runtime layout in memory may put the data
6109 somewhere else, so when we send gdb a data base-address, we
6110 use the real data base address and subtract the compile-time
6111 data base-address from it (which is just the length of the
6112 text segment). BSS immediately follows data in both
6115 *data_p
= data
- (text_end
- text
);
6124 linux_qxfer_osdata (const char *annex
,
6125 unsigned char *readbuf
, unsigned const char *writebuf
,
6126 CORE_ADDR offset
, int len
)
6128 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
6131 /* Convert a native/host siginfo object, into/from the siginfo in the
6132 layout of the inferiors' architecture. */
6135 siginfo_fixup (siginfo_t
*siginfo
, gdb_byte
*inf_siginfo
, int direction
)
6139 if (the_low_target
.siginfo_fixup
!= NULL
)
6140 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
6142 /* If there was no callback, or the callback didn't do anything,
6143 then just do a straight memcpy. */
6147 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
6149 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
6154 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
6155 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
6159 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
6161 if (current_thread
== NULL
)
6164 pid
= lwpid_of (current_thread
);
6167 debug_printf ("%s siginfo for lwp %d.\n",
6168 readbuf
!= NULL
? "Reading" : "Writing",
6171 if (offset
>= sizeof (siginfo
))
6174 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6177 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6178 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6179 inferior with a 64-bit GDBSERVER should look the same as debugging it
6180 with a 32-bit GDBSERVER, we need to convert it. */
6181 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
6183 if (offset
+ len
> sizeof (siginfo
))
6184 len
= sizeof (siginfo
) - offset
;
6186 if (readbuf
!= NULL
)
6187 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
6190 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
6192 /* Convert back to ptrace layout before flushing it out. */
6193 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
6195 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6202 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6203 so we notice when children change state; as the handler for the
6204 sigsuspend in my_waitpid. */
6207 sigchld_handler (int signo
)
6209 int old_errno
= errno
;
6215 /* fprintf is not async-signal-safe, so call write
6217 if (write (2, "sigchld_handler\n",
6218 sizeof ("sigchld_handler\n") - 1) < 0)
6219 break; /* just ignore */
6223 if (target_is_async_p ())
6224 async_file_mark (); /* trigger a linux_wait */
/* Non-stop mode is always supported on Linux.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
6236 linux_async (int enable
)
6238 int previous
= target_is_async_p ();
6241 debug_printf ("linux_async (%d), previous=%d\n",
6244 if (previous
!= enable
)
6247 sigemptyset (&mask
);
6248 sigaddset (&mask
, SIGCHLD
);
6250 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
6254 if (pipe (linux_event_pipe
) == -1)
6256 linux_event_pipe
[0] = -1;
6257 linux_event_pipe
[1] = -1;
6258 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
6260 warning ("creating event pipe failed.");
6264 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
6265 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
6267 /* Register the event loop handler. */
6268 add_file_handler (linux_event_pipe
[0],
6269 handle_target_event
, NULL
);
6271 /* Always trigger a linux_wait. */
6276 delete_file_handler (linux_event_pipe
[0]);
6278 close (linux_event_pipe
[0]);
6279 close (linux_event_pipe
[1]);
6280 linux_event_pipe
[0] = -1;
6281 linux_event_pipe
[1] = -1;
6284 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
6291 linux_start_non_stop (int nonstop
)
6293 /* Register or unregister from event-loop accordingly. */
6294 linux_async (nonstop
);
6296 if (target_is_async_p () != (nonstop
!= 0))
6303 linux_supports_multi_process (void)
6308 /* Check if fork events are supported. */
6311 linux_supports_fork_events (void)
6313 return linux_supports_tracefork ();
6316 /* Check if vfork events are supported. */
6319 linux_supports_vfork_events (void)
6321 return linux_supports_tracefork ();
6324 /* Check if exec events are supported. */
6327 linux_supports_exec_events (void)
6329 return linux_supports_traceexec ();
6332 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6333 ptrace flags for all inferiors. This is in case the new GDB connection
6334 doesn't support the same set of events that the previous one did. */
6337 linux_handle_new_gdb_connection (void)
6339 /* Request that all the lwps reset their ptrace options. */
6340 for_each_thread ([] (thread_info
*thread
)
6342 struct lwp_info
*lwp
= get_thread_lwp (thread
);
6346 /* Stop the lwp so we can modify its ptrace options. */
6347 lwp
->must_set_ptrace_flags
= 1;
6348 linux_stop_lwp (lwp
);
6352 /* Already stopped; go ahead and set the ptrace options. */
6353 struct process_info
*proc
= find_process_pid (pid_of (thread
));
6354 int options
= linux_low_ptrace_options (proc
->attached
);
6356 linux_enable_event_reporting (lwpid_of (thread
), options
);
6357 lwp
->must_set_ptrace_flags
= 0;
/* ASLR can be disabled for the inferior only when personality(2)
   support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}

/* The in-process agent is supported on Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
6379 linux_supports_range_stepping (void)
6381 if (can_software_single_step ())
6383 if (*the_low_target
.supports_range_stepping
== NULL
)
6386 return (*the_low_target
.supports_range_stepping
) ();
6389 /* Enumerate spufs IDs for process PID. */
6391 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
6397 struct dirent
*entry
;
6399 sprintf (path
, "/proc/%ld/fd", pid
);
6400 dir
= opendir (path
);
6405 while ((entry
= readdir (dir
)) != NULL
)
6411 fd
= atoi (entry
->d_name
);
6415 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
6416 if (stat (path
, &st
) != 0)
6418 if (!S_ISDIR (st
.st_mode
))
6421 if (statfs (path
, &stfs
) != 0)
6423 if (stfs
.f_type
!= SPUFS_MAGIC
)
6426 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
6428 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
6438 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6439 object type, using the /proc file system. */
6441 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
6442 unsigned const char *writebuf
,
6443 CORE_ADDR offset
, int len
)
6445 long pid
= lwpid_of (current_thread
);
6450 if (!writebuf
&& !readbuf
)
6458 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
6461 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
6462 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
6467 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
6474 ret
= write (fd
, writebuf
, (size_t) len
);
6476 ret
= read (fd
, readbuf
, (size_t) len
);
6482 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6483 struct target_loadseg
6485 /* Core address to which the segment is mapped. */
6487 /* VMA recorded in the program header. */
6489 /* Size of this segment in memory. */
6493 # if defined PT_GETDSBT
6494 struct target_loadmap
6496 /* Protocol version number, must be zero. */
6498 /* Pointer to the DSBT table, its size, and the DSBT index. */
6499 unsigned *dsbt_table
;
6500 unsigned dsbt_size
, dsbt_index
;
6501 /* Number of segments in this map. */
6503 /* The actual memory map. */
6504 struct target_loadseg segs
[/*nsegs*/];
6506 # define LINUX_LOADMAP PT_GETDSBT
6507 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6508 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6510 struct target_loadmap
6512 /* Protocol version number, must be zero. */
6514 /* Number of segments in this map. */
6516 /* The actual memory map. */
6517 struct target_loadseg segs
[/*nsegs*/];
6519 # define LINUX_LOADMAP PTRACE_GETFDPIC
6520 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6521 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6525 linux_read_loadmap (const char *annex
, CORE_ADDR offset
,
6526 unsigned char *myaddr
, unsigned int len
)
6528 int pid
= lwpid_of (current_thread
);
6530 struct target_loadmap
*data
= NULL
;
6531 unsigned int actual_length
, copy_length
;
6533 if (strcmp (annex
, "exec") == 0)
6534 addr
= (int) LINUX_LOADMAP_EXEC
;
6535 else if (strcmp (annex
, "interp") == 0)
6536 addr
= (int) LINUX_LOADMAP_INTERP
;
6540 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
6546 actual_length
= sizeof (struct target_loadmap
)
6547 + sizeof (struct target_loadseg
) * data
->nsegs
;
6549 if (offset
< 0 || offset
> actual_length
)
6552 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
6553 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
6557 # define linux_read_loadmap NULL
6558 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6561 linux_process_qsupported (char **features
, int count
)
6563 if (the_low_target
.process_qsupported
!= NULL
)
6564 the_low_target
.process_qsupported (features
, count
);
6568 linux_supports_catch_syscall (void)
6570 return (the_low_target
.get_syscall_trapinfo
!= NULL
6571 && linux_supports_tracesysgood ());
6575 linux_get_ipa_tdesc_idx (void)
6577 if (the_low_target
.get_ipa_tdesc_idx
== NULL
)
6580 return (*the_low_target
.get_ipa_tdesc_idx
) ();
6584 linux_supports_tracepoints (void)
6586 if (*the_low_target
.supports_tracepoints
== NULL
)
6589 return (*the_low_target
.supports_tracepoints
) ();
6593 linux_read_pc (struct regcache
*regcache
)
6595 if (the_low_target
.get_pc
== NULL
)
6598 return (*the_low_target
.get_pc
) (regcache
);
6602 linux_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
6604 gdb_assert (the_low_target
.set_pc
!= NULL
);
6606 (*the_low_target
.set_pc
) (regcache
, pc
);
6610 linux_thread_stopped (struct thread_info
*thread
)
6612 return get_thread_lwp (thread
)->stopped
;
6615 /* This exposes stop-all-threads functionality to other modules. */
6618 linux_pause_all (int freeze
)
6620 stop_all_lwps (freeze
, NULL
);
6623 /* This exposes unstop-all-threads functionality to other gdbserver
6627 linux_unpause_all (int unfreeze
)
6629 unstop_all_lwps (unfreeze
, NULL
);
6633 linux_prepare_to_access_memory (void)
6635 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6638 linux_pause_all (1);
6643 linux_done_accessing_memory (void)
6645 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6648 linux_unpause_all (1);
6652 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
6653 CORE_ADDR collector
,
6656 CORE_ADDR
*jump_entry
,
6657 CORE_ADDR
*trampoline
,
6658 ULONGEST
*trampoline_size
,
6659 unsigned char *jjump_pad_insn
,
6660 ULONGEST
*jjump_pad_insn_size
,
6661 CORE_ADDR
*adjusted_insn_addr
,
6662 CORE_ADDR
*adjusted_insn_addr_end
,
6665 return (*the_low_target
.install_fast_tracepoint_jump_pad
)
6666 (tpoint
, tpaddr
, collector
, lockaddr
, orig_size
,
6667 jump_entry
, trampoline
, trampoline_size
,
6668 jjump_pad_insn
, jjump_pad_insn_size
,
6669 adjusted_insn_addr
, adjusted_insn_addr_end
,
6673 static struct emit_ops
*
6674 linux_emit_ops (void)
6676 if (the_low_target
.emit_ops
!= NULL
)
6677 return (*the_low_target
.emit_ops
) ();
6683 linux_get_min_fast_tracepoint_insn_len (void)
6685 return (*the_low_target
.get_min_fast_tracepoint_insn_len
) ();
6688 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6691 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
6692 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
6694 char filename
[PATH_MAX
];
6696 const int auxv_size
= is_elf64
6697 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
6698 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
6700 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
6702 fd
= open (filename
, O_RDONLY
);
6708 while (read (fd
, buf
, auxv_size
) == auxv_size
6709 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
6713 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
6715 switch (aux
->a_type
)
6718 *phdr_memaddr
= aux
->a_un
.a_val
;
6721 *num_phdr
= aux
->a_un
.a_val
;
6727 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
6729 switch (aux
->a_type
)
6732 *phdr_memaddr
= aux
->a_un
.a_val
;
6735 *num_phdr
= aux
->a_un
.a_val
;
6743 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
6745 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6746 "phdr_memaddr = %ld, phdr_num = %d",
6747 (long) *phdr_memaddr
, *num_phdr
);
6754 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6757 get_dynamic (const int pid
, const int is_elf64
)
6759 CORE_ADDR phdr_memaddr
, relocation
;
6761 unsigned char *phdr_buf
;
6762 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
6764 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
6767 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
6768 phdr_buf
= (unsigned char *) alloca (num_phdr
* phdr_size
);
6770 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
6773 /* Compute relocation: it is expected to be 0 for "regular" executables,
6774 non-zero for PIE ones. */
6776 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
6779 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6781 if (p
->p_type
== PT_PHDR
)
6782 relocation
= phdr_memaddr
- p
->p_vaddr
;
6786 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6788 if (p
->p_type
== PT_PHDR
)
6789 relocation
= phdr_memaddr
- p
->p_vaddr
;
6792 if (relocation
== -1)
6794 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6795 any real world executables, including PIE executables, have always
6796 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6797 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6798 or present DT_DEBUG anyway (fpc binaries are statically linked).
6800 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6802 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6807 for (i
= 0; i
< num_phdr
; i
++)
6811 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6813 if (p
->p_type
== PT_DYNAMIC
)
6814 return p
->p_vaddr
+ relocation
;
6818 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6820 if (p
->p_type
== PT_DYNAMIC
)
6821 return p
->p_vaddr
+ relocation
;
6828 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6829 can be 0 if the inferior does not yet have the library list initialized.
6830 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6831 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6834 get_r_debug (const int pid
, const int is_elf64
)
6836 CORE_ADDR dynamic_memaddr
;
6837 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
6838 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
6841 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
6842 if (dynamic_memaddr
== 0)
6845 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
6849 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
6850 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6854 unsigned char buf
[sizeof (Elf64_Xword
)];
6858 #ifdef DT_MIPS_RLD_MAP
6859 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6861 if (linux_read_memory (dyn
->d_un
.d_val
,
6862 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6867 #endif /* DT_MIPS_RLD_MAP */
6868 #ifdef DT_MIPS_RLD_MAP_REL
6869 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6871 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6872 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6877 #endif /* DT_MIPS_RLD_MAP_REL */
6879 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6880 map
= dyn
->d_un
.d_val
;
6882 if (dyn
->d_tag
== DT_NULL
)
6887 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
6888 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6892 unsigned char buf
[sizeof (Elf32_Word
)];
6896 #ifdef DT_MIPS_RLD_MAP
6897 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6899 if (linux_read_memory (dyn
->d_un
.d_val
,
6900 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6905 #endif /* DT_MIPS_RLD_MAP */
6906 #ifdef DT_MIPS_RLD_MAP_REL
6907 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6909 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6910 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6915 #endif /* DT_MIPS_RLD_MAP_REL */
6917 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6918 map
= dyn
->d_un
.d_val
;
6920 if (dyn
->d_tag
== DT_NULL
)
6924 dynamic_memaddr
+= dyn_size
;
6930 /* Read one pointer from MEMADDR in the inferior. */
6933 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
6937 /* Go through a union so this works on either big or little endian
6938 hosts, when the inferior's pointer size is smaller than the size
6939 of CORE_ADDR. It is assumed the inferior's endianness is the
6940 same of the superior's. */
6943 CORE_ADDR core_addr
;
6948 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
6951 if (ptr_size
== sizeof (CORE_ADDR
))
6952 *ptr
= addr
.core_addr
;
6953 else if (ptr_size
== sizeof (unsigned int))
6956 gdb_assert_not_reached ("unhandled pointer size");
/* Offsets into the r_debug / link_map structures of the inferior's
   architecture, used to parse its shared-library list.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6985 /* Construct qXfer:libraries-svr4:read reply. */
6988 linux_qxfer_libraries_svr4 (const char *annex
, unsigned char *readbuf
,
6989 unsigned const char *writebuf
,
6990 CORE_ADDR offset
, int len
)
6992 struct process_info_private
*const priv
= current_process ()->priv
;
6993 char filename
[PATH_MAX
];
6996 static const struct link_map_offsets lmo_32bit_offsets
=
6998 0, /* r_version offset. */
6999 4, /* r_debug.r_map offset. */
7000 0, /* l_addr offset in link_map. */
7001 4, /* l_name offset in link_map. */
7002 8, /* l_ld offset in link_map. */
7003 12, /* l_next offset in link_map. */
7004 16 /* l_prev offset in link_map. */
7007 static const struct link_map_offsets lmo_64bit_offsets
=
7009 0, /* r_version offset. */
7010 8, /* r_debug.r_map offset. */
7011 0, /* l_addr offset in link_map. */
7012 8, /* l_name offset in link_map. */
7013 16, /* l_ld offset in link_map. */
7014 24, /* l_next offset in link_map. */
7015 32 /* l_prev offset in link_map. */
7017 const struct link_map_offsets
*lmo
;
7018 unsigned int machine
;
7020 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
7021 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
7022 int header_done
= 0;
7024 if (writebuf
!= NULL
)
7026 if (readbuf
== NULL
)
7029 pid
= lwpid_of (current_thread
);
7030 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
7031 is_elf64
= elf_64_file_p (filename
, &machine
);
7032 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
7033 ptr_size
= is_elf64
? 8 : 4;
7035 while (annex
[0] != '\0')
7041 sep
= strchr (annex
, '=');
7045 name_len
= sep
- annex
;
7046 if (name_len
== 5 && startswith (annex
, "start"))
7048 else if (name_len
== 4 && startswith (annex
, "prev"))
7052 annex
= strchr (sep
, ';');
7059 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
7066 if (priv
->r_debug
== 0)
7067 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
7069 /* We failed to find DT_DEBUG. Such situation will not change
7070 for this inferior - do not retry it. Report it to GDB as
7071 E01, see for the reasons at the GDB solib-svr4.c side. */
7072 if (priv
->r_debug
== (CORE_ADDR
) -1)
7075 if (priv
->r_debug
!= 0)
7077 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
7078 (unsigned char *) &r_version
,
7079 sizeof (r_version
)) != 0
7082 warning ("unexpected r_debug version %d", r_version
);
7084 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
7085 &lm_addr
, ptr_size
) != 0)
7087 warning ("unable to read r_map from 0x%lx",
7088 (long) priv
->r_debug
+ lmo
->r_map_offset
);
7093 std::string document
= "<library-list-svr4 version=\"1.0\"";
7096 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
7097 &l_name
, ptr_size
) == 0
7098 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
7099 &l_addr
, ptr_size
) == 0
7100 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
7101 &l_ld
, ptr_size
) == 0
7102 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
7103 &l_prev
, ptr_size
) == 0
7104 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
7105 &l_next
, ptr_size
) == 0)
7107 unsigned char libname
[PATH_MAX
];
7109 if (lm_prev
!= l_prev
)
7111 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7112 (long) lm_prev
, (long) l_prev
);
7116 /* Ignore the first entry even if it has valid name as the first entry
7117 corresponds to the main executable. The first entry should not be
7118 skipped if the dynamic loader was loaded late by a static executable
7119 (see solib-svr4.c parameter ignore_first). But in such case the main
7120 executable does not have PT_DYNAMIC present and this function already
7121 exited above due to failed get_r_debug. */
7123 string_appendf (document
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
7126 /* Not checking for error because reading may stop before
7127 we've got PATH_MAX worth of characters. */
7129 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
7130 libname
[sizeof (libname
) - 1] = '\0';
7131 if (libname
[0] != '\0')
7135 /* Terminate `<library-list-svr4'. */
7140 string_appendf (document
, "<library name=\"");
7141 xml_escape_text_append (&document
, (char *) libname
);
7142 string_appendf (document
, "\" lm=\"0x%lx\" "
7143 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7144 (unsigned long) lm_addr
, (unsigned long) l_addr
,
7145 (unsigned long) l_ld
);
7155 /* Empty list; terminate `<library-list-svr4'. */
7159 document
+= "</library-list-svr4>";
7161 int document_len
= document
.length ();
7162 if (offset
< document_len
)
7163 document_len
-= offset
;
7166 if (len
> document_len
)
7169 memcpy (readbuf
, document
.data () + offset
, len
);
7174 #ifdef HAVE_LINUX_BTRACE
7176 /* See to_disable_btrace target method. */
7179 linux_low_disable_btrace (struct btrace_target_info
*tinfo
)
7181 enum btrace_error err
;
7183 err
= linux_disable_btrace (tinfo
);
7184 return (err
== BTRACE_ERR_NONE
? 0 : -1);
7187 /* Encode an Intel Processor Trace configuration. */
7190 linux_low_encode_pt_config (struct buffer
*buffer
,
7191 const struct btrace_data_pt_config
*config
)
7193 buffer_grow_str (buffer
, "<pt-config>\n");
7195 switch (config
->cpu
.vendor
)
7198 buffer_xml_printf (buffer
, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7199 "model=\"%u\" stepping=\"%u\"/>\n",
7200 config
->cpu
.family
, config
->cpu
.model
,
7201 config
->cpu
.stepping
);
7208 buffer_grow_str (buffer
, "</pt-config>\n");
7211 /* Encode a raw buffer. */
7214 linux_low_encode_raw (struct buffer
*buffer
, const gdb_byte
*data
,
7220 /* We use hex encoding - see common/rsp-low.h. */
7221 buffer_grow_str (buffer
, "<raw>\n");
7227 elem
[0] = tohex ((*data
>> 4) & 0xf);
7228 elem
[1] = tohex (*data
++ & 0xf);
7230 buffer_grow (buffer
, elem
, 2);
7233 buffer_grow_str (buffer
, "</raw>\n");
7236 /* See to_read_btrace target method. */
7239 linux_low_read_btrace (struct btrace_target_info
*tinfo
, struct buffer
*buffer
,
7240 enum btrace_read_type type
)
7242 struct btrace_data btrace
;
7243 struct btrace_block
*block
;
7244 enum btrace_error err
;
7247 err
= linux_read_btrace (&btrace
, tinfo
, type
);
7248 if (err
!= BTRACE_ERR_NONE
)
7250 if (err
== BTRACE_ERR_OVERFLOW
)
7251 buffer_grow_str0 (buffer
, "E.Overflow.");
7253 buffer_grow_str0 (buffer
, "E.Generic Error.");
7258 switch (btrace
.format
)
7260 case BTRACE_FORMAT_NONE
:
7261 buffer_grow_str0 (buffer
, "E.No Trace.");
7264 case BTRACE_FORMAT_BTS
:
7265 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7266 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7269 VEC_iterate (btrace_block_s
, btrace
.variant
.bts
.blocks
, i
, block
);
7271 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7272 paddress (block
->begin
), paddress (block
->end
));
7274 buffer_grow_str0 (buffer
, "</btrace>\n");
7277 case BTRACE_FORMAT_PT
:
7278 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7279 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7280 buffer_grow_str (buffer
, "<pt>\n");
7282 linux_low_encode_pt_config (buffer
, &btrace
.variant
.pt
.config
);
7284 linux_low_encode_raw (buffer
, btrace
.variant
.pt
.data
,
7285 btrace
.variant
.pt
.size
);
7287 buffer_grow_str (buffer
, "</pt>\n");
7288 buffer_grow_str0 (buffer
, "</btrace>\n");
7292 buffer_grow_str0 (buffer
, "E.Unsupported Trace Format.");
7299 /* See to_btrace_conf target method. */
7302 linux_low_btrace_conf (const struct btrace_target_info
*tinfo
,
7303 struct buffer
*buffer
)
7305 const struct btrace_config
*conf
;
7307 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7308 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
7310 conf
= linux_btrace_conf (tinfo
);
7313 switch (conf
->format
)
7315 case BTRACE_FORMAT_NONE
:
7318 case BTRACE_FORMAT_BTS
:
7319 buffer_xml_printf (buffer
, "<bts");
7320 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
7321 buffer_xml_printf (buffer
, " />\n");
7324 case BTRACE_FORMAT_PT
:
7325 buffer_xml_printf (buffer
, "<pt");
7326 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->pt
.size
);
7327 buffer_xml_printf (buffer
, "/>\n");
7332 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
7335 #endif /* HAVE_LINUX_BTRACE */
7337 /* See nat/linux-nat.h. */
7340 current_lwp_ptid (void)
7342 return ptid_of (current_thread
);
7345 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7348 linux_breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
7350 if (the_low_target
.breakpoint_kind_from_pc
!= NULL
)
7351 return (*the_low_target
.breakpoint_kind_from_pc
) (pcptr
);
7353 return default_breakpoint_kind_from_pc (pcptr
);
7356 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7358 static const gdb_byte
*
7359 linux_sw_breakpoint_from_kind (int kind
, int *size
)
7361 gdb_assert (the_low_target
.sw_breakpoint_from_kind
!= NULL
);
7363 return (*the_low_target
.sw_breakpoint_from_kind
) (kind
, size
);
7366 /* Implementation of the target_ops method
7367 "breakpoint_kind_from_current_state". */
7370 linux_breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
7372 if (the_low_target
.breakpoint_kind_from_current_state
!= NULL
)
7373 return (*the_low_target
.breakpoint_kind_from_current_state
) (pcptr
);
7375 return linux_breakpoint_kind_from_pc (pcptr
);
7378 /* Default implementation of linux_target_ops method "set_pc" for
7379 32-bit pc register which is literally named "pc". */
7382 linux_set_pc_32bit (struct regcache
*regcache
, CORE_ADDR pc
)
7384 uint32_t newpc
= pc
;
7386 supply_register_by_name (regcache
, "pc", &newpc
);
7389 /* Default implementation of linux_target_ops method "get_pc" for
7390 32-bit pc register which is literally named "pc". */
7393 linux_get_pc_32bit (struct regcache
*regcache
)
7397 collect_register_by_name (regcache
, "pc", &pc
);
7399 debug_printf ("stop pc is 0x%" PRIx32
"\n", pc
);
7403 /* Default implementation of linux_target_ops method "set_pc" for
7404 64-bit pc register which is literally named "pc". */
7407 linux_set_pc_64bit (struct regcache
*regcache
, CORE_ADDR pc
)
7409 uint64_t newpc
= pc
;
7411 supply_register_by_name (regcache
, "pc", &newpc
);
7414 /* Default implementation of linux_target_ops method "get_pc" for
7415 64-bit pc register which is literally named "pc". */
7418 linux_get_pc_64bit (struct regcache
*regcache
)
7422 collect_register_by_name (regcache
, "pc", &pc
);
7424 debug_printf ("stop pc is 0x%" PRIx64
"\n", pc
);
7428 /* See linux-low.h. */
7431 linux_get_auxv (int wordsize
, CORE_ADDR match
, CORE_ADDR
*valp
)
7433 gdb_byte
*data
= (gdb_byte
*) alloca (2 * wordsize
);
7436 gdb_assert (wordsize
== 4 || wordsize
== 8);
7438 while ((*the_target
->read_auxv
) (offset
, data
, 2 * wordsize
) == 2 * wordsize
)
7442 uint32_t *data_p
= (uint32_t *) data
;
7443 if (data_p
[0] == match
)
7451 uint64_t *data_p
= (uint64_t *) data
;
7452 if (data_p
[0] == match
)
7459 offset
+= 2 * wordsize
;
7465 /* See linux-low.h. */
7468 linux_get_hwcap (int wordsize
)
7470 CORE_ADDR hwcap
= 0;
7471 linux_get_auxv (wordsize
, AT_HWCAP
, &hwcap
);
7475 /* See linux-low.h. */
7478 linux_get_hwcap2 (int wordsize
)
7480 CORE_ADDR hwcap2
= 0;
7481 linux_get_auxv (wordsize
, AT_HWCAP2
, &hwcap2
);
7485 static struct target_ops linux_target_ops
= {
7486 linux_create_inferior
,
7487 linux_post_create_inferior
,
7496 linux_fetch_registers
,
7497 linux_store_registers
,
7498 linux_prepare_to_access_memory
,
7499 linux_done_accessing_memory
,
7502 linux_look_up_symbols
,
7503 linux_request_interrupt
,
7505 linux_supports_z_point_type
,
7508 linux_stopped_by_sw_breakpoint
,
7509 linux_supports_stopped_by_sw_breakpoint
,
7510 linux_stopped_by_hw_breakpoint
,
7511 linux_supports_stopped_by_hw_breakpoint
,
7512 linux_supports_hardware_single_step
,
7513 linux_stopped_by_watchpoint
,
7514 linux_stopped_data_address
,
7515 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7516 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7517 && defined(PT_TEXT_END_ADDR)
7522 #ifdef USE_THREAD_DB
7523 thread_db_get_tls_address
,
7528 hostio_last_error_from_errno
,
7531 linux_supports_non_stop
,
7533 linux_start_non_stop
,
7534 linux_supports_multi_process
,
7535 linux_supports_fork_events
,
7536 linux_supports_vfork_events
,
7537 linux_supports_exec_events
,
7538 linux_handle_new_gdb_connection
,
7539 #ifdef USE_THREAD_DB
7540 thread_db_handle_monitor_command
,
7544 linux_common_core_of_thread
,
7546 linux_process_qsupported
,
7547 linux_supports_tracepoints
,
7550 linux_thread_stopped
,
7554 linux_stabilize_threads
,
7555 linux_install_fast_tracepoint_jump_pad
,
7557 linux_supports_disable_randomization
,
7558 linux_get_min_fast_tracepoint_insn_len
,
7559 linux_qxfer_libraries_svr4
,
7560 linux_supports_agent
,
7561 #ifdef HAVE_LINUX_BTRACE
7562 linux_enable_btrace
,
7563 linux_low_disable_btrace
,
7564 linux_low_read_btrace
,
7565 linux_low_btrace_conf
,
7572 linux_supports_range_stepping
,
7573 linux_proc_pid_to_exec_file
,
7574 linux_mntns_open_cloexec
,
7576 linux_mntns_readlink
,
7577 linux_breakpoint_kind_from_pc
,
7578 linux_sw_breakpoint_from_kind
,
7579 linux_proc_tid_get_name
,
7580 linux_breakpoint_kind_from_current_state
,
7581 linux_supports_software_single_step
,
7582 linux_supports_catch_syscall
,
7583 linux_get_ipa_tdesc_idx
,
7585 thread_db_thread_handle
,
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's sentinel-terminated (size < 0) table and
   cache the count in info->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
7603 initialize_low (void)
7605 struct sigaction sigchld_action
;
7607 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
7608 set_target_ops (&linux_target_ops
);
7610 linux_ptrace_init_warnings ();
7611 linux_proc_init_warnings ();
7613 sigchld_action
.sa_handler
= sigchld_handler
;
7614 sigemptyset (&sigchld_action
.sa_mask
);
7615 sigchld_action
.sa_flags
= SA_RESTART
;
7616 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
7618 initialize_low_arch ();
7620 linux_check_ptrace_features ();