1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
34 #include <sys/ioctl.h>
37 #include <sys/syscall.h>
41 #include <sys/types.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
61 #include "nat/linux-namespaces.h"
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR  51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

/* Some uClinux (no-MMU) targets support reading the text/data offsets
   of the executable via the PT_*_ADDR ptrace requests above.  */
#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif
110 #ifdef HAVE_LINUX_BTRACE
111 # include "nat/linux-btrace.h"
112 # include "gdbsupport/btrace-common.h"
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
/* Does the current host support PTRACE_GETREGSET?  -1 means unknown
   (not probed yet); 0 means no; 1 means yes.  */
int have_ptrace_getregset = -1;
150 /* See nat/linux-nat.h. */
153 ptid_of_lwp (struct lwp_info
*lwp
)
155 return ptid_of (get_lwp_thread (lwp
));
158 /* See nat/linux-nat.h. */
161 lwp_set_arch_private_info (struct lwp_info
*lwp
,
162 struct arch_lwp_info
*info
)
164 lwp
->arch_private
= info
;
167 /* See nat/linux-nat.h. */
169 struct arch_lwp_info
*
170 lwp_arch_private_info (struct lwp_info
*lwp
)
172 return lwp
->arch_private
;
175 /* See nat/linux-nat.h. */
178 lwp_is_stopped (struct lwp_info
*lwp
)
183 /* See nat/linux-nat.h. */
185 enum target_stop_reason
186 lwp_stop_reason (struct lwp_info
*lwp
)
188 return lwp
->stop_reason
;
191 /* See nat/linux-nat.h. */
194 lwp_is_stepping (struct lwp_info
*lwp
)
196 return lwp
->stepping
;
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the singly-linked list of stopped-but-unclaimed PIDs.  */
struct simple_pid_list *stopped_pids;
216 /* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
220 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
222 struct simple_pid_list
*new_pid
= XNEW (struct simple_pid_list
);
225 new_pid
->status
= status
;
226 new_pid
->next
= *listp
;
231 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
233 struct simple_pid_list
**p
;
235 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
236 if ((*p
)->pid
== pid
)
238 struct simple_pid_list
*next
= (*p
)->next
;
240 *statusp
= (*p
)->status
;
/* Enumeration of possible "stop all threads" operations in
   progress.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
/* FIXME make into a target method?  */
int using_threads = 1;
/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;
/* Forward declarations of helpers defined later in this file.  */
static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
				    siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
279 /* When the event-loop is doing a step-over, this points at the thread
281 ptid_t step_over_bkpt
;
283 /* True if the low target can hardware single-step. */
286 can_hardware_single_step (void)
288 if (the_low_target
.supports_hardware_single_step
!= NULL
)
289 return the_low_target
.supports_hardware_single_step ();
295 linux_process_target::low_supports_breakpoints ()
301 linux_process_target::low_get_pc (regcache
*regcache
)
307 linux_process_target::low_set_pc (regcache
*regcache
, CORE_ADDR newpc
)
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
312 std::vector
<CORE_ADDR
>
313 linux_process_target::low_get_next_pcs (regcache
*regcache
)
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
320 linux_process_target::low_decr_pc_after_break ()
325 /* Returns true if this target can support fast tracepoints. This
326 does not mean that the in-process agent has been loaded in the
330 supports_fast_tracepoints (void)
332 return the_low_target
.install_fast_tracepoint_jump_pad
!= NULL
;
335 /* True if LWP is stopped in its stepping range. */
338 lwp_in_step_range (struct lwp_info
*lwp
)
340 CORE_ADDR pc
= lwp
->stop_pc
;
342 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
/* A signal queued for later delivery to an LWP, kept as a
   singly-linked list (most recently queued first).  */

struct pending_signals
{
  /* The signal number.  */
  int signal;

  /* Extra signal information, as retrieved with PTRACE_GETSIGINFO.  */
  siginfo_t info;

  /* Previously queued signal, or NULL.  */
  struct pending_signals *prev;
};
/* The read/write ends of the pipe registered as waitable file in the
   event loop.  Both slots are -1 while async mode is off.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
/* Forward declaration; defined later in this file.  */
static void send_sigstop (struct lwp_info *lwp);
/* Return non-zero if HEADER is a 64-bit ELF file.  On a match,
   *MACHINE is set to the header's e_machine value; otherwise it is
   set to EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return 0;
}
379 /* Return non-zero if FILE is a 64-bit ELF file,
380 zero if the file is not a 64-bit ELF file,
381 and -1 if the file is not accessible or doesn't exist. */
384 elf_64_file_p (const char *file
, unsigned int *machine
)
389 fd
= open (file
, O_RDONLY
);
393 if (read (fd
, &header
, sizeof (header
)) != sizeof (header
))
400 return elf_64_header_p (&header
, machine
);
403 /* Accepts an integer PID; Returns true if the executable PID is
404 running is a 64-bit ELF file.. */
407 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
411 sprintf (file
, "/proc/%d/exe", pid
);
412 return elf_64_file_p (file
, machine
);
416 linux_process_target::delete_lwp (lwp_info
*lwp
)
418 struct thread_info
*thr
= get_lwp_thread (lwp
);
421 debug_printf ("deleting %ld\n", lwpid_of (thr
));
425 low_delete_thread (lwp
->arch_private
);
431 linux_process_target::low_delete_thread (arch_lwp_info
*info
)
433 /* Default implementation should be overridden if architecture-specific
434 info is being used. */
435 gdb_assert (info
== nullptr);
439 linux_process_target::add_linux_process (int pid
, int attached
)
441 struct process_info
*proc
;
443 proc
= add_process (pid
, attached
);
444 proc
->priv
= XCNEW (struct process_info_private
);
446 proc
->priv
->arch_private
= low_new_process ();
452 linux_process_target::low_new_process ()
458 linux_process_target::low_delete_process (arch_process_info
*info
)
460 /* Default implementation must be overridden if architecture-specific
462 gdb_assert (info
== nullptr);
466 linux_process_target::low_new_fork (process_info
*parent
, process_info
*child
)
472 linux_process_target::arch_setup_thread (thread_info
*thread
)
474 struct thread_info
*saved_thread
;
476 saved_thread
= current_thread
;
477 current_thread
= thread
;
481 current_thread
= saved_thread
;
485 linux_process_target::handle_extended_wait (lwp_info
**orig_event_lwp
,
488 client_state
&cs
= get_client_state ();
489 struct lwp_info
*event_lwp
= *orig_event_lwp
;
490 int event
= linux_ptrace_get_extended_event (wstat
);
491 struct thread_info
*event_thr
= get_lwp_thread (event_lwp
);
492 struct lwp_info
*new_lwp
;
494 gdb_assert (event_lwp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
);
496 /* All extended events we currently use are mid-syscall. Only
497 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
498 you have to be using PTRACE_SEIZE to get that. */
499 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
501 if ((event
== PTRACE_EVENT_FORK
) || (event
== PTRACE_EVENT_VFORK
)
502 || (event
== PTRACE_EVENT_CLONE
))
505 unsigned long new_pid
;
508 /* Get the pid of the new lwp. */
509 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
512 /* If we haven't already seen the new PID stop, wait for it now. */
513 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
515 /* The new child has a pending SIGSTOP. We can't affect it until it
516 hits the SIGSTOP, but we're already attached. */
518 ret
= my_waitpid (new_pid
, &status
, __WALL
);
521 perror_with_name ("waiting for new child");
522 else if (ret
!= new_pid
)
523 warning ("wait returned unexpected PID %d", ret
);
524 else if (!WIFSTOPPED (status
))
525 warning ("wait returned unexpected status 0x%x", status
);
528 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
530 struct process_info
*parent_proc
;
531 struct process_info
*child_proc
;
532 struct lwp_info
*child_lwp
;
533 struct thread_info
*child_thr
;
534 struct target_desc
*tdesc
;
536 ptid
= ptid_t (new_pid
, new_pid
, 0);
540 debug_printf ("HEW: Got fork event from LWP %ld, "
542 ptid_of (event_thr
).lwp (),
546 /* Add the new process to the tables and clone the breakpoint
547 lists of the parent. We need to do this even if the new process
548 will be detached, since we will need the process object and the
549 breakpoints to remove any breakpoints from memory when we
550 detach, and the client side will access registers. */
551 child_proc
= add_linux_process (new_pid
, 0);
552 gdb_assert (child_proc
!= NULL
);
553 child_lwp
= add_lwp (ptid
);
554 gdb_assert (child_lwp
!= NULL
);
555 child_lwp
->stopped
= 1;
556 child_lwp
->must_set_ptrace_flags
= 1;
557 child_lwp
->status_pending_p
= 0;
558 child_thr
= get_lwp_thread (child_lwp
);
559 child_thr
->last_resume_kind
= resume_stop
;
560 child_thr
->last_status
.kind
= TARGET_WAITKIND_STOPPED
;
562 /* If we're suspending all threads, leave this one suspended
563 too. If the fork/clone parent is stepping over a breakpoint,
564 all other threads have been suspended already. Leave the
565 child suspended too. */
566 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
567 || event_lwp
->bp_reinsert
!= 0)
570 debug_printf ("HEW: leaving child suspended\n");
571 child_lwp
->suspended
= 1;
574 parent_proc
= get_thread_process (event_thr
);
575 child_proc
->attached
= parent_proc
->attached
;
577 if (event_lwp
->bp_reinsert
!= 0
578 && supports_software_single_step ()
579 && event
== PTRACE_EVENT_VFORK
)
581 /* If we leave single-step breakpoints there, child will
582 hit it, so uninsert single-step breakpoints from parent
583 (and child). Once vfork child is done, reinsert
584 them back to parent. */
585 uninsert_single_step_breakpoints (event_thr
);
588 clone_all_breakpoints (child_thr
, event_thr
);
590 tdesc
= allocate_target_description ();
591 copy_target_description (tdesc
, parent_proc
->tdesc
);
592 child_proc
->tdesc
= tdesc
;
594 /* Clone arch-specific process data. */
595 low_new_fork (parent_proc
, child_proc
);
597 /* Save fork info in the parent thread. */
598 if (event
== PTRACE_EVENT_FORK
)
599 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_FORKED
;
600 else if (event
== PTRACE_EVENT_VFORK
)
601 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORKED
;
603 event_lwp
->waitstatus
.value
.related_pid
= ptid
;
605 /* The status_pending field contains bits denoting the
606 extended event, so when the pending event is handled,
607 the handler will look at lwp->waitstatus. */
608 event_lwp
->status_pending_p
= 1;
609 event_lwp
->status_pending
= wstat
;
611 /* Link the threads until the parent event is passed on to
613 event_lwp
->fork_relative
= child_lwp
;
614 child_lwp
->fork_relative
= event_lwp
;
616 /* If the parent thread is doing step-over with single-step
617 breakpoints, the list of single-step breakpoints are cloned
618 from the parent's. Remove them from the child process.
619 In case of vfork, we'll reinsert them back once vforked
621 if (event_lwp
->bp_reinsert
!= 0
622 && supports_software_single_step ())
624 /* The child process is forked and stopped, so it is safe
625 to access its memory without stopping all other threads
626 from other processes. */
627 delete_single_step_breakpoints (child_thr
);
629 gdb_assert (has_single_step_breakpoints (event_thr
));
630 gdb_assert (!has_single_step_breakpoints (child_thr
));
633 /* Report the event. */
638 debug_printf ("HEW: Got clone event "
639 "from LWP %ld, new child is LWP %ld\n",
640 lwpid_of (event_thr
), new_pid
);
642 ptid
= ptid_t (pid_of (event_thr
), new_pid
, 0);
643 new_lwp
= add_lwp (ptid
);
645 /* Either we're going to immediately resume the new thread
646 or leave it stopped. resume_one_lwp is a nop if it
647 thinks the thread is currently running, so set this first
648 before calling resume_one_lwp. */
649 new_lwp
->stopped
= 1;
651 /* If we're suspending all threads, leave this one suspended
652 too. If the fork/clone parent is stepping over a breakpoint,
653 all other threads have been suspended already. Leave the
654 child suspended too. */
655 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
656 || event_lwp
->bp_reinsert
!= 0)
657 new_lwp
->suspended
= 1;
659 /* Normally we will get the pending SIGSTOP. But in some cases
660 we might get another signal delivered to the group first.
661 If we do get another signal, be sure not to lose it. */
662 if (WSTOPSIG (status
) != SIGSTOP
)
664 new_lwp
->stop_expected
= 1;
665 new_lwp
->status_pending_p
= 1;
666 new_lwp
->status_pending
= status
;
668 else if (cs
.report_thread_events
)
670 new_lwp
->waitstatus
.kind
= TARGET_WAITKIND_THREAD_CREATED
;
671 new_lwp
->status_pending_p
= 1;
672 new_lwp
->status_pending
= status
;
676 thread_db_notice_clone (event_thr
, ptid
);
679 /* Don't report the event. */
682 else if (event
== PTRACE_EVENT_VFORK_DONE
)
684 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
686 if (event_lwp
->bp_reinsert
!= 0 && supports_software_single_step ())
688 reinsert_single_step_breakpoints (event_thr
);
690 gdb_assert (has_single_step_breakpoints (event_thr
));
693 /* Report the event. */
696 else if (event
== PTRACE_EVENT_EXEC
&& cs
.report_exec_events
)
698 struct process_info
*proc
;
699 std::vector
<int> syscalls_to_catch
;
705 debug_printf ("HEW: Got exec event from LWP %ld\n",
706 lwpid_of (event_thr
));
709 /* Get the event ptid. */
710 event_ptid
= ptid_of (event_thr
);
711 event_pid
= event_ptid
.pid ();
713 /* Save the syscall list from the execing process. */
714 proc
= get_thread_process (event_thr
);
715 syscalls_to_catch
= std::move (proc
->syscalls_to_catch
);
717 /* Delete the execing process and all its threads. */
719 current_thread
= NULL
;
721 /* Create a new process/lwp/thread. */
722 proc
= add_linux_process (event_pid
, 0);
723 event_lwp
= add_lwp (event_ptid
);
724 event_thr
= get_lwp_thread (event_lwp
);
725 gdb_assert (current_thread
== event_thr
);
726 arch_setup_thread (event_thr
);
728 /* Set the event status. */
729 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXECD
;
730 event_lwp
->waitstatus
.value
.execd_pathname
731 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr
)));
733 /* Mark the exec status as pending. */
734 event_lwp
->stopped
= 1;
735 event_lwp
->status_pending_p
= 1;
736 event_lwp
->status_pending
= wstat
;
737 event_thr
->last_resume_kind
= resume_continue
;
738 event_thr
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
740 /* Update syscall state in the new lwp, effectively mid-syscall too. */
741 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
743 /* Restore the list to catch. Don't rely on the client, which is free
744 to avoid sending a new list when the architecture doesn't change.
745 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
746 proc
->syscalls_to_catch
= std::move (syscalls_to_catch
);
748 /* Report the event. */
749 *orig_event_lwp
= event_lwp
;
753 internal_error (__FILE__
, __LINE__
, _("unknown ptrace event %d"), event
);
757 linux_process_target::get_pc (lwp_info
*lwp
)
759 struct thread_info
*saved_thread
;
760 struct regcache
*regcache
;
763 if (!low_supports_breakpoints ())
766 saved_thread
= current_thread
;
767 current_thread
= get_lwp_thread (lwp
);
769 regcache
= get_thread_regcache (current_thread
, 1);
770 pc
= low_get_pc (regcache
);
773 debug_printf ("pc is 0x%lx\n", (long) pc
);
775 current_thread
= saved_thread
;
779 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
780 Fill *SYSNO with the syscall nr trapped. */
783 get_syscall_trapinfo (struct lwp_info
*lwp
, int *sysno
)
785 struct thread_info
*saved_thread
;
786 struct regcache
*regcache
;
788 if (the_low_target
.get_syscall_trapinfo
== NULL
)
790 /* If we cannot get the syscall trapinfo, report an unknown
791 system call number. */
792 *sysno
= UNKNOWN_SYSCALL
;
796 saved_thread
= current_thread
;
797 current_thread
= get_lwp_thread (lwp
);
799 regcache
= get_thread_regcache (current_thread
, 1);
800 (*the_low_target
.get_syscall_trapinfo
) (regcache
, sysno
);
803 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno
);
805 current_thread
= saved_thread
;
809 linux_process_target::save_stop_reason (lwp_info
*lwp
)
812 CORE_ADDR sw_breakpoint_pc
;
813 struct thread_info
*saved_thread
;
814 #if USE_SIGTRAP_SIGINFO
818 if (!low_supports_breakpoints ())
822 sw_breakpoint_pc
= pc
- low_decr_pc_after_break ();
824 /* breakpoint_at reads from the current thread. */
825 saved_thread
= current_thread
;
826 current_thread
= get_lwp_thread (lwp
);
828 #if USE_SIGTRAP_SIGINFO
829 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
830 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
832 if (siginfo
.si_signo
== SIGTRAP
)
834 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
)
835 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
837 /* The si_code is ambiguous on this arch -- check debug
839 if (!check_stopped_by_watchpoint (lwp
))
840 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
842 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
))
844 /* If we determine the LWP stopped for a SW breakpoint,
845 trust it. Particularly don't check watchpoint
846 registers, because at least on s390, we'd find
847 stopped-by-watchpoint as long as there's a watchpoint
849 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
851 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
853 /* This can indicate either a hardware breakpoint or
854 hardware watchpoint. Check debug registers. */
855 if (!check_stopped_by_watchpoint (lwp
))
856 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
858 else if (siginfo
.si_code
== TRAP_TRACE
)
860 /* We may have single stepped an instruction that
861 triggered a watchpoint. In that case, on some
862 architectures (such as x86), instead of TRAP_HWBKPT,
863 si_code indicates TRAP_TRACE, and we need to check
864 the debug registers separately. */
865 if (!check_stopped_by_watchpoint (lwp
))
866 lwp
->stop_reason
= TARGET_STOPPED_BY_SINGLE_STEP
;
871 /* We may have just stepped a breakpoint instruction. E.g., in
872 non-stop mode, GDB first tells the thread A to step a range, and
873 then the user inserts a breakpoint inside the range. In that
874 case we need to report the breakpoint PC. */
875 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
876 && low_breakpoint_at (sw_breakpoint_pc
))
877 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
879 if (hardware_breakpoint_inserted_here (pc
))
880 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
882 if (lwp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
)
883 check_stopped_by_watchpoint (lwp
);
886 if (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
890 struct thread_info
*thr
= get_lwp_thread (lwp
);
892 debug_printf ("CSBB: %s stopped by software breakpoint\n",
893 target_pid_to_str (ptid_of (thr
)));
896 /* Back up the PC if necessary. */
897 if (pc
!= sw_breakpoint_pc
)
899 struct regcache
*regcache
900 = get_thread_regcache (current_thread
, 1);
901 low_set_pc (regcache
, sw_breakpoint_pc
);
904 /* Update this so we record the correct stop PC below. */
905 pc
= sw_breakpoint_pc
;
907 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
911 struct thread_info
*thr
= get_lwp_thread (lwp
);
913 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
914 target_pid_to_str (ptid_of (thr
)));
917 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
921 struct thread_info
*thr
= get_lwp_thread (lwp
);
923 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
924 target_pid_to_str (ptid_of (thr
)));
927 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
)
931 struct thread_info
*thr
= get_lwp_thread (lwp
);
933 debug_printf ("CSBB: %s stopped by trace\n",
934 target_pid_to_str (ptid_of (thr
)));
939 current_thread
= saved_thread
;
944 linux_process_target::add_lwp (ptid_t ptid
)
946 struct lwp_info
*lwp
;
948 lwp
= XCNEW (struct lwp_info
);
950 lwp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
952 lwp
->thread
= add_thread (ptid
, lwp
);
954 low_new_thread (lwp
);
960 linux_process_target::low_new_thread (lwp_info
*info
)
965 /* Callback to be used when calling fork_inferior, responsible for
966 actually initiating the tracing of the inferior. */
971 if (ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0,
972 (PTRACE_TYPE_ARG4
) 0) < 0)
973 trace_start_error_with_name ("ptrace");
975 if (setpgid (0, 0) < 0)
976 trace_start_error_with_name ("setpgid");
978 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
979 stdout to stderr so that inferior i/o doesn't corrupt the connection.
980 Also, redirect stdin to /dev/null. */
981 if (remote_connection_is_stdio ())
984 trace_start_error_with_name ("close");
985 if (open ("/dev/null", O_RDONLY
) < 0)
986 trace_start_error_with_name ("open");
988 trace_start_error_with_name ("dup2");
989 if (write (2, "stdin/stdout redirected\n",
990 sizeof ("stdin/stdout redirected\n") - 1) < 0)
992 /* Errors ignored. */;
997 /* Start an inferior process and returns its pid.
998 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
999 are its arguments. */
1002 linux_process_target::create_inferior (const char *program
,
1003 const std::vector
<char *> &program_args
)
1005 client_state
&cs
= get_client_state ();
1006 struct lwp_info
*new_lwp
;
1011 maybe_disable_address_space_randomization restore_personality
1012 (cs
.disable_randomization
);
1013 std::string str_program_args
= stringify_argv (program_args
);
1015 pid
= fork_inferior (program
,
1016 str_program_args
.c_str (),
1017 get_environ ()->envp (), linux_ptrace_fun
,
1018 NULL
, NULL
, NULL
, NULL
);
1021 add_linux_process (pid
, 0);
1023 ptid
= ptid_t (pid
, pid
, 0);
1024 new_lwp
= add_lwp (ptid
);
1025 new_lwp
->must_set_ptrace_flags
= 1;
1027 post_fork_inferior (pid
, program
);
1032 /* Implement the post_create_inferior target_ops method. */
1035 linux_process_target::post_create_inferior ()
1037 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
1041 if (lwp
->must_set_ptrace_flags
)
1043 struct process_info
*proc
= current_process ();
1044 int options
= linux_low_ptrace_options (proc
->attached
);
1046 linux_enable_event_reporting (lwpid_of (current_thread
), options
);
1047 lwp
->must_set_ptrace_flags
= 0;
1052 linux_process_target::attach_lwp (ptid_t ptid
)
1054 struct lwp_info
*new_lwp
;
1055 int lwpid
= ptid
.lwp ();
1057 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
1061 new_lwp
= add_lwp (ptid
);
1063 /* We need to wait for SIGSTOP before being able to make the next
1064 ptrace call on this LWP. */
1065 new_lwp
->must_set_ptrace_flags
= 1;
1067 if (linux_proc_pid_is_stopped (lwpid
))
1070 debug_printf ("Attached to a stopped process\n");
1072 /* The process is definitely stopped. It is in a job control
1073 stop, unless the kernel predates the TASK_STOPPED /
1074 TASK_TRACED distinction, in which case it might be in a
1075 ptrace stop. Make sure it is in a ptrace stop; from there we
1076 can kill it, signal it, et cetera.
1078 First make sure there is a pending SIGSTOP. Since we are
1079 already attached, the process can not transition from stopped
1080 to running without a PTRACE_CONT; so we know this signal will
1081 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1082 probably already in the queue (unless this kernel is old
1083 enough to use TASK_STOPPED for ptrace stops); but since
1084 SIGSTOP is not an RT signal, it can only be queued once. */
1085 kill_lwp (lwpid
, SIGSTOP
);
1087 /* Finally, resume the stopped process. This will deliver the
1088 SIGSTOP (or a higher priority signal, just like normal
1089 PTRACE_ATTACH), which we'll catch later on. */
1090 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1093 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1094 brings it to a halt.
1096 There are several cases to consider here:
1098 1) gdbserver has already attached to the process and is being notified
1099 of a new thread that is being created.
1100 In this case we should ignore that SIGSTOP and resume the
1101 process. This is handled below by setting stop_expected = 1,
1102 and the fact that add_thread sets last_resume_kind ==
1105 2) This is the first thread (the process thread), and we're attaching
1106 to it via attach_inferior.
1107 In this case we want the process thread to stop.
1108 This is handled by having linux_attach set last_resume_kind ==
1109 resume_stop after we return.
1111 If the pid we are attaching to is also the tgid, we attach to and
1112 stop all the existing threads. Otherwise, we attach to pid and
1113 ignore any other threads in the same group as this pid.
1115 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1117 In this case we want the thread to stop.
1118 FIXME: This case is currently not properly handled.
1119 We should wait for the SIGSTOP but don't. Things work apparently
1120 because enough time passes between when we ptrace (ATTACH) and when
1121 gdb makes the next ptrace call on the thread.
1123 On the other hand, if we are currently trying to stop all threads, we
1124 should treat the new thread as if we had sent it a SIGSTOP. This works
1125 because we are guaranteed that the add_lwp call above added us to the
1126 end of the list, and so the new thread has not yet reached
1127 wait_for_sigstop (but will). */
1128 new_lwp
->stop_expected
= 1;
1133 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1134 already attached. Returns true if a new LWP is found, false
1138 attach_proc_task_lwp_callback (ptid_t ptid
)
1140 /* Is this a new thread? */
1141 if (find_thread_ptid (ptid
) == NULL
)
1143 int lwpid
= ptid
.lwp ();
1147 debug_printf ("Found new lwp %d\n", lwpid
);
1149 err
= the_linux_target
->attach_lwp (ptid
);
1151 /* Be quiet if we simply raced with the thread exiting. EPERM
1152 is returned if the thread's task still exists, and is marked
1153 as exited or zombie, as well as other conditions, so in that
1154 case, confirm the status in /proc/PID/status. */
1156 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
1160 debug_printf ("Cannot attach to lwp %d: "
1161 "thread is gone (%d: %s)\n",
1162 lwpid
, err
, safe_strerror (err
));
1168 = linux_ptrace_attach_fail_reason_string (ptid
, err
);
1170 warning (_("Cannot attach to lwp %d: %s"), lwpid
, reason
.c_str ());
1178 static void async_file_mark (void);
1180 /* Attach to PID. If PID is the tgid, attach to it and all
1184 linux_process_target::attach (unsigned long pid
)
1186 struct process_info
*proc
;
1187 struct thread_info
*initial_thread
;
1188 ptid_t ptid
= ptid_t (pid
, pid
, 0);
1191 proc
= add_linux_process (pid
, 1);
1193 /* Attach to PID. We will check for other threads
1195 err
= attach_lwp (ptid
);
1198 remove_process (proc
);
1200 std::string reason
= linux_ptrace_attach_fail_reason_string (ptid
, err
);
1201 error ("Cannot attach to process %ld: %s", pid
, reason
.c_str ());
1204 /* Don't ignore the initial SIGSTOP if we just attached to this
1205 process. It will be collected by wait shortly. */
1206 initial_thread
= find_thread_ptid (ptid_t (pid
, pid
, 0));
1207 initial_thread
->last_resume_kind
= resume_stop
;
1209 /* We must attach to every LWP. If /proc is mounted, use that to
1210 find them now. On the one hand, the inferior may be using raw
1211 clone instead of using pthreads. On the other hand, even if it
1212 is using pthreads, GDB may not be connected yet (thread_db needs
1213 to do symbol lookups, through qSymbol). Also, thread_db walks
1214 structures in the inferior's address space to find the list of
1215 threads/LWPs, and those structures may well be corrupted. Note
1216 that once thread_db is loaded, we'll still use it to list threads
1217 and associate pthread info with each LWP. */
1218 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
1220 /* GDB will shortly read the xml target description for this
1221 process, to figure out the process' architecture. But the target
1222 description is only filled in when the first process/thread in
1223 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1224 that now, otherwise, if GDB is fast enough, it could read the
1225 target description _before_ that initial stop. */
1228 struct lwp_info
*lwp
;
1230 ptid_t pid_ptid
= ptid_t (pid
);
1232 lwpid
= wait_for_event_filtered (pid_ptid
, pid_ptid
, &wstat
, __WALL
);
1233 gdb_assert (lwpid
> 0);
1235 lwp
= find_lwp_pid (ptid_t (lwpid
));
1237 if (!WIFSTOPPED (wstat
) || WSTOPSIG (wstat
) != SIGSTOP
)
1239 lwp
->status_pending_p
= 1;
1240 lwp
->status_pending
= wstat
;
1243 initial_thread
->last_resume_kind
= resume_continue
;
1247 gdb_assert (proc
->tdesc
!= NULL
);
1254 last_thread_of_process_p (int pid
)
1256 bool seen_one
= false;
1258 thread_info
*thread
= find_thread (pid
, [&] (thread_info
*thr_arg
)
1262 /* This is the first thread of this process we see. */
1268 /* This is the second thread of this process we see. */
1273 return thread
== NULL
;
1279 linux_kill_one_lwp (struct lwp_info
*lwp
)
1281 struct thread_info
*thr
= get_lwp_thread (lwp
);
1282 int pid
= lwpid_of (thr
);
1284 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1285 there is no signal context, and ptrace(PTRACE_KILL) (or
1286 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1287 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1288 alternative is to kill with SIGKILL. We only need one SIGKILL
1289 per process, not one for each thread. But since we still support
1290 support debugging programs using raw clone without CLONE_THREAD,
1291 we send one for each thread. For years, we used PTRACE_KILL
1292 only, so we're being a bit paranoid about some old kernels where
1293 PTRACE_KILL might work better (dubious if there are any such, but
1294 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1295 second, and so we're fine everywhere. */
1298 kill_lwp (pid
, SIGKILL
);
1301 int save_errno
= errno
;
1303 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1304 target_pid_to_str (ptid_of (thr
)),
1305 save_errno
? safe_strerror (save_errno
) : "OK");
1309 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1312 int save_errno
= errno
;
1314 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1315 target_pid_to_str (ptid_of (thr
)),
1316 save_errno
? safe_strerror (save_errno
) : "OK");
1320 /* Kill LWP and wait for it to die. */
1323 kill_wait_lwp (struct lwp_info
*lwp
)
1325 struct thread_info
*thr
= get_lwp_thread (lwp
);
1326 int pid
= ptid_of (thr
).pid ();
1327 int lwpid
= ptid_of (thr
).lwp ();
1332 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
1336 linux_kill_one_lwp (lwp
);
1338 /* Make sure it died. Notes:
1340 - The loop is most likely unnecessary.
1342 - We don't use wait_for_event as that could delete lwps
1343 while we're iterating over them. We're not interested in
1344 any pending status at this point, only in making sure all
1345 wait status on the kernel side are collected until the
1348 - We don't use __WALL here as the __WALL emulation relies on
1349 SIGCHLD, and killing a stopped process doesn't generate
1350 one, nor an exit status.
1352 res
= my_waitpid (lwpid
, &wstat
, 0);
1353 if (res
== -1 && errno
== ECHILD
)
1354 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
1355 } while (res
> 0 && WIFSTOPPED (wstat
));
1357 /* Even if it was stopped, the child may have already disappeared.
1358 E.g., if it was killed by SIGKILL. */
1359 if (res
< 0 && errno
!= ECHILD
)
1360 perror_with_name ("kill_wait_lwp");
1363 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1364 except the leader. */
1367 kill_one_lwp_callback (thread_info
*thread
, int pid
)
1369 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1371 /* We avoid killing the first thread here, because of a Linux kernel (at
1372 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1373 the children get a chance to be reaped, it will remain a zombie
1376 if (lwpid_of (thread
) == pid
)
1379 debug_printf ("lkop: is last of process %s\n",
1380 target_pid_to_str (thread
->id
));
1384 kill_wait_lwp (lwp
);
1388 linux_process_target::kill (process_info
*process
)
1390 int pid
= process
->pid
;
1392 /* If we're killing a running inferior, make sure it is stopped
1393 first, as PTRACE_KILL will not work otherwise. */
1394 stop_all_lwps (0, NULL
);
1396 for_each_thread (pid
, [&] (thread_info
*thread
)
1398 kill_one_lwp_callback (thread
, pid
);
1401 /* See the comment in linux_kill_one_lwp. We did not kill the first
1402 thread in the list, so do so now. */
1403 lwp_info
*lwp
= find_lwp_pid (ptid_t (pid
));
1408 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1412 kill_wait_lwp (lwp
);
1416 /* Since we presently can only stop all lwps of all processes, we
1417 need to unstop lwps of other processes. */
1418 unstop_all_lwps (0, NULL
);
1422 /* Get pending signal of THREAD, for detaching purposes. This is the
1423 signal the thread last stopped for, which we need to deliver to the
1424 thread when detaching, otherwise, it'd be suppressed/lost. */
1427 get_detach_signal (struct thread_info
*thread
)
1429 client_state
&cs
= get_client_state ();
1430 enum gdb_signal signo
= GDB_SIGNAL_0
;
1432 struct lwp_info
*lp
= get_thread_lwp (thread
);
1434 if (lp
->status_pending_p
)
1435 status
= lp
->status_pending
;
1438 /* If the thread had been suspended by gdbserver, and it stopped
1439 cleanly, then it'll have stopped with SIGSTOP. But we don't
1440 want to deliver that SIGSTOP. */
1441 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1442 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1445 /* Otherwise, we may need to deliver the signal we
1447 status
= lp
->last_status
;
1450 if (!WIFSTOPPED (status
))
1453 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1454 target_pid_to_str (ptid_of (thread
)));
1458 /* Extended wait statuses aren't real SIGTRAPs. */
1459 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1462 debug_printf ("GPS: lwp %s had stopped with extended "
1463 "status: no pending signal\n",
1464 target_pid_to_str (ptid_of (thread
)));
1468 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1470 if (cs
.program_signals_p
&& !cs
.program_signals
[signo
])
1473 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1474 target_pid_to_str (ptid_of (thread
)),
1475 gdb_signal_to_string (signo
));
1478 else if (!cs
.program_signals_p
1479 /* If we have no way to know which signals GDB does not
1480 want to have passed to the program, assume
1481 SIGTRAP/SIGINT, which is GDB's default. */
1482 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1485 debug_printf ("GPS: lwp %s had signal %s, "
1486 "but we don't know if we should pass it. "
1487 "Default to not.\n",
1488 target_pid_to_str (ptid_of (thread
)),
1489 gdb_signal_to_string (signo
));
1495 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1496 target_pid_to_str (ptid_of (thread
)),
1497 gdb_signal_to_string (signo
));
1499 return WSTOPSIG (status
);
1504 linux_process_target::detach_one_lwp (lwp_info
*lwp
)
1506 struct thread_info
*thread
= get_lwp_thread (lwp
);
1510 /* If there is a pending SIGSTOP, get rid of it. */
1511 if (lwp
->stop_expected
)
1514 debug_printf ("Sending SIGCONT to %s\n",
1515 target_pid_to_str (ptid_of (thread
)));
1517 kill_lwp (lwpid_of (thread
), SIGCONT
);
1518 lwp
->stop_expected
= 0;
1521 /* Pass on any pending signal for this thread. */
1522 sig
= get_detach_signal (thread
);
1524 /* Preparing to resume may try to write registers, and fail if the
1525 lwp is zombie. If that happens, ignore the error. We'll handle
1526 it below, when detach fails with ESRCH. */
1529 /* Flush any pending changes to the process's registers. */
1530 regcache_invalidate_thread (thread
);
1532 /* Finally, let it resume. */
1533 low_prepare_to_resume (lwp
);
1535 catch (const gdb_exception_error
&ex
)
1537 if (!check_ptrace_stopped_lwp_gone (lwp
))
1541 lwpid
= lwpid_of (thread
);
1542 if (ptrace (PTRACE_DETACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0,
1543 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1545 int save_errno
= errno
;
1547 /* We know the thread exists, so ESRCH must mean the lwp is
1548 zombie. This can happen if one of the already-detached
1549 threads exits the whole thread group. In that case we're
1550 still attached, and must reap the lwp. */
1551 if (save_errno
== ESRCH
)
1555 ret
= my_waitpid (lwpid
, &status
, __WALL
);
1558 warning (_("Couldn't reap LWP %d while detaching: %s"),
1559 lwpid
, safe_strerror (errno
));
1561 else if (!WIFEXITED (status
) && !WIFSIGNALED (status
))
1563 warning (_("Reaping LWP %d while detaching "
1564 "returned unexpected status 0x%x"),
1570 error (_("Can't detach %s: %s"),
1571 target_pid_to_str (ptid_of (thread
)),
1572 safe_strerror (save_errno
));
1575 else if (debug_threads
)
1577 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1578 target_pid_to_str (ptid_of (thread
)),
1586 linux_process_target::detach (process_info
*process
)
1588 struct lwp_info
*main_lwp
;
1590 /* As there's a step over already in progress, let it finish first,
1591 otherwise nesting a stabilize_threads operation on top gets real
1593 complete_ongoing_step_over ();
1595 /* Stop all threads before detaching. First, ptrace requires that
1596 the thread is stopped to successfully detach. Second, thread_db
1597 may need to uninstall thread event breakpoints from memory, which
1598 only works with a stopped process anyway. */
1599 stop_all_lwps (0, NULL
);
1601 #ifdef USE_THREAD_DB
1602 thread_db_detach (process
);
1605 /* Stabilize threads (move out of jump pads). */
1606 target_stabilize_threads ();
1608 /* Detach from the clone lwps first. If the thread group exits just
1609 while we're detaching, we must reap the clone lwps before we're
1610 able to reap the leader. */
1611 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1613 /* We don't actually detach from the thread group leader just yet.
1614 If the thread group exits, we must reap the zombie clone lwps
1615 before we're able to reap the leader. */
1616 if (thread
->id
.pid () == thread
->id
.lwp ())
1619 lwp_info
*lwp
= get_thread_lwp (thread
);
1620 detach_one_lwp (lwp
);
1623 main_lwp
= find_lwp_pid (ptid_t (process
->pid
));
1624 detach_one_lwp (main_lwp
);
1628 /* Since we presently can only stop all lwps of all processes, we
1629 need to unstop lwps of other processes. */
1630 unstop_all_lwps (0, NULL
);
1634 /* Remove all LWPs that belong to process PROC from the lwp list. */
1637 linux_process_target::mourn (process_info
*process
)
1639 struct process_info_private
*priv
;
1641 #ifdef USE_THREAD_DB
1642 thread_db_mourn (process
);
1645 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1647 delete_lwp (get_thread_lwp (thread
));
1650 /* Freeing all private data. */
1651 priv
= process
->priv
;
1652 low_delete_process (priv
->arch_private
);
1654 process
->priv
= NULL
;
1656 remove_process (process
);
1660 linux_process_target::join (int pid
)
1665 ret
= my_waitpid (pid
, &status
, 0);
1666 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1668 } while (ret
!= -1 || errno
!= ECHILD
);
1671 /* Return true if the given thread is still alive. */
1674 linux_process_target::thread_alive (ptid_t ptid
)
1676 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1678 /* We assume we always know if a thread exits. If a whole process
1679 exited but we still haven't been able to report it to GDB, we'll
1680 hold on to the last lwp of the dead process. */
1682 return !lwp_is_marked_dead (lwp
);
1688 linux_process_target::thread_still_has_status_pending (thread_info
*thread
)
1690 struct lwp_info
*lp
= get_thread_lwp (thread
);
1692 if (!lp
->status_pending_p
)
1695 if (thread
->last_resume_kind
!= resume_stop
1696 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1697 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1699 struct thread_info
*saved_thread
;
1703 gdb_assert (lp
->last_status
!= 0);
1707 saved_thread
= current_thread
;
1708 current_thread
= thread
;
1710 if (pc
!= lp
->stop_pc
)
1713 debug_printf ("PC of %ld changed\n",
1718 #if !USE_SIGTRAP_SIGINFO
1719 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1720 && !low_breakpoint_at (pc
))
1723 debug_printf ("previous SW breakpoint of %ld gone\n",
1727 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1728 && !hardware_breakpoint_inserted_here (pc
))
1731 debug_printf ("previous HW breakpoint of %ld gone\n",
1737 current_thread
= saved_thread
;
1742 debug_printf ("discarding pending breakpoint status\n");
1743 lp
->status_pending_p
= 0;
1751 /* Returns true if LWP is resumed from the client's perspective. */
1754 lwp_resumed (struct lwp_info
*lwp
)
1756 struct thread_info
*thread
= get_lwp_thread (lwp
);
1758 if (thread
->last_resume_kind
!= resume_stop
)
1761 /* Did gdb send us a `vCont;t', but we haven't reported the
1762 corresponding stop to gdb yet? If so, the thread is still
1763 resumed/running from gdb's perspective. */
1764 if (thread
->last_resume_kind
== resume_stop
1765 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
1772 linux_process_target::status_pending_p_callback (thread_info
*thread
,
1775 struct lwp_info
*lp
= get_thread_lwp (thread
);
1777 /* Check if we're only interested in events from a specific process
1778 or a specific LWP. */
1779 if (!thread
->id
.matches (ptid
))
1782 if (!lwp_resumed (lp
))
1785 if (lp
->status_pending_p
1786 && !thread_still_has_status_pending (thread
))
1788 resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1792 return lp
->status_pending_p
;
1796 find_lwp_pid (ptid_t ptid
)
1798 thread_info
*thread
= find_thread ([&] (thread_info
*thr_arg
)
1800 int lwp
= ptid
.lwp () != 0 ? ptid
.lwp () : ptid
.pid ();
1801 return thr_arg
->id
.lwp () == lwp
;
1807 return get_thread_lwp (thread
);
1810 /* Return the number of known LWPs in the tgid given by PID. */
1817 for_each_thread (pid
, [&] (thread_info
*thread
)
1825 /* See nat/linux-nat.h. */
1828 iterate_over_lwps (ptid_t filter
,
1829 gdb::function_view
<iterate_over_lwps_ftype
> callback
)
1831 thread_info
*thread
= find_thread (filter
, [&] (thread_info
*thr_arg
)
1833 lwp_info
*lwp
= get_thread_lwp (thr_arg
);
1835 return callback (lwp
);
1841 return get_thread_lwp (thread
);
1845 linux_process_target::check_zombie_leaders ()
1847 for_each_process ([this] (process_info
*proc
) {
1848 pid_t leader_pid
= pid_of (proc
);
1849 struct lwp_info
*leader_lp
;
1851 leader_lp
= find_lwp_pid (ptid_t (leader_pid
));
1854 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1855 "num_lwps=%d, zombie=%d\n",
1856 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1857 linux_proc_pid_is_zombie (leader_pid
));
1859 if (leader_lp
!= NULL
&& !leader_lp
->stopped
1860 /* Check if there are other threads in the group, as we may
1861 have raced with the inferior simply exiting. */
1862 && !last_thread_of_process_p (leader_pid
)
1863 && linux_proc_pid_is_zombie (leader_pid
))
1865 /* A leader zombie can mean one of two things:
1867 - It exited, and there's an exit status pending
1868 available, or only the leader exited (not the whole
1869 program). In the latter case, we can't waitpid the
1870 leader's exit status until all other threads are gone.
1872 - There are 3 or more threads in the group, and a thread
1873 other than the leader exec'd. On an exec, the Linux
1874 kernel destroys all other threads (except the execing
1875 one) in the thread group, and resets the execing thread's
1876 tid to the tgid. No exit notification is sent for the
1877 execing thread -- from the ptracer's perspective, it
1878 appears as though the execing thread just vanishes.
1879 Until we reap all other threads except the leader and the
1880 execing thread, the leader will be zombie, and the
1881 execing thread will be in `D (disc sleep)'. As soon as
1882 all other threads are reaped, the execing thread changes
1883 it's tid to the tgid, and the previous (zombie) leader
1884 vanishes, giving place to the "new" leader. We could try
1885 distinguishing the exit and exec cases, by waiting once
1886 more, and seeing if something comes out, but it doesn't
1887 sound useful. The previous leader _does_ go away, and
1888 we'll re-add the new one once we see the exec event
1889 (which is just the same as what would happen if the
1890 previous leader did exit voluntarily before some other
1894 debug_printf ("CZL: Thread group leader %d zombie "
1895 "(it exited, or another thread execd).\n",
1898 delete_lwp (leader_lp
);
1903 /* Callback for `find_thread'. Returns the first LWP that is not
1907 not_stopped_callback (thread_info
*thread
, ptid_t filter
)
1909 if (!thread
->id
.matches (filter
))
1912 lwp_info
*lwp
= get_thread_lwp (thread
);
1914 return !lwp
->stopped
;
1917 /* Increment LWP's suspend count. */
1920 lwp_suspended_inc (struct lwp_info
*lwp
)
1924 if (debug_threads
&& lwp
->suspended
> 4)
1926 struct thread_info
*thread
= get_lwp_thread (lwp
);
1928 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1929 " suspended=%d\n", lwpid_of (thread
), lwp
->suspended
);
1933 /* Decrement LWP's suspend count. */
1936 lwp_suspended_decr (struct lwp_info
*lwp
)
1940 if (lwp
->suspended
< 0)
1942 struct thread_info
*thread
= get_lwp_thread (lwp
);
1944 internal_error (__FILE__
, __LINE__
,
1945 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread
),
1950 /* This function should only be called if the LWP got a SIGTRAP.
1952 Handle any tracepoint steps or hits. Return true if a tracepoint
1953 event was handled, 0 otherwise. */
1956 handle_tracepoints (struct lwp_info
*lwp
)
1958 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1959 int tpoint_related_event
= 0;
1961 gdb_assert (lwp
->suspended
== 0);
1963 /* If this tracepoint hit causes a tracing stop, we'll immediately
1964 uninsert tracepoints. To do this, we temporarily pause all
1965 threads, unpatch away, and then unpause threads. We need to make
1966 sure the unpausing doesn't resume LWP too. */
1967 lwp_suspended_inc (lwp
);
1969 /* And we need to be sure that any all-threads-stopping doesn't try
1970 to move threads out of the jump pads, as it could deadlock the
1971 inferior (LWP could be in the jump pad, maybe even holding the
1974 /* Do any necessary step collect actions. */
1975 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1977 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1979 /* See if we just hit a tracepoint and do its main collect
1981 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1983 lwp_suspended_decr (lwp
);
1985 gdb_assert (lwp
->suspended
== 0);
1986 gdb_assert (!stabilizing_threads
1987 || (lwp
->collecting_fast_tracepoint
1988 != fast_tpoint_collect_result::not_collecting
));
1990 if (tpoint_related_event
)
1993 debug_printf ("got a tracepoint event\n");
2000 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2001 collection status. */
2003 static fast_tpoint_collect_result
2004 linux_fast_tracepoint_collecting (struct lwp_info
*lwp
,
2005 struct fast_tpoint_collect_status
*status
)
2007 CORE_ADDR thread_area
;
2008 struct thread_info
*thread
= get_lwp_thread (lwp
);
2010 if (the_low_target
.get_thread_area
== NULL
)
2011 return fast_tpoint_collect_result::not_collecting
;
2013 /* Get the thread area address. This is used to recognize which
2014 thread is which when tracing with the in-process agent library.
2015 We don't read anything from the address, and treat it as opaque;
2016 it's the address itself that we assume is unique per-thread. */
2017 if ((*the_low_target
.get_thread_area
) (lwpid_of (thread
), &thread_area
) == -1)
2018 return fast_tpoint_collect_result::not_collecting
;
2020 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
2024 linux_process_target::maybe_move_out_of_jump_pad (lwp_info
*lwp
, int *wstat
)
2026 struct thread_info
*saved_thread
;
2028 saved_thread
= current_thread
;
2029 current_thread
= get_lwp_thread (lwp
);
2032 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
2033 && supports_fast_tracepoints ()
2034 && agent_loaded_p ())
2036 struct fast_tpoint_collect_status status
;
2039 debug_printf ("Checking whether LWP %ld needs to move out of the "
2041 lwpid_of (current_thread
));
2043 fast_tpoint_collect_result r
2044 = linux_fast_tracepoint_collecting (lwp
, &status
);
2047 || (WSTOPSIG (*wstat
) != SIGILL
2048 && WSTOPSIG (*wstat
) != SIGFPE
2049 && WSTOPSIG (*wstat
) != SIGSEGV
2050 && WSTOPSIG (*wstat
) != SIGBUS
))
2052 lwp
->collecting_fast_tracepoint
= r
;
2054 if (r
!= fast_tpoint_collect_result::not_collecting
)
2056 if (r
== fast_tpoint_collect_result::before_insn
2057 && lwp
->exit_jump_pad_bkpt
== NULL
)
2059 /* Haven't executed the original instruction yet.
2060 Set breakpoint there, and wait till it's hit,
2061 then single-step until exiting the jump pad. */
2062 lwp
->exit_jump_pad_bkpt
2063 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
2067 debug_printf ("Checking whether LWP %ld needs to move out of "
2068 "the jump pad...it does\n",
2069 lwpid_of (current_thread
));
2070 current_thread
= saved_thread
;
2077 /* If we get a synchronous signal while collecting, *and*
2078 while executing the (relocated) original instruction,
2079 reset the PC to point at the tpoint address, before
2080 reporting to GDB. Otherwise, it's an IPA lib bug: just
2081 report the signal to GDB, and pray for the best. */
2083 lwp
->collecting_fast_tracepoint
2084 = fast_tpoint_collect_result::not_collecting
;
2086 if (r
!= fast_tpoint_collect_result::not_collecting
2087 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
2088 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
2091 struct regcache
*regcache
;
2093 /* The si_addr on a few signals references the address
2094 of the faulting instruction. Adjust that as
2096 if ((WSTOPSIG (*wstat
) == SIGILL
2097 || WSTOPSIG (*wstat
) == SIGFPE
2098 || WSTOPSIG (*wstat
) == SIGBUS
2099 || WSTOPSIG (*wstat
) == SIGSEGV
)
2100 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
2101 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
2102 /* Final check just to make sure we don't clobber
2103 the siginfo of non-kernel-sent signals. */
2104 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
2106 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
2107 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
2108 (PTRACE_TYPE_ARG3
) 0, &info
);
2111 regcache
= get_thread_regcache (current_thread
, 1);
2112 low_set_pc (regcache
, status
.tpoint_addr
);
2113 lwp
->stop_pc
= status
.tpoint_addr
;
2115 /* Cancel any fast tracepoint lock this thread was
2117 force_unlock_trace_buffer ();
2120 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
2123 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2124 "stopping all threads momentarily.\n");
2126 stop_all_lwps (1, lwp
);
2128 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
2129 lwp
->exit_jump_pad_bkpt
= NULL
;
2131 unstop_all_lwps (1, lwp
);
2133 gdb_assert (lwp
->suspended
>= 0);
2139 debug_printf ("Checking whether LWP %ld needs to move out of the "
2141 lwpid_of (current_thread
));
2143 current_thread
= saved_thread
;
2147 /* Enqueue one signal in the "signals to report later when out of the
2151 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2153 struct pending_signals
*p_sig
;
2154 struct thread_info
*thread
= get_lwp_thread (lwp
);
2157 debug_printf ("Deferring signal %d for LWP %ld.\n",
2158 WSTOPSIG (*wstat
), lwpid_of (thread
));
2162 struct pending_signals
*sig
;
2164 for (sig
= lwp
->pending_signals_to_report
;
2167 debug_printf (" Already queued %d\n",
2170 debug_printf (" (no more currently queued signals)\n");
2173 /* Don't enqueue non-RT signals if they are already in the deferred
2174 queue. (SIGSTOP being the easiest signal to see ending up here
2176 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
2178 struct pending_signals
*sig
;
2180 for (sig
= lwp
->pending_signals_to_report
;
2184 if (sig
->signal
== WSTOPSIG (*wstat
))
2187 debug_printf ("Not requeuing already queued non-RT signal %d"
2196 p_sig
= XCNEW (struct pending_signals
);
2197 p_sig
->prev
= lwp
->pending_signals_to_report
;
2198 p_sig
->signal
= WSTOPSIG (*wstat
);
2200 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2203 lwp
->pending_signals_to_report
= p_sig
;
2206 /* Dequeue one signal from the "signals to report later when out of
2207 the jump pad" list. */
2210 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2212 struct thread_info
*thread
= get_lwp_thread (lwp
);
2214 if (lwp
->pending_signals_to_report
!= NULL
)
2216 struct pending_signals
**p_sig
;
2218 p_sig
= &lwp
->pending_signals_to_report
;
2219 while ((*p_sig
)->prev
!= NULL
)
2220 p_sig
= &(*p_sig
)->prev
;
2222 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
2223 if ((*p_sig
)->info
.si_signo
!= 0)
2224 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2230 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2231 WSTOPSIG (*wstat
), lwpid_of (thread
));
2235 struct pending_signals
*sig
;
2237 for (sig
= lwp
->pending_signals_to_report
;
2240 debug_printf (" Still queued %d\n",
2243 debug_printf (" (no more queued signals)\n");
2253 linux_process_target::check_stopped_by_watchpoint (lwp_info
*child
)
2255 struct thread_info
*saved_thread
= current_thread
;
2256 current_thread
= get_lwp_thread (child
);
2258 if (low_stopped_by_watchpoint ())
2260 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2261 child
->stopped_data_address
= low_stopped_data_address ();
2264 current_thread
= saved_thread
;
2266 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2270 linux_process_target::low_stopped_by_watchpoint ()
2276 linux_process_target::low_stopped_data_address ()
2281 /* Return the ptrace options that we want to try to enable. */
2284 linux_low_ptrace_options (int attached
)
2286 client_state
&cs
= get_client_state ();
2290 options
|= PTRACE_O_EXITKILL
;
2292 if (cs
.report_fork_events
)
2293 options
|= PTRACE_O_TRACEFORK
;
2295 if (cs
.report_vfork_events
)
2296 options
|= (PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEVFORKDONE
);
2298 if (cs
.report_exec_events
)
2299 options
|= PTRACE_O_TRACEEXEC
;
2301 options
|= PTRACE_O_TRACESYSGOOD
;
2307 linux_process_target::filter_event (int lwpid
, int wstat
)
2309 client_state
&cs
= get_client_state ();
2310 struct lwp_info
*child
;
2311 struct thread_info
*thread
;
2312 int have_stop_pc
= 0;
2314 child
= find_lwp_pid (ptid_t (lwpid
));
2316 /* Check for stop events reported by a process we didn't already
2317 know about - anything not already in our LWP list.
2319 If we're expecting to receive stopped processes after
2320 fork, vfork, and clone events, then we'll just add the
2321 new one to our list and go back to waiting for the event
2322 to be reported - the stopped process might be returned
2323 from waitpid before or after the event is.
2325 But note the case of a non-leader thread exec'ing after the
2326 leader having exited, and gone from our lists (because
2327 check_zombie_leaders deleted it). The non-leader thread
2328 changes its tid to the tgid. */
2330 if (WIFSTOPPED (wstat
) && child
== NULL
&& WSTOPSIG (wstat
) == SIGTRAP
2331 && linux_ptrace_get_extended_event (wstat
) == PTRACE_EVENT_EXEC
)
2335 /* A multi-thread exec after we had seen the leader exiting. */
2338 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2339 "after exec.\n", lwpid
);
2342 child_ptid
= ptid_t (lwpid
, lwpid
, 0);
2343 child
= add_lwp (child_ptid
);
2345 current_thread
= child
->thread
;
2348 /* If we didn't find a process, one of two things presumably happened:
2349 - A process we started and then detached from has exited. Ignore it.
2350 - A process we are controlling has forked and the new child's stop
2351 was reported to us by the kernel. Save its PID. */
2352 if (child
== NULL
&& WIFSTOPPED (wstat
))
2354 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
2357 else if (child
== NULL
)
2360 thread
= get_lwp_thread (child
);
2364 child
->last_status
= wstat
;
2366 /* Check if the thread has exited. */
2367 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
2370 debug_printf ("LLFE: %d exited.\n", lwpid
);
2372 if (finish_step_over (child
))
2374 /* Unsuspend all other LWPs, and set them back running again. */
2375 unsuspend_all_lwps (child
);
2378 /* If there is at least one more LWP, then the exit signal was
2379 not the end of the debugged application and should be
2380 ignored, unless GDB wants to hear about thread exits. */
2381 if (cs
.report_thread_events
2382 || last_thread_of_process_p (pid_of (thread
)))
2384 /* Since events are serialized to GDB core, and we can't
2385 report this one right now. Leave the status pending for
2386 the next time we're able to report it. */
2387 mark_lwp_dead (child
, wstat
);
2397 gdb_assert (WIFSTOPPED (wstat
));
2399 if (WIFSTOPPED (wstat
))
2401 struct process_info
*proc
;
2403 /* Architecture-specific setup after inferior is running. */
2404 proc
= find_process_pid (pid_of (thread
));
2405 if (proc
->tdesc
== NULL
)
2409 /* This needs to happen after we have attached to the
2410 inferior and it is stopped for the first time, but
2411 before we access any inferior registers. */
2412 arch_setup_thread (thread
);
2416 /* The process is started, but GDBserver will do
2417 architecture-specific setup after the program stops at
2418 the first instruction. */
2419 child
->status_pending_p
= 1;
2420 child
->status_pending
= wstat
;
2426 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
2428 struct process_info
*proc
= find_process_pid (pid_of (thread
));
2429 int options
= linux_low_ptrace_options (proc
->attached
);
2431 linux_enable_event_reporting (lwpid
, options
);
2432 child
->must_set_ptrace_flags
= 0;
2435 /* Always update syscall_state, even if it will be filtered later. */
2436 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SYSCALL_SIGTRAP
)
2438 child
->syscall_state
2439 = (child
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2440 ? TARGET_WAITKIND_SYSCALL_RETURN
2441 : TARGET_WAITKIND_SYSCALL_ENTRY
);
2445 /* Almost all other ptrace-stops are known to be outside of system
2446 calls, with further exceptions in handle_extended_wait. */
2447 child
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2450 /* Be careful to not overwrite stop_pc until save_stop_reason is
2452 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2453 && linux_is_extended_waitstatus (wstat
))
2455 child
->stop_pc
= get_pc (child
);
2456 if (handle_extended_wait (&child
, wstat
))
2458 /* The event has been handled, so just return without
2464 if (linux_wstatus_maybe_breakpoint (wstat
))
2466 if (save_stop_reason (child
))
2471 child
->stop_pc
= get_pc (child
);
2473 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2474 && child
->stop_expected
)
2477 debug_printf ("Expected stop.\n");
2478 child
->stop_expected
= 0;
2480 if (thread
->last_resume_kind
== resume_stop
)
2482 /* We want to report the stop to the core. Treat the
2483 SIGSTOP as a normal event. */
2485 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2486 target_pid_to_str (ptid_of (thread
)));
2488 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2490 /* Stopping threads. We don't want this SIGSTOP to end up
2493 debug_printf ("LLW: SIGSTOP caught for %s "
2494 "while stopping threads.\n",
2495 target_pid_to_str (ptid_of (thread
)));
2500 /* This is a delayed SIGSTOP. Filter out the event. */
2502 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2503 child
->stepping
? "step" : "continue",
2504 target_pid_to_str (ptid_of (thread
)));
2506 resume_one_lwp (child
, child
->stepping
, 0, NULL
);
2511 child
->status_pending_p
= 1;
2512 child
->status_pending
= wstat
;
2516 /* Return true if THREAD is doing hardware single step. */
/* NOTE(review): this chunk is a fragmentary extraction -- the return
   type, braces and return statements of this function are missing
   here; only the surviving lines are annotated below.  */
2519 maybe_hw_step (struct thread_info
*thread
)
/* If the target can hardware single-step, nothing else is needed.  */
2521 if (can_hardware_single_step ())
2525 /* GDBserver must insert single-step breakpoint for software
/* Software single-step: the single-step breakpoint must already be
   in place for THREAD, otherwise this is a logic error.  */
2527 gdb_assert (has_single_step_breakpoints (thread
));
/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.
   NOTE(review): fragmentary extraction -- braces and some guard
   conditions are missing from this view.  */
2533 linux_process_target::resume_stopped_resumed_lwps (thread_info
*thread
)
2535 struct lwp_info
*lp
= get_thread_lwp (thread
);
/* Only act when the LWP has no pending status and the core saw no
   event for it yet (last_status is TARGET_WAITKIND_IGNORE).  */
2539 && !lp
->status_pending_p
2540 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
/* Step only if the core asked for a step; maybe_hw_step decides
   whether that is a hardware step.  */
2544 if (thread
->last_resume_kind
== resume_step
)
2545 step
= maybe_hw_step (thread
);
2548 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2549 target_pid_to_str (ptid_of (thread
)),
2550 paddress (lp
->stop_pc
),
/* Resume with no signal.  */
2553 resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
/* Wait for an event from any LWP matching WAIT_PTID, filtering which
   event is reported by FILTER_PTID.  Stores the wait status through
   WSTATP, selects the event thread as current_thread, and returns its
   lwpid.  NOTE(review): fragmentary extraction -- the FILTER_PTID
   parameter declaration, braces and several statements are missing
   from this view; comments annotate the surviving lines only.  */
2558 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid
,
2560 int *wstatp
, int options
)
2562 struct thread_info
*event_thread
;
2563 struct lwp_info
*event_child
, *requested_child
;
2564 sigset_t block_mask
, prev_mask
;
2567 /* N.B. event_thread points to the thread_info struct that contains
2568 event_child. Keep them in sync. */
2569 event_thread
= NULL
;
2571 requested_child
= NULL
;
2573 /* Check for a lwp with a pending status. */
/* Wildcard (or whole-process) filter: pick any thread with a pending
   status, at random for fairness.  */
2575 if (filter_ptid
== minus_one_ptid
|| filter_ptid
.is_pid ())
2577 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2579 return status_pending_p_callback (thread
, filter_ptid
);
2582 if (event_thread
!= NULL
)
2583 event_child
= get_thread_lwp (event_thread
);
2584 if (debug_threads
&& event_thread
)
2585 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
/* Specific-LWP filter.  */
2587 else if (filter_ptid
!= null_ptid
)
2589 requested_child
= find_lwp_pid (filter_ptid
);
/* If the requested LWP is mid fast-tracepoint collection, defer its
   pending signal and keep it running instead of reporting now.  */
2591 if (stopping_threads
== NOT_STOPPING_THREADS
2592 && requested_child
->status_pending_p
2593 && (requested_child
->collecting_fast_tracepoint
2594 != fast_tpoint_collect_result::not_collecting
))
2596 enqueue_one_deferred_signal (requested_child
,
2597 &requested_child
->status_pending
);
2598 requested_child
->status_pending_p
= 0;
2599 requested_child
->status_pending
= 0;
2600 resume_one_lwp (requested_child
, 0, 0, NULL
);
/* Sanity check: a suspended child should not be the one we are
   pulling an event from.  */
2603 if (requested_child
->suspended
2604 && requested_child
->status_pending_p
)
2606 internal_error (__FILE__
, __LINE__
,
2607 "requesting an event out of a"
2608 " suspended child?");
2611 if (requested_child
->status_pending_p
)
2613 event_child
= requested_child
;
2614 event_thread
= get_lwp_thread (event_child
);
/* Fast path: a pending event was found without calling waitpid.  */
2618 if (event_child
!= NULL
)
2621 debug_printf ("Got an event from pending child %ld (%04x)\n",
2622 lwpid_of (event_thread
), event_child
->status_pending
);
2623 *wstatp
= event_child
->status_pending
;
2624 event_child
->status_pending_p
= 0;
2625 event_child
->status_pending
= 0;
2626 current_thread
= event_thread
;
2627 return lwpid_of (event_thread
);
2630 /* But if we don't find a pending event, we'll have to wait.
2632 We only enter this loop if no process has a pending wait status.
2633 Thus any action taken in response to a wait status inside this
2634 loop is responding as soon as we detect the status, not after any
2637 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2638 all signals while here. */
2639 sigfillset (&block_mask
);
2640 gdb_sigmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2642 /* Always pull all events out of the kernel. We'll randomly select
2643 an event LWP out of all that have events, to prevent
2645 while (event_child
== NULL
)
2649 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2652 - If the thread group leader exits while other threads in the
2653 thread group still exist, waitpid(TGID, ...) hangs. That
2654 waitpid won't return an exit status until the other threads
2655 in the group are reaped.
2657 - When a non-leader thread execs, that thread just vanishes
2658 without reporting an exit (so we'd hang if we waited for it
2659 explicitly in that case). The exec event is reported to
/* Non-blocking reap of whatever the kernel has for us.  */
2662 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2665 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2666 ret
, errno
? safe_strerror (errno
) : "ERRNO-OK");
2672 debug_printf ("LLW: waitpid %ld received %s\n",
2673 (long) ret
, status_to_str (*wstatp
));
2676 /* Filter all events. IOW, leave all events pending. We'll
2677 randomly select an event LWP out of all that have events
2679 filter_event (ret
, *wstatp
);
2680 /* Retry until nothing comes out of waitpid. A single
2681 SIGCHLD can indicate more than one child stopped. */
2685 /* Now that we've pulled all events out of the kernel, resume
2686 LWPs that don't have an interesting event to report. */
2687 if (stopping_threads
== NOT_STOPPING_THREADS
)
2688 for_each_thread ([this] (thread_info
*thread
)
2690 resume_stopped_resumed_lwps (thread
);
2693 /* ... and find an LWP with a status to report to the core, if
2695 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2697 return status_pending_p_callback (thread
, filter_ptid
);
2700 if (event_thread
!= NULL
)
2702 event_child
= get_thread_lwp (event_thread
);
2703 *wstatp
= event_child
->status_pending
;
2704 event_child
->status_pending_p
= 0;
2705 event_child
->status_pending
= 0;
2709 /* Check for zombie thread group leaders. Those can't be reaped
2710 until all other threads in the thread group are. */
2711 check_zombie_leaders ();
2713 auto not_stopped
= [&] (thread_info
*thread
)
2715 return not_stopped_callback (thread
, wait_ptid
);
2718 /* If there are no resumed children left in the set of LWPs we
2719 want to wait for, bail. We can't just block in
2720 waitpid/sigsuspend, because lwps might have been left stopped
2721 in trace-stop state, and we'd be stuck forever waiting for
2722 their status to change (which would only happen if we resumed
2723 them). Even if WNOHANG is set, this return code is preferred
2724 over 0 (below), as it is more detailed. */
2725 if (find_thread (not_stopped
) == NULL
)
2728 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
/* Restore the caller's signal mask before bailing out.  */
2729 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2733 /* No interesting event to report to the caller. */
2734 if ((options
& WNOHANG
))
2737 debug_printf ("WNOHANG set, no event found\n");
2739 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2743 /* Block until we get an event reported with SIGCHLD. */
2745 debug_printf ("sigsuspend'ing\n");
/* Atomically unblock SIGCHLD and wait for it; prev_mask is the
   caller's original mask, saved by the SIG_BLOCK above.  */
2747 sigsuspend (&prev_mask
);
2748 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2752 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
/* Publish the selected event thread and hand back its lwpid.  */
2754 current_thread
= event_thread
;
2756 return lwpid_of (event_thread
);
/* Wait for an event from any LWP matching PTID: a thin wrapper that
   uses PTID both as the wait set and as the event filter.
   NOTE(review): return type and braces are missing from this view.  */
2760 linux_process_target::wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2762 return wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
2765 /* Select one LWP out of those that have events pending. */
/* NOTE(review): fragmentary extraction -- return type, braces and a
   few statements are missing from this view.  On success *ORIG_LP is
   switched to the chosen event LWP.  */
2768 select_event_lwp (struct lwp_info
**orig_lp
)
2770 struct thread_info
*event_thread
= NULL
;
2772 /* In all-stop, give preference to the LWP that is being
2773 single-stepped. There will be at most one, and it's the LWP that
2774 the core is most interested in. If we didn't do this, then we'd
2775 have to handle pending step SIGTRAPs somehow in case the core
2776 later continues the previously-stepped thread, otherwise we'd
2777 report the pending SIGTRAP, and the core, not having stepped the
2778 thread, wouldn't understand what the trap was for, and therefore
2779 would report it to the user as a random signal. */
2782 event_thread
= find_thread ([] (thread_info
*thread
)
2784 lwp_info
*lp
= get_thread_lwp (thread
);
/* Prefer a stepped LWP whose event has not yet been reported.  */
2786 return (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2787 && thread
->last_resume_kind
== resume_step
2788 && lp
->status_pending_p
);
2791 if (event_thread
!= NULL
)
2794 debug_printf ("SEL: Select single-step %s\n",
2795 target_pid_to_str (ptid_of (event_thread
)));
2798 if (event_thread
== NULL
)
2800 /* No single-stepping LWP. Select one at random, out of those
2801 which have had events. */
2803 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2805 lwp_info
*lp
= get_thread_lwp (thread
);
2807 /* Only resumed LWPs that have an event pending. */
2808 return (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2809 && lp
->status_pending_p
);
2813 if (event_thread
!= NULL
)
2815 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2817 /* Switch the event LWP. */
2818 *orig_lp
= event_lp
;
2822 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
/* NOTE(review): the comment above is truncated in this extraction; the
   "skip EXCEPT" check inside the lambda is also missing from view.  */
2826 unsuspend_all_lwps (struct lwp_info
*except
)
2828 for_each_thread ([&] (thread_info
*thread
)
2830 lwp_info
*lwp
= get_thread_lwp (thread
);
/* Undo one level of suspension for this LWP.  */
2833 lwp_suspended_decr (lwp
);
2837 static bool stuck_in_jump_pad_callback (thread_info
*thread
);
2838 static bool lwp_running (thread_info
*thread
);
2840 /* Stabilize threads (move out of jump pads).
2842 If a thread is midway collecting a fast tracepoint, we need to
2843 finish the collection and move it out of the jump pad before
2844 reporting the signal.
2846 This avoids recursion while collecting (when a signal arrives
2847 midway, and the signal handler itself collects), which would trash
2848 the trace buffer. In case the user set a breakpoint in a signal
2849 handler, this avoids the backtrace showing the jump pad, etc..
2850 Most importantly, there are certain things we can't do safely if
2851 threads are stopped in a jump pad (or in its callee's). For
2854 - starting a new trace run. A thread still collecting the
2855 previous run, could trash the trace buffer when resumed. The trace
2856 buffer control structures would have been reset but the thread had
2857 no way to tell. The thread could even midway memcpy'ing to the
2858 buffer, which would mean that when resumed, it would clobber the
2859 trace buffer that had been set for a new run.
2861 - we can't rewrite/reuse the jump pads for new tracepoints
2862 safely. Say you do tstart while a thread is stopped midway while
2863 collecting. When the thread is later resumed, it finishes the
2864 collection, and returns to the jump pad, to execute the original
2865 instruction that was under the tracepoint jump at the time the
2866 older run had been started. If the jump pad had been rewritten
2867 since for something else in the new run, the thread would now
2868 execute the wrong / random instructions. */
/* NOTE(review): fragmentary extraction -- braces, some declarations
   (e.g. `wstat') and a few statements are missing from this view.  */
2871 linux_process_target::stabilize_threads ()
/* Bail early if some LWP is already hopelessly stuck in a pad.  */
2873 thread_info
*thread_stuck
= find_thread (stuck_in_jump_pad_callback
);
2875 if (thread_stuck
!= NULL
)
2878 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2879 lwpid_of (thread_stuck
));
2883 thread_info
*saved_thread
= current_thread
;
2885 stabilizing_threads
= 1;
/* Kick every thread toward the jump-pad exit.  */
2888 for_each_thread ([this] (thread_info
*thread
)
2890 move_out_of_jump_pad (thread
);
2893 /* Loop until all are stopped out of the jump pads. */
2894 while (find_thread (lwp_running
) != NULL
)
2896 struct target_waitstatus ourstatus
;
2897 struct lwp_info
*lwp
;
2900 /* Note that we go through the full wait even loop. While
2901 moving threads out of jump pad, we need to be able to step
2902 over internal breakpoints and such. */
2903 wait_1 (minus_one_ptid
, &ourstatus
, 0);
2905 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2907 lwp
= get_thread_lwp (current_thread
);
/* Keep this LWP suspended while the rest stabilize.  */
2910 lwp_suspended_inc (lwp
);
/* Any real signal (or an explicit stop request) gets queued for
   later delivery rather than reported now.  */
2912 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2913 || current_thread
->last_resume_kind
== resume_stop
)
2915 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2916 enqueue_one_deferred_signal (lwp
, &wstat
);
/* Undo the suspensions taken inside the loop above.  */
2921 unsuspend_all_lwps (NULL
);
2923 stabilizing_threads
= 0;
2925 current_thread
= saved_thread
;
/* Re-check: report if some LWP is still stuck in a jump pad.  */
2929 thread_stuck
= find_thread (stuck_in_jump_pad_callback
);
2931 if (thread_stuck
!= NULL
)
2932 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2933 lwpid_of (thread_stuck
));
2937 /* Convenience function that is called when the kernel reports an
2938 event that is not passed out to GDB. */
/* NOTE(review): return type, braces and the return statement are
   missing from this extraction; OURSTATUS is set to IGNORE so the
   caller's caller issues another target_wait.  */
2941 ignore_event (struct target_waitstatus
*ourstatus
)
2943 /* If we got an event, there may still be others, as a single
2944 SIGCHLD can indicate more than one child stopped. This forces
2945 another target_wait call. */
2948 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
/* Decide whether an LWP exit should be reported to GDB.  For a
   non-last thread of a process the exit is either reported as
   THREAD_EXITED (when the client asked for thread events) or
   swallowed (IGNORE), and the lwp is deleted.
   NOTE(review): return type, braces and the return statements are
   missing from this extraction.  */
2953 linux_process_target::filter_exit_event (lwp_info
*event_child
,
2954 target_waitstatus
*ourstatus
)
2956 client_state
&cs
= get_client_state ();
2957 struct thread_info
*thread
= get_lwp_thread (event_child
);
2958 ptid_t ptid
= ptid_of (thread
);
/* Only filter when other threads of the process remain.  */
2960 if (!last_thread_of_process_p (pid_of (thread
)))
2962 if (cs
.report_thread_events
)
2963 ourstatus
->kind
= TARGET_WAITKIND_THREAD_EXITED
;
2965 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
/* The lwp is gone either way.  */
2967 delete_lwp (event_child
);
2972 /* Returns 1 if GDB is interested in any event_child syscalls. */
/* NOTE(review): return type and braces are missing from this view.  */
2975 gdb_catching_syscalls_p (struct lwp_info
*event_child
)
2977 struct thread_info
*thread
= get_lwp_thread (event_child
);
2978 struct process_info
*proc
= get_thread_process (thread
);
/* Non-empty catch list means GDB asked to catch some syscall.  */
2980 return !proc
->syscalls_to_catch
.empty ();
2983 /* Returns 1 if GDB is interested in the event_child syscall.
2984 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
/* NOTE(review): fragmentary extraction -- return type, braces, the
   `sysno' declaration, several return statements and the loop body
   are missing from this view.  */
2987 gdb_catch_this_syscall_p (struct lwp_info
*event_child
)
2990 struct thread_info
*thread
= get_lwp_thread (event_child
);
2991 struct process_info
*proc
= get_thread_process (thread
);
/* Nothing to catch at all.  */
2993 if (proc
->syscalls_to_catch
.empty ())
/* ANY_SYSCALL wildcard: everything is interesting.  */
2996 if (proc
->syscalls_to_catch
[0] == ANY_SYSCALL
)
/* Otherwise fetch the stopped syscall's number and scan the list.  */
2999 get_syscall_trapinfo (event_child
, &sysno
);
3001 for (int iter
: proc
->syscalls_to_catch
)
/* Core of the Linux target's wait: reap one event, run all internal
   bookkeeping (breakpoints, step-over, tracepoints, jump pads), and
   either report a stop to GDB via OURSTATUS or silently resume and
   keep waiting.  NOTE(review): this whole function is a fragmentary
   extraction -- return type, braces, many local declarations (pid, w,
   options, trace_event, in_step_range, report_to_gdb, any_resumed)
   and numerous statements are missing from this view; the comments
   added below annotate only the surviving lines.  */
3009 linux_process_target::wait_1 (ptid_t ptid
, target_waitstatus
*ourstatus
,
3012 client_state
&cs
= get_client_state ();
3014 struct lwp_info
*event_child
;
3017 int step_over_finished
;
3018 int bp_explains_trap
;
3019 int maybe_internal_trap
;
3028 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid
));
3031 /* Translate generic target options into linux options. */
3033 if (target_options
& TARGET_WNOHANG
)
3036 bp_explains_trap
= 0;
3039 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3041 auto status_pending_p_any
= [&] (thread_info
*thread
)
3043 return status_pending_p_callback (thread
, minus_one_ptid
);
3046 auto not_stopped
= [&] (thread_info
*thread
)
3048 return not_stopped_callback (thread
, minus_one_ptid
);
3051 /* Find a resumed LWP, if any. */
3052 if (find_thread (status_pending_p_any
) != NULL
)
3054 else if (find_thread (not_stopped
) != NULL
)
/* While a step-over is in flight, wait only for that thread, and do
   so blocking (WNOHANG stripped below).  */
3059 if (step_over_bkpt
== null_ptid
)
3060 pid
= wait_for_event (ptid
, &w
, options
);
3064 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3065 target_pid_to_str (step_over_bkpt
));
3066 pid
= wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
/* No event yet (pid == 0), or nothing resumed at all.  */
3069 if (pid
== 0 || (pid
== -1 && !any_resumed
))
3071 gdb_assert (target_options
& TARGET_WNOHANG
);
3075 debug_printf ("wait_1 ret = null_ptid, "
3076 "TARGET_WAITKIND_IGNORE\n");
3080 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3087 debug_printf ("wait_1 ret = null_ptid, "
3088 "TARGET_WAITKIND_NO_RESUMED\n");
3092 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
3096 event_child
= get_thread_lwp (current_thread
);
3098 /* wait_for_event only returns an exit status for the last
3099 child of a process. Report it. */
3100 if (WIFEXITED (w
) || WIFSIGNALED (w
))
3104 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
3105 ourstatus
->value
.integer
= WEXITSTATUS (w
);
3109 debug_printf ("wait_1 ret = %s, exited with "
3111 target_pid_to_str (ptid_of (current_thread
)),
/* Killed by a signal rather than a clean exit.  */
3118 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
3119 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
3123 debug_printf ("wait_1 ret = %s, terminated with "
3125 target_pid_to_str (ptid_of (current_thread
)),
3131 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3132 return filter_exit_event (event_child
, ourstatus
);
3134 return ptid_of (current_thread
);
3137 /* If step-over executes a breakpoint instruction, in the case of a
3138 hardware single step it means a gdb/gdbserver breakpoint had been
3139 planted on top of a permanent breakpoint, in the case of a software
3140 single step it may just mean that gdbserver hit the reinsert breakpoint.
3141 The PC has been adjusted by save_stop_reason to point at
3142 the breakpoint address.
3143 So in the case of the hardware single step advance the PC manually
3144 past the breakpoint and in the case of software single step advance only
3145 if it's not the single_step_breakpoint we are hitting.
3146 This avoids that a program would keep trapping a permanent breakpoint
3148 if (step_over_bkpt
!= null_ptid
3149 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3150 && (event_child
->stepping
3151 || !single_step_breakpoint_inserted_here (event_child
->stop_pc
)))
3153 int increment_pc
= 0;
3154 int breakpoint_kind
= 0;
3155 CORE_ADDR stop_pc
= event_child
->stop_pc
;
3157 breakpoint_kind
= breakpoint_kind_from_current_state (&stop_pc
);
3158 sw_breakpoint_from_kind (breakpoint_kind
, &increment_pc
);
3162 debug_printf ("step-over for %s executed software breakpoint\n",
3163 target_pid_to_str (ptid_of (current_thread
)));
/* Advance the PC past the breakpoint instruction.  */
3166 if (increment_pc
!= 0)
3168 struct regcache
*regcache
3169 = get_thread_regcache (current_thread
, 1);
3171 event_child
->stop_pc
+= increment_pc
;
3172 low_set_pc (regcache
, event_child
->stop_pc
);
3174 if (!low_breakpoint_at (event_child
->stop_pc
))
3175 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3179 /* If this event was not handled before, and is not a SIGTRAP, we
3180 report it. SIGILL and SIGSEGV are also treated as traps in case
3181 a breakpoint is inserted at the current PC. If this target does
3182 not support internal breakpoints at all, we also report the
3183 SIGTRAP without further processing; it's of no concern to us. */
3185 = (low_supports_breakpoints ()
3186 && (WSTOPSIG (w
) == SIGTRAP
3187 || ((WSTOPSIG (w
) == SIGILL
3188 || WSTOPSIG (w
) == SIGSEGV
)
3189 && low_breakpoint_at (event_child
->stop_pc
))));
3191 if (maybe_internal_trap
)
3193 /* Handle anything that requires bookkeeping before deciding to
3194 report the event or continue waiting. */
3196 /* First check if we can explain the SIGTRAP with an internal
3197 breakpoint, or if we should possibly report the event to GDB.
3198 Do this before anything that may remove or insert a
3200 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
3202 /* We have a SIGTRAP, possibly a step-over dance has just
3203 finished. If so, tweak the state machine accordingly,
3204 reinsert breakpoints and delete any single-step
3206 step_over_finished
= finish_step_over (event_child
);
3208 /* Now invoke the callbacks of any internal breakpoints there. */
3209 check_breakpoints (event_child
->stop_pc
);
3211 /* Handle tracepoint data collecting. This may overflow the
3212 trace buffer, and cause a tracing stop, removing
3214 trace_event
= handle_tracepoints (event_child
);
3216 if (bp_explains_trap
)
3219 debug_printf ("Hit a gdbserver breakpoint.\n");
3224 /* We have some other signal, possibly a step-over dance was in
3225 progress, and it should be cancelled too. */
3226 step_over_finished
= finish_step_over (event_child
);
3229 /* We have all the data we need. Either report the event to GDB, or
3230 resume threads and keep waiting for more. */
3232 /* If we're collecting a fast tracepoint, finish the collection and
3233 move out of the jump pad before delivering a signal. See
3234 linux_stabilize_threads. */
3237 && WSTOPSIG (w
) != SIGTRAP
3238 && supports_fast_tracepoints ()
3239 && agent_loaded_p ())
3242 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3243 "to defer or adjust it.\n",
3244 WSTOPSIG (w
), lwpid_of (current_thread
));
3246 /* Allow debugging the jump pad itself. */
3247 if (current_thread
->last_resume_kind
!= resume_step
3248 && maybe_move_out_of_jump_pad (event_child
, &w
))
3250 enqueue_one_deferred_signal (event_child
, &w
);
3253 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3254 WSTOPSIG (w
), lwpid_of (current_thread
));
3256 resume_one_lwp (event_child
, 0, 0, NULL
);
3260 return ignore_event (ourstatus
);
3264 if (event_child
->collecting_fast_tracepoint
3265 != fast_tpoint_collect_result::not_collecting
)
3268 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3269 "Check if we're already there.\n",
3270 lwpid_of (current_thread
),
3271 (int) event_child
->collecting_fast_tracepoint
);
/* Re-evaluate where the collection stands now.  */
3275 event_child
->collecting_fast_tracepoint
3276 = linux_fast_tracepoint_collecting (event_child
, NULL
);
3278 if (event_child
->collecting_fast_tracepoint
3279 != fast_tpoint_collect_result::before_insn
)
3281 /* No longer need this breakpoint. */
3282 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
3285 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3286 "stopping all threads momentarily.\n");
3288 /* Other running threads could hit this breakpoint.
3289 We don't handle moribund locations like GDB does,
3290 instead we always pause all threads when removing
3291 breakpoints, so that any step-over or
3292 decr_pc_after_break adjustment is always taken
3293 care of while the breakpoint is still
3295 stop_all_lwps (1, event_child
);
3297 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
3298 event_child
->exit_jump_pad_bkpt
= NULL
;
3300 unstop_all_lwps (1, event_child
);
3302 gdb_assert (event_child
->suspended
>= 0);
3306 if (event_child
->collecting_fast_tracepoint
3307 == fast_tpoint_collect_result::not_collecting
)
3310 debug_printf ("fast tracepoint finished "
3311 "collecting successfully.\n");
3313 /* We may have a deferred signal to report. */
3314 if (dequeue_one_deferred_signal (event_child
, &w
))
3317 debug_printf ("dequeued one signal.\n");
3322 debug_printf ("no deferred signals.\n");
3324 if (stabilizing_threads
)
3326 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3327 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3331 debug_printf ("wait_1 ret = %s, stopped "
3332 "while stabilizing threads\n",
3333 target_pid_to_str (ptid_of (current_thread
)));
3337 return ptid_of (current_thread
);
3343 /* Check whether GDB would be interested in this event. */
3345 /* Check if GDB is interested in this syscall. */
3347 && WSTOPSIG (w
) == SYSCALL_SIGTRAP
3348 && !gdb_catch_this_syscall_p (event_child
))
3352 debug_printf ("Ignored syscall for LWP %ld.\n",
3353 lwpid_of (current_thread
));
3356 resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
3360 return ignore_event (ourstatus
);
3363 /* If GDB is not interested in this signal, don't stop other
3364 threads, and don't report it to GDB. Just resume the inferior
3365 right away. We do this for threading-related signals as well as
3366 any that GDB specifically requested we ignore. But never ignore
3367 SIGSTOP if we sent it ourselves, and do not ignore signals when
3368 stepping - they may require special handling to skip the signal
3369 handler. Also never ignore signals that could be caused by a
3372 && current_thread
->last_resume_kind
!= resume_step
3374 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3375 (current_process ()->priv
->thread_db
!= NULL
3376 && (WSTOPSIG (w
) == __SIGRTMIN
3377 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
3380 (cs
.pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
3381 && !(WSTOPSIG (w
) == SIGSTOP
3382 && current_thread
->last_resume_kind
== resume_stop
)
3383 && !linux_wstatus_maybe_breakpoint (w
))))
3385 siginfo_t info
, *info_p
;
3388 debug_printf ("Ignored signal %d for LWP %ld.\n",
3389 WSTOPSIG (w
), lwpid_of (current_thread
));
/* Fetch the siginfo so the signal can be re-delivered intact.  */
3391 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
3392 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
3397 if (step_over_finished
)
3399 /* We cancelled this thread's step-over above. We still
3400 need to unsuspend all other LWPs, and set them back
3401 running again while the signal handler runs. */
3402 unsuspend_all_lwps (event_child
);
3404 /* Enqueue the pending signal info so that proceed_all_lwps
3406 enqueue_pending_signal (event_child
, WSTOPSIG (w
), info_p
);
3408 proceed_all_lwps ();
/* No step-over to cancel: just re-deliver the signal.  */
3412 resume_one_lwp (event_child
, event_child
->stepping
,
3413 WSTOPSIG (w
), info_p
);
3419 return ignore_event (ourstatus
);
3422 /* Note that all addresses are always "out of the step range" when
3423 there's no range to begin with. */
3424 in_step_range
= lwp_in_step_range (event_child
);
3426 /* If GDB wanted this thread to single step, and the thread is out
3427 of the step range, we always want to report the SIGTRAP, and let
3428 GDB handle it. Watchpoints should always be reported. So should
3429 signals we can't explain. A SIGTRAP we can't explain could be a
3430 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3431 do, we're be able to handle GDB breakpoints on top of internal
3432 breakpoints, by handling the internal breakpoint and still
3433 reporting the event to GDB. If we don't, we're out of luck, GDB
3434 won't see the breakpoint hit. If we see a single-step event but
3435 the thread should be continuing, don't pass the trap to gdb.
3436 That indicates that we had previously finished a single-step but
3437 left the single-step pending -- see
3438 complete_ongoing_step_over. */
3439 report_to_gdb
= (!maybe_internal_trap
3440 || (current_thread
->last_resume_kind
== resume_step
3442 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3444 && !bp_explains_trap
3446 && !step_over_finished
3447 && !(current_thread
->last_resume_kind
== resume_continue
3448 && event_child
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
))
3449 || (gdb_breakpoint_here (event_child
->stop_pc
)
3450 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
3451 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
))
3452 || event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
);
3454 run_breakpoint_commands (event_child
->stop_pc
);
3456 /* We found no reason GDB would want us to stop. We either hit one
3457 of our own breakpoints, or finished an internal step GDB
3458 shouldn't know about. */
3463 if (bp_explains_trap
)
3464 debug_printf ("Hit a gdbserver breakpoint.\n");
3465 if (step_over_finished
)
3466 debug_printf ("Step-over finished.\n");
3468 debug_printf ("Tracepoint event.\n");
3469 if (lwp_in_step_range (event_child
))
3470 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3471 paddress (event_child
->stop_pc
),
3472 paddress (event_child
->step_range_start
),
3473 paddress (event_child
->step_range_end
));
3476 /* We're not reporting this breakpoint to GDB, so apply the
3477 decr_pc_after_break adjustment to the inferior's regcache
3480 if (low_supports_breakpoints ())
3482 struct regcache
*regcache
3483 = get_thread_regcache (current_thread
, 1);
3484 low_set_pc (regcache
, event_child
->stop_pc
);
3487 if (step_over_finished
)
3489 /* If we have finished stepping over a breakpoint, we've
3490 stopped and suspended all LWPs momentarily except the
3491 stepping one. This is where we resume them all again.
3492 We're going to keep waiting, so use proceed, which
3493 handles stepping over the next breakpoint. */
3494 unsuspend_all_lwps (event_child
);
3498 /* Remove the single-step breakpoints if any. Note that
3499 there isn't single-step breakpoint if we finished stepping
3501 if (supports_software_single_step ()
3502 && has_single_step_breakpoints (current_thread
))
3504 stop_all_lwps (0, event_child
);
3505 delete_single_step_breakpoints (current_thread
);
3506 unstop_all_lwps (0, event_child
);
3511 debug_printf ("proceeding all threads.\n");
3512 proceed_all_lwps ();
3517 return ignore_event (ourstatus
);
3522 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3525 = target_waitstatus_to_string (&event_child
->waitstatus
);
3527 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3528 lwpid_of (get_lwp_thread (event_child
)), str
.c_str ());
3530 if (current_thread
->last_resume_kind
== resume_step
)
3532 if (event_child
->step_range_start
== event_child
->step_range_end
)
3533 debug_printf ("GDB wanted to single-step, reporting event.\n");
3534 else if (!lwp_in_step_range (event_child
))
3535 debug_printf ("Out of step range, reporting event.\n");
3537 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3538 debug_printf ("Stopped by watchpoint.\n");
3539 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3540 debug_printf ("Stopped by GDB breakpoint.\n");
3542 debug_printf ("Hit a non-gdbserver trap event.\n");
3545 /* Alright, we're going to report a stop. */
3547 /* Remove single-step breakpoints. */
3548 if (supports_software_single_step ())
3550 /* Remove single-step breakpoints or not. It it is true, stop all
3551 lwps, so that other threads won't hit the breakpoint in the
3553 int remove_single_step_breakpoints_p
= 0;
3557 remove_single_step_breakpoints_p
3558 = has_single_step_breakpoints (current_thread
);
3562 /* In all-stop, a stop reply cancels all previous resume
3563 requests. Delete all single-step breakpoints. */
3565 find_thread ([&] (thread_info
*thread
) {
3566 if (has_single_step_breakpoints (thread
))
3568 remove_single_step_breakpoints_p
= 1;
3576 if (remove_single_step_breakpoints_p
)
3578 /* If we remove single-step breakpoints from memory, stop all lwps,
3579 so that other threads won't hit the breakpoint in the staled
3581 stop_all_lwps (0, event_child
);
3585 gdb_assert (has_single_step_breakpoints (current_thread
));
3586 delete_single_step_breakpoints (current_thread
);
3590 for_each_thread ([] (thread_info
*thread
){
3591 if (has_single_step_breakpoints (thread
))
3592 delete_single_step_breakpoints (thread
);
3596 unstop_all_lwps (0, event_child
);
3600 if (!stabilizing_threads
)
3602 /* In all-stop, stop all threads. */
3604 stop_all_lwps (0, NULL
);
3606 if (step_over_finished
)
3610 /* If we were doing a step-over, all other threads but
3611 the stepping one had been paused in start_step_over,
3612 with their suspend counts incremented. We don't want
3613 to do a full unstop/unpause, because we're in
3614 all-stop mode (so we want threads stopped), but we
3615 still need to unsuspend the other threads, to
3616 decrement their `suspended' count back. */
3617 unsuspend_all_lwps (event_child
);
3621 /* If we just finished a step-over, then all threads had
3622 been momentarily paused. In all-stop, that's fine,
3623 we want threads stopped by now anyway. In non-stop,
3624 we need to re-resume threads that GDB wanted to be
3626 unstop_all_lwps (1, event_child
);
3630 /* If we're not waiting for a specific LWP, choose an event LWP
3631 from among those that have had events. Giving equal priority
3632 to all LWPs that have had events helps prevent
3634 if (ptid
== minus_one_ptid
)
3636 event_child
->status_pending_p
= 1;
3637 event_child
->status_pending
= w
;
3639 select_event_lwp (&event_child
);
3641 /* current_thread and event_child must stay in sync. */
3642 current_thread
= get_lwp_thread (event_child
);
3644 event_child
->status_pending_p
= 0;
3645 w
= event_child
->status_pending
;
3649 /* Stabilize threads (move out of jump pads). */
3651 target_stabilize_threads ();
3655 /* If we just finished a step-over, then all threads had been
3656 momentarily paused. In all-stop, that's fine, we want
3657 threads stopped by now anyway. In non-stop, we need to
3658 re-resume threads that GDB wanted to be running. */
3659 if (step_over_finished
)
3660 unstop_all_lwps (1, event_child
);
3663 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3665 /* If the reported event is an exit, fork, vfork or exec, let
3668 /* Break the unreported fork relationship chain. */
3669 if (event_child
->waitstatus
.kind
== TARGET_WAITKIND_FORKED
3670 || event_child
->waitstatus
.kind
== TARGET_WAITKIND_VFORKED
)
3672 event_child
->fork_relative
->fork_relative
= NULL
;
3673 event_child
->fork_relative
= NULL
;
3676 *ourstatus
= event_child
->waitstatus
;
3677 /* Clear the event lwp's waitstatus since we handled it already. */
3678 event_child
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3681 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3683 /* Now that we've selected our final event LWP, un-adjust its PC if
3684 it was a software breakpoint, and the client doesn't know we can
3685 adjust the breakpoint ourselves. */
3686 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3687 && !cs
.swbreak_feature
)
3689 int decr_pc
= low_decr_pc_after_break ();
3693 struct regcache
*regcache
3694 = get_thread_regcache (current_thread
, 1);
3695 low_set_pc (regcache
, event_child
->stop_pc
+ decr_pc
);
/* Syscall entry/return stops are reported with the state tracked by
   filter_event's syscall_state machine.  */
3699 if (WSTOPSIG (w
) == SYSCALL_SIGTRAP
)
3701 get_syscall_trapinfo (event_child
,
3702 &ourstatus
->value
.syscall_number
);
3703 ourstatus
->kind
= event_child
->syscall_state
;
3705 else if (current_thread
->last_resume_kind
== resume_stop
3706 && WSTOPSIG (w
) == SIGSTOP
)
3708 /* A thread that has been requested to stop by GDB with vCont;t,
3709 and it stopped cleanly, so report as SIG0. The use of
3710 SIGSTOP is an implementation detail. */
3711 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3713 else if (current_thread
->last_resume_kind
== resume_stop
3714 && WSTOPSIG (w
) != SIGSTOP
)
3716 /* A thread that has been requested to stop by GDB with vCont;t,
3717 but, it stopped for other reasons. */
3718 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3720 else if (ourstatus
->kind
== TARGET_WAITKIND_STOPPED
)
3722 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
/* By the time we report a stop no step-over may be pending.  */
3725 gdb_assert (step_over_bkpt
== null_ptid
);
3729 debug_printf ("wait_1 ret = %s, %d, %d\n",
3730 target_pid_to_str (ptid_of (current_thread
)),
3731 ourstatus
->kind
, ourstatus
->value
.sig
);
3735 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3736 return filter_exit_event (event_child
, ourstatus
);
3738 return ptid_of (current_thread
);
3741 /* Get rid of any pending event in the pipe. */
3743 async_file_flush (void)
3749 ret
= read (linux_event_pipe
[0], &buf
, 1);
3750 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3753 /* Put something in the pipe, so the event loop wakes up. */
3755 async_file_mark (void)
3759 async_file_flush ();
3762 ret
= write (linux_event_pipe
[1], "+", 1);
3763 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3765 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3766 be awakened anyway. */
3770 linux_process_target::wait (ptid_t ptid
,
3771 target_waitstatus
*ourstatus
,
3776 /* Flush the async file first. */
3777 if (target_is_async_p ())
3778 async_file_flush ();
3782 event_ptid
= wait_1 (ptid
, ourstatus
, target_options
);
3784 while ((target_options
& TARGET_WNOHANG
) == 0
3785 && event_ptid
== null_ptid
3786 && ourstatus
->kind
== TARGET_WAITKIND_IGNORE
);
3788 /* If at least one stop was reported, there may be more. A single
3789 SIGCHLD can signal more than one child stop. */
3790 if (target_is_async_p ()
3791 && (target_options
& TARGET_WNOHANG
) != 0
3792 && event_ptid
!= null_ptid
)
3798 /* Send a signal to an LWP. */
3801 kill_lwp (unsigned long lwpid
, int signo
)
3806 ret
= syscall (__NR_tkill
, lwpid
, signo
);
3807 if (errno
== ENOSYS
)
3809 /* If tkill fails, then we are not using nptl threads, a
3810 configuration we no longer support. */
3811 perror_with_name (("tkill"));
3817 linux_stop_lwp (struct lwp_info
*lwp
)
3823 send_sigstop (struct lwp_info
*lwp
)
3827 pid
= lwpid_of (get_lwp_thread (lwp
));
3829 /* If we already have a pending stop signal for this process, don't
3831 if (lwp
->stop_expected
)
3834 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3840 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3842 lwp
->stop_expected
= 1;
3843 kill_lwp (pid
, SIGSTOP
);
3847 send_sigstop (thread_info
*thread
, lwp_info
*except
)
3849 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3851 /* Ignore EXCEPT. */
3861 /* Increment the suspend count of an LWP, and stop it, if not stopped
3864 suspend_and_send_sigstop (thread_info
*thread
, lwp_info
*except
)
3866 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3868 /* Ignore EXCEPT. */
3872 lwp_suspended_inc (lwp
);
3874 send_sigstop (thread
, except
);
3878 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3880 /* Store the exit status for later. */
3881 lwp
->status_pending_p
= 1;
3882 lwp
->status_pending
= wstat
;
3884 /* Store in waitstatus as well, as there's nothing else to process
3886 if (WIFEXITED (wstat
))
3888 lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXITED
;
3889 lwp
->waitstatus
.value
.integer
= WEXITSTATUS (wstat
);
3891 else if (WIFSIGNALED (wstat
))
3893 lwp
->waitstatus
.kind
= TARGET_WAITKIND_SIGNALLED
;
3894 lwp
->waitstatus
.value
.sig
= gdb_signal_from_host (WTERMSIG (wstat
));
3897 /* Prevent trying to stop it. */
3900 /* No further stops are expected from a dead lwp. */
3901 lwp
->stop_expected
= 0;
3904 /* Return true if LWP has exited already, and has a pending exit event
3905 to report to GDB. */
3908 lwp_is_marked_dead (struct lwp_info
*lwp
)
3910 return (lwp
->status_pending_p
3911 && (WIFEXITED (lwp
->status_pending
)
3912 || WIFSIGNALED (lwp
->status_pending
)));
3916 linux_process_target::wait_for_sigstop ()
3918 struct thread_info
*saved_thread
;
3923 saved_thread
= current_thread
;
3924 if (saved_thread
!= NULL
)
3925 saved_tid
= saved_thread
->id
;
3927 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3930 debug_printf ("wait_for_sigstop: pulling events\n");
3932 /* Passing NULL_PTID as filter indicates we want all events to be
3933 left pending. Eventually this returns when there are no
3934 unwaited-for children left. */
3935 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
, __WALL
);
3936 gdb_assert (ret
== -1);
3938 if (saved_thread
== NULL
|| mythread_alive (saved_tid
))
3939 current_thread
= saved_thread
;
3943 debug_printf ("Previously current thread died.\n");
3945 /* We can't change the current inferior behind GDB's back,
3946 otherwise, a subsequent command may apply to the wrong
3948 current_thread
= NULL
;
3952 /* Returns true if THREAD is stopped in a jump pad, and we can't
3953 move it out, because we need to report the stop event to GDB. For
3954 example, if the user puts a breakpoint in the jump pad, it's
3955 because she wants to debug it. */
3958 stuck_in_jump_pad_callback (thread_info
*thread
)
3960 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3962 if (lwp
->suspended
!= 0)
3964 internal_error (__FILE__
, __LINE__
,
3965 "LWP %ld is suspended, suspended=%d\n",
3966 lwpid_of (thread
), lwp
->suspended
);
3968 gdb_assert (lwp
->stopped
);
3970 /* Allow debugging the jump pad, gdb_collect, etc.. */
3971 return (supports_fast_tracepoints ()
3972 && agent_loaded_p ()
3973 && (gdb_breakpoint_here (lwp
->stop_pc
)
3974 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3975 || thread
->last_resume_kind
== resume_step
)
3976 && (linux_fast_tracepoint_collecting (lwp
, NULL
)
3977 != fast_tpoint_collect_result::not_collecting
));
/* NOTE(review): this region is garbled by extraction -- statements are
   split across physical lines and some original lines are missing.
   From the visible code: this method temporarily makes THREAD current,
   and if the LWP is not at a spot that must be reported to GDB (no GDB
   breakpoint at stop_pc, not a watchpoint stop, not a step request)
   and maybe_move_out_of_jump_pad agrees, it defers any pending signal
   and resumes the LWP; otherwise it bumps the LWP's suspend count.  */
3981 linux_process_target::move_out_of_jump_pad (thread_info
*thread
)
3983 struct thread_info
*saved_thread
;
3984 struct lwp_info
*lwp
= get_thread_lwp (thread
);
/* A suspended LWP here is a hard internal error, as in the sibling
   stuck_in_jump_pad_callback above.  */
3987 if (lwp
->suspended
!= 0)
3989 internal_error (__FILE__
, __LINE__
,
3990 "LWP %ld is suspended, suspended=%d\n",
3991 lwpid_of (thread
), lwp
->suspended
);
3993 gdb_assert (lwp
->stopped
);
3995 /* For gdb_breakpoint_here. */
3996 saved_thread
= current_thread
;
3997 current_thread
= thread
;
/* WSTAT points at the pending raw wait status, or is NULL.  */
3999 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
4001 /* Allow debugging the jump pad, gdb_collect, etc. */
4002 if (!gdb_breakpoint_here (lwp
->stop_pc
)
4003 && lwp
->stop_reason
!= TARGET_STOPPED_BY_WATCHPOINT
4004 && thread
->last_resume_kind
!= resume_step
4005 && maybe_move_out_of_jump_pad (lwp
, wstat
))
4008 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
/* Clear the pending status and queue the signal for later delivery,
   once the LWP is out of the jump pad.  */
4013 lwp
->status_pending_p
= 0;
4014 enqueue_one_deferred_signal (lwp
, wstat
);
4017 debug_printf ("Signal %d for LWP %ld deferred "
4019 WSTOPSIG (*wstat
), lwpid_of (thread
));
4022 resume_one_lwp (lwp
, 0, 0, NULL
);
4025 lwp_suspended_inc (lwp
);
4027 current_thread
= saved_thread
;
4031 lwp_running (thread_info
*thread
)
4033 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4035 if (lwp_is_marked_dead (lwp
))
4038 return !lwp
->stopped
;
4042 linux_process_target::stop_all_lwps (int suspend
, lwp_info
*except
)
4044 /* Should not be called recursively. */
4045 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
4050 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4051 suspend
? "stop-and-suspend" : "stop",
4053 ? target_pid_to_str (ptid_of (get_lwp_thread (except
)))
4057 stopping_threads
= (suspend
4058 ? STOPPING_AND_SUSPENDING_THREADS
4059 : STOPPING_THREADS
);
4062 for_each_thread ([&] (thread_info
*thread
)
4064 suspend_and_send_sigstop (thread
, except
);
4067 for_each_thread ([&] (thread_info
*thread
)
4069 send_sigstop (thread
, except
);
4072 wait_for_sigstop ();
4073 stopping_threads
= NOT_STOPPING_THREADS
;
4077 debug_printf ("stop_all_lwps done, setting stopping_threads "
4078 "back to !stopping\n");
4083 /* Enqueue one signal in the chain of signals which need to be
4084 delivered to this process on next resume. */
4087 enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
)
4089 struct pending_signals
*p_sig
= XNEW (struct pending_signals
);
4091 p_sig
->prev
= lwp
->pending_signals
;
4092 p_sig
->signal
= signal
;
4094 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
4096 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
4097 lwp
->pending_signals
= p_sig
;
4101 linux_process_target::install_software_single_step_breakpoints (lwp_info
*lwp
)
4103 struct thread_info
*thread
= get_lwp_thread (lwp
);
4104 struct regcache
*regcache
= get_thread_regcache (thread
, 1);
4106 scoped_restore save_current_thread
= make_scoped_restore (¤t_thread
);
4108 current_thread
= thread
;
4109 std::vector
<CORE_ADDR
> next_pcs
= low_get_next_pcs (regcache
);
4111 for (CORE_ADDR pc
: next_pcs
)
4112 set_single_step_breakpoint (pc
, current_ptid
);
4116 linux_process_target::single_step (lwp_info
* lwp
)
4120 if (can_hardware_single_step ())
4124 else if (supports_software_single_step ())
4126 install_software_single_step_breakpoints (lwp
);
4132 debug_printf ("stepping is not implemented on this target");
4138 /* The signal can be delivered to the inferior if we are not trying to
4139 finish a fast tracepoint collect. Since signal can be delivered in
4140 the step-over, the program may go to signal handler and trap again
4141 after return from the signal handler. We can live with the spurious
4145 lwp_signal_can_be_delivered (struct lwp_info
*lwp
)
4147 return (lwp
->collecting_fast_tracepoint
4148 == fast_tpoint_collect_result::not_collecting
);
/* NOTE(review): this region is garbled by extraction -- statements are
   split across physical lines and some original lines are missing.
   From the visible code: resume LWP with optional STEP and SIGNAL
   (siginfo INFO), enqueueing the signal instead when a status or other
   signals are already pending; handles a pending breakpoint reinsert
   (bp_reinsert) and fast-tracepoint jump-pad exits; finally issues
   PTRACE_SINGLESTEP / PTRACE_SYSCALL / PTRACE_CONT.  Throws on ptrace
   failure ("resuming thread"); see resume_one_lwp for the catch.  */
4152 linux_process_target::resume_one_lwp_throw (lwp_info
*lwp
, int step
,
4153 int signal
, siginfo_t
*info
)
4155 struct thread_info
*thread
= get_lwp_thread (lwp
);
4156 struct thread_info
*saved_thread
;
4158 struct process_info
*proc
= get_thread_process (thread
);
4160 /* Note that target description may not be initialised
4161 (proc->tdesc == NULL) at this point because the program hasn't
4162 stopped at the first instruction yet. It means GDBserver skips
4163 the extra traps from the wrapper program (see option --wrapper).
4164 Code in this function that requires register access should be
4165 guarded by proc->tdesc == NULL or something else. */
4167 if (lwp
->stopped
== 0)
4170 gdb_assert (lwp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
);
4172 fast_tpoint_collect_result fast_tp_collecting
4173 = lwp
->collecting_fast_tracepoint
;
4175 gdb_assert (!stabilizing_threads
4176 || (fast_tp_collecting
4177 != fast_tpoint_collect_result::not_collecting
));
4179 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4180 user used the "jump" command, or "set $pc = foo"). */
4181 if (thread
->while_stepping
!= NULL
&& lwp
->stop_pc
!= get_pc (lwp
))
4183 /* Collecting 'while-stepping' actions doesn't make sense
4185 release_while_stepping_state_list (thread
);
4188 /* If we have pending signals or status, and a new signal, enqueue the
4189 signal. Also enqueue the signal if it can't be delivered to the
4190 inferior right now. */
4192 && (lwp
->status_pending_p
4193 || lwp
->pending_signals
!= NULL
4194 || !lwp_signal_can_be_delivered (lwp
)))
4196 enqueue_pending_signal (lwp
, signal
, info
);
4198 /* Postpone any pending signal. It was enqueued above. */
4202 if (lwp
->status_pending_p
)
4205 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4206 " has pending status\n",
4207 lwpid_of (thread
), step
? "step" : "continue",
4208 lwp
->stop_expected
? "expected" : "not expected");
4212 saved_thread
= current_thread
;
4213 current_thread
= thread
;
4215 /* This bit needs some thinking about. If we get a signal that
4216 we must report while a single-step reinsert is still pending,
4217 we often end up resuming the thread. It might be better to
4218 (ew) allow a stack of pending events; then we could be sure that
4219 the reinsert happened right away and not lose any signals.
4221 Making this stack would also shrink the window in which breakpoints are
4222 uninserted (see comment in linux_wait_for_lwp) but not enough for
4223 complete correctness, so it won't solve that problem. It may be
4224 worthwhile just to solve this one, however. */
4225 if (lwp
->bp_reinsert
!= 0)
4228 debug_printf (" pending reinsert at 0x%s\n",
4229 paddress (lwp
->bp_reinsert
));
4231 if (can_hardware_single_step ())
4233 if (fast_tp_collecting
== fast_tpoint_collect_result::not_collecting
)
4236 warning ("BAD - reinserting but not stepping.");
4238 warning ("BAD - reinserting and suspended(%d).",
/* maybe_hw_step presumably decides whether a hardware step is still
   needed for the reinsert -- TODO confirm against its definition.  */
4243 step
= maybe_hw_step (thread
);
4246 if (fast_tp_collecting
== fast_tpoint_collect_result::before_insn
)
4249 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4250 " (exit-jump-pad-bkpt)\n",
4253 else if (fast_tp_collecting
== fast_tpoint_collect_result::at_insn
)
4256 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4257 " single-stepping\n",
4260 if (can_hardware_single_step ())
4264 internal_error (__FILE__
, __LINE__
,
4265 "moving out of jump pad single-stepping"
4266 " not implemented on this target");
4270 /* If we have while-stepping actions in this thread set it stepping.
4271 If we have a signal to deliver, it may or may not be set to
4272 SIG_IGN, we don't know. Assume so, and allow collecting
4273 while-stepping into a signal handler. A possible smart thing to
4274 do would be to set an internal breakpoint at the signal return
4275 address, continue, and carry on catching this while-stepping
4276 action only when that breakpoint is hit. A future
4278 if (thread
->while_stepping
!= NULL
)
4281 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4284 step
= single_step (lwp
);
/* Record the pre-resume PC so a later stop can be checked against it,
   when registers are accessible.  */
4287 if (proc
->tdesc
!= NULL
&& low_supports_breakpoints ())
4289 struct regcache
*regcache
= get_thread_regcache (current_thread
, 1);
4291 lwp
->stop_pc
= low_get_pc (regcache
);
4295 debug_printf (" %s from pc 0x%lx\n", step
? "step" : "continue",
4296 (long) lwp
->stop_pc
);
4300 /* If we have pending signals, consume one if it can be delivered to
4302 if (lwp
->pending_signals
!= NULL
&& lwp_signal_can_be_delivered (lwp
))
/* Walk to the oldest queued signal (tail of the prev-chain) and
   deliver that one, restoring its siginfo when it has one.  */
4304 struct pending_signals
**p_sig
;
4306 p_sig
= &lwp
->pending_signals
;
4307 while ((*p_sig
)->prev
!= NULL
)
4308 p_sig
= &(*p_sig
)->prev
;
4310 signal
= (*p_sig
)->signal
;
4311 if ((*p_sig
)->info
.si_signo
!= 0)
4312 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
4320 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4321 lwpid_of (thread
), step
? "step" : "continue", signal
,
4322 lwp
->stop_expected
? "expected" : "not expected");
4324 low_prepare_to_resume (lwp
);
4326 regcache_invalidate_thread (thread
);
4328 lwp
->stepping
= step
;
4330 ptrace_request
= PTRACE_SINGLESTEP
;
4331 else if (gdb_catching_syscalls_p (lwp
))
4332 ptrace_request
= PTRACE_SYSCALL
;
4334 ptrace_request
= PTRACE_CONT
;
4335 ptrace (ptrace_request
,
4337 (PTRACE_TYPE_ARG3
) 0,
4338 /* Coerce to a uintptr_t first to avoid potential gcc warning
4339 of coercing an 8 byte integer to a 4 byte pointer. */
4340 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
4342 current_thread
= saved_thread
;
4344 perror_with_name ("resuming thread");
4346 /* Successfully resumed. Clear state that no longer makes sense,
4347 and mark the LWP as running. Must not do this before resuming
4348 otherwise if that fails other code will be confused. E.g., we'd
4349 later try to stop the LWP and hang forever waiting for a stop
4350 status. Note that we must not throw after this is cleared,
4351 otherwise handle_zombie_lwp_error would get confused. */
4353 lwp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4357 linux_process_target::low_prepare_to_resume (lwp_info
*lwp
)
4362 /* Called when we try to resume a stopped LWP and that errors out. If
4363 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4364 or about to become), discard the error, clear any pending status
4365 the LWP may have, and return true (we'll collect the exit status
4366 soon enough). Otherwise, return false. */
4369 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
4371 struct thread_info
*thread
= get_lwp_thread (lp
);
4373 /* If we get an error after resuming the LWP successfully, we'd
4374 confuse !T state for the LWP being gone. */
4375 gdb_assert (lp
->stopped
);
4377 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4378 because even if ptrace failed with ESRCH, the tracee may be "not
4379 yet fully dead", but already refusing ptrace requests. In that
4380 case the tracee has 'R (Running)' state for a little bit
4381 (observed in Linux 3.18). See also the note on ESRCH in the
4382 ptrace(2) man page. Instead, check whether the LWP has any state
4383 other than ptrace-stopped. */
4385 /* Don't assume anything if /proc/PID/status can't be read. */
4386 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
4388 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4389 lp
->status_pending_p
= 0;
4396 linux_process_target::resume_one_lwp (lwp_info
*lwp
, int step
, int signal
,
4401 resume_one_lwp_throw (lwp
, step
, signal
, info
);
4403 catch (const gdb_exception_error
&ex
)
4405 if (!check_ptrace_stopped_lwp_gone (lwp
))
/* NOTE(review): this region is garbled by extraction -- statements are
   split across physical lines and some original lines are missing.
   From the visible code: scans the RESUME array for the entry matching
   THREAD (by exact ptid, wildcard -1, or process-wide pPID/pPID.-1),
   skipping redundant stop requests, wildcard resumes of already-running
   threads, unreported fork children, and threads with queued stop
   replies; on a match it points lwp->resume at the entry, copies the
   step range, and may dequeue a deferred signal as a pending status.  */
4410 /* This function is called once per thread via for_each_thread.
4411 We look up which resume request applies to THREAD and mark it with a
4412 pointer to the appropriate resume request.
4414 This algorithm is O(threads * resume elements), but resume elements
4415 is small (and will remain small at least until GDB supports thread
4419 linux_set_resume_request (thread_info
*thread
, thread_resume
*resume
, size_t n
)
4421 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4423 for (int ndx
= 0; ndx
< n
; ndx
++)
4425 ptid_t ptid
= resume
[ndx
].thread
;
4426 if (ptid
== minus_one_ptid
4427 || ptid
== thread
->id
4428 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4430 || (ptid
.pid () == pid_of (thread
)
4432 || ptid
.lwp () == -1)))
/* A stop request for a thread already stopped at GDB's request is a
   no-op.  */
4434 if (resume
[ndx
].kind
== resume_stop
4435 && thread
->last_resume_kind
== resume_stop
)
4438 debug_printf ("already %s LWP %ld at GDB's request\n",
4439 (thread
->last_status
.kind
4440 == TARGET_WAITKIND_STOPPED
)
4448 /* Ignore (wildcard) resume requests for already-resumed
4450 if (resume
[ndx
].kind
!= resume_stop
4451 && thread
->last_resume_kind
!= resume_stop
)
4454 debug_printf ("already %s LWP %ld at GDB's request\n",
4455 (thread
->last_resume_kind
4463 /* Don't let wildcard resumes resume fork children that GDB
4464 does not yet know are new fork children. */
4465 if (lwp
->fork_relative
!= NULL
)
4467 struct lwp_info
*rel
= lwp
->fork_relative
;
4469 if (rel
->status_pending_p
4470 && (rel
->waitstatus
.kind
== TARGET_WAITKIND_FORKED
4471 || rel
->waitstatus
.kind
== TARGET_WAITKIND_VFORKED
))
4474 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4480 /* If the thread has a pending event that has already been
4481 reported to GDBserver core, but GDB has not pulled the
4482 event out of the vStopped queue yet, likewise, ignore the
4483 (wildcard) resume request. */
4484 if (in_queued_stop_replies (thread
->id
))
4487 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
/* Matching entry found: record it and copy the step range.  */
4492 lwp
->resume
= &resume
[ndx
];
4493 thread
->last_resume_kind
= lwp
->resume
->kind
;
4495 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
4496 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
4498 /* If we had a deferred signal to report, dequeue one now.
4499 This can happen if LWP gets more than one signal while
4500 trying to get out of a jump pad. */
4502 && !lwp
->status_pending_p
4503 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
4505 lwp
->status_pending_p
= 1;
4508 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4509 "leaving status pending.\n",
4510 WSTOPSIG (lwp
->status_pending
),
4518 /* No resume action for this thread. */
4523 linux_process_target::resume_status_pending (thread_info
*thread
)
4525 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4527 /* LWPs which will not be resumed are not interesting, because
4528 we might not wait for them next time through linux_wait. */
4529 if (lwp
->resume
== NULL
)
4532 return thread_still_has_status_pending (thread
);
/* NOTE(review): this region is garbled by extraction -- statements are
   split across physical lines and some original lines are missing.
   From the visible code: decides whether THREAD is stopped at a
   breakpoint/fast-tracepoint jump that gdbserver itself must step
   over.  Bails out early when there is no tdesc yet, when the thread
   will remain stopped, is suspended, has a pending status, when the PC
   moved since the stop, or when a deliverable signal makes a software
   single-step target prefer resuming with the signal instead.  */
4536 linux_process_target::thread_needs_step_over (thread_info
*thread
)
4538 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4539 struct thread_info
*saved_thread
;
4541 struct process_info
*proc
= get_thread_process (thread
);
4543 /* GDBserver is skipping the extra traps from the wrapper program,
4544 don't have to do step over. */
4545 if (proc
->tdesc
== NULL
)
4548 /* LWPs which will not be resumed are not interesting, because we
4549 might not wait for them next time through linux_wait. */
4554 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4559 if (thread
->last_resume_kind
== resume_stop
)
4562 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4568 gdb_assert (lwp
->suspended
>= 0);
4573 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4578 if (lwp
->status_pending_p
)
4581 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4587 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4591 /* If the PC has changed since we stopped, then don't do anything,
4592 and let the breakpoint/tracepoint be hit. This happens if, for
4593 instance, GDB handled the decr_pc_after_break subtraction itself,
4594 GDB is OOL stepping this thread, or the user has issued a "jump"
4595 command, or poked thread's registers herself. */
4596 if (pc
!= lwp
->stop_pc
)
4599 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4600 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4602 paddress (lwp
->stop_pc
), paddress (pc
));
4606 /* On software single step target, resume the inferior with signal
4607 rather than stepping over. */
4608 if (supports_software_single_step ()
4609 && lwp
->pending_signals
!= NULL
4610 && lwp_signal_can_be_delivered (lwp
))
4613 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
/* Breakpoint queries below depend on current_thread; save/restore it.  */
4620 saved_thread
= current_thread
;
4621 current_thread
= thread
;
4623 /* We can only step over breakpoints we know about. */
4624 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
4626 /* Don't step over a breakpoint that GDB expects to hit
4627 though. If the condition is being evaluated on the target's side
4628 and it evaluate to false, step over this breakpoint as well. */
4629 if (gdb_breakpoint_here (pc
)
4630 && gdb_condition_true_at_breakpoint (pc
)
4631 && gdb_no_commands_at_breakpoint (pc
))
4634 debug_printf ("Need step over [LWP %ld]? yes, but found"
4635 " GDB breakpoint at 0x%s; skipping step over\n",
4636 lwpid_of (thread
), paddress (pc
));
4638 current_thread
= saved_thread
;
4644 debug_printf ("Need step over [LWP %ld]? yes, "
4645 "found breakpoint at 0x%s\n",
4646 lwpid_of (thread
), paddress (pc
));
4648 /* We've found an lwp that needs stepping over --- return 1 so
4649 that find_thread stops looking. */
4650 current_thread
= saved_thread
;
4656 current_thread
= saved_thread
;
4659 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4661 lwpid_of (thread
), paddress (pc
));
4667 linux_process_target::start_step_over (lwp_info
*lwp
)
4669 struct thread_info
*thread
= get_lwp_thread (lwp
);
4670 struct thread_info
*saved_thread
;
4675 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4678 stop_all_lwps (1, lwp
);
4680 if (lwp
->suspended
!= 0)
4682 internal_error (__FILE__
, __LINE__
,
4683 "LWP %ld suspended=%d\n", lwpid_of (thread
),
4688 debug_printf ("Done stopping all threads for step-over.\n");
4690 /* Note, we should always reach here with an already adjusted PC,
4691 either by GDB (if we're resuming due to GDB's request), or by our
4692 caller, if we just finished handling an internal breakpoint GDB
4693 shouldn't care about. */
4696 saved_thread
= current_thread
;
4697 current_thread
= thread
;
4699 lwp
->bp_reinsert
= pc
;
4700 uninsert_breakpoints_at (pc
);
4701 uninsert_fast_tracepoint_jumps_at (pc
);
4703 step
= single_step (lwp
);
4705 current_thread
= saved_thread
;
4707 resume_one_lwp (lwp
, step
, 0, NULL
);
4709 /* Require next event from this LWP. */
4710 step_over_bkpt
= thread
->id
;
/* NOTE(review): this region is garbled by extraction -- statements are
   split across physical lines and some original lines are missing
   (including the function's return type/values).  */
4713 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4714 start_step_over, if still there, and delete any single-step
4715 breakpoints we've set, on non hardware single-step targets. */
4718 finish_step_over (struct lwp_info
*lwp
)
4720 if (lwp
->bp_reinsert
!= 0)
4722 struct thread_info
*saved_thread
= current_thread
;
4725 debug_printf ("Finished step over.\n");
4727 current_thread
= get_lwp_thread (lwp
);
4729 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4730 may be no breakpoint to reinsert there by now. */
4731 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4732 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4734 lwp
->bp_reinsert
= 0;
4736 /* Delete any single-step breakpoints. No longer needed. We
4737 don't have to worry about other threads hitting this trap,
4738 and later not being able to explain it, because we were
4739 stepping over a breakpoint, and we hold all threads but
4740 LWP stopped while doing that. */
4741 if (!can_hardware_single_step ())
4743 gdb_assert (has_single_step_breakpoints (current_thread
));
4744 delete_single_step_breakpoints (current_thread
);
/* Clear the "waiting for step-over" marker and restore the previously
   current thread.  */
4747 step_over_bkpt
= null_ptid
;
4748 current_thread
= saved_thread
;
/* NOTE(review): garbled by extraction; some original lines missing.
   From the visible code: if a step-over is in flight (step_over_bkpt
   set), pull all remaining events leaving them pending, finish the
   step-over for the stepping LWP, clear step_over_bkpt and unsuspend
   the other LWPs.  Used e.g. before detaching.  */
4756 linux_process_target::complete_ongoing_step_over ()
4758 if (step_over_bkpt
!= null_ptid
)
4760 struct lwp_info
*lwp
;
4765 debug_printf ("detach: step over in progress, finish it first\n");
4767 /* Passing NULL_PTID as filter indicates we want all events to
4768 be left pending. Eventually this returns when there are no
4769 unwaited-for children left. */
4770 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
,
4772 gdb_assert (ret
== -1);
4774 lwp
= find_lwp_pid (step_over_bkpt
);
4776 finish_step_over (lwp
);
4777 step_over_bkpt
= null_ptid
;
4778 unsuspend_all_lwps (lwp
);
/* NOTE(review): this region is garbled by extraction -- statements are
   split across physical lines and some original lines are missing.
   From the visible code: applies THREAD's recorded resume request.
   Stop requests queue a SIGSTOP if needed; resume requests either
   proceed the LWP or leave it stopped when it is suspended, has a
   pending status, or LEAVE_ALL_STOPPED is set -- in any case queueing
   the request's signal (with its siginfo when it matches the signal
   the LWP last stopped for).  */
4783 linux_process_target::resume_one_thread (thread_info
*thread
,
4784 bool leave_all_stopped
)
4786 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4789 if (lwp
->resume
== NULL
)
4792 if (lwp
->resume
->kind
== resume_stop
)
4795 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4800 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4802 /* Stop the thread, and wait for the event asynchronously,
4803 through the event loop. */
4809 debug_printf ("already stopped LWP %ld\n",
4812 /* The LWP may have been stopped in an internal event that
4813 was not meant to be notified back to GDB (e.g., gdbserver
4814 breakpoint), so we should be reporting a stop event in
4817 /* If the thread already has a pending SIGSTOP, this is a
4818 no-op. Otherwise, something later will presumably resume
4819 the thread and this will cause it to cancel any pending
4820 operation, due to last_resume_kind == resume_stop. If
4821 the thread already has a pending status to report, we
4822 will still report it the next time we wait - see
4823 status_pending_p_callback. */
4825 /* If we already have a pending signal to report, then
4826 there's no need to queue a SIGSTOP, as this means we're
4827 midway through moving the LWP out of the jumppad, and we
4828 will report the pending signal as soon as that is
4830 if (lwp
->pending_signals_to_report
== NULL
)
4834 /* For stop requests, we're done. */
4836 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4840 /* If this thread which is about to be resumed has a pending status,
4841 then don't resume it - we can just report the pending status.
4842 Likewise if it is suspended, because e.g., another thread is
4843 stepping past a breakpoint. Make sure to queue any signals that
4844 would otherwise be sent. In all-stop mode, we do this decision
4845 based on if *any* thread has a pending status. If there's a
4846 thread that needs the step-over-breakpoint dance, then don't
4847 resume any other thread but that particular one. */
4848 leave_pending
= (lwp
->suspended
4849 || lwp
->status_pending_p
4850 || leave_all_stopped
);
4852 /* If we have a new signal, enqueue the signal. */
4853 if (lwp
->resume
->sig
!= 0)
4855 siginfo_t info
, *info_p
;
4857 /* If this is the same signal we were previously stopped by,
4858 make sure to queue its siginfo. */
4859 if (WIFSTOPPED (lwp
->last_status
)
4860 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
4861 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
),
4862 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
4867 enqueue_pending_signal (lwp
, lwp
->resume
->sig
, info_p
);
4873 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
4875 proceed_one_lwp (thread
, NULL
);
4880 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
/* Whether proceeded or left stopped, the request has been handled.  */
4883 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
/* NOTE(review): this region is garbled by extraction -- statements are
   split across physical lines and some original lines are missing.
   From the visible code: the target's resume entry point.  Matches the
   RESUME_INFO requests onto each thread, then decides a global policy:
   if any to-be-resumed thread has a pending status, or some thread
   needs a step-over, keep everything stopped (queueing signals via
   resume_one_thread); otherwise resume per-thread.  A needed step-over
   is then started, and the async event loop is poked so pending events
   get reported.  */
4888 linux_process_target::resume (thread_resume
*resume_info
, size_t n
)
4890 struct thread_info
*need_step_over
= NULL
;
4895 debug_printf ("linux_resume:\n");
4898 for_each_thread ([&] (thread_info
*thread
)
4900 linux_set_resume_request (thread
, resume_info
, n
);
4903 /* If there is a thread which would otherwise be resumed, which has
4904 a pending status, then don't resume any threads - we can just
4905 report the pending status. Make sure to queue any signals that
4906 would otherwise be sent. In non-stop mode, we'll apply this
4907 logic to each thread individually. We consume all pending events
4908 before considering to start a step-over (in all-stop). */
4909 bool any_pending
= false;
4911 any_pending
= find_thread ([this] (thread_info
*thread
)
4913 return resume_status_pending (thread
);
4916 /* If there is a thread which would otherwise be resumed, which is
4917 stopped at a breakpoint that needs stepping over, then don't
4918 resume any threads - have it step over the breakpoint with all
4919 other threads stopped, then resume all threads again. Make sure
4920 to queue any signals that would otherwise be delivered or
4922 if (!any_pending
&& low_supports_breakpoints ())
4923 need_step_over
= find_thread ([this] (thread_info
*thread
)
4925 return thread_needs_step_over (thread
);
4928 bool leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4932 if (need_step_over
!= NULL
)
4933 debug_printf ("Not resuming all, need step over\n");
4934 else if (any_pending
)
4935 debug_printf ("Not resuming, all-stop and found "
4936 "an LWP with pending status\n");
4938 debug_printf ("Resuming, no pending status or step over needed\n");
4941 /* Even if we're leaving threads stopped, queue all signals we'd
4942 otherwise deliver. */
4943 for_each_thread ([&] (thread_info
*thread
)
4945 resume_one_thread (thread
, leave_all_stopped
);
4949 start_step_over (get_thread_lwp (need_step_over
));
4953 debug_printf ("linux_resume done\n");
4957 /* We may have events that were pending that can/should be sent to
4958 the client now. Trigger a linux_wait call. */
4959 if (target_is_async_p ())
4964 linux_process_target::proceed_one_lwp (thread_info
*thread
, lwp_info
*except
)
4966 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4973 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4978 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4982 if (thread
->last_resume_kind
== resume_stop
4983 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
4986 debug_printf (" client wants LWP to remain %ld stopped\n",
4991 if (lwp
->status_pending_p
)
4994 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4999 gdb_assert (lwp
->suspended
>= 0);
5004 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
5008 if (thread
->last_resume_kind
== resume_stop
5009 && lwp
->pending_signals_to_report
== NULL
5010 && (lwp
->collecting_fast_tracepoint
5011 == fast_tpoint_collect_result::not_collecting
))
5013 /* We haven't reported this LWP as stopped yet (otherwise, the
5014 last_status.kind check above would catch it, and we wouldn't
5015 reach here. This LWP may have been momentarily paused by a
5016 stop_all_lwps call while handling for example, another LWP's
5017 step-over. In that case, the pending expected SIGSTOP signal
5018 that was queued at vCont;t handling time will have already
5019 been consumed by wait_for_sigstop, and so we need to requeue
5020 another one here. Note that if the LWP already has a SIGSTOP
5021 pending, this is a no-op. */
5024 debug_printf ("Client wants LWP %ld to stop. "
5025 "Making sure it has a SIGSTOP pending\n",
5031 if (thread
->last_resume_kind
== resume_step
)
5034 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5037 /* If resume_step is requested by GDB, install single-step
5038 breakpoints when the thread is about to be actually resumed if
5039 the single-step breakpoints weren't removed. */
5040 if (supports_software_single_step ()
5041 && !has_single_step_breakpoints (thread
))
5042 install_software_single_step_breakpoints (lwp
);
5044 step
= maybe_hw_step (thread
);
5046 else if (lwp
->bp_reinsert
!= 0)
5049 debug_printf (" stepping LWP %ld, reinsert set\n",
5052 step
= maybe_hw_step (thread
);
5057 resume_one_lwp (lwp
, step
, 0, NULL
);
5061 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info
*thread
,
5064 struct lwp_info
*lwp
= get_thread_lwp (thread
);
5069 lwp_suspended_decr (lwp
);
5071 proceed_one_lwp (thread
, except
);
5075 linux_process_target::proceed_all_lwps ()
5077 struct thread_info
*need_step_over
;
5079 /* If there is a thread which would otherwise be resumed, which is
5080 stopped at a breakpoint that needs stepping over, then don't
5081 resume any threads - have it step over the breakpoint with all
5082 other threads stopped, then resume all threads again. */
5084 if (low_supports_breakpoints ())
5086 need_step_over
= find_thread ([this] (thread_info
*thread
)
5088 return thread_needs_step_over (thread
);
5091 if (need_step_over
!= NULL
)
5094 debug_printf ("proceed_all_lwps: found "
5095 "thread %ld needing a step-over\n",
5096 lwpid_of (need_step_over
));
5098 start_step_over (get_thread_lwp (need_step_over
));
5104 debug_printf ("Proceeding, no step-over needed\n");
5106 for_each_thread ([this] (thread_info
*thread
)
5108 proceed_one_lwp (thread
, NULL
);
5113 linux_process_target::unstop_all_lwps (int unsuspend
, lwp_info
*except
)
5119 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5120 lwpid_of (get_lwp_thread (except
)));
5122 debug_printf ("unstopping all lwps\n");
5126 for_each_thread ([&] (thread_info
*thread
)
5128 unsuspend_and_proceed_one_lwp (thread
, except
);
5131 for_each_thread ([&] (thread_info
*thread
)
5133 proceed_one_lwp (thread
, except
);
5138 debug_printf ("unstop_all_lwps done\n");
5144 #ifdef HAVE_LINUX_REGSETS
5146 #define use_linux_regsets 1
5148 /* Returns true if REGSET has been disabled. */
5151 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
5153 return (info
->disabled_regsets
!= NULL
5154 && info
->disabled_regsets
[regset
- info
->regsets
]);
5157 /* Disable REGSET. */
5160 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
5164 dr_offset
= regset
- info
->regsets
;
5165 if (info
->disabled_regsets
== NULL
)
5166 info
->disabled_regsets
= (char *) xcalloc (1, info
->num_regsets
);
5167 info
->disabled_regsets
[dr_offset
] = 1;
5171 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
5172 struct regcache
*regcache
)
5174 struct regset_info
*regset
;
5175 int saw_general_regs
= 0;
5179 pid
= lwpid_of (current_thread
);
5180 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5185 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
5188 buf
= xmalloc (regset
->size
);
5190 nt_type
= regset
->nt_type
;
5194 iov
.iov_len
= regset
->size
;
5195 data
= (void *) &iov
;
5201 res
= ptrace (regset
->get_request
, pid
,
5202 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5204 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5209 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5211 /* If we get EIO on a regset, or an EINVAL and the regset is
5212 optional, do not try it again for this process mode. */
5213 disable_regset (regsets_info
, regset
);
5215 else if (errno
== ENODATA
)
5217 /* ENODATA may be returned if the regset is currently
5218 not "active". This can happen in normal operation,
5219 so suppress the warning in this case. */
5221 else if (errno
== ESRCH
)
5223 /* At this point, ESRCH should mean the process is
5224 already gone, in which case we simply ignore attempts
5225 to read its registers. */
5230 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5237 if (regset
->type
== GENERAL_REGS
)
5238 saw_general_regs
= 1;
5239 regset
->store_function (regcache
, buf
);
5243 if (saw_general_regs
)
5250 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
5251 struct regcache
*regcache
)
5253 struct regset_info
*regset
;
5254 int saw_general_regs
= 0;
5258 pid
= lwpid_of (current_thread
);
5259 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5264 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
5265 || regset
->fill_function
== NULL
)
5268 buf
= xmalloc (regset
->size
);
5270 /* First fill the buffer with the current register set contents,
5271 in case there are any items in the kernel's regset that are
5272 not in gdbserver's regcache. */
5274 nt_type
= regset
->nt_type
;
5278 iov
.iov_len
= regset
->size
;
5279 data
= (void *) &iov
;
5285 res
= ptrace (regset
->get_request
, pid
,
5286 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5288 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5293 /* Then overlay our cached registers on that. */
5294 regset
->fill_function (regcache
, buf
);
5296 /* Only now do we write the register set. */
5298 res
= ptrace (regset
->set_request
, pid
,
5299 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5301 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
5308 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5310 /* If we get EIO on a regset, or an EINVAL and the regset is
5311 optional, do not try it again for this process mode. */
5312 disable_regset (regsets_info
, regset
);
5314 else if (errno
== ESRCH
)
5316 /* At this point, ESRCH should mean the process is
5317 already gone, in which case we simply ignore attempts
5318 to change its registers. See also the related
5319 comment in resume_one_lwp. */
5325 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5328 else if (regset
->type
== GENERAL_REGS
)
5329 saw_general_regs
= 1;
5332 if (saw_general_regs
)
5338 #else /* !HAVE_LINUX_REGSETS */
5340 #define use_linux_regsets 0
5341 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5342 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5346 /* Return 1 if register REGNO is supported by one of the regset ptrace
5347 calls or 0 if it has to be transferred individually. */
5350 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
5352 unsigned char mask
= 1 << (regno
% 8);
5353 size_t index
= regno
/ 8;
5355 return (use_linux_regsets
5356 && (regs_info
->regset_bitmap
== NULL
5357 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
5360 #ifdef HAVE_LINUX_USRREGS
5363 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
5367 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
5368 error ("Invalid register number %d.", regnum
);
5370 addr
= usrregs
->regmap
[regnum
];
5377 linux_process_target::fetch_register (const usrregs_info
*usrregs
,
5378 regcache
*regcache
, int regno
)
5385 if (regno
>= usrregs
->num_regs
)
5387 if (low_cannot_fetch_register (regno
))
5390 regaddr
= register_addr (usrregs
, regno
);
5394 size
= ((register_size (regcache
->tdesc
, regno
)
5395 + sizeof (PTRACE_XFER_TYPE
) - 1)
5396 & -sizeof (PTRACE_XFER_TYPE
));
5397 buf
= (char *) alloca (size
);
5399 pid
= lwpid_of (current_thread
);
5400 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5403 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
5404 ptrace (PTRACE_PEEKUSER
, pid
,
5405 /* Coerce to a uintptr_t first to avoid potential gcc warning
5406 of coercing an 8 byte integer to a 4 byte pointer. */
5407 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
5408 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5411 /* Mark register REGNO unavailable. */
5412 supply_register (regcache
, regno
, NULL
);
5417 low_supply_ptrace_register (regcache
, regno
, buf
);
5421 linux_process_target::store_register (const usrregs_info
*usrregs
,
5422 regcache
*regcache
, int regno
)
5429 if (regno
>= usrregs
->num_regs
)
5431 if (low_cannot_store_register (regno
))
5434 regaddr
= register_addr (usrregs
, regno
);
5438 size
= ((register_size (regcache
->tdesc
, regno
)
5439 + sizeof (PTRACE_XFER_TYPE
) - 1)
5440 & -sizeof (PTRACE_XFER_TYPE
));
5441 buf
= (char *) alloca (size
);
5442 memset (buf
, 0, size
);
5444 low_collect_ptrace_register (regcache
, regno
, buf
);
5446 pid
= lwpid_of (current_thread
);
5447 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5450 ptrace (PTRACE_POKEUSER
, pid
,
5451 /* Coerce to a uintptr_t first to avoid potential gcc warning
5452 about coercing an 8 byte integer to a 4 byte pointer. */
5453 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
5454 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
5457 /* At this point, ESRCH should mean the process is
5458 already gone, in which case we simply ignore attempts
5459 to change its registers. See also the related
5460 comment in resume_one_lwp. */
5465 if (!low_cannot_store_register (regno
))
5466 error ("writing register %d: %s", regno
, safe_strerror (errno
));
5468 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5471 #endif /* HAVE_LINUX_USRREGS */
5474 linux_process_target::low_collect_ptrace_register (regcache
*regcache
,
5475 int regno
, char *buf
)
5477 collect_register (regcache
, regno
, buf
);
5481 linux_process_target::low_supply_ptrace_register (regcache
*regcache
,
5482 int regno
, const char *buf
)
5484 supply_register (regcache
, regno
, buf
);
5488 linux_process_target::usr_fetch_inferior_registers (const regs_info
*regs_info
,
5492 #ifdef HAVE_LINUX_USRREGS
5493 struct usrregs_info
*usr
= regs_info
->usrregs
;
5497 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5498 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5499 fetch_register (usr
, regcache
, regno
);
5502 fetch_register (usr
, regcache
, regno
);
5507 linux_process_target::usr_store_inferior_registers (const regs_info
*regs_info
,
5511 #ifdef HAVE_LINUX_USRREGS
5512 struct usrregs_info
*usr
= regs_info
->usrregs
;
5516 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5517 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5518 store_register (usr
, regcache
, regno
);
5521 store_register (usr
, regcache
, regno
);
5526 linux_process_target::fetch_registers (regcache
*regcache
, int regno
)
5530 const regs_info
*regs_info
= get_regs_info ();
5534 if (regs_info
->usrregs
!= NULL
)
5535 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
5536 low_fetch_register (regcache
, regno
);
5538 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
5539 if (regs_info
->usrregs
!= NULL
)
5540 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
5544 if (low_fetch_register (regcache
, regno
))
5547 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5549 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
5551 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5552 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
5557 linux_process_target::store_registers (regcache
*regcache
, int regno
)
5561 const regs_info
*regs_info
= get_regs_info ();
5565 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5567 if (regs_info
->usrregs
!= NULL
)
5568 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
5572 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5574 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5576 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5577 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
5582 linux_process_target::low_fetch_register (regcache
*regcache
, int regno
)
5587 /* A wrapper for the read_memory target op. */
5590 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
5592 return the_target
->read_memory (memaddr
, myaddr
, len
);
5595 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5596 to debugger memory starting at MYADDR. */
5599 linux_process_target::read_memory (CORE_ADDR memaddr
,
5600 unsigned char *myaddr
, int len
)
5602 int pid
= lwpid_of (current_thread
);
5603 PTRACE_XFER_TYPE
*buffer
;
5611 /* Try using /proc. Don't bother for one word. */
5612 if (len
>= 3 * sizeof (long))
5616 /* We could keep this file open and cache it - possibly one per
5617 thread. That requires some juggling, but is even faster. */
5618 sprintf (filename
, "/proc/%d/mem", pid
);
5619 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
5623 /* If pread64 is available, use it. It's faster if the kernel
5624 supports it (only one syscall), and it's 64-bit safe even on
5625 32-bit platforms (for instance, SPARC debugging a SPARC64
5628 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
5631 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
5632 bytes
= read (fd
, myaddr
, len
);
5639 /* Some data was read, we'll try to get the rest with ptrace. */
5649 /* Round starting address down to longword boundary. */
5650 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5651 /* Round ending address up; get number of longwords that makes. */
5652 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5653 / sizeof (PTRACE_XFER_TYPE
));
5654 /* Allocate buffer of that many longwords. */
5655 buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5657 /* Read all the longwords */
5659 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5661 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5662 about coercing an 8 byte integer to a 4 byte pointer. */
5663 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
5664 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5665 (PTRACE_TYPE_ARG4
) 0);
5671 /* Copy appropriate bytes out of the buffer. */
5674 i
*= sizeof (PTRACE_XFER_TYPE
);
5675 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
5677 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5684 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5685 memory at MEMADDR. On failure (cannot write to the inferior)
5686 returns the value of errno. Always succeeds if LEN is zero. */
5689 linux_process_target::write_memory (CORE_ADDR memaddr
,
5690 const unsigned char *myaddr
, int len
)
5693 /* Round starting address down to longword boundary. */
5694 CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5695 /* Round ending address up; get number of longwords that makes. */
5697 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5698 / sizeof (PTRACE_XFER_TYPE
);
5700 /* Allocate buffer of that many longwords. */
5701 PTRACE_XFER_TYPE
*buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5703 int pid
= lwpid_of (current_thread
);
5707 /* Zero length write always succeeds. */
5713 /* Dump up to four bytes. */
5714 char str
[4 * 2 + 1];
5716 int dump
= len
< 4 ? len
: 4;
5718 for (i
= 0; i
< dump
; i
++)
5720 sprintf (p
, "%02x", myaddr
[i
]);
5725 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5726 str
, (long) memaddr
, pid
);
5729 /* Fill start and end extra bytes of buffer with existing memory data. */
5732 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5733 about coercing an 8 byte integer to a 4 byte pointer. */
5734 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
5735 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5736 (PTRACE_TYPE_ARG4
) 0);
5744 = ptrace (PTRACE_PEEKTEXT
, pid
,
5745 /* Coerce to a uintptr_t first to avoid potential gcc warning
5746 about coercing an 8 byte integer to a 4 byte pointer. */
5747 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
5748 * sizeof (PTRACE_XFER_TYPE
)),
5749 (PTRACE_TYPE_ARG4
) 0);
5754 /* Copy data to be written over corresponding part of buffer. */
5756 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5759 /* Write the entire buffer. */
5761 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5764 ptrace (PTRACE_POKETEXT
, pid
,
5765 /* Coerce to a uintptr_t first to avoid potential gcc warning
5766 about coercing an 8 byte integer to a 4 byte pointer. */
5767 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5768 (PTRACE_TYPE_ARG4
) buffer
[i
]);
5777 linux_process_target::look_up_symbols ()
5779 #ifdef USE_THREAD_DB
5780 struct process_info
*proc
= current_process ();
5782 if (proc
->priv
->thread_db
!= NULL
)
5790 linux_process_target::request_interrupt ()
5792 /* Send a SIGINT to the process group. This acts just like the user
5793 typed a ^C on the controlling terminal. */
5794 ::kill (-signal_pid
, SIGINT
);
5798 linux_process_target::supports_read_auxv ()
5803 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5804 to debugger memory starting at MYADDR. */
5807 linux_process_target::read_auxv (CORE_ADDR offset
, unsigned char *myaddr
,
5810 char filename
[PATH_MAX
];
5812 int pid
= lwpid_of (current_thread
);
5814 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5816 fd
= open (filename
, O_RDONLY
);
5820 if (offset
!= (CORE_ADDR
) 0
5821 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5824 n
= read (fd
, myaddr
, len
);
5832 linux_process_target::insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5833 int size
, raw_breakpoint
*bp
)
5835 if (type
== raw_bkpt_type_sw
)
5836 return insert_memory_breakpoint (bp
);
5838 return low_insert_point (type
, addr
, size
, bp
);
5842 linux_process_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
5843 int size
, raw_breakpoint
*bp
)
5845 /* Unsupported (see target.h). */
5850 linux_process_target::remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5851 int size
, raw_breakpoint
*bp
)
5853 if (type
== raw_bkpt_type_sw
)
5854 return remove_memory_breakpoint (bp
);
5856 return low_remove_point (type
, addr
, size
, bp
);
5860 linux_process_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
5861 int size
, raw_breakpoint
*bp
)
5863 /* Unsupported (see target.h). */
5867 /* Implement the stopped_by_sw_breakpoint target_ops
5871 linux_process_target::stopped_by_sw_breakpoint ()
5873 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5875 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
5878 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5882 linux_process_target::supports_stopped_by_sw_breakpoint ()
5884 return USE_SIGTRAP_SIGINFO
;
5887 /* Implement the stopped_by_hw_breakpoint target_ops
5891 linux_process_target::stopped_by_hw_breakpoint ()
5893 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5895 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
5898 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5902 linux_process_target::supports_stopped_by_hw_breakpoint ()
5904 return USE_SIGTRAP_SIGINFO
;
5907 /* Implement the supports_hardware_single_step target_ops method. */
5910 linux_process_target::supports_hardware_single_step ()
5912 return can_hardware_single_step ();
5916 linux_process_target::stopped_by_watchpoint ()
5918 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5920 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
5924 linux_process_target::stopped_data_address ()
5926 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5928 return lwp
->stopped_data_address
;
5931 /* This is only used for targets that define PT_TEXT_ADDR,
5932 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5933 the target has different ways of acquiring this information, like
5937 linux_process_target::supports_read_offsets ()
5939 #ifdef SUPPORTS_READ_OFFSETS
5946 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5947 to tell gdb about. */
5950 linux_process_target::read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
5952 #ifdef SUPPORTS_READ_OFFSETS
5953 unsigned long text
, text_end
, data
;
5954 int pid
= lwpid_of (current_thread
);
5958 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
5959 (PTRACE_TYPE_ARG4
) 0);
5960 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
5961 (PTRACE_TYPE_ARG4
) 0);
5962 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
5963 (PTRACE_TYPE_ARG4
) 0);
5967 /* Both text and data offsets produced at compile-time (and so
5968 used by gdb) are relative to the beginning of the program,
5969 with the data segment immediately following the text segment.
5970 However, the actual runtime layout in memory may put the data
5971 somewhere else, so when we send gdb a data base-address, we
5972 use the real data base address and subtract the compile-time
5973 data base-address from it (which is just the length of the
5974 text segment). BSS immediately follows data in both
5977 *data_p
= data
- (text_end
- text
);
5983 gdb_assert_not_reached ("target op read_offsets not supported");
5988 linux_process_target::supports_get_tls_address ()
5990 #ifdef USE_THREAD_DB
5998 linux_process_target::get_tls_address (thread_info
*thread
,
6000 CORE_ADDR load_module
,
6003 #ifdef USE_THREAD_DB
6004 return thread_db_get_tls_address (thread
, offset
, load_module
, address
);
6011 linux_process_target::supports_qxfer_osdata ()
6017 linux_process_target::qxfer_osdata (const char *annex
,
6018 unsigned char *readbuf
,
6019 unsigned const char *writebuf
,
6020 CORE_ADDR offset
, int len
)
6022 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
6026 linux_process_target::siginfo_fixup (siginfo_t
*siginfo
,
6027 gdb_byte
*inf_siginfo
, int direction
)
6029 bool done
= low_siginfo_fixup (siginfo
, inf_siginfo
, direction
);
6031 /* If there was no callback, or the callback didn't do anything,
6032 then just do a straight memcpy. */
6036 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
6038 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
6043 linux_process_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
6050 linux_process_target::supports_qxfer_siginfo ()
6056 linux_process_target::qxfer_siginfo (const char *annex
,
6057 unsigned char *readbuf
,
6058 unsigned const char *writebuf
,
6059 CORE_ADDR offset
, int len
)
6063 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
6065 if (current_thread
== NULL
)
6068 pid
= lwpid_of (current_thread
);
6071 debug_printf ("%s siginfo for lwp %d.\n",
6072 readbuf
!= NULL
? "Reading" : "Writing",
6075 if (offset
>= sizeof (siginfo
))
6078 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6081 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6082 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6083 inferior with a 64-bit GDBSERVER should look the same as debugging it
6084 with a 32-bit GDBSERVER, we need to convert it. */
6085 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
6087 if (offset
+ len
> sizeof (siginfo
))
6088 len
= sizeof (siginfo
) - offset
;
6090 if (readbuf
!= NULL
)
6091 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
6094 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
6096 /* Convert back to ptrace layout before flushing it out. */
6097 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
6099 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6106 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6107 so we notice when children change state; as the handler for the
6108 sigsuspend in my_waitpid. */
6111 sigchld_handler (int signo
)
6113 int old_errno
= errno
;
6119 /* Use the async signal safe debug function. */
6120 if (debug_write ("sigchld_handler\n",
6121 sizeof ("sigchld_handler\n") - 1) < 0)
6122 break; /* just ignore */
6126 if (target_is_async_p ())
6127 async_file_mark (); /* trigger a linux_wait */
6133 linux_process_target::supports_non_stop ()
6139 linux_process_target::async (bool enable
)
6141 bool previous
= target_is_async_p ();
6144 debug_printf ("linux_async (%d), previous=%d\n",
6147 if (previous
!= enable
)
6150 sigemptyset (&mask
);
6151 sigaddset (&mask
, SIGCHLD
);
6153 gdb_sigmask (SIG_BLOCK
, &mask
, NULL
);
6157 if (pipe (linux_event_pipe
) == -1)
6159 linux_event_pipe
[0] = -1;
6160 linux_event_pipe
[1] = -1;
6161 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6163 warning ("creating event pipe failed.");
6167 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
6168 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
6170 /* Register the event loop handler. */
6171 add_file_handler (linux_event_pipe
[0],
6172 handle_target_event
, NULL
);
6174 /* Always trigger a linux_wait. */
6179 delete_file_handler (linux_event_pipe
[0]);
6181 close (linux_event_pipe
[0]);
6182 close (linux_event_pipe
[1]);
6183 linux_event_pipe
[0] = -1;
6184 linux_event_pipe
[1] = -1;
6187 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6194 linux_process_target::start_non_stop (bool nonstop
)
6196 /* Register or unregister from event-loop accordingly. */
6197 target_async (nonstop
);
6199 if (target_is_async_p () != (nonstop
!= false))
6206 linux_process_target::supports_multi_process ()
6211 /* Check if fork events are supported. */
6214 linux_process_target::supports_fork_events ()
6216 return linux_supports_tracefork ();
6219 /* Check if vfork events are supported. */
6222 linux_process_target::supports_vfork_events ()
6224 return linux_supports_tracefork ();
6227 /* Check if exec events are supported. */
6230 linux_process_target::supports_exec_events ()
6232 return linux_supports_traceexec ();
6235 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6236 ptrace flags for all inferiors. This is in case the new GDB connection
6237 doesn't support the same set of events that the previous one did. */
6240 linux_process_target::handle_new_gdb_connection ()
6242 /* Request that all the lwps reset their ptrace options. */
6243 for_each_thread ([] (thread_info
*thread
)
6245 struct lwp_info
*lwp
= get_thread_lwp (thread
);
6249 /* Stop the lwp so we can modify its ptrace options. */
6250 lwp
->must_set_ptrace_flags
= 1;
6251 linux_stop_lwp (lwp
);
6255 /* Already stopped; go ahead and set the ptrace options. */
6256 struct process_info
*proc
= find_process_pid (pid_of (thread
));
6257 int options
= linux_low_ptrace_options (proc
->attached
);
6259 linux_enable_event_reporting (lwpid_of (thread
), options
);
6260 lwp
->must_set_ptrace_flags
= 0;
6266 linux_process_target::handle_monitor_command (char *mon
)
6268 #ifdef USE_THREAD_DB
6269 return thread_db_handle_monitor_command (mon
);
6276 linux_process_target::core_of_thread (ptid_t ptid
)
6278 return linux_common_core_of_thread (ptid
);
6282 linux_process_target::supports_disable_randomization ()
6284 #ifdef HAVE_PERSONALITY
6292 linux_process_target::supports_agent ()
6298 linux_process_target::supports_range_stepping ()
6300 if (supports_software_single_step ())
6302 if (*the_low_target
.supports_range_stepping
== NULL
)
6305 return (*the_low_target
.supports_range_stepping
) ();
6309 linux_process_target::supports_pid_to_exec_file ()
6315 linux_process_target::pid_to_exec_file (int pid
)
6317 return linux_proc_pid_to_exec_file (pid
);
6321 linux_process_target::supports_multifs ()
6327 linux_process_target::multifs_open (int pid
, const char *filename
,
6328 int flags
, mode_t mode
)
6330 return linux_mntns_open_cloexec (pid
, filename
, flags
, mode
);
6334 linux_process_target::multifs_unlink (int pid
, const char *filename
)
6336 return linux_mntns_unlink (pid
, filename
);
6340 linux_process_target::multifs_readlink (int pid
, const char *filename
,
6341 char *buf
, size_t bufsiz
)
6343 return linux_mntns_readlink (pid
, filename
, buf
, bufsiz
);
6346 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6347 struct target_loadseg
6349 /* Core address to which the segment is mapped. */
6351 /* VMA recorded in the program header. */
6353 /* Size of this segment in memory. */
6357 # if defined PT_GETDSBT
6358 struct target_loadmap
6360 /* Protocol version number, must be zero. */
6362 /* Pointer to the DSBT table, its size, and the DSBT index. */
6363 unsigned *dsbt_table
;
6364 unsigned dsbt_size
, dsbt_index
;
6365 /* Number of segments in this map. */
6367 /* The actual memory map. */
6368 struct target_loadseg segs
[/*nsegs*/];
6370 # define LINUX_LOADMAP PT_GETDSBT
6371 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6372 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6374 struct target_loadmap
6376 /* Protocol version number, must be zero. */
6378 /* Number of segments in this map. */
6380 /* The actual memory map. */
6381 struct target_loadseg segs
[/*nsegs*/];
6383 # define LINUX_LOADMAP PTRACE_GETFDPIC
6384 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6385 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6389 linux_process_target::supports_read_loadmap ()
6395 linux_process_target::read_loadmap (const char *annex
, CORE_ADDR offset
,
6396 unsigned char *myaddr
, unsigned int len
)
6398 int pid
= lwpid_of (current_thread
);
6400 struct target_loadmap
*data
= NULL
;
6401 unsigned int actual_length
, copy_length
;
6403 if (strcmp (annex
, "exec") == 0)
6404 addr
= (int) LINUX_LOADMAP_EXEC
;
6405 else if (strcmp (annex
, "interp") == 0)
6406 addr
= (int) LINUX_LOADMAP_INTERP
;
6410 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
6416 actual_length
= sizeof (struct target_loadmap
)
6417 + sizeof (struct target_loadseg
) * data
->nsegs
;
6419 if (offset
< 0 || offset
> actual_length
)
6422 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
6423 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
6426 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6429 linux_process_target::process_qsupported (char **features
, int count
)
6431 if (the_low_target
.process_qsupported
!= NULL
)
6432 the_low_target
.process_qsupported (features
, count
);
6436 linux_process_target::supports_catch_syscall ()
6438 return (the_low_target
.get_syscall_trapinfo
!= NULL
6439 && linux_supports_tracesysgood ());
6443 linux_process_target::get_ipa_tdesc_idx ()
6445 if (the_low_target
.get_ipa_tdesc_idx
== NULL
)
6448 return (*the_low_target
.get_ipa_tdesc_idx
) ();
6452 linux_process_target::supports_tracepoints ()
6454 if (*the_low_target
.supports_tracepoints
== NULL
)
6457 return (*the_low_target
.supports_tracepoints
) ();
6461 linux_process_target::read_pc (regcache
*regcache
)
6463 if (!low_supports_breakpoints ())
6466 return low_get_pc (regcache
);
6470 linux_process_target::write_pc (regcache
*regcache
, CORE_ADDR pc
)
6472 gdb_assert (low_supports_breakpoints ());
6474 low_set_pc (regcache
, pc
);
6478 linux_process_target::supports_thread_stopped ()
6484 linux_process_target::thread_stopped (thread_info
*thread
)
6486 return get_thread_lwp (thread
)->stopped
;
6489 /* This exposes stop-all-threads functionality to other modules. */
6492 linux_process_target::pause_all (bool freeze
)
6494 stop_all_lwps (freeze
, NULL
);
6497 /* This exposes unstop-all-threads functionality to other gdbserver
6501 linux_process_target::unpause_all (bool unfreeze
)
6503 unstop_all_lwps (unfreeze
, NULL
);
6507 linux_process_target::prepare_to_access_memory ()
6509 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6512 target_pause_all (true);
6517 linux_process_target::done_accessing_memory ()
6519 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6522 target_unpause_all (true);
6526 linux_process_target::supports_fast_tracepoints ()
6528 return the_low_target
.install_fast_tracepoint_jump_pad
!= nullptr;
6532 linux_process_target::install_fast_tracepoint_jump_pad
6533 (CORE_ADDR tpoint
, CORE_ADDR tpaddr
, CORE_ADDR collector
,
6534 CORE_ADDR lockaddr
, ULONGEST orig_size
, CORE_ADDR
*jump_entry
,
6535 CORE_ADDR
*trampoline
, ULONGEST
*trampoline_size
,
6536 unsigned char *jjump_pad_insn
, ULONGEST
*jjump_pad_insn_size
,
6537 CORE_ADDR
*adjusted_insn_addr
, CORE_ADDR
*adjusted_insn_addr_end
,
6540 return (*the_low_target
.install_fast_tracepoint_jump_pad
)
6541 (tpoint
, tpaddr
, collector
, lockaddr
, orig_size
,
6542 jump_entry
, trampoline
, trampoline_size
,
6543 jjump_pad_insn
, jjump_pad_insn_size
,
6544 adjusted_insn_addr
, adjusted_insn_addr_end
,
6549 linux_process_target::emit_ops ()
6551 if (the_low_target
.emit_ops
!= NULL
)
6552 return (*the_low_target
.emit_ops
) ();
6558 linux_process_target::get_min_fast_tracepoint_insn_len ()
6560 return (*the_low_target
.get_min_fast_tracepoint_insn_len
) ();
6563 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6566 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
6567 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
6569 char filename
[PATH_MAX
];
6571 const int auxv_size
= is_elf64
6572 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
6573 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
6575 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
6577 fd
= open (filename
, O_RDONLY
);
6583 while (read (fd
, buf
, auxv_size
) == auxv_size
6584 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
6588 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
6590 switch (aux
->a_type
)
6593 *phdr_memaddr
= aux
->a_un
.a_val
;
6596 *num_phdr
= aux
->a_un
.a_val
;
6602 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
6604 switch (aux
->a_type
)
6607 *phdr_memaddr
= aux
->a_un
.a_val
;
6610 *num_phdr
= aux
->a_un
.a_val
;
6618 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
6620 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6621 "phdr_memaddr = %ld, phdr_num = %d",
6622 (long) *phdr_memaddr
, *num_phdr
);
6629 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6632 get_dynamic (const int pid
, const int is_elf64
)
6634 CORE_ADDR phdr_memaddr
, relocation
;
6636 unsigned char *phdr_buf
;
6637 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
6639 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
6642 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
6643 phdr_buf
= (unsigned char *) alloca (num_phdr
* phdr_size
);
6645 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
6648 /* Compute relocation: it is expected to be 0 for "regular" executables,
6649 non-zero for PIE ones. */
6651 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
6654 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6656 if (p
->p_type
== PT_PHDR
)
6657 relocation
= phdr_memaddr
- p
->p_vaddr
;
6661 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6663 if (p
->p_type
== PT_PHDR
)
6664 relocation
= phdr_memaddr
- p
->p_vaddr
;
6667 if (relocation
== -1)
6669 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6670 any real world executables, including PIE executables, have always
6671 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6672 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6673 or present DT_DEBUG anyway (fpc binaries are statically linked).
6675 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6677 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6682 for (i
= 0; i
< num_phdr
; i
++)
6686 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6688 if (p
->p_type
== PT_DYNAMIC
)
6689 return p
->p_vaddr
+ relocation
;
6693 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6695 if (p
->p_type
== PT_DYNAMIC
)
6696 return p
->p_vaddr
+ relocation
;
6703 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6704 can be 0 if the inferior does not yet have the library list initialized.
6705 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6706 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6709 get_r_debug (const int pid
, const int is_elf64
)
6711 CORE_ADDR dynamic_memaddr
;
6712 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
6713 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
6716 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
6717 if (dynamic_memaddr
== 0)
6720 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
6724 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
6725 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6729 unsigned char buf
[sizeof (Elf64_Xword
)];
6733 #ifdef DT_MIPS_RLD_MAP
6734 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6736 if (linux_read_memory (dyn
->d_un
.d_val
,
6737 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6742 #endif /* DT_MIPS_RLD_MAP */
6743 #ifdef DT_MIPS_RLD_MAP_REL
6744 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6746 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6747 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6752 #endif /* DT_MIPS_RLD_MAP_REL */
6754 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6755 map
= dyn
->d_un
.d_val
;
6757 if (dyn
->d_tag
== DT_NULL
)
6762 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
6763 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6767 unsigned char buf
[sizeof (Elf32_Word
)];
6771 #ifdef DT_MIPS_RLD_MAP
6772 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6774 if (linux_read_memory (dyn
->d_un
.d_val
,
6775 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6780 #endif /* DT_MIPS_RLD_MAP */
6781 #ifdef DT_MIPS_RLD_MAP_REL
6782 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6784 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6785 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6790 #endif /* DT_MIPS_RLD_MAP_REL */
6792 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6793 map
= dyn
->d_un
.d_val
;
6795 if (dyn
->d_tag
== DT_NULL
)
6799 dynamic_memaddr
+= dyn_size
;
6805 /* Read one pointer from MEMADDR in the inferior. */
6808 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
6812 /* Go through a union so this works on either big or little endian
6813 hosts, when the inferior's pointer size is smaller than the size
6814 of CORE_ADDR. It is assumed the inferior's endianness is the
6815 same of the superior's. */
6818 CORE_ADDR core_addr
;
6823 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
6826 if (ptr_size
== sizeof (CORE_ADDR
))
6827 *ptr
= addr
.core_addr
;
6828 else if (ptr_size
== sizeof (unsigned int))
6831 gdb_assert_not_reached ("unhandled pointer size");
6837 linux_process_target::supports_qxfer_libraries_svr4 ()
/* Field offsets inside the inferior's `struct r_debug' and `struct
   link_map', for one pointer size.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6866 /* Construct qXfer:libraries-svr4:read reply. */
6869 linux_process_target::qxfer_libraries_svr4 (const char *annex
,
6870 unsigned char *readbuf
,
6871 unsigned const char *writebuf
,
6872 CORE_ADDR offset
, int len
)
6874 struct process_info_private
*const priv
= current_process ()->priv
;
6875 char filename
[PATH_MAX
];
6878 static const struct link_map_offsets lmo_32bit_offsets
=
6880 0, /* r_version offset. */
6881 4, /* r_debug.r_map offset. */
6882 0, /* l_addr offset in link_map. */
6883 4, /* l_name offset in link_map. */
6884 8, /* l_ld offset in link_map. */
6885 12, /* l_next offset in link_map. */
6886 16 /* l_prev offset in link_map. */
6889 static const struct link_map_offsets lmo_64bit_offsets
=
6891 0, /* r_version offset. */
6892 8, /* r_debug.r_map offset. */
6893 0, /* l_addr offset in link_map. */
6894 8, /* l_name offset in link_map. */
6895 16, /* l_ld offset in link_map. */
6896 24, /* l_next offset in link_map. */
6897 32 /* l_prev offset in link_map. */
6899 const struct link_map_offsets
*lmo
;
6900 unsigned int machine
;
6902 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
6903 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
6904 int header_done
= 0;
6906 if (writebuf
!= NULL
)
6908 if (readbuf
== NULL
)
6911 pid
= lwpid_of (current_thread
);
6912 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
6913 is_elf64
= elf_64_file_p (filename
, &machine
);
6914 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
6915 ptr_size
= is_elf64
? 8 : 4;
6917 while (annex
[0] != '\0')
6923 sep
= strchr (annex
, '=');
6927 name_len
= sep
- annex
;
6928 if (name_len
== 5 && startswith (annex
, "start"))
6930 else if (name_len
== 4 && startswith (annex
, "prev"))
6934 annex
= strchr (sep
, ';');
6941 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
6948 if (priv
->r_debug
== 0)
6949 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
6951 /* We failed to find DT_DEBUG. Such situation will not change
6952 for this inferior - do not retry it. Report it to GDB as
6953 E01, see for the reasons at the GDB solib-svr4.c side. */
6954 if (priv
->r_debug
== (CORE_ADDR
) -1)
6957 if (priv
->r_debug
!= 0)
6959 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
6960 (unsigned char *) &r_version
,
6961 sizeof (r_version
)) != 0
6964 warning ("unexpected r_debug version %d", r_version
);
6966 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
6967 &lm_addr
, ptr_size
) != 0)
6969 warning ("unable to read r_map from 0x%lx",
6970 (long) priv
->r_debug
+ lmo
->r_map_offset
);
6975 std::string document
= "<library-list-svr4 version=\"1.0\"";
6978 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
6979 &l_name
, ptr_size
) == 0
6980 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
6981 &l_addr
, ptr_size
) == 0
6982 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
6983 &l_ld
, ptr_size
) == 0
6984 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
6985 &l_prev
, ptr_size
) == 0
6986 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
6987 &l_next
, ptr_size
) == 0)
6989 unsigned char libname
[PATH_MAX
];
6991 if (lm_prev
!= l_prev
)
6993 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6994 (long) lm_prev
, (long) l_prev
);
6998 /* Ignore the first entry even if it has valid name as the first entry
6999 corresponds to the main executable. The first entry should not be
7000 skipped if the dynamic loader was loaded late by a static executable
7001 (see solib-svr4.c parameter ignore_first). But in such case the main
7002 executable does not have PT_DYNAMIC present and this function already
7003 exited above due to failed get_r_debug. */
7005 string_appendf (document
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
7008 /* Not checking for error because reading may stop before
7009 we've got PATH_MAX worth of characters. */
7011 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
7012 libname
[sizeof (libname
) - 1] = '\0';
7013 if (libname
[0] != '\0')
7017 /* Terminate `<library-list-svr4'. */
7022 string_appendf (document
, "<library name=\"");
7023 xml_escape_text_append (&document
, (char *) libname
);
7024 string_appendf (document
, "\" lm=\"0x%lx\" "
7025 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7026 (unsigned long) lm_addr
, (unsigned long) l_addr
,
7027 (unsigned long) l_ld
);
7037 /* Empty list; terminate `<library-list-svr4'. */
7041 document
+= "</library-list-svr4>";
7043 int document_len
= document
.length ();
7044 if (offset
< document_len
)
7045 document_len
-= offset
;
7048 if (len
> document_len
)
7051 memcpy (readbuf
, document
.data () + offset
, len
);
7056 #ifdef HAVE_LINUX_BTRACE
7058 btrace_target_info
*
7059 linux_process_target::enable_btrace (ptid_t ptid
,
7060 const btrace_config
*conf
)
7062 return linux_enable_btrace (ptid
, conf
);
7065 /* See to_disable_btrace target method. */
7068 linux_process_target::disable_btrace (btrace_target_info
*tinfo
)
7070 enum btrace_error err
;
7072 err
= linux_disable_btrace (tinfo
);
7073 return (err
== BTRACE_ERR_NONE
? 0 : -1);
7076 /* Encode an Intel Processor Trace configuration. */
7079 linux_low_encode_pt_config (struct buffer
*buffer
,
7080 const struct btrace_data_pt_config
*config
)
7082 buffer_grow_str (buffer
, "<pt-config>\n");
7084 switch (config
->cpu
.vendor
)
7087 buffer_xml_printf (buffer
, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7088 "model=\"%u\" stepping=\"%u\"/>\n",
7089 config
->cpu
.family
, config
->cpu
.model
,
7090 config
->cpu
.stepping
);
7097 buffer_grow_str (buffer
, "</pt-config>\n");
7100 /* Encode a raw buffer. */
7103 linux_low_encode_raw (struct buffer
*buffer
, const gdb_byte
*data
,
7109 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7110 buffer_grow_str (buffer
, "<raw>\n");
7116 elem
[0] = tohex ((*data
>> 4) & 0xf);
7117 elem
[1] = tohex (*data
++ & 0xf);
7119 buffer_grow (buffer
, elem
, 2);
7122 buffer_grow_str (buffer
, "</raw>\n");
7125 /* See to_read_btrace target method. */
7128 linux_process_target::read_btrace (btrace_target_info
*tinfo
,
7130 enum btrace_read_type type
)
7132 struct btrace_data btrace
;
7133 enum btrace_error err
;
7135 err
= linux_read_btrace (&btrace
, tinfo
, type
);
7136 if (err
!= BTRACE_ERR_NONE
)
7138 if (err
== BTRACE_ERR_OVERFLOW
)
7139 buffer_grow_str0 (buffer
, "E.Overflow.");
7141 buffer_grow_str0 (buffer
, "E.Generic Error.");
7146 switch (btrace
.format
)
7148 case BTRACE_FORMAT_NONE
:
7149 buffer_grow_str0 (buffer
, "E.No Trace.");
7152 case BTRACE_FORMAT_BTS
:
7153 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7154 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7156 for (const btrace_block
&block
: *btrace
.variant
.bts
.blocks
)
7157 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7158 paddress (block
.begin
), paddress (block
.end
));
7160 buffer_grow_str0 (buffer
, "</btrace>\n");
7163 case BTRACE_FORMAT_PT
:
7164 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7165 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7166 buffer_grow_str (buffer
, "<pt>\n");
7168 linux_low_encode_pt_config (buffer
, &btrace
.variant
.pt
.config
);
7170 linux_low_encode_raw (buffer
, btrace
.variant
.pt
.data
,
7171 btrace
.variant
.pt
.size
);
7173 buffer_grow_str (buffer
, "</pt>\n");
7174 buffer_grow_str0 (buffer
, "</btrace>\n");
7178 buffer_grow_str0 (buffer
, "E.Unsupported Trace Format.");
7185 /* See to_btrace_conf target method. */
7188 linux_process_target::read_btrace_conf (const btrace_target_info
*tinfo
,
7191 const struct btrace_config
*conf
;
7193 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7194 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
7196 conf
= linux_btrace_conf (tinfo
);
7199 switch (conf
->format
)
7201 case BTRACE_FORMAT_NONE
:
7204 case BTRACE_FORMAT_BTS
:
7205 buffer_xml_printf (buffer
, "<bts");
7206 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
7207 buffer_xml_printf (buffer
, " />\n");
7210 case BTRACE_FORMAT_PT
:
7211 buffer_xml_printf (buffer
, "<pt");
7212 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->pt
.size
);
7213 buffer_xml_printf (buffer
, "/>\n");
7218 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
7221 #endif /* HAVE_LINUX_BTRACE */
7223 /* See nat/linux-nat.h. */
7226 current_lwp_ptid (void)
7228 return ptid_of (current_thread
);
7232 linux_process_target::thread_name (ptid_t thread
)
7234 return linux_proc_tid_get_name (thread
);
7239 linux_process_target::thread_handle (ptid_t ptid
, gdb_byte
**handle
,
7242 return thread_db_thread_handle (ptid
, handle
, handle_len
);
7246 /* Default implementation of linux_target_ops method "set_pc" for
7247 32-bit pc register which is literally named "pc". */
7250 linux_set_pc_32bit (struct regcache
*regcache
, CORE_ADDR pc
)
7252 uint32_t newpc
= pc
;
7254 supply_register_by_name (regcache
, "pc", &newpc
);
7257 /* Default implementation of linux_target_ops method "get_pc" for
7258 32-bit pc register which is literally named "pc". */
7261 linux_get_pc_32bit (struct regcache
*regcache
)
7265 collect_register_by_name (regcache
, "pc", &pc
);
7267 debug_printf ("stop pc is 0x%" PRIx32
"\n", pc
);
7271 /* Default implementation of linux_target_ops method "set_pc" for
7272 64-bit pc register which is literally named "pc". */
7275 linux_set_pc_64bit (struct regcache
*regcache
, CORE_ADDR pc
)
7277 uint64_t newpc
= pc
;
7279 supply_register_by_name (regcache
, "pc", &newpc
);
7282 /* Default implementation of linux_target_ops method "get_pc" for
7283 64-bit pc register which is literally named "pc". */
7286 linux_get_pc_64bit (struct regcache
*regcache
)
7290 collect_register_by_name (regcache
, "pc", &pc
);
7292 debug_printf ("stop pc is 0x%" PRIx64
"\n", pc
);
7296 /* See linux-low.h. */
7299 linux_get_auxv (int wordsize
, CORE_ADDR match
, CORE_ADDR
*valp
)
7301 gdb_byte
*data
= (gdb_byte
*) alloca (2 * wordsize
);
7304 gdb_assert (wordsize
== 4 || wordsize
== 8);
7306 while (the_target
->read_auxv (offset
, data
, 2 * wordsize
) == 2 * wordsize
)
7310 uint32_t *data_p
= (uint32_t *) data
;
7311 if (data_p
[0] == match
)
7319 uint64_t *data_p
= (uint64_t *) data
;
7320 if (data_p
[0] == match
)
7327 offset
+= 2 * wordsize
;
7333 /* See linux-low.h. */
7336 linux_get_hwcap (int wordsize
)
7338 CORE_ADDR hwcap
= 0;
7339 linux_get_auxv (wordsize
, AT_HWCAP
, &hwcap
);
7343 /* See linux-low.h. */
7346 linux_get_hwcap2 (int wordsize
)
7348 CORE_ADDR hwcap2
= 0;
7349 linux_get_auxv (wordsize
, AT_HWCAP2
, &hwcap2
);
7353 #ifdef HAVE_LINUX_REGSETS
7355 initialize_regsets_info (struct regsets_info
*info
)
7357 for (info
->num_regsets
= 0;
7358 info
->regsets
[info
->num_regsets
].size
>= 0;
7359 info
->num_regsets
++)
7365 initialize_low (void)
7367 struct sigaction sigchld_action
;
7369 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
7370 set_target_ops (the_linux_target
);
7372 linux_ptrace_init_warnings ();
7373 linux_proc_init_warnings ();
7375 sigchld_action
.sa_handler
= sigchld_handler
;
7376 sigemptyset (&sigchld_action
.sa_mask
);
7377 sigchld_action
.sa_flags
= SA_RESTART
;
7378 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
7380 initialize_low_arch ();
7382 linux_check_ptrace_features ();