1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
34 #include <sys/ioctl.h>
37 #include <sys/syscall.h>
41 #include <sys/types.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
61 #include "nat/linux-namespaces.h"
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
78 /* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81 #if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84 #if defined(__mcoldfire__)
85 /* These are still undefined in 3.10 kernels. */
86 #define PT_TEXT_ADDR 49*4
87 #define PT_DATA_ADDR 50*4
88 #define PT_TEXT_END_ADDR 51*4
89 /* BFIN already defines these since at least 2.6.32 kernels. */
91 #define PT_TEXT_ADDR 220
92 #define PT_TEXT_END_ADDR 224
93 #define PT_DATA_ADDR 228
94 /* These are still undefined in 3.10 kernels. */
95 #elif defined(__TMS320C6X__)
96 #define PT_TEXT_ADDR (0x10000*4)
97 #define PT_DATA_ADDR (0x10004*4)
98 #define PT_TEXT_END_ADDR (0x10008*4)
102 #if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107 #define SUPPORTS_READ_OFFSETS
110 #ifdef HAVE_LINUX_BTRACE
111 # include "nat/linux-btrace.h"
112 # include "gdbsupport/btrace-common.h"
115 #ifndef HAVE_ELF32_AUXV_T
116 /* Copied from glibc's elf.h. */
119 uint32_t a_type
; /* Entry type */
122 uint32_t a_val
; /* Integer value */
123 /* We use to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
130 #ifndef HAVE_ELF64_AUXV_T
131 /* Copied from glibc's elf.h. */
134 uint64_t a_type
; /* Entry type */
137 uint64_t a_val
; /* Integer value */
138 /* We use to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
145 /* Does the current host support PTRACE_GETREGSET? */
146 int have_ptrace_getregset
= -1;
150 /* See nat/linux-nat.h. */
153 ptid_of_lwp (struct lwp_info
*lwp
)
155 return ptid_of (get_lwp_thread (lwp
));
158 /* See nat/linux-nat.h. */
161 lwp_set_arch_private_info (struct lwp_info
*lwp
,
162 struct arch_lwp_info
*info
)
164 lwp
->arch_private
= info
;
167 /* See nat/linux-nat.h. */
169 struct arch_lwp_info
*
170 lwp_arch_private_info (struct lwp_info
*lwp
)
172 return lwp
->arch_private
;
175 /* See nat/linux-nat.h. */
178 lwp_is_stopped (struct lwp_info
*lwp
)
183 /* See nat/linux-nat.h. */
185 enum target_stop_reason
186 lwp_stop_reason (struct lwp_info
*lwp
)
188 return lwp
->stop_reason
;
191 /* See nat/linux-nat.h. */
194 lwp_is_stepping (struct lwp_info
*lwp
)
196 return lwp
->stepping
;
199 /* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
/* A singly-linked list node recording one stopped child we have not
   yet matched to a fork/clone parent.  The field declarations were
   lost in extraction (only their comments survived); restored here.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stopped-but-unclaimed children.  */
struct simple_pid_list *stopped_pids;
216 /* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
220 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
222 struct simple_pid_list
*new_pid
= XNEW (struct simple_pid_list
);
225 new_pid
->status
= status
;
226 new_pid
->next
= *listp
;
231 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
233 struct simple_pid_list
**p
;
235 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
236 if ((*p
)->pid
== pid
)
238 struct simple_pid_list
*next
= (*p
)->next
;
240 *statusp
= (*p
)->status
;
/* What kind of all-stop operation, if any, is in progress.  The
   STOPPING_THREADS enumerator line was lost in extraction; restored
   per the upstream source.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
263 /* FIXME make into a target method? */
264 int using_threads
= 1;
266 /* True if we're presently stabilizing threads (moving them out of
268 static int stabilizing_threads
;
270 static void unsuspend_all_lwps (struct lwp_info
*except
);
271 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
272 static int lwp_is_marked_dead (struct lwp_info
*lwp
);
273 static int finish_step_over (struct lwp_info
*lwp
);
274 static int kill_lwp (unsigned long lwpid
, int signo
);
275 static void enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
);
276 static int linux_low_ptrace_options (int attached
);
277 static int check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
);
279 /* When the event-loop is doing a step-over, this points at the thread
281 ptid_t step_over_bkpt
;
283 /* True if the low target can hardware single-step. */
286 can_hardware_single_step (void)
288 if (the_low_target
.supports_hardware_single_step
!= NULL
)
289 return the_low_target
.supports_hardware_single_step ();
295 linux_process_target::low_supports_breakpoints ()
301 linux_process_target::low_get_pc (regcache
*regcache
)
307 linux_process_target::low_set_pc (regcache
*regcache
, CORE_ADDR newpc
)
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
312 std::vector
<CORE_ADDR
>
313 linux_process_target::low_get_next_pcs (regcache
*regcache
)
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
320 linux_process_target::low_decr_pc_after_break ()
325 /* True if LWP is stopped in its stepping range. */
328 lwp_in_step_range (struct lwp_info
*lwp
)
330 CORE_ADDR pc
= lwp
->stop_pc
;
332 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
335 struct pending_signals
339 struct pending_signals
*prev
;
342 /* The read/write ends of the pipe registered as waitable file in the
344 static int linux_event_pipe
[2] = { -1, -1 };
346 /* True if we're currently in async mode. */
347 #define target_is_async_p() (linux_event_pipe[0] != -1)
349 static void send_sigstop (struct lwp_info
*lwp
);
351 /* Return non-zero if HEADER is a 64-bit ELF file. */
/* Return non-zero if HEADER is a 64-bit ELF file.  On a valid ELF
   magic, set *MACHINE to the e_machine value; otherwise clear
   *MACHINE to 0 and return 0.  The non-ELF branch (`*machine = 0;
   return 0;') was missing from the extracted text; restored per the
   upstream source.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  *machine = 0;
  return 0;
}
369 /* Return non-zero if FILE is a 64-bit ELF file,
370 zero if the file is not a 64-bit ELF file,
371 and -1 if the file is not accessible or doesn't exist. */
374 elf_64_file_p (const char *file
, unsigned int *machine
)
379 fd
= open (file
, O_RDONLY
);
383 if (read (fd
, &header
, sizeof (header
)) != sizeof (header
))
390 return elf_64_header_p (&header
, machine
);
393 /* Accepts an integer PID; Returns true if the executable PID is
394 running is a 64-bit ELF file.. */
397 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
401 sprintf (file
, "/proc/%d/exe", pid
);
402 return elf_64_file_p (file
, machine
);
406 linux_process_target::delete_lwp (lwp_info
*lwp
)
408 struct thread_info
*thr
= get_lwp_thread (lwp
);
411 debug_printf ("deleting %ld\n", lwpid_of (thr
));
415 low_delete_thread (lwp
->arch_private
);
421 linux_process_target::low_delete_thread (arch_lwp_info
*info
)
423 /* Default implementation should be overridden if architecture-specific
424 info is being used. */
425 gdb_assert (info
== nullptr);
429 linux_process_target::add_linux_process (int pid
, int attached
)
431 struct process_info
*proc
;
433 proc
= add_process (pid
, attached
);
434 proc
->priv
= XCNEW (struct process_info_private
);
436 proc
->priv
->arch_private
= low_new_process ();
442 linux_process_target::low_new_process ()
448 linux_process_target::low_delete_process (arch_process_info
*info
)
450 /* Default implementation must be overridden if architecture-specific
452 gdb_assert (info
== nullptr);
456 linux_process_target::low_new_fork (process_info
*parent
, process_info
*child
)
462 linux_process_target::arch_setup_thread (thread_info
*thread
)
464 struct thread_info
*saved_thread
;
466 saved_thread
= current_thread
;
467 current_thread
= thread
;
471 current_thread
= saved_thread
;
475 linux_process_target::handle_extended_wait (lwp_info
**orig_event_lwp
,
478 client_state
&cs
= get_client_state ();
479 struct lwp_info
*event_lwp
= *orig_event_lwp
;
480 int event
= linux_ptrace_get_extended_event (wstat
);
481 struct thread_info
*event_thr
= get_lwp_thread (event_lwp
);
482 struct lwp_info
*new_lwp
;
484 gdb_assert (event_lwp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
);
486 /* All extended events we currently use are mid-syscall. Only
487 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
488 you have to be using PTRACE_SEIZE to get that. */
489 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
491 if ((event
== PTRACE_EVENT_FORK
) || (event
== PTRACE_EVENT_VFORK
)
492 || (event
== PTRACE_EVENT_CLONE
))
495 unsigned long new_pid
;
498 /* Get the pid of the new lwp. */
499 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
502 /* If we haven't already seen the new PID stop, wait for it now. */
503 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
505 /* The new child has a pending SIGSTOP. We can't affect it until it
506 hits the SIGSTOP, but we're already attached. */
508 ret
= my_waitpid (new_pid
, &status
, __WALL
);
511 perror_with_name ("waiting for new child");
512 else if (ret
!= new_pid
)
513 warning ("wait returned unexpected PID %d", ret
);
514 else if (!WIFSTOPPED (status
))
515 warning ("wait returned unexpected status 0x%x", status
);
518 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
520 struct process_info
*parent_proc
;
521 struct process_info
*child_proc
;
522 struct lwp_info
*child_lwp
;
523 struct thread_info
*child_thr
;
524 struct target_desc
*tdesc
;
526 ptid
= ptid_t (new_pid
, new_pid
, 0);
530 debug_printf ("HEW: Got fork event from LWP %ld, "
532 ptid_of (event_thr
).lwp (),
536 /* Add the new process to the tables and clone the breakpoint
537 lists of the parent. We need to do this even if the new process
538 will be detached, since we will need the process object and the
539 breakpoints to remove any breakpoints from memory when we
540 detach, and the client side will access registers. */
541 child_proc
= add_linux_process (new_pid
, 0);
542 gdb_assert (child_proc
!= NULL
);
543 child_lwp
= add_lwp (ptid
);
544 gdb_assert (child_lwp
!= NULL
);
545 child_lwp
->stopped
= 1;
546 child_lwp
->must_set_ptrace_flags
= 1;
547 child_lwp
->status_pending_p
= 0;
548 child_thr
= get_lwp_thread (child_lwp
);
549 child_thr
->last_resume_kind
= resume_stop
;
550 child_thr
->last_status
.kind
= TARGET_WAITKIND_STOPPED
;
552 /* If we're suspending all threads, leave this one suspended
553 too. If the fork/clone parent is stepping over a breakpoint,
554 all other threads have been suspended already. Leave the
555 child suspended too. */
556 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
557 || event_lwp
->bp_reinsert
!= 0)
560 debug_printf ("HEW: leaving child suspended\n");
561 child_lwp
->suspended
= 1;
564 parent_proc
= get_thread_process (event_thr
);
565 child_proc
->attached
= parent_proc
->attached
;
567 if (event_lwp
->bp_reinsert
!= 0
568 && supports_software_single_step ()
569 && event
== PTRACE_EVENT_VFORK
)
571 /* If we leave single-step breakpoints there, child will
572 hit it, so uninsert single-step breakpoints from parent
573 (and child). Once vfork child is done, reinsert
574 them back to parent. */
575 uninsert_single_step_breakpoints (event_thr
);
578 clone_all_breakpoints (child_thr
, event_thr
);
580 tdesc
= allocate_target_description ();
581 copy_target_description (tdesc
, parent_proc
->tdesc
);
582 child_proc
->tdesc
= tdesc
;
584 /* Clone arch-specific process data. */
585 low_new_fork (parent_proc
, child_proc
);
587 /* Save fork info in the parent thread. */
588 if (event
== PTRACE_EVENT_FORK
)
589 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_FORKED
;
590 else if (event
== PTRACE_EVENT_VFORK
)
591 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORKED
;
593 event_lwp
->waitstatus
.value
.related_pid
= ptid
;
595 /* The status_pending field contains bits denoting the
596 extended event, so when the pending event is handled,
597 the handler will look at lwp->waitstatus. */
598 event_lwp
->status_pending_p
= 1;
599 event_lwp
->status_pending
= wstat
;
601 /* Link the threads until the parent event is passed on to
603 event_lwp
->fork_relative
= child_lwp
;
604 child_lwp
->fork_relative
= event_lwp
;
606 /* If the parent thread is doing step-over with single-step
607 breakpoints, the list of single-step breakpoints are cloned
608 from the parent's. Remove them from the child process.
609 In case of vfork, we'll reinsert them back once vforked
611 if (event_lwp
->bp_reinsert
!= 0
612 && supports_software_single_step ())
614 /* The child process is forked and stopped, so it is safe
615 to access its memory without stopping all other threads
616 from other processes. */
617 delete_single_step_breakpoints (child_thr
);
619 gdb_assert (has_single_step_breakpoints (event_thr
));
620 gdb_assert (!has_single_step_breakpoints (child_thr
));
623 /* Report the event. */
628 debug_printf ("HEW: Got clone event "
629 "from LWP %ld, new child is LWP %ld\n",
630 lwpid_of (event_thr
), new_pid
);
632 ptid
= ptid_t (pid_of (event_thr
), new_pid
, 0);
633 new_lwp
= add_lwp (ptid
);
635 /* Either we're going to immediately resume the new thread
636 or leave it stopped. resume_one_lwp is a nop if it
637 thinks the thread is currently running, so set this first
638 before calling resume_one_lwp. */
639 new_lwp
->stopped
= 1;
641 /* If we're suspending all threads, leave this one suspended
642 too. If the fork/clone parent is stepping over a breakpoint,
643 all other threads have been suspended already. Leave the
644 child suspended too. */
645 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
646 || event_lwp
->bp_reinsert
!= 0)
647 new_lwp
->suspended
= 1;
649 /* Normally we will get the pending SIGSTOP. But in some cases
650 we might get another signal delivered to the group first.
651 If we do get another signal, be sure not to lose it. */
652 if (WSTOPSIG (status
) != SIGSTOP
)
654 new_lwp
->stop_expected
= 1;
655 new_lwp
->status_pending_p
= 1;
656 new_lwp
->status_pending
= status
;
658 else if (cs
.report_thread_events
)
660 new_lwp
->waitstatus
.kind
= TARGET_WAITKIND_THREAD_CREATED
;
661 new_lwp
->status_pending_p
= 1;
662 new_lwp
->status_pending
= status
;
666 thread_db_notice_clone (event_thr
, ptid
);
669 /* Don't report the event. */
672 else if (event
== PTRACE_EVENT_VFORK_DONE
)
674 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
676 if (event_lwp
->bp_reinsert
!= 0 && supports_software_single_step ())
678 reinsert_single_step_breakpoints (event_thr
);
680 gdb_assert (has_single_step_breakpoints (event_thr
));
683 /* Report the event. */
686 else if (event
== PTRACE_EVENT_EXEC
&& cs
.report_exec_events
)
688 struct process_info
*proc
;
689 std::vector
<int> syscalls_to_catch
;
695 debug_printf ("HEW: Got exec event from LWP %ld\n",
696 lwpid_of (event_thr
));
699 /* Get the event ptid. */
700 event_ptid
= ptid_of (event_thr
);
701 event_pid
= event_ptid
.pid ();
703 /* Save the syscall list from the execing process. */
704 proc
= get_thread_process (event_thr
);
705 syscalls_to_catch
= std::move (proc
->syscalls_to_catch
);
707 /* Delete the execing process and all its threads. */
709 current_thread
= NULL
;
711 /* Create a new process/lwp/thread. */
712 proc
= add_linux_process (event_pid
, 0);
713 event_lwp
= add_lwp (event_ptid
);
714 event_thr
= get_lwp_thread (event_lwp
);
715 gdb_assert (current_thread
== event_thr
);
716 arch_setup_thread (event_thr
);
718 /* Set the event status. */
719 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXECD
;
720 event_lwp
->waitstatus
.value
.execd_pathname
721 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr
)));
723 /* Mark the exec status as pending. */
724 event_lwp
->stopped
= 1;
725 event_lwp
->status_pending_p
= 1;
726 event_lwp
->status_pending
= wstat
;
727 event_thr
->last_resume_kind
= resume_continue
;
728 event_thr
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
730 /* Update syscall state in the new lwp, effectively mid-syscall too. */
731 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
733 /* Restore the list to catch. Don't rely on the client, which is free
734 to avoid sending a new list when the architecture doesn't change.
735 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
736 proc
->syscalls_to_catch
= std::move (syscalls_to_catch
);
738 /* Report the event. */
739 *orig_event_lwp
= event_lwp
;
743 internal_error (__FILE__
, __LINE__
, _("unknown ptrace event %d"), event
);
747 linux_process_target::get_pc (lwp_info
*lwp
)
749 struct thread_info
*saved_thread
;
750 struct regcache
*regcache
;
753 if (!low_supports_breakpoints ())
756 saved_thread
= current_thread
;
757 current_thread
= get_lwp_thread (lwp
);
759 regcache
= get_thread_regcache (current_thread
, 1);
760 pc
= low_get_pc (regcache
);
763 debug_printf ("pc is 0x%lx\n", (long) pc
);
765 current_thread
= saved_thread
;
769 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
770 Fill *SYSNO with the syscall nr trapped. */
773 get_syscall_trapinfo (struct lwp_info
*lwp
, int *sysno
)
775 struct thread_info
*saved_thread
;
776 struct regcache
*regcache
;
778 if (the_low_target
.get_syscall_trapinfo
== NULL
)
780 /* If we cannot get the syscall trapinfo, report an unknown
781 system call number. */
782 *sysno
= UNKNOWN_SYSCALL
;
786 saved_thread
= current_thread
;
787 current_thread
= get_lwp_thread (lwp
);
789 regcache
= get_thread_regcache (current_thread
, 1);
790 (*the_low_target
.get_syscall_trapinfo
) (regcache
, sysno
);
793 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno
);
795 current_thread
= saved_thread
;
799 linux_process_target::save_stop_reason (lwp_info
*lwp
)
802 CORE_ADDR sw_breakpoint_pc
;
803 struct thread_info
*saved_thread
;
804 #if USE_SIGTRAP_SIGINFO
808 if (!low_supports_breakpoints ())
812 sw_breakpoint_pc
= pc
- low_decr_pc_after_break ();
814 /* breakpoint_at reads from the current thread. */
815 saved_thread
= current_thread
;
816 current_thread
= get_lwp_thread (lwp
);
818 #if USE_SIGTRAP_SIGINFO
819 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
820 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
822 if (siginfo
.si_signo
== SIGTRAP
)
824 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
)
825 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
827 /* The si_code is ambiguous on this arch -- check debug
829 if (!check_stopped_by_watchpoint (lwp
))
830 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
832 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
))
834 /* If we determine the LWP stopped for a SW breakpoint,
835 trust it. Particularly don't check watchpoint
836 registers, because at least on s390, we'd find
837 stopped-by-watchpoint as long as there's a watchpoint
839 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
841 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
843 /* This can indicate either a hardware breakpoint or
844 hardware watchpoint. Check debug registers. */
845 if (!check_stopped_by_watchpoint (lwp
))
846 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
848 else if (siginfo
.si_code
== TRAP_TRACE
)
850 /* We may have single stepped an instruction that
851 triggered a watchpoint. In that case, on some
852 architectures (such as x86), instead of TRAP_HWBKPT,
853 si_code indicates TRAP_TRACE, and we need to check
854 the debug registers separately. */
855 if (!check_stopped_by_watchpoint (lwp
))
856 lwp
->stop_reason
= TARGET_STOPPED_BY_SINGLE_STEP
;
861 /* We may have just stepped a breakpoint instruction. E.g., in
862 non-stop mode, GDB first tells the thread A to step a range, and
863 then the user inserts a breakpoint inside the range. In that
864 case we need to report the breakpoint PC. */
865 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
866 && low_breakpoint_at (sw_breakpoint_pc
))
867 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
869 if (hardware_breakpoint_inserted_here (pc
))
870 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
872 if (lwp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
)
873 check_stopped_by_watchpoint (lwp
);
876 if (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
880 struct thread_info
*thr
= get_lwp_thread (lwp
);
882 debug_printf ("CSBB: %s stopped by software breakpoint\n",
883 target_pid_to_str (ptid_of (thr
)));
886 /* Back up the PC if necessary. */
887 if (pc
!= sw_breakpoint_pc
)
889 struct regcache
*regcache
890 = get_thread_regcache (current_thread
, 1);
891 low_set_pc (regcache
, sw_breakpoint_pc
);
894 /* Update this so we record the correct stop PC below. */
895 pc
= sw_breakpoint_pc
;
897 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
901 struct thread_info
*thr
= get_lwp_thread (lwp
);
903 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
904 target_pid_to_str (ptid_of (thr
)));
907 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
911 struct thread_info
*thr
= get_lwp_thread (lwp
);
913 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
914 target_pid_to_str (ptid_of (thr
)));
917 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
)
921 struct thread_info
*thr
= get_lwp_thread (lwp
);
923 debug_printf ("CSBB: %s stopped by trace\n",
924 target_pid_to_str (ptid_of (thr
)));
929 current_thread
= saved_thread
;
934 linux_process_target::add_lwp (ptid_t ptid
)
936 struct lwp_info
*lwp
;
938 lwp
= XCNEW (struct lwp_info
);
940 lwp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
942 lwp
->thread
= add_thread (ptid
, lwp
);
944 low_new_thread (lwp
);
950 linux_process_target::low_new_thread (lwp_info
*info
)
955 /* Callback to be used when calling fork_inferior, responsible for
956 actually initiating the tracing of the inferior. */
961 if (ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0,
962 (PTRACE_TYPE_ARG4
) 0) < 0)
963 trace_start_error_with_name ("ptrace");
965 if (setpgid (0, 0) < 0)
966 trace_start_error_with_name ("setpgid");
968 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
969 stdout to stderr so that inferior i/o doesn't corrupt the connection.
970 Also, redirect stdin to /dev/null. */
971 if (remote_connection_is_stdio ())
974 trace_start_error_with_name ("close");
975 if (open ("/dev/null", O_RDONLY
) < 0)
976 trace_start_error_with_name ("open");
978 trace_start_error_with_name ("dup2");
979 if (write (2, "stdin/stdout redirected\n",
980 sizeof ("stdin/stdout redirected\n") - 1) < 0)
982 /* Errors ignored. */;
987 /* Start an inferior process and returns its pid.
988 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
989 are its arguments. */
992 linux_process_target::create_inferior (const char *program
,
993 const std::vector
<char *> &program_args
)
995 client_state
&cs
= get_client_state ();
996 struct lwp_info
*new_lwp
;
1001 maybe_disable_address_space_randomization restore_personality
1002 (cs
.disable_randomization
);
1003 std::string str_program_args
= stringify_argv (program_args
);
1005 pid
= fork_inferior (program
,
1006 str_program_args
.c_str (),
1007 get_environ ()->envp (), linux_ptrace_fun
,
1008 NULL
, NULL
, NULL
, NULL
);
1011 add_linux_process (pid
, 0);
1013 ptid
= ptid_t (pid
, pid
, 0);
1014 new_lwp
= add_lwp (ptid
);
1015 new_lwp
->must_set_ptrace_flags
= 1;
1017 post_fork_inferior (pid
, program
);
1022 /* Implement the post_create_inferior target_ops method. */
1025 linux_process_target::post_create_inferior ()
1027 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
1031 if (lwp
->must_set_ptrace_flags
)
1033 struct process_info
*proc
= current_process ();
1034 int options
= linux_low_ptrace_options (proc
->attached
);
1036 linux_enable_event_reporting (lwpid_of (current_thread
), options
);
1037 lwp
->must_set_ptrace_flags
= 0;
1042 linux_process_target::attach_lwp (ptid_t ptid
)
1044 struct lwp_info
*new_lwp
;
1045 int lwpid
= ptid
.lwp ();
1047 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
1051 new_lwp
= add_lwp (ptid
);
1053 /* We need to wait for SIGSTOP before being able to make the next
1054 ptrace call on this LWP. */
1055 new_lwp
->must_set_ptrace_flags
= 1;
1057 if (linux_proc_pid_is_stopped (lwpid
))
1060 debug_printf ("Attached to a stopped process\n");
1062 /* The process is definitely stopped. It is in a job control
1063 stop, unless the kernel predates the TASK_STOPPED /
1064 TASK_TRACED distinction, in which case it might be in a
1065 ptrace stop. Make sure it is in a ptrace stop; from there we
1066 can kill it, signal it, et cetera.
1068 First make sure there is a pending SIGSTOP. Since we are
1069 already attached, the process can not transition from stopped
1070 to running without a PTRACE_CONT; so we know this signal will
1071 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1072 probably already in the queue (unless this kernel is old
1073 enough to use TASK_STOPPED for ptrace stops); but since
1074 SIGSTOP is not an RT signal, it can only be queued once. */
1075 kill_lwp (lwpid
, SIGSTOP
);
1077 /* Finally, resume the stopped process. This will deliver the
1078 SIGSTOP (or a higher priority signal, just like normal
1079 PTRACE_ATTACH), which we'll catch later on. */
1080 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1083 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1084 brings it to a halt.
1086 There are several cases to consider here:
1088 1) gdbserver has already attached to the process and is being notified
1089 of a new thread that is being created.
1090 In this case we should ignore that SIGSTOP and resume the
1091 process. This is handled below by setting stop_expected = 1,
1092 and the fact that add_thread sets last_resume_kind ==
1095 2) This is the first thread (the process thread), and we're attaching
1096 to it via attach_inferior.
1097 In this case we want the process thread to stop.
1098 This is handled by having linux_attach set last_resume_kind ==
1099 resume_stop after we return.
1101 If the pid we are attaching to is also the tgid, we attach to and
1102 stop all the existing threads. Otherwise, we attach to pid and
1103 ignore any other threads in the same group as this pid.
1105 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1107 In this case we want the thread to stop.
1108 FIXME: This case is currently not properly handled.
1109 We should wait for the SIGSTOP but don't. Things work apparently
1110 because enough time passes between when we ptrace (ATTACH) and when
1111 gdb makes the next ptrace call on the thread.
1113 On the other hand, if we are currently trying to stop all threads, we
1114 should treat the new thread as if we had sent it a SIGSTOP. This works
1115 because we are guaranteed that the add_lwp call above added us to the
1116 end of the list, and so the new thread has not yet reached
1117 wait_for_sigstop (but will). */
1118 new_lwp
->stop_expected
= 1;
1123 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1124 already attached. Returns true if a new LWP is found, false
1128 attach_proc_task_lwp_callback (ptid_t ptid
)
1130 /* Is this a new thread? */
1131 if (find_thread_ptid (ptid
) == NULL
)
1133 int lwpid
= ptid
.lwp ();
1137 debug_printf ("Found new lwp %d\n", lwpid
);
1139 err
= the_linux_target
->attach_lwp (ptid
);
1141 /* Be quiet if we simply raced with the thread exiting. EPERM
1142 is returned if the thread's task still exists, and is marked
1143 as exited or zombie, as well as other conditions, so in that
1144 case, confirm the status in /proc/PID/status. */
1146 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
1150 debug_printf ("Cannot attach to lwp %d: "
1151 "thread is gone (%d: %s)\n",
1152 lwpid
, err
, safe_strerror (err
));
1158 = linux_ptrace_attach_fail_reason_string (ptid
, err
);
1160 warning (_("Cannot attach to lwp %d: %s"), lwpid
, reason
.c_str ());
1168 static void async_file_mark (void);
1170 /* Attach to PID. If PID is the tgid, attach to it and all
1174 linux_process_target::attach (unsigned long pid
)
1176 struct process_info
*proc
;
1177 struct thread_info
*initial_thread
;
1178 ptid_t ptid
= ptid_t (pid
, pid
, 0);
1181 proc
= add_linux_process (pid
, 1);
1183 /* Attach to PID. We will check for other threads
1185 err
= attach_lwp (ptid
);
1188 remove_process (proc
);
1190 std::string reason
= linux_ptrace_attach_fail_reason_string (ptid
, err
);
1191 error ("Cannot attach to process %ld: %s", pid
, reason
.c_str ());
1194 /* Don't ignore the initial SIGSTOP if we just attached to this
1195 process. It will be collected by wait shortly. */
1196 initial_thread
= find_thread_ptid (ptid_t (pid
, pid
, 0));
1197 initial_thread
->last_resume_kind
= resume_stop
;
1199 /* We must attach to every LWP. If /proc is mounted, use that to
1200 find them now. On the one hand, the inferior may be using raw
1201 clone instead of using pthreads. On the other hand, even if it
1202 is using pthreads, GDB may not be connected yet (thread_db needs
1203 to do symbol lookups, through qSymbol). Also, thread_db walks
1204 structures in the inferior's address space to find the list of
1205 threads/LWPs, and those structures may well be corrupted. Note
1206 that once thread_db is loaded, we'll still use it to list threads
1207 and associate pthread info with each LWP. */
1208 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
1210 /* GDB will shortly read the xml target description for this
1211 process, to figure out the process' architecture. But the target
1212 description is only filled in when the first process/thread in
1213 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1214 that now, otherwise, if GDB is fast enough, it could read the
1215 target description _before_ that initial stop. */
1218 struct lwp_info
*lwp
;
1220 ptid_t pid_ptid
= ptid_t (pid
);
1222 lwpid
= wait_for_event_filtered (pid_ptid
, pid_ptid
, &wstat
, __WALL
);
1223 gdb_assert (lwpid
> 0);
1225 lwp
= find_lwp_pid (ptid_t (lwpid
));
1227 if (!WIFSTOPPED (wstat
) || WSTOPSIG (wstat
) != SIGSTOP
)
1229 lwp
->status_pending_p
= 1;
1230 lwp
->status_pending
= wstat
;
1233 initial_thread
->last_resume_kind
= resume_continue
;
1237 gdb_assert (proc
->tdesc
!= NULL
);
1244 last_thread_of_process_p (int pid
)
1246 bool seen_one
= false;
1248 thread_info
*thread
= find_thread (pid
, [&] (thread_info
*thr_arg
)
1252 /* This is the first thread of this process we see. */
1258 /* This is the second thread of this process we see. */
1263 return thread
== NULL
;
1269 linux_kill_one_lwp (struct lwp_info
*lwp
)
1271 struct thread_info
*thr
= get_lwp_thread (lwp
);
1272 int pid
= lwpid_of (thr
);
1274 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1275 there is no signal context, and ptrace(PTRACE_KILL) (or
1276 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1277 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1278 alternative is to kill with SIGKILL. We only need one SIGKILL
1279 per process, not one for each thread. But since we still support
1280 support debugging programs using raw clone without CLONE_THREAD,
1281 we send one for each thread. For years, we used PTRACE_KILL
1282 only, so we're being a bit paranoid about some old kernels where
1283 PTRACE_KILL might work better (dubious if there are any such, but
1284 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1285 second, and so we're fine everywhere. */
1288 kill_lwp (pid
, SIGKILL
);
1291 int save_errno
= errno
;
1293 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1294 target_pid_to_str (ptid_of (thr
)),
1295 save_errno
? safe_strerror (save_errno
) : "OK");
1299 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1302 int save_errno
= errno
;
1304 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1305 target_pid_to_str (ptid_of (thr
)),
1306 save_errno
? safe_strerror (save_errno
) : "OK");
1310 /* Kill LWP and wait for it to die. */
1313 kill_wait_lwp (struct lwp_info
*lwp
)
1315 struct thread_info
*thr
= get_lwp_thread (lwp
);
1316 int pid
= ptid_of (thr
).pid ();
1317 int lwpid
= ptid_of (thr
).lwp ();
1322 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
1326 linux_kill_one_lwp (lwp
);
1328 /* Make sure it died. Notes:
1330 - The loop is most likely unnecessary.
1332 - We don't use wait_for_event as that could delete lwps
1333 while we're iterating over them. We're not interested in
1334 any pending status at this point, only in making sure all
1335 wait status on the kernel side are collected until the
1338 - We don't use __WALL here as the __WALL emulation relies on
1339 SIGCHLD, and killing a stopped process doesn't generate
1340 one, nor an exit status.
1342 res
= my_waitpid (lwpid
, &wstat
, 0);
1343 if (res
== -1 && errno
== ECHILD
)
1344 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
1345 } while (res
> 0 && WIFSTOPPED (wstat
));
1347 /* Even if it was stopped, the child may have already disappeared.
1348 E.g., if it was killed by SIGKILL. */
1349 if (res
< 0 && errno
!= ECHILD
)
1350 perror_with_name ("kill_wait_lwp");
1353 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1354 except the leader. */
1357 kill_one_lwp_callback (thread_info
*thread
, int pid
)
1359 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1361 /* We avoid killing the first thread here, because of a Linux kernel (at
1362 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1363 the children get a chance to be reaped, it will remain a zombie
1366 if (lwpid_of (thread
) == pid
)
1369 debug_printf ("lkop: is last of process %s\n",
1370 target_pid_to_str (thread
->id
));
1374 kill_wait_lwp (lwp
);
1378 linux_process_target::kill (process_info
*process
)
1380 int pid
= process
->pid
;
1382 /* If we're killing a running inferior, make sure it is stopped
1383 first, as PTRACE_KILL will not work otherwise. */
1384 stop_all_lwps (0, NULL
);
1386 for_each_thread (pid
, [&] (thread_info
*thread
)
1388 kill_one_lwp_callback (thread
, pid
);
1391 /* See the comment in linux_kill_one_lwp. We did not kill the first
1392 thread in the list, so do so now. */
1393 lwp_info
*lwp
= find_lwp_pid (ptid_t (pid
));
1398 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1402 kill_wait_lwp (lwp
);
1406 /* Since we presently can only stop all lwps of all processes, we
1407 need to unstop lwps of other processes. */
1408 unstop_all_lwps (0, NULL
);
1412 /* Get pending signal of THREAD, for detaching purposes. This is the
1413 signal the thread last stopped for, which we need to deliver to the
1414 thread when detaching, otherwise, it'd be suppressed/lost. */
1417 get_detach_signal (struct thread_info
*thread
)
1419 client_state
&cs
= get_client_state ();
1420 enum gdb_signal signo
= GDB_SIGNAL_0
;
1422 struct lwp_info
*lp
= get_thread_lwp (thread
);
1424 if (lp
->status_pending_p
)
1425 status
= lp
->status_pending
;
1428 /* If the thread had been suspended by gdbserver, and it stopped
1429 cleanly, then it'll have stopped with SIGSTOP. But we don't
1430 want to deliver that SIGSTOP. */
1431 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1432 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1435 /* Otherwise, we may need to deliver the signal we
1437 status
= lp
->last_status
;
1440 if (!WIFSTOPPED (status
))
1443 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1444 target_pid_to_str (ptid_of (thread
)));
1448 /* Extended wait statuses aren't real SIGTRAPs. */
1449 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1452 debug_printf ("GPS: lwp %s had stopped with extended "
1453 "status: no pending signal\n",
1454 target_pid_to_str (ptid_of (thread
)));
1458 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1460 if (cs
.program_signals_p
&& !cs
.program_signals
[signo
])
1463 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1464 target_pid_to_str (ptid_of (thread
)),
1465 gdb_signal_to_string (signo
));
1468 else if (!cs
.program_signals_p
1469 /* If we have no way to know which signals GDB does not
1470 want to have passed to the program, assume
1471 SIGTRAP/SIGINT, which is GDB's default. */
1472 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1475 debug_printf ("GPS: lwp %s had signal %s, "
1476 "but we don't know if we should pass it. "
1477 "Default to not.\n",
1478 target_pid_to_str (ptid_of (thread
)),
1479 gdb_signal_to_string (signo
));
1485 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1486 target_pid_to_str (ptid_of (thread
)),
1487 gdb_signal_to_string (signo
));
1489 return WSTOPSIG (status
);
1494 linux_process_target::detach_one_lwp (lwp_info
*lwp
)
1496 struct thread_info
*thread
= get_lwp_thread (lwp
);
1500 /* If there is a pending SIGSTOP, get rid of it. */
1501 if (lwp
->stop_expected
)
1504 debug_printf ("Sending SIGCONT to %s\n",
1505 target_pid_to_str (ptid_of (thread
)));
1507 kill_lwp (lwpid_of (thread
), SIGCONT
);
1508 lwp
->stop_expected
= 0;
1511 /* Pass on any pending signal for this thread. */
1512 sig
= get_detach_signal (thread
);
1514 /* Preparing to resume may try to write registers, and fail if the
1515 lwp is zombie. If that happens, ignore the error. We'll handle
1516 it below, when detach fails with ESRCH. */
1519 /* Flush any pending changes to the process's registers. */
1520 regcache_invalidate_thread (thread
);
1522 /* Finally, let it resume. */
1523 low_prepare_to_resume (lwp
);
1525 catch (const gdb_exception_error
&ex
)
1527 if (!check_ptrace_stopped_lwp_gone (lwp
))
1531 lwpid
= lwpid_of (thread
);
1532 if (ptrace (PTRACE_DETACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0,
1533 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1535 int save_errno
= errno
;
1537 /* We know the thread exists, so ESRCH must mean the lwp is
1538 zombie. This can happen if one of the already-detached
1539 threads exits the whole thread group. In that case we're
1540 still attached, and must reap the lwp. */
1541 if (save_errno
== ESRCH
)
1545 ret
= my_waitpid (lwpid
, &status
, __WALL
);
1548 warning (_("Couldn't reap LWP %d while detaching: %s"),
1549 lwpid
, safe_strerror (errno
));
1551 else if (!WIFEXITED (status
) && !WIFSIGNALED (status
))
1553 warning (_("Reaping LWP %d while detaching "
1554 "returned unexpected status 0x%x"),
1560 error (_("Can't detach %s: %s"),
1561 target_pid_to_str (ptid_of (thread
)),
1562 safe_strerror (save_errno
));
1565 else if (debug_threads
)
1567 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1568 target_pid_to_str (ptid_of (thread
)),
1576 linux_process_target::detach (process_info
*process
)
1578 struct lwp_info
*main_lwp
;
1580 /* As there's a step over already in progress, let it finish first,
1581 otherwise nesting a stabilize_threads operation on top gets real
1583 complete_ongoing_step_over ();
1585 /* Stop all threads before detaching. First, ptrace requires that
1586 the thread is stopped to successfully detach. Second, thread_db
1587 may need to uninstall thread event breakpoints from memory, which
1588 only works with a stopped process anyway. */
1589 stop_all_lwps (0, NULL
);
1591 #ifdef USE_THREAD_DB
1592 thread_db_detach (process
);
1595 /* Stabilize threads (move out of jump pads). */
1596 target_stabilize_threads ();
1598 /* Detach from the clone lwps first. If the thread group exits just
1599 while we're detaching, we must reap the clone lwps before we're
1600 able to reap the leader. */
1601 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1603 /* We don't actually detach from the thread group leader just yet.
1604 If the thread group exits, we must reap the zombie clone lwps
1605 before we're able to reap the leader. */
1606 if (thread
->id
.pid () == thread
->id
.lwp ())
1609 lwp_info
*lwp
= get_thread_lwp (thread
);
1610 detach_one_lwp (lwp
);
1613 main_lwp
= find_lwp_pid (ptid_t (process
->pid
));
1614 detach_one_lwp (main_lwp
);
1618 /* Since we presently can only stop all lwps of all processes, we
1619 need to unstop lwps of other processes. */
1620 unstop_all_lwps (0, NULL
);
1624 /* Remove all LWPs that belong to process PROC from the lwp list. */
1627 linux_process_target::mourn (process_info
*process
)
1629 struct process_info_private
*priv
;
1631 #ifdef USE_THREAD_DB
1632 thread_db_mourn (process
);
1635 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1637 delete_lwp (get_thread_lwp (thread
));
1640 /* Freeing all private data. */
1641 priv
= process
->priv
;
1642 low_delete_process (priv
->arch_private
);
1644 process
->priv
= NULL
;
1646 remove_process (process
);
1650 linux_process_target::join (int pid
)
1655 ret
= my_waitpid (pid
, &status
, 0);
1656 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1658 } while (ret
!= -1 || errno
!= ECHILD
);
1661 /* Return true if the given thread is still alive. */
1664 linux_process_target::thread_alive (ptid_t ptid
)
1666 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1668 /* We assume we always know if a thread exits. If a whole process
1669 exited but we still haven't been able to report it to GDB, we'll
1670 hold on to the last lwp of the dead process. */
1672 return !lwp_is_marked_dead (lwp
);
1678 linux_process_target::thread_still_has_status_pending (thread_info
*thread
)
1680 struct lwp_info
*lp
= get_thread_lwp (thread
);
1682 if (!lp
->status_pending_p
)
1685 if (thread
->last_resume_kind
!= resume_stop
1686 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1687 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1689 struct thread_info
*saved_thread
;
1693 gdb_assert (lp
->last_status
!= 0);
1697 saved_thread
= current_thread
;
1698 current_thread
= thread
;
1700 if (pc
!= lp
->stop_pc
)
1703 debug_printf ("PC of %ld changed\n",
1708 #if !USE_SIGTRAP_SIGINFO
1709 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1710 && !low_breakpoint_at (pc
))
1713 debug_printf ("previous SW breakpoint of %ld gone\n",
1717 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1718 && !hardware_breakpoint_inserted_here (pc
))
1721 debug_printf ("previous HW breakpoint of %ld gone\n",
1727 current_thread
= saved_thread
;
1732 debug_printf ("discarding pending breakpoint status\n");
1733 lp
->status_pending_p
= 0;
1741 /* Returns true if LWP is resumed from the client's perspective. */
1744 lwp_resumed (struct lwp_info
*lwp
)
1746 struct thread_info
*thread
= get_lwp_thread (lwp
);
1748 if (thread
->last_resume_kind
!= resume_stop
)
1751 /* Did gdb send us a `vCont;t', but we haven't reported the
1752 corresponding stop to gdb yet? If so, the thread is still
1753 resumed/running from gdb's perspective. */
1754 if (thread
->last_resume_kind
== resume_stop
1755 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
1762 linux_process_target::status_pending_p_callback (thread_info
*thread
,
1765 struct lwp_info
*lp
= get_thread_lwp (thread
);
1767 /* Check if we're only interested in events from a specific process
1768 or a specific LWP. */
1769 if (!thread
->id
.matches (ptid
))
1772 if (!lwp_resumed (lp
))
1775 if (lp
->status_pending_p
1776 && !thread_still_has_status_pending (thread
))
1778 resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1782 return lp
->status_pending_p
;
1786 find_lwp_pid (ptid_t ptid
)
1788 thread_info
*thread
= find_thread ([&] (thread_info
*thr_arg
)
1790 int lwp
= ptid
.lwp () != 0 ? ptid
.lwp () : ptid
.pid ();
1791 return thr_arg
->id
.lwp () == lwp
;
1797 return get_thread_lwp (thread
);
1800 /* Return the number of known LWPs in the tgid given by PID. */
1807 for_each_thread (pid
, [&] (thread_info
*thread
)
1815 /* See nat/linux-nat.h. */
1818 iterate_over_lwps (ptid_t filter
,
1819 gdb::function_view
<iterate_over_lwps_ftype
> callback
)
1821 thread_info
*thread
= find_thread (filter
, [&] (thread_info
*thr_arg
)
1823 lwp_info
*lwp
= get_thread_lwp (thr_arg
);
1825 return callback (lwp
);
1831 return get_thread_lwp (thread
);
1835 linux_process_target::check_zombie_leaders ()
1837 for_each_process ([this] (process_info
*proc
) {
1838 pid_t leader_pid
= pid_of (proc
);
1839 struct lwp_info
*leader_lp
;
1841 leader_lp
= find_lwp_pid (ptid_t (leader_pid
));
1844 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1845 "num_lwps=%d, zombie=%d\n",
1846 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1847 linux_proc_pid_is_zombie (leader_pid
));
1849 if (leader_lp
!= NULL
&& !leader_lp
->stopped
1850 /* Check if there are other threads in the group, as we may
1851 have raced with the inferior simply exiting. */
1852 && !last_thread_of_process_p (leader_pid
)
1853 && linux_proc_pid_is_zombie (leader_pid
))
1855 /* A leader zombie can mean one of two things:
1857 - It exited, and there's an exit status pending
1858 available, or only the leader exited (not the whole
1859 program). In the latter case, we can't waitpid the
1860 leader's exit status until all other threads are gone.
1862 - There are 3 or more threads in the group, and a thread
1863 other than the leader exec'd. On an exec, the Linux
1864 kernel destroys all other threads (except the execing
1865 one) in the thread group, and resets the execing thread's
1866 tid to the tgid. No exit notification is sent for the
1867 execing thread -- from the ptracer's perspective, it
1868 appears as though the execing thread just vanishes.
1869 Until we reap all other threads except the leader and the
1870 execing thread, the leader will be zombie, and the
1871 execing thread will be in `D (disc sleep)'. As soon as
1872 all other threads are reaped, the execing thread changes
1873 it's tid to the tgid, and the previous (zombie) leader
1874 vanishes, giving place to the "new" leader. We could try
1875 distinguishing the exit and exec cases, by waiting once
1876 more, and seeing if something comes out, but it doesn't
1877 sound useful. The previous leader _does_ go away, and
1878 we'll re-add the new one once we see the exec event
1879 (which is just the same as what would happen if the
1880 previous leader did exit voluntarily before some other
1884 debug_printf ("CZL: Thread group leader %d zombie "
1885 "(it exited, or another thread execd).\n",
1888 delete_lwp (leader_lp
);
1893 /* Callback for `find_thread'. Returns the first LWP that is not
1897 not_stopped_callback (thread_info
*thread
, ptid_t filter
)
1899 if (!thread
->id
.matches (filter
))
1902 lwp_info
*lwp
= get_thread_lwp (thread
);
1904 return !lwp
->stopped
;
1907 /* Increment LWP's suspend count. */
1910 lwp_suspended_inc (struct lwp_info
*lwp
)
1914 if (debug_threads
&& lwp
->suspended
> 4)
1916 struct thread_info
*thread
= get_lwp_thread (lwp
);
1918 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1919 " suspended=%d\n", lwpid_of (thread
), lwp
->suspended
);
1923 /* Decrement LWP's suspend count. */
1926 lwp_suspended_decr (struct lwp_info
*lwp
)
1930 if (lwp
->suspended
< 0)
1932 struct thread_info
*thread
= get_lwp_thread (lwp
);
1934 internal_error (__FILE__
, __LINE__
,
1935 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread
),
1940 /* This function should only be called if the LWP got a SIGTRAP.
1942 Handle any tracepoint steps or hits. Return true if a tracepoint
1943 event was handled, 0 otherwise. */
1946 handle_tracepoints (struct lwp_info
*lwp
)
1948 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1949 int tpoint_related_event
= 0;
1951 gdb_assert (lwp
->suspended
== 0);
1953 /* If this tracepoint hit causes a tracing stop, we'll immediately
1954 uninsert tracepoints. To do this, we temporarily pause all
1955 threads, unpatch away, and then unpause threads. We need to make
1956 sure the unpausing doesn't resume LWP too. */
1957 lwp_suspended_inc (lwp
);
1959 /* And we need to be sure that any all-threads-stopping doesn't try
1960 to move threads out of the jump pads, as it could deadlock the
1961 inferior (LWP could be in the jump pad, maybe even holding the
1964 /* Do any necessary step collect actions. */
1965 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1967 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1969 /* See if we just hit a tracepoint and do its main collect
1971 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1973 lwp_suspended_decr (lwp
);
1975 gdb_assert (lwp
->suspended
== 0);
1976 gdb_assert (!stabilizing_threads
1977 || (lwp
->collecting_fast_tracepoint
1978 != fast_tpoint_collect_result::not_collecting
));
1980 if (tpoint_related_event
)
1983 debug_printf ("got a tracepoint event\n");
1990 fast_tpoint_collect_result
1991 linux_process_target::linux_fast_tracepoint_collecting
1992 (lwp_info
*lwp
, fast_tpoint_collect_status
*status
)
1994 CORE_ADDR thread_area
;
1995 struct thread_info
*thread
= get_lwp_thread (lwp
);
1997 /* Get the thread area address. This is used to recognize which
1998 thread is which when tracing with the in-process agent library.
1999 We don't read anything from the address, and treat it as opaque;
2000 it's the address itself that we assume is unique per-thread. */
2001 if (low_get_thread_area (lwpid_of (thread
), &thread_area
) == -1)
2002 return fast_tpoint_collect_result::not_collecting
;
2004 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
2008 linux_process_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
2014 linux_process_target::maybe_move_out_of_jump_pad (lwp_info
*lwp
, int *wstat
)
2016 struct thread_info
*saved_thread
;
2018 saved_thread
= current_thread
;
2019 current_thread
= get_lwp_thread (lwp
);
2022 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
2023 && supports_fast_tracepoints ()
2024 && agent_loaded_p ())
2026 struct fast_tpoint_collect_status status
;
2029 debug_printf ("Checking whether LWP %ld needs to move out of the "
2031 lwpid_of (current_thread
));
2033 fast_tpoint_collect_result r
2034 = linux_fast_tracepoint_collecting (lwp
, &status
);
2037 || (WSTOPSIG (*wstat
) != SIGILL
2038 && WSTOPSIG (*wstat
) != SIGFPE
2039 && WSTOPSIG (*wstat
) != SIGSEGV
2040 && WSTOPSIG (*wstat
) != SIGBUS
))
2042 lwp
->collecting_fast_tracepoint
= r
;
2044 if (r
!= fast_tpoint_collect_result::not_collecting
)
2046 if (r
== fast_tpoint_collect_result::before_insn
2047 && lwp
->exit_jump_pad_bkpt
== NULL
)
2049 /* Haven't executed the original instruction yet.
2050 Set breakpoint there, and wait till it's hit,
2051 then single-step until exiting the jump pad. */
2052 lwp
->exit_jump_pad_bkpt
2053 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
2057 debug_printf ("Checking whether LWP %ld needs to move out of "
2058 "the jump pad...it does\n",
2059 lwpid_of (current_thread
));
2060 current_thread
= saved_thread
;
2067 /* If we get a synchronous signal while collecting, *and*
2068 while executing the (relocated) original instruction,
2069 reset the PC to point at the tpoint address, before
2070 reporting to GDB. Otherwise, it's an IPA lib bug: just
2071 report the signal to GDB, and pray for the best. */
2073 lwp
->collecting_fast_tracepoint
2074 = fast_tpoint_collect_result::not_collecting
;
2076 if (r
!= fast_tpoint_collect_result::not_collecting
2077 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
2078 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
2081 struct regcache
*regcache
;
2083 /* The si_addr on a few signals references the address
2084 of the faulting instruction. Adjust that as
2086 if ((WSTOPSIG (*wstat
) == SIGILL
2087 || WSTOPSIG (*wstat
) == SIGFPE
2088 || WSTOPSIG (*wstat
) == SIGBUS
2089 || WSTOPSIG (*wstat
) == SIGSEGV
)
2090 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
2091 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
2092 /* Final check just to make sure we don't clobber
2093 the siginfo of non-kernel-sent signals. */
2094 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
2096 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
2097 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
2098 (PTRACE_TYPE_ARG3
) 0, &info
);
2101 regcache
= get_thread_regcache (current_thread
, 1);
2102 low_set_pc (regcache
, status
.tpoint_addr
);
2103 lwp
->stop_pc
= status
.tpoint_addr
;
2105 /* Cancel any fast tracepoint lock this thread was
2107 force_unlock_trace_buffer ();
2110 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
2113 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2114 "stopping all threads momentarily.\n");
2116 stop_all_lwps (1, lwp
);
2118 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
2119 lwp
->exit_jump_pad_bkpt
= NULL
;
2121 unstop_all_lwps (1, lwp
);
2123 gdb_assert (lwp
->suspended
>= 0);
2129 debug_printf ("Checking whether LWP %ld needs to move out of the "
2131 lwpid_of (current_thread
));
2133 current_thread
= saved_thread
;
2137 /* Enqueue one signal in the "signals to report later when out of the
2141 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2143 struct pending_signals
*p_sig
;
2144 struct thread_info
*thread
= get_lwp_thread (lwp
);
2147 debug_printf ("Deferring signal %d for LWP %ld.\n",
2148 WSTOPSIG (*wstat
), lwpid_of (thread
));
2152 struct pending_signals
*sig
;
2154 for (sig
= lwp
->pending_signals_to_report
;
2157 debug_printf (" Already queued %d\n",
2160 debug_printf (" (no more currently queued signals)\n");
2163 /* Don't enqueue non-RT signals if they are already in the deferred
2164 queue. (SIGSTOP being the easiest signal to see ending up here
2166 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
2168 struct pending_signals
*sig
;
2170 for (sig
= lwp
->pending_signals_to_report
;
2174 if (sig
->signal
== WSTOPSIG (*wstat
))
2177 debug_printf ("Not requeuing already queued non-RT signal %d"
2186 p_sig
= XCNEW (struct pending_signals
);
2187 p_sig
->prev
= lwp
->pending_signals_to_report
;
2188 p_sig
->signal
= WSTOPSIG (*wstat
);
2190 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2193 lwp
->pending_signals_to_report
= p_sig
;
2196 /* Dequeue one signal from the "signals to report later when out of
2197 the jump pad" list. */
2200 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2202 struct thread_info
*thread
= get_lwp_thread (lwp
);
2204 if (lwp
->pending_signals_to_report
!= NULL
)
2206 struct pending_signals
**p_sig
;
2208 p_sig
= &lwp
->pending_signals_to_report
;
2209 while ((*p_sig
)->prev
!= NULL
)
2210 p_sig
= &(*p_sig
)->prev
;
2212 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
2213 if ((*p_sig
)->info
.si_signo
!= 0)
2214 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2220 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2221 WSTOPSIG (*wstat
), lwpid_of (thread
));
2225 struct pending_signals
*sig
;
2227 for (sig
= lwp
->pending_signals_to_report
;
2230 debug_printf (" Still queued %d\n",
2233 debug_printf (" (no more queued signals)\n");
2243 linux_process_target::check_stopped_by_watchpoint (lwp_info
*child
)
2245 struct thread_info
*saved_thread
= current_thread
;
2246 current_thread
= get_lwp_thread (child
);
2248 if (low_stopped_by_watchpoint ())
2250 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2251 child
->stopped_data_address
= low_stopped_data_address ();
2254 current_thread
= saved_thread
;
2256 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2260 linux_process_target::low_stopped_by_watchpoint ()
2266 linux_process_target::low_stopped_data_address ()
2271 /* Return the ptrace options that we want to try to enable. */
2274 linux_low_ptrace_options (int attached
)
2276 client_state
&cs
= get_client_state ();
2280 options
|= PTRACE_O_EXITKILL
;
2282 if (cs
.report_fork_events
)
2283 options
|= PTRACE_O_TRACEFORK
;
2285 if (cs
.report_vfork_events
)
2286 options
|= (PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEVFORKDONE
);
2288 if (cs
.report_exec_events
)
2289 options
|= PTRACE_O_TRACEEXEC
;
2291 options
|= PTRACE_O_TRACESYSGOOD
;
2297 linux_process_target::filter_event (int lwpid
, int wstat
)
2299 client_state
&cs
= get_client_state ();
2300 struct lwp_info
*child
;
2301 struct thread_info
*thread
;
2302 int have_stop_pc
= 0;
2304 child
= find_lwp_pid (ptid_t (lwpid
));
2306 /* Check for stop events reported by a process we didn't already
2307 know about - anything not already in our LWP list.
2309 If we're expecting to receive stopped processes after
2310 fork, vfork, and clone events, then we'll just add the
2311 new one to our list and go back to waiting for the event
2312 to be reported - the stopped process might be returned
2313 from waitpid before or after the event is.
2315 But note the case of a non-leader thread exec'ing after the
2316 leader having exited, and gone from our lists (because
2317 check_zombie_leaders deleted it). The non-leader thread
2318 changes its tid to the tgid. */
2320 if (WIFSTOPPED (wstat
) && child
== NULL
&& WSTOPSIG (wstat
) == SIGTRAP
2321 && linux_ptrace_get_extended_event (wstat
) == PTRACE_EVENT_EXEC
)
2325 /* A multi-thread exec after we had seen the leader exiting. */
2328 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2329 "after exec.\n", lwpid
);
2332 child_ptid
= ptid_t (lwpid
, lwpid
, 0);
2333 child
= add_lwp (child_ptid
);
2335 current_thread
= child
->thread
;
2338 /* If we didn't find a process, one of two things presumably happened:
2339 - A process we started and then detached from has exited. Ignore it.
2340 - A process we are controlling has forked and the new child's stop
2341 was reported to us by the kernel. Save its PID. */
2342 if (child
== NULL
&& WIFSTOPPED (wstat
))
2344 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
2347 else if (child
== NULL
)
2350 thread
= get_lwp_thread (child
);
2354 child
->last_status
= wstat
;
2356 /* Check if the thread has exited. */
2357 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
2360 debug_printf ("LLFE: %d exited.\n", lwpid
);
2362 if (finish_step_over (child
))
2364 /* Unsuspend all other LWPs, and set them back running again. */
2365 unsuspend_all_lwps (child
);
2368 /* If there is at least one more LWP, then the exit signal was
2369 not the end of the debugged application and should be
2370 ignored, unless GDB wants to hear about thread exits. */
2371 if (cs
.report_thread_events
2372 || last_thread_of_process_p (pid_of (thread
)))
2374 /* Since events are serialized to GDB core, and we can't
2375 report this one right now. Leave the status pending for
2376 the next time we're able to report it. */
2377 mark_lwp_dead (child
, wstat
);
2387 gdb_assert (WIFSTOPPED (wstat
));
2389 if (WIFSTOPPED (wstat
))
2391 struct process_info
*proc
;
2393 /* Architecture-specific setup after inferior is running. */
2394 proc
= find_process_pid (pid_of (thread
));
2395 if (proc
->tdesc
== NULL
)
2399 /* This needs to happen after we have attached to the
2400 inferior and it is stopped for the first time, but
2401 before we access any inferior registers. */
2402 arch_setup_thread (thread
);
2406 /* The process is started, but GDBserver will do
2407 architecture-specific setup after the program stops at
2408 the first instruction. */
2409 child
->status_pending_p
= 1;
2410 child
->status_pending
= wstat
;
2416 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
2418 struct process_info
*proc
= find_process_pid (pid_of (thread
));
2419 int options
= linux_low_ptrace_options (proc
->attached
);
2421 linux_enable_event_reporting (lwpid
, options
);
2422 child
->must_set_ptrace_flags
= 0;
2425 /* Always update syscall_state, even if it will be filtered later. */
2426 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SYSCALL_SIGTRAP
)
2428 child
->syscall_state
2429 = (child
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2430 ? TARGET_WAITKIND_SYSCALL_RETURN
2431 : TARGET_WAITKIND_SYSCALL_ENTRY
);
2435 /* Almost all other ptrace-stops are known to be outside of system
2436 calls, with further exceptions in handle_extended_wait. */
2437 child
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2440 /* Be careful to not overwrite stop_pc until save_stop_reason is
2442 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2443 && linux_is_extended_waitstatus (wstat
))
2445 child
->stop_pc
= get_pc (child
);
2446 if (handle_extended_wait (&child
, wstat
))
2448 /* The event has been handled, so just return without
2454 if (linux_wstatus_maybe_breakpoint (wstat
))
2456 if (save_stop_reason (child
))
2461 child
->stop_pc
= get_pc (child
);
2463 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2464 && child
->stop_expected
)
2467 debug_printf ("Expected stop.\n");
2468 child
->stop_expected
= 0;
2470 if (thread
->last_resume_kind
== resume_stop
)
2472 /* We want to report the stop to the core. Treat the
2473 SIGSTOP as a normal event. */
2475 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2476 target_pid_to_str (ptid_of (thread
)));
2478 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2480 /* Stopping threads. We don't want this SIGSTOP to end up
2483 debug_printf ("LLW: SIGSTOP caught for %s "
2484 "while stopping threads.\n",
2485 target_pid_to_str (ptid_of (thread
)));
2490 /* This is a delayed SIGSTOP. Filter out the event. */
2492 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2493 child
->stepping
? "step" : "continue",
2494 target_pid_to_str (ptid_of (thread
)));
2496 resume_one_lwp (child
, child
->stepping
, 0, NULL
);
2501 child
->status_pending_p
= 1;
2502 child
->status_pending
= wstat
;
2506 /* Return true if THREAD is doing hardware single step. */
2509 maybe_hw_step (struct thread_info
*thread
)
2511 if (can_hardware_single_step ())
2515 /* GDBserver must insert single-step breakpoint for software
2517 gdb_assert (has_single_step_breakpoints (thread
));
2523 linux_process_target::resume_stopped_resumed_lwps (thread_info
*thread
)
2525 struct lwp_info
*lp
= get_thread_lwp (thread
);
2529 && !lp
->status_pending_p
2530 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
2534 if (thread
->last_resume_kind
== resume_step
)
2535 step
= maybe_hw_step (thread
);
2538 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2539 target_pid_to_str (ptid_of (thread
)),
2540 paddress (lp
->stop_pc
),
2543 resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
2548 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid
,
2550 int *wstatp
, int options
)
2552 struct thread_info
*event_thread
;
2553 struct lwp_info
*event_child
, *requested_child
;
2554 sigset_t block_mask
, prev_mask
;
2557 /* N.B. event_thread points to the thread_info struct that contains
2558 event_child. Keep them in sync. */
2559 event_thread
= NULL
;
2561 requested_child
= NULL
;
2563 /* Check for a lwp with a pending status. */
2565 if (filter_ptid
== minus_one_ptid
|| filter_ptid
.is_pid ())
2567 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2569 return status_pending_p_callback (thread
, filter_ptid
);
2572 if (event_thread
!= NULL
)
2573 event_child
= get_thread_lwp (event_thread
);
2574 if (debug_threads
&& event_thread
)
2575 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
2577 else if (filter_ptid
!= null_ptid
)
2579 requested_child
= find_lwp_pid (filter_ptid
);
2581 if (stopping_threads
== NOT_STOPPING_THREADS
2582 && requested_child
->status_pending_p
2583 && (requested_child
->collecting_fast_tracepoint
2584 != fast_tpoint_collect_result::not_collecting
))
2586 enqueue_one_deferred_signal (requested_child
,
2587 &requested_child
->status_pending
);
2588 requested_child
->status_pending_p
= 0;
2589 requested_child
->status_pending
= 0;
2590 resume_one_lwp (requested_child
, 0, 0, NULL
);
2593 if (requested_child
->suspended
2594 && requested_child
->status_pending_p
)
2596 internal_error (__FILE__
, __LINE__
,
2597 "requesting an event out of a"
2598 " suspended child?");
2601 if (requested_child
->status_pending_p
)
2603 event_child
= requested_child
;
2604 event_thread
= get_lwp_thread (event_child
);
2608 if (event_child
!= NULL
)
2611 debug_printf ("Got an event from pending child %ld (%04x)\n",
2612 lwpid_of (event_thread
), event_child
->status_pending
);
2613 *wstatp
= event_child
->status_pending
;
2614 event_child
->status_pending_p
= 0;
2615 event_child
->status_pending
= 0;
2616 current_thread
= event_thread
;
2617 return lwpid_of (event_thread
);
2620 /* But if we don't find a pending event, we'll have to wait.
2622 We only enter this loop if no process has a pending wait status.
2623 Thus any action taken in response to a wait status inside this
2624 loop is responding as soon as we detect the status, not after any
2627 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2628 all signals while here. */
2629 sigfillset (&block_mask
);
2630 gdb_sigmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2632 /* Always pull all events out of the kernel. We'll randomly select
2633 an event LWP out of all that have events, to prevent
2635 while (event_child
== NULL
)
2639 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2642 - If the thread group leader exits while other threads in the
2643 thread group still exist, waitpid(TGID, ...) hangs. That
2644 waitpid won't return an exit status until the other threads
2645 in the group are reaped.
2647 - When a non-leader thread execs, that thread just vanishes
2648 without reporting an exit (so we'd hang if we waited for it
2649 explicitly in that case). The exec event is reported to
2652 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2655 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2656 ret
, errno
? safe_strerror (errno
) : "ERRNO-OK");
2662 debug_printf ("LLW: waitpid %ld received %s\n",
2663 (long) ret
, status_to_str (*wstatp
));
2666 /* Filter all events. IOW, leave all events pending. We'll
2667 randomly select an event LWP out of all that have events
2669 filter_event (ret
, *wstatp
);
2670 /* Retry until nothing comes out of waitpid. A single
2671 SIGCHLD can indicate more than one child stopped. */
2675 /* Now that we've pulled all events out of the kernel, resume
2676 LWPs that don't have an interesting event to report. */
2677 if (stopping_threads
== NOT_STOPPING_THREADS
)
2678 for_each_thread ([this] (thread_info
*thread
)
2680 resume_stopped_resumed_lwps (thread
);
2683 /* ... and find an LWP with a status to report to the core, if
2685 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2687 return status_pending_p_callback (thread
, filter_ptid
);
2690 if (event_thread
!= NULL
)
2692 event_child
= get_thread_lwp (event_thread
);
2693 *wstatp
= event_child
->status_pending
;
2694 event_child
->status_pending_p
= 0;
2695 event_child
->status_pending
= 0;
2699 /* Check for zombie thread group leaders. Those can't be reaped
2700 until all other threads in the thread group are. */
2701 check_zombie_leaders ();
2703 auto not_stopped
= [&] (thread_info
*thread
)
2705 return not_stopped_callback (thread
, wait_ptid
);
2708 /* If there are no resumed children left in the set of LWPs we
2709 want to wait for, bail. We can't just block in
2710 waitpid/sigsuspend, because lwps might have been left stopped
2711 in trace-stop state, and we'd be stuck forever waiting for
2712 their status to change (which would only happen if we resumed
2713 them). Even if WNOHANG is set, this return code is preferred
2714 over 0 (below), as it is more detailed. */
2715 if (find_thread (not_stopped
) == NULL
)
2718 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2719 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2723 /* No interesting event to report to the caller. */
2724 if ((options
& WNOHANG
))
2727 debug_printf ("WNOHANG set, no event found\n");
2729 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2733 /* Block until we get an event reported with SIGCHLD. */
2735 debug_printf ("sigsuspend'ing\n");
2737 sigsuspend (&prev_mask
);
2738 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2742 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2744 current_thread
= event_thread
;
2746 return lwpid_of (event_thread
);
2750 linux_process_target::wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2752 return wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
2755 /* Select one LWP out of those that have events pending. */
2758 select_event_lwp (struct lwp_info
**orig_lp
)
2760 struct thread_info
*event_thread
= NULL
;
2762 /* In all-stop, give preference to the LWP that is being
2763 single-stepped. There will be at most one, and it's the LWP that
2764 the core is most interested in. If we didn't do this, then we'd
2765 have to handle pending step SIGTRAPs somehow in case the core
2766 later continues the previously-stepped thread, otherwise we'd
2767 report the pending SIGTRAP, and the core, not having stepped the
2768 thread, wouldn't understand what the trap was for, and therefore
2769 would report it to the user as a random signal. */
2772 event_thread
= find_thread ([] (thread_info
*thread
)
2774 lwp_info
*lp
= get_thread_lwp (thread
);
2776 return (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2777 && thread
->last_resume_kind
== resume_step
2778 && lp
->status_pending_p
);
2781 if (event_thread
!= NULL
)
2784 debug_printf ("SEL: Select single-step %s\n",
2785 target_pid_to_str (ptid_of (event_thread
)));
2788 if (event_thread
== NULL
)
2790 /* No single-stepping LWP. Select one at random, out of those
2791 which have had events. */
2793 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2795 lwp_info
*lp
= get_thread_lwp (thread
);
2797 /* Only resumed LWPs that have an event pending. */
2798 return (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2799 && lp
->status_pending_p
);
2803 if (event_thread
!= NULL
)
2805 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2807 /* Switch the event LWP. */
2808 *orig_lp
= event_lp
;
2812 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2816 unsuspend_all_lwps (struct lwp_info
*except
)
2818 for_each_thread ([&] (thread_info
*thread
)
2820 lwp_info
*lwp
= get_thread_lwp (thread
);
2823 lwp_suspended_decr (lwp
);
2827 static bool lwp_running (thread_info
*thread
);
2829 /* Stabilize threads (move out of jump pads).
2831 If a thread is midway collecting a fast tracepoint, we need to
2832 finish the collection and move it out of the jump pad before
2833 reporting the signal.
2835 This avoids recursion while collecting (when a signal arrives
2836 midway, and the signal handler itself collects), which would trash
2837 the trace buffer. In case the user set a breakpoint in a signal
2838 handler, this avoids the backtrace showing the jump pad, etc..
2839 Most importantly, there are certain things we can't do safely if
2840 threads are stopped in a jump pad (or in its callee's). For
2843 - starting a new trace run. A thread still collecting the
2844 previous run, could trash the trace buffer when resumed. The trace
2845 buffer control structures would have been reset but the thread had
2846 no way to tell. The thread could even midway memcpy'ing to the
2847 buffer, which would mean that when resumed, it would clobber the
2848 trace buffer that had been set for a new run.
2850 - we can't rewrite/reuse the jump pads for new tracepoints
2851 safely. Say you do tstart while a thread is stopped midway while
2852 collecting. When the thread is later resumed, it finishes the
2853 collection, and returns to the jump pad, to execute the original
2854 instruction that was under the tracepoint jump at the time the
2855 older run had been started. If the jump pad had been rewritten
2856 since for something else in the new run, the thread would now
2857 execute the wrong / random instructions. */
2860 linux_process_target::stabilize_threads ()
2862 thread_info
*thread_stuck
= find_thread ([this] (thread_info
*thread
)
2864 return stuck_in_jump_pad (thread
);
2867 if (thread_stuck
!= NULL
)
2870 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2871 lwpid_of (thread_stuck
));
2875 thread_info
*saved_thread
= current_thread
;
2877 stabilizing_threads
= 1;
2880 for_each_thread ([this] (thread_info
*thread
)
2882 move_out_of_jump_pad (thread
);
2885 /* Loop until all are stopped out of the jump pads. */
2886 while (find_thread (lwp_running
) != NULL
)
2888 struct target_waitstatus ourstatus
;
2889 struct lwp_info
*lwp
;
2892 /* Note that we go through the full wait even loop. While
2893 moving threads out of jump pad, we need to be able to step
2894 over internal breakpoints and such. */
2895 wait_1 (minus_one_ptid
, &ourstatus
, 0);
2897 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2899 lwp
= get_thread_lwp (current_thread
);
2902 lwp_suspended_inc (lwp
);
2904 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2905 || current_thread
->last_resume_kind
== resume_stop
)
2907 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2908 enqueue_one_deferred_signal (lwp
, &wstat
);
2913 unsuspend_all_lwps (NULL
);
2915 stabilizing_threads
= 0;
2917 current_thread
= saved_thread
;
2921 thread_stuck
= find_thread ([this] (thread_info
*thread
)
2923 return stuck_in_jump_pad (thread
);
2926 if (thread_stuck
!= NULL
)
2927 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2928 lwpid_of (thread_stuck
));
2932 /* Convenience function that is called when the kernel reports an
2933 event that is not passed out to GDB. */
2936 ignore_event (struct target_waitstatus
*ourstatus
)
2938 /* If we got an event, there may still be others, as a single
2939 SIGCHLD can indicate more than one child stopped. This forces
2940 another target_wait call. */
2943 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2948 linux_process_target::filter_exit_event (lwp_info
*event_child
,
2949 target_waitstatus
*ourstatus
)
2951 client_state
&cs
= get_client_state ();
2952 struct thread_info
*thread
= get_lwp_thread (event_child
);
2953 ptid_t ptid
= ptid_of (thread
);
2955 if (!last_thread_of_process_p (pid_of (thread
)))
2957 if (cs
.report_thread_events
)
2958 ourstatus
->kind
= TARGET_WAITKIND_THREAD_EXITED
;
2960 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2962 delete_lwp (event_child
);
2967 /* Returns 1 if GDB is interested in any event_child syscalls. */
2970 gdb_catching_syscalls_p (struct lwp_info
*event_child
)
2972 struct thread_info
*thread
= get_lwp_thread (event_child
);
2973 struct process_info
*proc
= get_thread_process (thread
);
2975 return !proc
->syscalls_to_catch
.empty ();
2978 /* Returns 1 if GDB is interested in the event_child syscall.
2979 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
2982 gdb_catch_this_syscall_p (struct lwp_info
*event_child
)
2985 struct thread_info
*thread
= get_lwp_thread (event_child
);
2986 struct process_info
*proc
= get_thread_process (thread
);
2988 if (proc
->syscalls_to_catch
.empty ())
2991 if (proc
->syscalls_to_catch
[0] == ANY_SYSCALL
)
2994 get_syscall_trapinfo (event_child
, &sysno
);
2996 for (int iter
: proc
->syscalls_to_catch
)
3004 linux_process_target::wait_1 (ptid_t ptid
, target_waitstatus
*ourstatus
,
3007 client_state
&cs
= get_client_state ();
3009 struct lwp_info
*event_child
;
3012 int step_over_finished
;
3013 int bp_explains_trap
;
3014 int maybe_internal_trap
;
3023 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid
));
3026 /* Translate generic target options into linux options. */
3028 if (target_options
& TARGET_WNOHANG
)
3031 bp_explains_trap
= 0;
3034 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3036 auto status_pending_p_any
= [&] (thread_info
*thread
)
3038 return status_pending_p_callback (thread
, minus_one_ptid
);
3041 auto not_stopped
= [&] (thread_info
*thread
)
3043 return not_stopped_callback (thread
, minus_one_ptid
);
3046 /* Find a resumed LWP, if any. */
3047 if (find_thread (status_pending_p_any
) != NULL
)
3049 else if (find_thread (not_stopped
) != NULL
)
3054 if (step_over_bkpt
== null_ptid
)
3055 pid
= wait_for_event (ptid
, &w
, options
);
3059 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3060 target_pid_to_str (step_over_bkpt
));
3061 pid
= wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
3064 if (pid
== 0 || (pid
== -1 && !any_resumed
))
3066 gdb_assert (target_options
& TARGET_WNOHANG
);
3070 debug_printf ("wait_1 ret = null_ptid, "
3071 "TARGET_WAITKIND_IGNORE\n");
3075 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3082 debug_printf ("wait_1 ret = null_ptid, "
3083 "TARGET_WAITKIND_NO_RESUMED\n");
3087 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
3091 event_child
= get_thread_lwp (current_thread
);
3093 /* wait_for_event only returns an exit status for the last
3094 child of a process. Report it. */
3095 if (WIFEXITED (w
) || WIFSIGNALED (w
))
3099 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
3100 ourstatus
->value
.integer
= WEXITSTATUS (w
);
3104 debug_printf ("wait_1 ret = %s, exited with "
3106 target_pid_to_str (ptid_of (current_thread
)),
3113 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
3114 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
3118 debug_printf ("wait_1 ret = %s, terminated with "
3120 target_pid_to_str (ptid_of (current_thread
)),
3126 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3127 return filter_exit_event (event_child
, ourstatus
);
3129 return ptid_of (current_thread
);
3132 /* If step-over executes a breakpoint instruction, in the case of a
3133 hardware single step it means a gdb/gdbserver breakpoint had been
3134 planted on top of a permanent breakpoint, in the case of a software
3135 single step it may just mean that gdbserver hit the reinsert breakpoint.
3136 The PC has been adjusted by save_stop_reason to point at
3137 the breakpoint address.
3138 So in the case of the hardware single step advance the PC manually
3139 past the breakpoint and in the case of software single step advance only
3140 if it's not the single_step_breakpoint we are hitting.
3141 This avoids that a program would keep trapping a permanent breakpoint
3143 if (step_over_bkpt
!= null_ptid
3144 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3145 && (event_child
->stepping
3146 || !single_step_breakpoint_inserted_here (event_child
->stop_pc
)))
3148 int increment_pc
= 0;
3149 int breakpoint_kind
= 0;
3150 CORE_ADDR stop_pc
= event_child
->stop_pc
;
3152 breakpoint_kind
= breakpoint_kind_from_current_state (&stop_pc
);
3153 sw_breakpoint_from_kind (breakpoint_kind
, &increment_pc
);
3157 debug_printf ("step-over for %s executed software breakpoint\n",
3158 target_pid_to_str (ptid_of (current_thread
)));
3161 if (increment_pc
!= 0)
3163 struct regcache
*regcache
3164 = get_thread_regcache (current_thread
, 1);
3166 event_child
->stop_pc
+= increment_pc
;
3167 low_set_pc (regcache
, event_child
->stop_pc
);
3169 if (!low_breakpoint_at (event_child
->stop_pc
))
3170 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3174 /* If this event was not handled before, and is not a SIGTRAP, we
3175 report it. SIGILL and SIGSEGV are also treated as traps in case
3176 a breakpoint is inserted at the current PC. If this target does
3177 not support internal breakpoints at all, we also report the
3178 SIGTRAP without further processing; it's of no concern to us. */
3180 = (low_supports_breakpoints ()
3181 && (WSTOPSIG (w
) == SIGTRAP
3182 || ((WSTOPSIG (w
) == SIGILL
3183 || WSTOPSIG (w
) == SIGSEGV
)
3184 && low_breakpoint_at (event_child
->stop_pc
))));
3186 if (maybe_internal_trap
)
3188 /* Handle anything that requires bookkeeping before deciding to
3189 report the event or continue waiting. */
3191 /* First check if we can explain the SIGTRAP with an internal
3192 breakpoint, or if we should possibly report the event to GDB.
3193 Do this before anything that may remove or insert a
3195 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
3197 /* We have a SIGTRAP, possibly a step-over dance has just
3198 finished. If so, tweak the state machine accordingly,
3199 reinsert breakpoints and delete any single-step
3201 step_over_finished
= finish_step_over (event_child
);
3203 /* Now invoke the callbacks of any internal breakpoints there. */
3204 check_breakpoints (event_child
->stop_pc
);
3206 /* Handle tracepoint data collecting. This may overflow the
3207 trace buffer, and cause a tracing stop, removing
3209 trace_event
= handle_tracepoints (event_child
);
3211 if (bp_explains_trap
)
3214 debug_printf ("Hit a gdbserver breakpoint.\n");
3219 /* We have some other signal, possibly a step-over dance was in
3220 progress, and it should be cancelled too. */
3221 step_over_finished
= finish_step_over (event_child
);
3224 /* We have all the data we need. Either report the event to GDB, or
3225 resume threads and keep waiting for more. */
3227 /* If we're collecting a fast tracepoint, finish the collection and
3228 move out of the jump pad before delivering a signal. See
3229 linux_stabilize_threads. */
3232 && WSTOPSIG (w
) != SIGTRAP
3233 && supports_fast_tracepoints ()
3234 && agent_loaded_p ())
3237 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3238 "to defer or adjust it.\n",
3239 WSTOPSIG (w
), lwpid_of (current_thread
));
3241 /* Allow debugging the jump pad itself. */
3242 if (current_thread
->last_resume_kind
!= resume_step
3243 && maybe_move_out_of_jump_pad (event_child
, &w
))
3245 enqueue_one_deferred_signal (event_child
, &w
);
3248 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3249 WSTOPSIG (w
), lwpid_of (current_thread
));
3251 resume_one_lwp (event_child
, 0, 0, NULL
);
3255 return ignore_event (ourstatus
);
3259 if (event_child
->collecting_fast_tracepoint
3260 != fast_tpoint_collect_result::not_collecting
)
3263 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3264 "Check if we're already there.\n",
3265 lwpid_of (current_thread
),
3266 (int) event_child
->collecting_fast_tracepoint
);
3270 event_child
->collecting_fast_tracepoint
3271 = linux_fast_tracepoint_collecting (event_child
, NULL
);
3273 if (event_child
->collecting_fast_tracepoint
3274 != fast_tpoint_collect_result::before_insn
)
3276 /* No longer need this breakpoint. */
3277 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
3280 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3281 "stopping all threads momentarily.\n");
3283 /* Other running threads could hit this breakpoint.
3284 We don't handle moribund locations like GDB does,
3285 instead we always pause all threads when removing
3286 breakpoints, so that any step-over or
3287 decr_pc_after_break adjustment is always taken
3288 care of while the breakpoint is still
3290 stop_all_lwps (1, event_child
);
3292 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
3293 event_child
->exit_jump_pad_bkpt
= NULL
;
3295 unstop_all_lwps (1, event_child
);
3297 gdb_assert (event_child
->suspended
>= 0);
3301 if (event_child
->collecting_fast_tracepoint
3302 == fast_tpoint_collect_result::not_collecting
)
3305 debug_printf ("fast tracepoint finished "
3306 "collecting successfully.\n");
3308 /* We may have a deferred signal to report. */
3309 if (dequeue_one_deferred_signal (event_child
, &w
))
3312 debug_printf ("dequeued one signal.\n");
3317 debug_printf ("no deferred signals.\n");
3319 if (stabilizing_threads
)
3321 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3322 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3326 debug_printf ("wait_1 ret = %s, stopped "
3327 "while stabilizing threads\n",
3328 target_pid_to_str (ptid_of (current_thread
)));
3332 return ptid_of (current_thread
);
3338 /* Check whether GDB would be interested in this event. */
3340 /* Check if GDB is interested in this syscall. */
3342 && WSTOPSIG (w
) == SYSCALL_SIGTRAP
3343 && !gdb_catch_this_syscall_p (event_child
))
3347 debug_printf ("Ignored syscall for LWP %ld.\n",
3348 lwpid_of (current_thread
));
3351 resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
3355 return ignore_event (ourstatus
);
3358 /* If GDB is not interested in this signal, don't stop other
3359 threads, and don't report it to GDB. Just resume the inferior
3360 right away. We do this for threading-related signals as well as
3361 any that GDB specifically requested we ignore. But never ignore
3362 SIGSTOP if we sent it ourselves, and do not ignore signals when
3363 stepping - they may require special handling to skip the signal
3364 handler. Also never ignore signals that could be caused by a
3367 && current_thread
->last_resume_kind
!= resume_step
3369 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3370 (current_process ()->priv
->thread_db
!= NULL
3371 && (WSTOPSIG (w
) == __SIGRTMIN
3372 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
3375 (cs
.pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
3376 && !(WSTOPSIG (w
) == SIGSTOP
3377 && current_thread
->last_resume_kind
== resume_stop
)
3378 && !linux_wstatus_maybe_breakpoint (w
))))
3380 siginfo_t info
, *info_p
;
3383 debug_printf ("Ignored signal %d for LWP %ld.\n",
3384 WSTOPSIG (w
), lwpid_of (current_thread
));
3386 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
3387 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
3392 if (step_over_finished
)
3394 /* We cancelled this thread's step-over above. We still
3395 need to unsuspend all other LWPs, and set them back
3396 running again while the signal handler runs. */
3397 unsuspend_all_lwps (event_child
);
3399 /* Enqueue the pending signal info so that proceed_all_lwps
3401 enqueue_pending_signal (event_child
, WSTOPSIG (w
), info_p
);
3403 proceed_all_lwps ();
3407 resume_one_lwp (event_child
, event_child
->stepping
,
3408 WSTOPSIG (w
), info_p
);
3414 return ignore_event (ourstatus
);
3417 /* Note that all addresses are always "out of the step range" when
3418 there's no range to begin with. */
3419 in_step_range
= lwp_in_step_range (event_child
);
3421 /* If GDB wanted this thread to single step, and the thread is out
3422 of the step range, we always want to report the SIGTRAP, and let
3423 GDB handle it. Watchpoints should always be reported. So should
3424 signals we can't explain. A SIGTRAP we can't explain could be a
3425 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3426 do, we're be able to handle GDB breakpoints on top of internal
3427 breakpoints, by handling the internal breakpoint and still
3428 reporting the event to GDB. If we don't, we're out of luck, GDB
3429 won't see the breakpoint hit. If we see a single-step event but
3430 the thread should be continuing, don't pass the trap to gdb.
3431 That indicates that we had previously finished a single-step but
3432 left the single-step pending -- see
3433 complete_ongoing_step_over. */
3434 report_to_gdb
= (!maybe_internal_trap
3435 || (current_thread
->last_resume_kind
== resume_step
3437 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3439 && !bp_explains_trap
3441 && !step_over_finished
3442 && !(current_thread
->last_resume_kind
== resume_continue
3443 && event_child
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
))
3444 || (gdb_breakpoint_here (event_child
->stop_pc
)
3445 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
3446 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
))
3447 || event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
);
3449 run_breakpoint_commands (event_child
->stop_pc
);
3451 /* We found no reason GDB would want us to stop. We either hit one
3452 of our own breakpoints, or finished an internal step GDB
3453 shouldn't know about. */
3458 if (bp_explains_trap
)
3459 debug_printf ("Hit a gdbserver breakpoint.\n");
3460 if (step_over_finished
)
3461 debug_printf ("Step-over finished.\n");
3463 debug_printf ("Tracepoint event.\n");
3464 if (lwp_in_step_range (event_child
))
3465 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3466 paddress (event_child
->stop_pc
),
3467 paddress (event_child
->step_range_start
),
3468 paddress (event_child
->step_range_end
));
3471 /* We're not reporting this breakpoint to GDB, so apply the
3472 decr_pc_after_break adjustment to the inferior's regcache
3475 if (low_supports_breakpoints ())
3477 struct regcache
*regcache
3478 = get_thread_regcache (current_thread
, 1);
3479 low_set_pc (regcache
, event_child
->stop_pc
);
3482 if (step_over_finished
)
3484 /* If we have finished stepping over a breakpoint, we've
3485 stopped and suspended all LWPs momentarily except the
3486 stepping one. This is where we resume them all again.
3487 We're going to keep waiting, so use proceed, which
3488 handles stepping over the next breakpoint. */
3489 unsuspend_all_lwps (event_child
);
3493 /* Remove the single-step breakpoints if any. Note that
3494 there isn't single-step breakpoint if we finished stepping
3496 if (supports_software_single_step ()
3497 && has_single_step_breakpoints (current_thread
))
3499 stop_all_lwps (0, event_child
);
3500 delete_single_step_breakpoints (current_thread
);
3501 unstop_all_lwps (0, event_child
);
3506 debug_printf ("proceeding all threads.\n");
3507 proceed_all_lwps ();
3512 return ignore_event (ourstatus
);
3517 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3520 = target_waitstatus_to_string (&event_child
->waitstatus
);
3522 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3523 lwpid_of (get_lwp_thread (event_child
)), str
.c_str ());
3525 if (current_thread
->last_resume_kind
== resume_step
)
3527 if (event_child
->step_range_start
== event_child
->step_range_end
)
3528 debug_printf ("GDB wanted to single-step, reporting event.\n");
3529 else if (!lwp_in_step_range (event_child
))
3530 debug_printf ("Out of step range, reporting event.\n");
3532 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3533 debug_printf ("Stopped by watchpoint.\n");
3534 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3535 debug_printf ("Stopped by GDB breakpoint.\n");
3537 debug_printf ("Hit a non-gdbserver trap event.\n");
3540 /* Alright, we're going to report a stop. */
3542 /* Remove single-step breakpoints. */
3543 if (supports_software_single_step ())
3545 /* Remove single-step breakpoints or not. It it is true, stop all
3546 lwps, so that other threads won't hit the breakpoint in the
3548 int remove_single_step_breakpoints_p
= 0;
3552 remove_single_step_breakpoints_p
3553 = has_single_step_breakpoints (current_thread
);
3557 /* In all-stop, a stop reply cancels all previous resume
3558 requests. Delete all single-step breakpoints. */
3560 find_thread ([&] (thread_info
*thread
) {
3561 if (has_single_step_breakpoints (thread
))
3563 remove_single_step_breakpoints_p
= 1;
3571 if (remove_single_step_breakpoints_p
)
3573 /* If we remove single-step breakpoints from memory, stop all lwps,
3574 so that other threads won't hit the breakpoint in the staled
3576 stop_all_lwps (0, event_child
);
3580 gdb_assert (has_single_step_breakpoints (current_thread
));
3581 delete_single_step_breakpoints (current_thread
);
3585 for_each_thread ([] (thread_info
*thread
){
3586 if (has_single_step_breakpoints (thread
))
3587 delete_single_step_breakpoints (thread
);
3591 unstop_all_lwps (0, event_child
);
3595 if (!stabilizing_threads
)
3597 /* In all-stop, stop all threads. */
3599 stop_all_lwps (0, NULL
);
3601 if (step_over_finished
)
3605 /* If we were doing a step-over, all other threads but
3606 the stepping one had been paused in start_step_over,
3607 with their suspend counts incremented. We don't want
3608 to do a full unstop/unpause, because we're in
3609 all-stop mode (so we want threads stopped), but we
3610 still need to unsuspend the other threads, to
3611 decrement their `suspended' count back. */
3612 unsuspend_all_lwps (event_child
);
3616 /* If we just finished a step-over, then all threads had
3617 been momentarily paused. In all-stop, that's fine,
3618 we want threads stopped by now anyway. In non-stop,
3619 we need to re-resume threads that GDB wanted to be
3621 unstop_all_lwps (1, event_child
);
3625 /* If we're not waiting for a specific LWP, choose an event LWP
3626 from among those that have had events. Giving equal priority
3627 to all LWPs that have had events helps prevent
3629 if (ptid
== minus_one_ptid
)
3631 event_child
->status_pending_p
= 1;
3632 event_child
->status_pending
= w
;
3634 select_event_lwp (&event_child
);
3636 /* current_thread and event_child must stay in sync. */
3637 current_thread
= get_lwp_thread (event_child
);
3639 event_child
->status_pending_p
= 0;
3640 w
= event_child
->status_pending
;
3644 /* Stabilize threads (move out of jump pads). */
3646 target_stabilize_threads ();
3650 /* If we just finished a step-over, then all threads had been
3651 momentarily paused. In all-stop, that's fine, we want
3652 threads stopped by now anyway. In non-stop, we need to
3653 re-resume threads that GDB wanted to be running. */
3654 if (step_over_finished
)
3655 unstop_all_lwps (1, event_child
);
3658 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3660 /* If the reported event is an exit, fork, vfork or exec, let
3663 /* Break the unreported fork relationship chain. */
3664 if (event_child
->waitstatus
.kind
== TARGET_WAITKIND_FORKED
3665 || event_child
->waitstatus
.kind
== TARGET_WAITKIND_VFORKED
)
3667 event_child
->fork_relative
->fork_relative
= NULL
;
3668 event_child
->fork_relative
= NULL
;
3671 *ourstatus
= event_child
->waitstatus
;
3672 /* Clear the event lwp's waitstatus since we handled it already. */
3673 event_child
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3676 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3678 /* Now that we've selected our final event LWP, un-adjust its PC if
3679 it was a software breakpoint, and the client doesn't know we can
3680 adjust the breakpoint ourselves. */
3681 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3682 && !cs
.swbreak_feature
)
3684 int decr_pc
= low_decr_pc_after_break ();
3688 struct regcache
*regcache
3689 = get_thread_regcache (current_thread
, 1);
3690 low_set_pc (regcache
, event_child
->stop_pc
+ decr_pc
);
3694 if (WSTOPSIG (w
) == SYSCALL_SIGTRAP
)
3696 get_syscall_trapinfo (event_child
,
3697 &ourstatus
->value
.syscall_number
);
3698 ourstatus
->kind
= event_child
->syscall_state
;
3700 else if (current_thread
->last_resume_kind
== resume_stop
3701 && WSTOPSIG (w
) == SIGSTOP
)
3703 /* A thread that has been requested to stop by GDB with vCont;t,
3704 and it stopped cleanly, so report as SIG0. The use of
3705 SIGSTOP is an implementation detail. */
3706 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3708 else if (current_thread
->last_resume_kind
== resume_stop
3709 && WSTOPSIG (w
) != SIGSTOP
)
3711 /* A thread that has been requested to stop by GDB with vCont;t,
3712 but, it stopped for other reasons. */
3713 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3715 else if (ourstatus
->kind
== TARGET_WAITKIND_STOPPED
)
3717 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3720 gdb_assert (step_over_bkpt
== null_ptid
);
3724 debug_printf ("wait_1 ret = %s, %d, %d\n",
3725 target_pid_to_str (ptid_of (current_thread
)),
3726 ourstatus
->kind
, ourstatus
->value
.sig
);
3730 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3731 return filter_exit_event (event_child
, ourstatus
);
3733 return ptid_of (current_thread
);
3736 /* Get rid of any pending event in the pipe. */
3738 async_file_flush (void)
3744 ret
= read (linux_event_pipe
[0], &buf
, 1);
3745 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3748 /* Put something in the pipe, so the event loop wakes up. */
3750 async_file_mark (void)
3754 async_file_flush ();
3757 ret
= write (linux_event_pipe
[1], "+", 1);
3758 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3760 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3761 be awakened anyway. */
3765 linux_process_target::wait (ptid_t ptid
,
3766 target_waitstatus
*ourstatus
,
3771 /* Flush the async file first. */
3772 if (target_is_async_p ())
3773 async_file_flush ();
3777 event_ptid
= wait_1 (ptid
, ourstatus
, target_options
);
3779 while ((target_options
& TARGET_WNOHANG
) == 0
3780 && event_ptid
== null_ptid
3781 && ourstatus
->kind
== TARGET_WAITKIND_IGNORE
);
3783 /* If at least one stop was reported, there may be more. A single
3784 SIGCHLD can signal more than one child stop. */
3785 if (target_is_async_p ()
3786 && (target_options
& TARGET_WNOHANG
) != 0
3787 && event_ptid
!= null_ptid
)
3793 /* Send a signal to an LWP. */
3796 kill_lwp (unsigned long lwpid
, int signo
)
3801 ret
= syscall (__NR_tkill
, lwpid
, signo
);
3802 if (errno
== ENOSYS
)
3804 /* If tkill fails, then we are not using nptl threads, a
3805 configuration we no longer support. */
3806 perror_with_name (("tkill"));
3812 linux_stop_lwp (struct lwp_info
*lwp
)
3818 send_sigstop (struct lwp_info
*lwp
)
3822 pid
= lwpid_of (get_lwp_thread (lwp
));
3824 /* If we already have a pending stop signal for this process, don't
3826 if (lwp
->stop_expected
)
3829 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3835 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3837 lwp
->stop_expected
= 1;
3838 kill_lwp (pid
, SIGSTOP
);
3842 send_sigstop (thread_info
*thread
, lwp_info
*except
)
3844 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3846 /* Ignore EXCEPT. */
3856 /* Increment the suspend count of an LWP, and stop it, if not stopped
3859 suspend_and_send_sigstop (thread_info
*thread
, lwp_info
*except
)
3861 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3863 /* Ignore EXCEPT. */
3867 lwp_suspended_inc (lwp
);
3869 send_sigstop (thread
, except
);
3873 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3875 /* Store the exit status for later. */
3876 lwp
->status_pending_p
= 1;
3877 lwp
->status_pending
= wstat
;
3879 /* Store in waitstatus as well, as there's nothing else to process
3881 if (WIFEXITED (wstat
))
3883 lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXITED
;
3884 lwp
->waitstatus
.value
.integer
= WEXITSTATUS (wstat
);
3886 else if (WIFSIGNALED (wstat
))
3888 lwp
->waitstatus
.kind
= TARGET_WAITKIND_SIGNALLED
;
3889 lwp
->waitstatus
.value
.sig
= gdb_signal_from_host (WTERMSIG (wstat
));
3892 /* Prevent trying to stop it. */
3895 /* No further stops are expected from a dead lwp. */
3896 lwp
->stop_expected
= 0;
3899 /* Return true if LWP has exited already, and has a pending exit event
3900 to report to GDB. */
3903 lwp_is_marked_dead (struct lwp_info
*lwp
)
3905 return (lwp
->status_pending_p
3906 && (WIFEXITED (lwp
->status_pending
)
3907 || WIFSIGNALED (lwp
->status_pending
)));
3911 linux_process_target::wait_for_sigstop ()
3913 struct thread_info
*saved_thread
;
3918 saved_thread
= current_thread
;
3919 if (saved_thread
!= NULL
)
3920 saved_tid
= saved_thread
->id
;
3922 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3925 debug_printf ("wait_for_sigstop: pulling events\n");
3927 /* Passing NULL_PTID as filter indicates we want all events to be
3928 left pending. Eventually this returns when there are no
3929 unwaited-for children left. */
3930 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
, __WALL
);
3931 gdb_assert (ret
== -1);
3933 if (saved_thread
== NULL
|| mythread_alive (saved_tid
))
3934 current_thread
= saved_thread
;
3938 debug_printf ("Previously current thread died.\n");
3940 /* We can't change the current inferior behind GDB's back,
3941 otherwise, a subsequent command may apply to the wrong
3943 current_thread
= NULL
;
3948 linux_process_target::stuck_in_jump_pad (thread_info
*thread
)
3950 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3952 if (lwp
->suspended
!= 0)
3954 internal_error (__FILE__
, __LINE__
,
3955 "LWP %ld is suspended, suspended=%d\n",
3956 lwpid_of (thread
), lwp
->suspended
);
3958 gdb_assert (lwp
->stopped
);
3960 /* Allow debugging the jump pad, gdb_collect, etc.. */
3961 return (supports_fast_tracepoints ()
3962 && agent_loaded_p ()
3963 && (gdb_breakpoint_here (lwp
->stop_pc
)
3964 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3965 || thread
->last_resume_kind
== resume_step
)
3966 && (linux_fast_tracepoint_collecting (lwp
, NULL
)
3967 != fast_tpoint_collect_result::not_collecting
));
3971 linux_process_target::move_out_of_jump_pad (thread_info
*thread
)
3973 struct thread_info
*saved_thread
;
3974 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3977 if (lwp
->suspended
!= 0)
3979 internal_error (__FILE__
, __LINE__
,
3980 "LWP %ld is suspended, suspended=%d\n",
3981 lwpid_of (thread
), lwp
->suspended
);
3983 gdb_assert (lwp
->stopped
);
3985 /* For gdb_breakpoint_here. */
3986 saved_thread
= current_thread
;
3987 current_thread
= thread
;
3989 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3991 /* Allow debugging the jump pad, gdb_collect, etc. */
3992 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3993 && lwp
->stop_reason
!= TARGET_STOPPED_BY_WATCHPOINT
3994 && thread
->last_resume_kind
!= resume_step
3995 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3998 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4003 lwp
->status_pending_p
= 0;
4004 enqueue_one_deferred_signal (lwp
, wstat
);
4007 debug_printf ("Signal %d for LWP %ld deferred "
4009 WSTOPSIG (*wstat
), lwpid_of (thread
));
4012 resume_one_lwp (lwp
, 0, 0, NULL
);
4015 lwp_suspended_inc (lwp
);
4017 current_thread
= saved_thread
;
4021 lwp_running (thread_info
*thread
)
4023 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4025 if (lwp_is_marked_dead (lwp
))
4028 return !lwp
->stopped
;
4032 linux_process_target::stop_all_lwps (int suspend
, lwp_info
*except
)
4034 /* Should not be called recursively. */
4035 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
4040 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4041 suspend
? "stop-and-suspend" : "stop",
4043 ? target_pid_to_str (ptid_of (get_lwp_thread (except
)))
4047 stopping_threads
= (suspend
4048 ? STOPPING_AND_SUSPENDING_THREADS
4049 : STOPPING_THREADS
);
4052 for_each_thread ([&] (thread_info
*thread
)
4054 suspend_and_send_sigstop (thread
, except
);
4057 for_each_thread ([&] (thread_info
*thread
)
4059 send_sigstop (thread
, except
);
4062 wait_for_sigstop ();
4063 stopping_threads
= NOT_STOPPING_THREADS
;
4067 debug_printf ("stop_all_lwps done, setting stopping_threads "
4068 "back to !stopping\n");
4073 /* Enqueue one signal in the chain of signals which need to be
4074 delivered to this process on next resume. */
4077 enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
)
4079 struct pending_signals
*p_sig
= XNEW (struct pending_signals
);
4081 p_sig
->prev
= lwp
->pending_signals
;
4082 p_sig
->signal
= signal
;
4084 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
4086 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
4087 lwp
->pending_signals
= p_sig
;
4091 linux_process_target::install_software_single_step_breakpoints (lwp_info
*lwp
)
4093 struct thread_info
*thread
= get_lwp_thread (lwp
);
4094 struct regcache
*regcache
= get_thread_regcache (thread
, 1);
4096 scoped_restore save_current_thread
= make_scoped_restore (¤t_thread
);
4098 current_thread
= thread
;
4099 std::vector
<CORE_ADDR
> next_pcs
= low_get_next_pcs (regcache
);
4101 for (CORE_ADDR pc
: next_pcs
)
4102 set_single_step_breakpoint (pc
, current_ptid
);
4106 linux_process_target::single_step (lwp_info
* lwp
)
4110 if (can_hardware_single_step ())
4114 else if (supports_software_single_step ())
4116 install_software_single_step_breakpoints (lwp
);
4122 debug_printf ("stepping is not implemented on this target");
4128 /* The signal can be delivered to the inferior if we are not trying to
4129 finish a fast tracepoint collect. Since signal can be delivered in
4130 the step-over, the program may go to signal handler and trap again
4131 after return from the signal handler. We can live with the spurious
4135 lwp_signal_can_be_delivered (struct lwp_info
*lwp
)
4137 return (lwp
->collecting_fast_tracepoint
4138 == fast_tpoint_collect_result::not_collecting
);
4142 linux_process_target::resume_one_lwp_throw (lwp_info
*lwp
, int step
,
4143 int signal
, siginfo_t
*info
)
4145 struct thread_info
*thread
= get_lwp_thread (lwp
);
4146 struct thread_info
*saved_thread
;
4148 struct process_info
*proc
= get_thread_process (thread
);
4150 /* Note that target description may not be initialised
4151 (proc->tdesc == NULL) at this point because the program hasn't
4152 stopped at the first instruction yet. It means GDBserver skips
4153 the extra traps from the wrapper program (see option --wrapper).
4154 Code in this function that requires register access should be
4155 guarded by proc->tdesc == NULL or something else. */
4157 if (lwp
->stopped
== 0)
4160 gdb_assert (lwp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
);
4162 fast_tpoint_collect_result fast_tp_collecting
4163 = lwp
->collecting_fast_tracepoint
;
4165 gdb_assert (!stabilizing_threads
4166 || (fast_tp_collecting
4167 != fast_tpoint_collect_result::not_collecting
));
4169 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4170 user used the "jump" command, or "set $pc = foo"). */
4171 if (thread
->while_stepping
!= NULL
&& lwp
->stop_pc
!= get_pc (lwp
))
4173 /* Collecting 'while-stepping' actions doesn't make sense
4175 release_while_stepping_state_list (thread
);
4178 /* If we have pending signals or status, and a new signal, enqueue the
4179 signal. Also enqueue the signal if it can't be delivered to the
4180 inferior right now. */
4182 && (lwp
->status_pending_p
4183 || lwp
->pending_signals
!= NULL
4184 || !lwp_signal_can_be_delivered (lwp
)))
4186 enqueue_pending_signal (lwp
, signal
, info
);
4188 /* Postpone any pending signal. It was enqueued above. */
4192 if (lwp
->status_pending_p
)
4195 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4196 " has pending status\n",
4197 lwpid_of (thread
), step
? "step" : "continue",
4198 lwp
->stop_expected
? "expected" : "not expected");
4202 saved_thread
= current_thread
;
4203 current_thread
= thread
;
4205 /* This bit needs some thinking about. If we get a signal that
4206 we must report while a single-step reinsert is still pending,
4207 we often end up resuming the thread. It might be better to
4208 (ew) allow a stack of pending events; then we could be sure that
4209 the reinsert happened right away and not lose any signals.
4211 Making this stack would also shrink the window in which breakpoints are
4212 uninserted (see comment in linux_wait_for_lwp) but not enough for
4213 complete correctness, so it won't solve that problem. It may be
4214 worthwhile just to solve this one, however. */
4215 if (lwp
->bp_reinsert
!= 0)
4218 debug_printf (" pending reinsert at 0x%s\n",
4219 paddress (lwp
->bp_reinsert
));
4221 if (can_hardware_single_step ())
4223 if (fast_tp_collecting
== fast_tpoint_collect_result::not_collecting
)
4226 warning ("BAD - reinserting but not stepping.");
4228 warning ("BAD - reinserting and suspended(%d).",
4233 step
= maybe_hw_step (thread
);
4236 if (fast_tp_collecting
== fast_tpoint_collect_result::before_insn
)
4239 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4240 " (exit-jump-pad-bkpt)\n",
4243 else if (fast_tp_collecting
== fast_tpoint_collect_result::at_insn
)
4246 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4247 " single-stepping\n",
4250 if (can_hardware_single_step ())
4254 internal_error (__FILE__
, __LINE__
,
4255 "moving out of jump pad single-stepping"
4256 " not implemented on this target");
4260 /* If we have while-stepping actions in this thread set it stepping.
4261 If we have a signal to deliver, it may or may not be set to
4262 SIG_IGN, we don't know. Assume so, and allow collecting
4263 while-stepping into a signal handler. A possible smart thing to
4264 do would be to set an internal breakpoint at the signal return
4265 address, continue, and carry on catching this while-stepping
4266 action only when that breakpoint is hit. A future
4268 if (thread
->while_stepping
!= NULL
)
4271 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4274 step
= single_step (lwp
);
4277 if (proc
->tdesc
!= NULL
&& low_supports_breakpoints ())
4279 struct regcache
*regcache
= get_thread_regcache (current_thread
, 1);
4281 lwp
->stop_pc
= low_get_pc (regcache
);
4285 debug_printf (" %s from pc 0x%lx\n", step
? "step" : "continue",
4286 (long) lwp
->stop_pc
);
4290 /* If we have pending signals, consume one if it can be delivered to
4292 if (lwp
->pending_signals
!= NULL
&& lwp_signal_can_be_delivered (lwp
))
4294 struct pending_signals
**p_sig
;
4296 p_sig
= &lwp
->pending_signals
;
4297 while ((*p_sig
)->prev
!= NULL
)
4298 p_sig
= &(*p_sig
)->prev
;
4300 signal
= (*p_sig
)->signal
;
4301 if ((*p_sig
)->info
.si_signo
!= 0)
4302 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
4310 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4311 lwpid_of (thread
), step
? "step" : "continue", signal
,
4312 lwp
->stop_expected
? "expected" : "not expected");
4314 low_prepare_to_resume (lwp
);
4316 regcache_invalidate_thread (thread
);
4318 lwp
->stepping
= step
;
4320 ptrace_request
= PTRACE_SINGLESTEP
;
4321 else if (gdb_catching_syscalls_p (lwp
))
4322 ptrace_request
= PTRACE_SYSCALL
;
4324 ptrace_request
= PTRACE_CONT
;
4325 ptrace (ptrace_request
,
4327 (PTRACE_TYPE_ARG3
) 0,
4328 /* Coerce to a uintptr_t first to avoid potential gcc warning
4329 of coercing an 8 byte integer to a 4 byte pointer. */
4330 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
4332 current_thread
= saved_thread
;
4334 perror_with_name ("resuming thread");
4336 /* Successfully resumed. Clear state that no longer makes sense,
4337 and mark the LWP as running. Must not do this before resuming
4338 otherwise if that fails other code will be confused. E.g., we'd
4339 later try to stop the LWP and hang forever waiting for a stop
4340 status. Note that we must not throw after this is cleared,
4341 otherwise handle_zombie_lwp_error would get confused. */
4343 lwp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4347 linux_process_target::low_prepare_to_resume (lwp_info
*lwp
)
4352 /* Called when we try to resume a stopped LWP and that errors out. If
4353 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4354 or about to become), discard the error, clear any pending status
4355 the LWP may have, and return true (we'll collect the exit status
4356 soon enough). Otherwise, return false. */
4359 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
4361 struct thread_info
*thread
= get_lwp_thread (lp
);
4363 /* If we get an error after resuming the LWP successfully, we'd
4364 confuse !T state for the LWP being gone. */
4365 gdb_assert (lp
->stopped
);
4367 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4368 because even if ptrace failed with ESRCH, the tracee may be "not
4369 yet fully dead", but already refusing ptrace requests. In that
4370 case the tracee has 'R (Running)' state for a little bit
4371 (observed in Linux 3.18). See also the note on ESRCH in the
4372 ptrace(2) man page. Instead, check whether the LWP has any state
4373 other than ptrace-stopped. */
4375 /* Don't assume anything if /proc/PID/status can't be read. */
4376 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
4378 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4379 lp
->status_pending_p
= 0;
4386 linux_process_target::resume_one_lwp (lwp_info
*lwp
, int step
, int signal
,
4391 resume_one_lwp_throw (lwp
, step
, signal
, info
);
4393 catch (const gdb_exception_error
&ex
)
4395 if (!check_ptrace_stopped_lwp_gone (lwp
))
4400 /* This function is called once per thread via for_each_thread.
4401 We look up which resume request applies to THREAD and mark it with a
4402 pointer to the appropriate resume request.
4404 This algorithm is O(threads * resume elements), but resume elements
4405 is small (and will remain small at least until GDB supports thread
4409 linux_set_resume_request (thread_info
*thread
, thread_resume
*resume
, size_t n
)
4411 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4413 for (int ndx
= 0; ndx
< n
; ndx
++)
4415 ptid_t ptid
= resume
[ndx
].thread
;
4416 if (ptid
== minus_one_ptid
4417 || ptid
== thread
->id
4418 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4420 || (ptid
.pid () == pid_of (thread
)
4422 || ptid
.lwp () == -1)))
4424 if (resume
[ndx
].kind
== resume_stop
4425 && thread
->last_resume_kind
== resume_stop
)
4428 debug_printf ("already %s LWP %ld at GDB's request\n",
4429 (thread
->last_status
.kind
4430 == TARGET_WAITKIND_STOPPED
)
4438 /* Ignore (wildcard) resume requests for already-resumed
4440 if (resume
[ndx
].kind
!= resume_stop
4441 && thread
->last_resume_kind
!= resume_stop
)
4444 debug_printf ("already %s LWP %ld at GDB's request\n",
4445 (thread
->last_resume_kind
4453 /* Don't let wildcard resumes resume fork children that GDB
4454 does not yet know are new fork children. */
4455 if (lwp
->fork_relative
!= NULL
)
4457 struct lwp_info
*rel
= lwp
->fork_relative
;
4459 if (rel
->status_pending_p
4460 && (rel
->waitstatus
.kind
== TARGET_WAITKIND_FORKED
4461 || rel
->waitstatus
.kind
== TARGET_WAITKIND_VFORKED
))
4464 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4470 /* If the thread has a pending event that has already been
4471 reported to GDBserver core, but GDB has not pulled the
4472 event out of the vStopped queue yet, likewise, ignore the
4473 (wildcard) resume request. */
4474 if (in_queued_stop_replies (thread
->id
))
4477 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4482 lwp
->resume
= &resume
[ndx
];
4483 thread
->last_resume_kind
= lwp
->resume
->kind
;
4485 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
4486 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
4488 /* If we had a deferred signal to report, dequeue one now.
4489 This can happen if LWP gets more than one signal while
4490 trying to get out of a jump pad. */
4492 && !lwp
->status_pending_p
4493 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
4495 lwp
->status_pending_p
= 1;
4498 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4499 "leaving status pending.\n",
4500 WSTOPSIG (lwp
->status_pending
),
4508 /* No resume action for this thread. */
4513 linux_process_target::resume_status_pending (thread_info
*thread
)
4515 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4517 /* LWPs which will not be resumed are not interesting, because
4518 we might not wait for them next time through linux_wait. */
4519 if (lwp
->resume
== NULL
)
4522 return thread_still_has_status_pending (thread
);
4526 linux_process_target::thread_needs_step_over (thread_info
*thread
)
4528 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4529 struct thread_info
*saved_thread
;
4531 struct process_info
*proc
= get_thread_process (thread
);
4533 /* GDBserver is skipping the extra traps from the wrapper program,
4534 don't have to do step over. */
4535 if (proc
->tdesc
== NULL
)
4538 /* LWPs which will not be resumed are not interesting, because we
4539 might not wait for them next time through linux_wait. */
4544 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4549 if (thread
->last_resume_kind
== resume_stop
)
4552 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4558 gdb_assert (lwp
->suspended
>= 0);
4563 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4568 if (lwp
->status_pending_p
)
4571 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4577 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4581 /* If the PC has changed since we stopped, then don't do anything,
4582 and let the breakpoint/tracepoint be hit. This happens if, for
4583 instance, GDB handled the decr_pc_after_break subtraction itself,
4584 GDB is OOL stepping this thread, or the user has issued a "jump"
4585 command, or poked thread's registers herself. */
4586 if (pc
!= lwp
->stop_pc
)
4589 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4590 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4592 paddress (lwp
->stop_pc
), paddress (pc
));
4596 /* On software single step target, resume the inferior with signal
4597 rather than stepping over. */
4598 if (supports_software_single_step ()
4599 && lwp
->pending_signals
!= NULL
4600 && lwp_signal_can_be_delivered (lwp
))
4603 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4610 saved_thread
= current_thread
;
4611 current_thread
= thread
;
4613 /* We can only step over breakpoints we know about. */
4614 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
4616 /* Don't step over a breakpoint that GDB expects to hit
4617 though. If the condition is being evaluated on the target's side
4618 and it evaluate to false, step over this breakpoint as well. */
4619 if (gdb_breakpoint_here (pc
)
4620 && gdb_condition_true_at_breakpoint (pc
)
4621 && gdb_no_commands_at_breakpoint (pc
))
4624 debug_printf ("Need step over [LWP %ld]? yes, but found"
4625 " GDB breakpoint at 0x%s; skipping step over\n",
4626 lwpid_of (thread
), paddress (pc
));
4628 current_thread
= saved_thread
;
4634 debug_printf ("Need step over [LWP %ld]? yes, "
4635 "found breakpoint at 0x%s\n",
4636 lwpid_of (thread
), paddress (pc
));
4638 /* We've found an lwp that needs stepping over --- return 1 so
4639 that find_thread stops looking. */
4640 current_thread
= saved_thread
;
4646 current_thread
= saved_thread
;
4649 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4651 lwpid_of (thread
), paddress (pc
));
4657 linux_process_target::start_step_over (lwp_info
*lwp
)
4659 struct thread_info
*thread
= get_lwp_thread (lwp
);
4660 struct thread_info
*saved_thread
;
4665 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4668 stop_all_lwps (1, lwp
);
4670 if (lwp
->suspended
!= 0)
4672 internal_error (__FILE__
, __LINE__
,
4673 "LWP %ld suspended=%d\n", lwpid_of (thread
),
4678 debug_printf ("Done stopping all threads for step-over.\n");
4680 /* Note, we should always reach here with an already adjusted PC,
4681 either by GDB (if we're resuming due to GDB's request), or by our
4682 caller, if we just finished handling an internal breakpoint GDB
4683 shouldn't care about. */
4686 saved_thread
= current_thread
;
4687 current_thread
= thread
;
4689 lwp
->bp_reinsert
= pc
;
4690 uninsert_breakpoints_at (pc
);
4691 uninsert_fast_tracepoint_jumps_at (pc
);
4693 step
= single_step (lwp
);
4695 current_thread
= saved_thread
;
4697 resume_one_lwp (lwp
, step
, 0, NULL
);
4699 /* Require next event from this LWP. */
4700 step_over_bkpt
= thread
->id
;
4703 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4704 start_step_over, if still there, and delete any single-step
4705 breakpoints we've set, on non hardware single-step targets. */
4708 finish_step_over (struct lwp_info
*lwp
)
4710 if (lwp
->bp_reinsert
!= 0)
4712 struct thread_info
*saved_thread
= current_thread
;
4715 debug_printf ("Finished step over.\n");
4717 current_thread
= get_lwp_thread (lwp
);
4719 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4720 may be no breakpoint to reinsert there by now. */
4721 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4722 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4724 lwp
->bp_reinsert
= 0;
4726 /* Delete any single-step breakpoints. No longer needed. We
4727 don't have to worry about other threads hitting this trap,
4728 and later not being able to explain it, because we were
4729 stepping over a breakpoint, and we hold all threads but
4730 LWP stopped while doing that. */
4731 if (!can_hardware_single_step ())
4733 gdb_assert (has_single_step_breakpoints (current_thread
));
4734 delete_single_step_breakpoints (current_thread
);
4737 step_over_bkpt
= null_ptid
;
4738 current_thread
= saved_thread
;
4746 linux_process_target::complete_ongoing_step_over ()
4748 if (step_over_bkpt
!= null_ptid
)
4750 struct lwp_info
*lwp
;
4755 debug_printf ("detach: step over in progress, finish it first\n");
4757 /* Passing NULL_PTID as filter indicates we want all events to
4758 be left pending. Eventually this returns when there are no
4759 unwaited-for children left. */
4760 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
,
4762 gdb_assert (ret
== -1);
4764 lwp
= find_lwp_pid (step_over_bkpt
);
4766 finish_step_over (lwp
);
4767 step_over_bkpt
= null_ptid
;
4768 unsuspend_all_lwps (lwp
);
4773 linux_process_target::resume_one_thread (thread_info
*thread
,
4774 bool leave_all_stopped
)
4776 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4779 if (lwp
->resume
== NULL
)
4782 if (lwp
->resume
->kind
== resume_stop
)
4785 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4790 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4792 /* Stop the thread, and wait for the event asynchronously,
4793 through the event loop. */
4799 debug_printf ("already stopped LWP %ld\n",
4802 /* The LWP may have been stopped in an internal event that
4803 was not meant to be notified back to GDB (e.g., gdbserver
4804 breakpoint), so we should be reporting a stop event in
4807 /* If the thread already has a pending SIGSTOP, this is a
4808 no-op. Otherwise, something later will presumably resume
4809 the thread and this will cause it to cancel any pending
4810 operation, due to last_resume_kind == resume_stop. If
4811 the thread already has a pending status to report, we
4812 will still report it the next time we wait - see
4813 status_pending_p_callback. */
4815 /* If we already have a pending signal to report, then
4816 there's no need to queue a SIGSTOP, as this means we're
4817 midway through moving the LWP out of the jumppad, and we
4818 will report the pending signal as soon as that is
4820 if (lwp
->pending_signals_to_report
== NULL
)
4824 /* For stop requests, we're done. */
4826 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4830 /* If this thread which is about to be resumed has a pending status,
4831 then don't resume it - we can just report the pending status.
4832 Likewise if it is suspended, because e.g., another thread is
4833 stepping past a breakpoint. Make sure to queue any signals that
4834 would otherwise be sent. In all-stop mode, we do this decision
4835 based on if *any* thread has a pending status. If there's a
4836 thread that needs the step-over-breakpoint dance, then don't
4837 resume any other thread but that particular one. */
4838 leave_pending
= (lwp
->suspended
4839 || lwp
->status_pending_p
4840 || leave_all_stopped
);
4842 /* If we have a new signal, enqueue the signal. */
4843 if (lwp
->resume
->sig
!= 0)
4845 siginfo_t info
, *info_p
;
4847 /* If this is the same signal we were previously stopped by,
4848 make sure to queue its siginfo. */
4849 if (WIFSTOPPED (lwp
->last_status
)
4850 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
4851 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
),
4852 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
4857 enqueue_pending_signal (lwp
, lwp
->resume
->sig
, info_p
);
4863 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
4865 proceed_one_lwp (thread
, NULL
);
4870 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
4873 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4878 linux_process_target::resume (thread_resume
*resume_info
, size_t n
)
4880 struct thread_info
*need_step_over
= NULL
;
4885 debug_printf ("linux_resume:\n");
4888 for_each_thread ([&] (thread_info
*thread
)
4890 linux_set_resume_request (thread
, resume_info
, n
);
4893 /* If there is a thread which would otherwise be resumed, which has
4894 a pending status, then don't resume any threads - we can just
4895 report the pending status. Make sure to queue any signals that
4896 would otherwise be sent. In non-stop mode, we'll apply this
4897 logic to each thread individually. We consume all pending events
4898 before considering to start a step-over (in all-stop). */
4899 bool any_pending
= false;
4901 any_pending
= find_thread ([this] (thread_info
*thread
)
4903 return resume_status_pending (thread
);
4906 /* If there is a thread which would otherwise be resumed, which is
4907 stopped at a breakpoint that needs stepping over, then don't
4908 resume any threads - have it step over the breakpoint with all
4909 other threads stopped, then resume all threads again. Make sure
4910 to queue any signals that would otherwise be delivered or
4912 if (!any_pending
&& low_supports_breakpoints ())
4913 need_step_over
= find_thread ([this] (thread_info
*thread
)
4915 return thread_needs_step_over (thread
);
4918 bool leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4922 if (need_step_over
!= NULL
)
4923 debug_printf ("Not resuming all, need step over\n");
4924 else if (any_pending
)
4925 debug_printf ("Not resuming, all-stop and found "
4926 "an LWP with pending status\n");
4928 debug_printf ("Resuming, no pending status or step over needed\n");
4931 /* Even if we're leaving threads stopped, queue all signals we'd
4932 otherwise deliver. */
4933 for_each_thread ([&] (thread_info
*thread
)
4935 resume_one_thread (thread
, leave_all_stopped
);
4939 start_step_over (get_thread_lwp (need_step_over
));
4943 debug_printf ("linux_resume done\n");
4947 /* We may have events that were pending that can/should be sent to
4948 the client now. Trigger a linux_wait call. */
4949 if (target_is_async_p ())
4954 linux_process_target::proceed_one_lwp (thread_info
*thread
, lwp_info
*except
)
4956 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4963 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4968 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4972 if (thread
->last_resume_kind
== resume_stop
4973 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
4976 debug_printf (" client wants LWP to remain %ld stopped\n",
4981 if (lwp
->status_pending_p
)
4984 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4989 gdb_assert (lwp
->suspended
>= 0);
4994 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
4998 if (thread
->last_resume_kind
== resume_stop
4999 && lwp
->pending_signals_to_report
== NULL
5000 && (lwp
->collecting_fast_tracepoint
5001 == fast_tpoint_collect_result::not_collecting
))
5003 /* We haven't reported this LWP as stopped yet (otherwise, the
5004 last_status.kind check above would catch it, and we wouldn't
5005 reach here. This LWP may have been momentarily paused by a
5006 stop_all_lwps call while handling for example, another LWP's
5007 step-over. In that case, the pending expected SIGSTOP signal
5008 that was queued at vCont;t handling time will have already
5009 been consumed by wait_for_sigstop, and so we need to requeue
5010 another one here. Note that if the LWP already has a SIGSTOP
5011 pending, this is a no-op. */
5014 debug_printf ("Client wants LWP %ld to stop. "
5015 "Making sure it has a SIGSTOP pending\n",
5021 if (thread
->last_resume_kind
== resume_step
)
5024 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5027 /* If resume_step is requested by GDB, install single-step
5028 breakpoints when the thread is about to be actually resumed if
5029 the single-step breakpoints weren't removed. */
5030 if (supports_software_single_step ()
5031 && !has_single_step_breakpoints (thread
))
5032 install_software_single_step_breakpoints (lwp
);
5034 step
= maybe_hw_step (thread
);
5036 else if (lwp
->bp_reinsert
!= 0)
5039 debug_printf (" stepping LWP %ld, reinsert set\n",
5042 step
= maybe_hw_step (thread
);
5047 resume_one_lwp (lwp
, step
, 0, NULL
);
5051 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info
*thread
,
5054 struct lwp_info
*lwp
= get_thread_lwp (thread
);
5059 lwp_suspended_decr (lwp
);
5061 proceed_one_lwp (thread
, except
);
5065 linux_process_target::proceed_all_lwps ()
5067 struct thread_info
*need_step_over
;
5069 /* If there is a thread which would otherwise be resumed, which is
5070 stopped at a breakpoint that needs stepping over, then don't
5071 resume any threads - have it step over the breakpoint with all
5072 other threads stopped, then resume all threads again. */
5074 if (low_supports_breakpoints ())
5076 need_step_over
= find_thread ([this] (thread_info
*thread
)
5078 return thread_needs_step_over (thread
);
5081 if (need_step_over
!= NULL
)
5084 debug_printf ("proceed_all_lwps: found "
5085 "thread %ld needing a step-over\n",
5086 lwpid_of (need_step_over
));
5088 start_step_over (get_thread_lwp (need_step_over
));
5094 debug_printf ("Proceeding, no step-over needed\n");
5096 for_each_thread ([this] (thread_info
*thread
)
5098 proceed_one_lwp (thread
, NULL
);
5103 linux_process_target::unstop_all_lwps (int unsuspend
, lwp_info
*except
)
5109 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5110 lwpid_of (get_lwp_thread (except
)));
5112 debug_printf ("unstopping all lwps\n");
5116 for_each_thread ([&] (thread_info
*thread
)
5118 unsuspend_and_proceed_one_lwp (thread
, except
);
5121 for_each_thread ([&] (thread_info
*thread
)
5123 proceed_one_lwp (thread
, except
);
5128 debug_printf ("unstop_all_lwps done\n");
5134 #ifdef HAVE_LINUX_REGSETS
5136 #define use_linux_regsets 1
5138 /* Returns true if REGSET has been disabled. */
5141 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
5143 return (info
->disabled_regsets
!= NULL
5144 && info
->disabled_regsets
[regset
- info
->regsets
]);
5147 /* Disable REGSET. */
5150 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
5154 dr_offset
= regset
- info
->regsets
;
5155 if (info
->disabled_regsets
== NULL
)
5156 info
->disabled_regsets
= (char *) xcalloc (1, info
->num_regsets
);
5157 info
->disabled_regsets
[dr_offset
] = 1;
5161 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
5162 struct regcache
*regcache
)
5164 struct regset_info
*regset
;
5165 int saw_general_regs
= 0;
5169 pid
= lwpid_of (current_thread
);
5170 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5175 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
5178 buf
= xmalloc (regset
->size
);
5180 nt_type
= regset
->nt_type
;
5184 iov
.iov_len
= regset
->size
;
5185 data
= (void *) &iov
;
5191 res
= ptrace (regset
->get_request
, pid
,
5192 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5194 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5199 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5201 /* If we get EIO on a regset, or an EINVAL and the regset is
5202 optional, do not try it again for this process mode. */
5203 disable_regset (regsets_info
, regset
);
5205 else if (errno
== ENODATA
)
5207 /* ENODATA may be returned if the regset is currently
5208 not "active". This can happen in normal operation,
5209 so suppress the warning in this case. */
5211 else if (errno
== ESRCH
)
5213 /* At this point, ESRCH should mean the process is
5214 already gone, in which case we simply ignore attempts
5215 to read its registers. */
5220 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5227 if (regset
->type
== GENERAL_REGS
)
5228 saw_general_regs
= 1;
5229 regset
->store_function (regcache
, buf
);
5233 if (saw_general_regs
)
5240 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
5241 struct regcache
*regcache
)
5243 struct regset_info
*regset
;
5244 int saw_general_regs
= 0;
5248 pid
= lwpid_of (current_thread
);
5249 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5254 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
5255 || regset
->fill_function
== NULL
)
5258 buf
= xmalloc (regset
->size
);
5260 /* First fill the buffer with the current register set contents,
5261 in case there are any items in the kernel's regset that are
5262 not in gdbserver's regcache. */
5264 nt_type
= regset
->nt_type
;
5268 iov
.iov_len
= regset
->size
;
5269 data
= (void *) &iov
;
5275 res
= ptrace (regset
->get_request
, pid
,
5276 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5278 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5283 /* Then overlay our cached registers on that. */
5284 regset
->fill_function (regcache
, buf
);
5286 /* Only now do we write the register set. */
5288 res
= ptrace (regset
->set_request
, pid
,
5289 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5291 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
5298 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5300 /* If we get EIO on a regset, or an EINVAL and the regset is
5301 optional, do not try it again for this process mode. */
5302 disable_regset (regsets_info
, regset
);
5304 else if (errno
== ESRCH
)
5306 /* At this point, ESRCH should mean the process is
5307 already gone, in which case we simply ignore attempts
5308 to change its registers. See also the related
5309 comment in resume_one_lwp. */
5315 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5318 else if (regset
->type
== GENERAL_REGS
)
5319 saw_general_regs
= 1;
5322 if (saw_general_regs
)
5328 #else /* !HAVE_LINUX_REGSETS */
5330 #define use_linux_regsets 0
5331 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5332 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5336 /* Return 1 if register REGNO is supported by one of the regset ptrace
5337 calls or 0 if it has to be transferred individually. */
5340 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
5342 unsigned char mask
= 1 << (regno
% 8);
5343 size_t index
= regno
/ 8;
5345 return (use_linux_regsets
5346 && (regs_info
->regset_bitmap
== NULL
5347 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
5350 #ifdef HAVE_LINUX_USRREGS
5353 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
5357 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
5358 error ("Invalid register number %d.", regnum
);
5360 addr
= usrregs
->regmap
[regnum
];
5367 linux_process_target::fetch_register (const usrregs_info
*usrregs
,
5368 regcache
*regcache
, int regno
)
5375 if (regno
>= usrregs
->num_regs
)
5377 if (low_cannot_fetch_register (regno
))
5380 regaddr
= register_addr (usrregs
, regno
);
5384 size
= ((register_size (regcache
->tdesc
, regno
)
5385 + sizeof (PTRACE_XFER_TYPE
) - 1)
5386 & -sizeof (PTRACE_XFER_TYPE
));
5387 buf
= (char *) alloca (size
);
5389 pid
= lwpid_of (current_thread
);
5390 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5393 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
5394 ptrace (PTRACE_PEEKUSER
, pid
,
5395 /* Coerce to a uintptr_t first to avoid potential gcc warning
5396 of coercing an 8 byte integer to a 4 byte pointer. */
5397 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
5398 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5401 /* Mark register REGNO unavailable. */
5402 supply_register (regcache
, regno
, NULL
);
5407 low_supply_ptrace_register (regcache
, regno
, buf
);
5411 linux_process_target::store_register (const usrregs_info
*usrregs
,
5412 regcache
*regcache
, int regno
)
5419 if (regno
>= usrregs
->num_regs
)
5421 if (low_cannot_store_register (regno
))
5424 regaddr
= register_addr (usrregs
, regno
);
5428 size
= ((register_size (regcache
->tdesc
, regno
)
5429 + sizeof (PTRACE_XFER_TYPE
) - 1)
5430 & -sizeof (PTRACE_XFER_TYPE
));
5431 buf
= (char *) alloca (size
);
5432 memset (buf
, 0, size
);
5434 low_collect_ptrace_register (regcache
, regno
, buf
);
5436 pid
= lwpid_of (current_thread
);
5437 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5440 ptrace (PTRACE_POKEUSER
, pid
,
5441 /* Coerce to a uintptr_t first to avoid potential gcc warning
5442 about coercing an 8 byte integer to a 4 byte pointer. */
5443 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
5444 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
5447 /* At this point, ESRCH should mean the process is
5448 already gone, in which case we simply ignore attempts
5449 to change its registers. See also the related
5450 comment in resume_one_lwp. */
5455 if (!low_cannot_store_register (regno
))
5456 error ("writing register %d: %s", regno
, safe_strerror (errno
));
5458 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5461 #endif /* HAVE_LINUX_USRREGS */
5464 linux_process_target::low_collect_ptrace_register (regcache
*regcache
,
5465 int regno
, char *buf
)
5467 collect_register (regcache
, regno
, buf
);
5471 linux_process_target::low_supply_ptrace_register (regcache
*regcache
,
5472 int regno
, const char *buf
)
5474 supply_register (regcache
, regno
, buf
);
5478 linux_process_target::usr_fetch_inferior_registers (const regs_info
*regs_info
,
5482 #ifdef HAVE_LINUX_USRREGS
5483 struct usrregs_info
*usr
= regs_info
->usrregs
;
5487 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5488 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5489 fetch_register (usr
, regcache
, regno
);
5492 fetch_register (usr
, regcache
, regno
);
5497 linux_process_target::usr_store_inferior_registers (const regs_info
*regs_info
,
5501 #ifdef HAVE_LINUX_USRREGS
5502 struct usrregs_info
*usr
= regs_info
->usrregs
;
5506 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5507 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5508 store_register (usr
, regcache
, regno
);
5511 store_register (usr
, regcache
, regno
);
5516 linux_process_target::fetch_registers (regcache
*regcache
, int regno
)
5520 const regs_info
*regs_info
= get_regs_info ();
5524 if (regs_info
->usrregs
!= NULL
)
5525 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
5526 low_fetch_register (regcache
, regno
);
5528 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
5529 if (regs_info
->usrregs
!= NULL
)
5530 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
5534 if (low_fetch_register (regcache
, regno
))
5537 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5539 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
5541 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5542 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
5547 linux_process_target::store_registers (regcache
*regcache
, int regno
)
5551 const regs_info
*regs_info
= get_regs_info ();
5555 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5557 if (regs_info
->usrregs
!= NULL
)
5558 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
5562 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5564 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5566 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5567 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
5572 linux_process_target::low_fetch_register (regcache
*regcache
, int regno
)
5577 /* A wrapper for the read_memory target op. */
5580 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
5582 return the_target
->read_memory (memaddr
, myaddr
, len
);
5585 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5586 to debugger memory starting at MYADDR. */
5589 linux_process_target::read_memory (CORE_ADDR memaddr
,
5590 unsigned char *myaddr
, int len
)
5592 int pid
= lwpid_of (current_thread
);
5593 PTRACE_XFER_TYPE
*buffer
;
5601 /* Try using /proc. Don't bother for one word. */
5602 if (len
>= 3 * sizeof (long))
5606 /* We could keep this file open and cache it - possibly one per
5607 thread. That requires some juggling, but is even faster. */
5608 sprintf (filename
, "/proc/%d/mem", pid
);
5609 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
5613 /* If pread64 is available, use it. It's faster if the kernel
5614 supports it (only one syscall), and it's 64-bit safe even on
5615 32-bit platforms (for instance, SPARC debugging a SPARC64
5618 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
5621 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
5622 bytes
= read (fd
, myaddr
, len
);
5629 /* Some data was read, we'll try to get the rest with ptrace. */
5639 /* Round starting address down to longword boundary. */
5640 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5641 /* Round ending address up; get number of longwords that makes. */
5642 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5643 / sizeof (PTRACE_XFER_TYPE
));
5644 /* Allocate buffer of that many longwords. */
5645 buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5647 /* Read all the longwords */
5649 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5651 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5652 about coercing an 8 byte integer to a 4 byte pointer. */
5653 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
5654 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5655 (PTRACE_TYPE_ARG4
) 0);
5661 /* Copy appropriate bytes out of the buffer. */
5664 i
*= sizeof (PTRACE_XFER_TYPE
);
5665 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
5667 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5674 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5675 memory at MEMADDR. On failure (cannot write to the inferior)
5676 returns the value of errno. Always succeeds if LEN is zero. */
5679 linux_process_target::write_memory (CORE_ADDR memaddr
,
5680 const unsigned char *myaddr
, int len
)
5683 /* Round starting address down to longword boundary. */
5684 CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5685 /* Round ending address up; get number of longwords that makes. */
5687 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5688 / sizeof (PTRACE_XFER_TYPE
);
5690 /* Allocate buffer of that many longwords. */
5691 PTRACE_XFER_TYPE
*buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5693 int pid
= lwpid_of (current_thread
);
5697 /* Zero length write always succeeds. */
5703 /* Dump up to four bytes. */
5704 char str
[4 * 2 + 1];
5706 int dump
= len
< 4 ? len
: 4;
5708 for (i
= 0; i
< dump
; i
++)
5710 sprintf (p
, "%02x", myaddr
[i
]);
5715 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5716 str
, (long) memaddr
, pid
);
5719 /* Fill start and end extra bytes of buffer with existing memory data. */
5722 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5723 about coercing an 8 byte integer to a 4 byte pointer. */
5724 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
5725 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5726 (PTRACE_TYPE_ARG4
) 0);
5734 = ptrace (PTRACE_PEEKTEXT
, pid
,
5735 /* Coerce to a uintptr_t first to avoid potential gcc warning
5736 about coercing an 8 byte integer to a 4 byte pointer. */
5737 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
5738 * sizeof (PTRACE_XFER_TYPE
)),
5739 (PTRACE_TYPE_ARG4
) 0);
5744 /* Copy data to be written over corresponding part of buffer. */
5746 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5749 /* Write the entire buffer. */
5751 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5754 ptrace (PTRACE_POKETEXT
, pid
,
5755 /* Coerce to a uintptr_t first to avoid potential gcc warning
5756 about coercing an 8 byte integer to a 4 byte pointer. */
5757 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5758 (PTRACE_TYPE_ARG4
) buffer
[i
]);
5767 linux_process_target::look_up_symbols ()
5769 #ifdef USE_THREAD_DB
5770 struct process_info
*proc
= current_process ();
5772 if (proc
->priv
->thread_db
!= NULL
)
5780 linux_process_target::request_interrupt ()
5782 /* Send a SIGINT to the process group. This acts just like the user
5783 typed a ^C on the controlling terminal. */
5784 ::kill (-signal_pid
, SIGINT
);
5788 linux_process_target::supports_read_auxv ()
5793 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5794 to debugger memory starting at MYADDR. */
5797 linux_process_target::read_auxv (CORE_ADDR offset
, unsigned char *myaddr
,
5800 char filename
[PATH_MAX
];
5802 int pid
= lwpid_of (current_thread
);
5804 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5806 fd
= open (filename
, O_RDONLY
);
5810 if (offset
!= (CORE_ADDR
) 0
5811 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5814 n
= read (fd
, myaddr
, len
);
5822 linux_process_target::insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5823 int size
, raw_breakpoint
*bp
)
5825 if (type
== raw_bkpt_type_sw
)
5826 return insert_memory_breakpoint (bp
);
5828 return low_insert_point (type
, addr
, size
, bp
);
5832 linux_process_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
5833 int size
, raw_breakpoint
*bp
)
5835 /* Unsupported (see target.h). */
5840 linux_process_target::remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5841 int size
, raw_breakpoint
*bp
)
5843 if (type
== raw_bkpt_type_sw
)
5844 return remove_memory_breakpoint (bp
);
5846 return low_remove_point (type
, addr
, size
, bp
);
5850 linux_process_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
5851 int size
, raw_breakpoint
*bp
)
5853 /* Unsupported (see target.h). */
5857 /* Implement the stopped_by_sw_breakpoint target_ops
5861 linux_process_target::stopped_by_sw_breakpoint ()
5863 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5865 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
5868 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5872 linux_process_target::supports_stopped_by_sw_breakpoint ()
5874 return USE_SIGTRAP_SIGINFO
;
5877 /* Implement the stopped_by_hw_breakpoint target_ops
5881 linux_process_target::stopped_by_hw_breakpoint ()
5883 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5885 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
5888 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5892 linux_process_target::supports_stopped_by_hw_breakpoint ()
5894 return USE_SIGTRAP_SIGINFO
;
5897 /* Implement the supports_hardware_single_step target_ops method. */
5900 linux_process_target::supports_hardware_single_step ()
5902 return can_hardware_single_step ();
5906 linux_process_target::stopped_by_watchpoint ()
5908 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5910 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
5914 linux_process_target::stopped_data_address ()
5916 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5918 return lwp
->stopped_data_address
;
5921 /* This is only used for targets that define PT_TEXT_ADDR,
5922 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5923 the target has different ways of acquiring this information, like
5927 linux_process_target::supports_read_offsets ()
5929 #ifdef SUPPORTS_READ_OFFSETS
5936 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5937 to tell gdb about. */
5940 linux_process_target::read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
5942 #ifdef SUPPORTS_READ_OFFSETS
5943 unsigned long text
, text_end
, data
;
5944 int pid
= lwpid_of (current_thread
);
5948 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
5949 (PTRACE_TYPE_ARG4
) 0);
5950 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
5951 (PTRACE_TYPE_ARG4
) 0);
5952 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
5953 (PTRACE_TYPE_ARG4
) 0);
5957 /* Both text and data offsets produced at compile-time (and so
5958 used by gdb) are relative to the beginning of the program,
5959 with the data segment immediately following the text segment.
5960 However, the actual runtime layout in memory may put the data
5961 somewhere else, so when we send gdb a data base-address, we
5962 use the real data base address and subtract the compile-time
5963 data base-address from it (which is just the length of the
5964 text segment). BSS immediately follows data in both
5967 *data_p
= data
- (text_end
- text
);
5973 gdb_assert_not_reached ("target op read_offsets not supported");
5978 linux_process_target::supports_get_tls_address ()
5980 #ifdef USE_THREAD_DB
5988 linux_process_target::get_tls_address (thread_info
*thread
,
5990 CORE_ADDR load_module
,
5993 #ifdef USE_THREAD_DB
5994 return thread_db_get_tls_address (thread
, offset
, load_module
, address
);
6001 linux_process_target::supports_qxfer_osdata ()
6007 linux_process_target::qxfer_osdata (const char *annex
,
6008 unsigned char *readbuf
,
6009 unsigned const char *writebuf
,
6010 CORE_ADDR offset
, int len
)
6012 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
6016 linux_process_target::siginfo_fixup (siginfo_t
*siginfo
,
6017 gdb_byte
*inf_siginfo
, int direction
)
6019 bool done
= low_siginfo_fixup (siginfo
, inf_siginfo
, direction
);
6021 /* If there was no callback, or the callback didn't do anything,
6022 then just do a straight memcpy. */
6026 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
6028 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
6033 linux_process_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
6040 linux_process_target::supports_qxfer_siginfo ()
6046 linux_process_target::qxfer_siginfo (const char *annex
,
6047 unsigned char *readbuf
,
6048 unsigned const char *writebuf
,
6049 CORE_ADDR offset
, int len
)
6053 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
6055 if (current_thread
== NULL
)
6058 pid
= lwpid_of (current_thread
);
6061 debug_printf ("%s siginfo for lwp %d.\n",
6062 readbuf
!= NULL
? "Reading" : "Writing",
6065 if (offset
>= sizeof (siginfo
))
6068 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6071 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6072 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6073 inferior with a 64-bit GDBSERVER should look the same as debugging it
6074 with a 32-bit GDBSERVER, we need to convert it. */
6075 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
6077 if (offset
+ len
> sizeof (siginfo
))
6078 len
= sizeof (siginfo
) - offset
;
6080 if (readbuf
!= NULL
)
6081 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
6084 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
6086 /* Convert back to ptrace layout before flushing it out. */
6087 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
6089 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6096 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6097 so we notice when children change state; as the handler for the
6098 sigsuspend in my_waitpid. */
6101 sigchld_handler (int signo
)
6103 int old_errno
= errno
;
6109 /* Use the async signal safe debug function. */
6110 if (debug_write ("sigchld_handler\n",
6111 sizeof ("sigchld_handler\n") - 1) < 0)
6112 break; /* just ignore */
6116 if (target_is_async_p ())
6117 async_file_mark (); /* trigger a linux_wait */
6123 linux_process_target::supports_non_stop ()
6129 linux_process_target::async (bool enable
)
6131 bool previous
= target_is_async_p ();
6134 debug_printf ("linux_async (%d), previous=%d\n",
6137 if (previous
!= enable
)
6140 sigemptyset (&mask
);
6141 sigaddset (&mask
, SIGCHLD
);
6143 gdb_sigmask (SIG_BLOCK
, &mask
, NULL
);
6147 if (pipe (linux_event_pipe
) == -1)
6149 linux_event_pipe
[0] = -1;
6150 linux_event_pipe
[1] = -1;
6151 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6153 warning ("creating event pipe failed.");
6157 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
6158 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
6160 /* Register the event loop handler. */
6161 add_file_handler (linux_event_pipe
[0],
6162 handle_target_event
, NULL
);
6164 /* Always trigger a linux_wait. */
6169 delete_file_handler (linux_event_pipe
[0]);
6171 close (linux_event_pipe
[0]);
6172 close (linux_event_pipe
[1]);
6173 linux_event_pipe
[0] = -1;
6174 linux_event_pipe
[1] = -1;
6177 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6184 linux_process_target::start_non_stop (bool nonstop
)
6186 /* Register or unregister from event-loop accordingly. */
6187 target_async (nonstop
);
6189 if (target_is_async_p () != (nonstop
!= false))
6196 linux_process_target::supports_multi_process ()
6201 /* Check if fork events are supported. */
6204 linux_process_target::supports_fork_events ()
6206 return linux_supports_tracefork ();
6209 /* Check if vfork events are supported. */
6212 linux_process_target::supports_vfork_events ()
6214 return linux_supports_tracefork ();
6217 /* Check if exec events are supported. */
6220 linux_process_target::supports_exec_events ()
6222 return linux_supports_traceexec ();
6225 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6226 ptrace flags for all inferiors. This is in case the new GDB connection
6227 doesn't support the same set of events that the previous one did. */
6230 linux_process_target::handle_new_gdb_connection ()
6232 /* Request that all the lwps reset their ptrace options. */
6233 for_each_thread ([] (thread_info
*thread
)
6235 struct lwp_info
*lwp
= get_thread_lwp (thread
);
6239 /* Stop the lwp so we can modify its ptrace options. */
6240 lwp
->must_set_ptrace_flags
= 1;
6241 linux_stop_lwp (lwp
);
6245 /* Already stopped; go ahead and set the ptrace options. */
6246 struct process_info
*proc
= find_process_pid (pid_of (thread
));
6247 int options
= linux_low_ptrace_options (proc
->attached
);
6249 linux_enable_event_reporting (lwpid_of (thread
), options
);
6250 lwp
->must_set_ptrace_flags
= 0;
6256 linux_process_target::handle_monitor_command (char *mon
)
6258 #ifdef USE_THREAD_DB
6259 return thread_db_handle_monitor_command (mon
);
6266 linux_process_target::core_of_thread (ptid_t ptid
)
6268 return linux_common_core_of_thread (ptid
);
6272 linux_process_target::supports_disable_randomization ()
6274 #ifdef HAVE_PERSONALITY
6282 linux_process_target::supports_agent ()
6288 linux_process_target::supports_range_stepping ()
6290 if (supports_software_single_step ())
6292 if (*the_low_target
.supports_range_stepping
== NULL
)
6295 return (*the_low_target
.supports_range_stepping
) ();
6299 linux_process_target::supports_pid_to_exec_file ()
6305 linux_process_target::pid_to_exec_file (int pid
)
6307 return linux_proc_pid_to_exec_file (pid
);
6311 linux_process_target::supports_multifs ()
6317 linux_process_target::multifs_open (int pid
, const char *filename
,
6318 int flags
, mode_t mode
)
6320 return linux_mntns_open_cloexec (pid
, filename
, flags
, mode
);
6324 linux_process_target::multifs_unlink (int pid
, const char *filename
)
6326 return linux_mntns_unlink (pid
, filename
);
6330 linux_process_target::multifs_readlink (int pid
, const char *filename
,
6331 char *buf
, size_t bufsiz
)
6333 return linux_mntns_readlink (pid
, filename
, buf
, bufsiz
);
6336 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6337 struct target_loadseg
6339 /* Core address to which the segment is mapped. */
6341 /* VMA recorded in the program header. */
6343 /* Size of this segment in memory. */
6347 # if defined PT_GETDSBT
6348 struct target_loadmap
6350 /* Protocol version number, must be zero. */
6352 /* Pointer to the DSBT table, its size, and the DSBT index. */
6353 unsigned *dsbt_table
;
6354 unsigned dsbt_size
, dsbt_index
;
6355 /* Number of segments in this map. */
6357 /* The actual memory map. */
6358 struct target_loadseg segs
[/*nsegs*/];
6360 # define LINUX_LOADMAP PT_GETDSBT
6361 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6362 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6364 struct target_loadmap
6366 /* Protocol version number, must be zero. */
6368 /* Number of segments in this map. */
6370 /* The actual memory map. */
6371 struct target_loadseg segs
[/*nsegs*/];
6373 # define LINUX_LOADMAP PTRACE_GETFDPIC
6374 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6375 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6379 linux_process_target::supports_read_loadmap ()
6385 linux_process_target::read_loadmap (const char *annex
, CORE_ADDR offset
,
6386 unsigned char *myaddr
, unsigned int len
)
6388 int pid
= lwpid_of (current_thread
);
6390 struct target_loadmap
*data
= NULL
;
6391 unsigned int actual_length
, copy_length
;
6393 if (strcmp (annex
, "exec") == 0)
6394 addr
= (int) LINUX_LOADMAP_EXEC
;
6395 else if (strcmp (annex
, "interp") == 0)
6396 addr
= (int) LINUX_LOADMAP_INTERP
;
6400 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
6406 actual_length
= sizeof (struct target_loadmap
)
6407 + sizeof (struct target_loadseg
) * data
->nsegs
;
6409 if (offset
< 0 || offset
> actual_length
)
6412 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
6413 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
6416 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6419 linux_process_target::supports_catch_syscall ()
6421 return (the_low_target
.get_syscall_trapinfo
!= NULL
6422 && linux_supports_tracesysgood ());
6426 linux_process_target::get_ipa_tdesc_idx ()
6428 if (the_low_target
.get_ipa_tdesc_idx
== NULL
)
6431 return (*the_low_target
.get_ipa_tdesc_idx
) ();
6435 linux_process_target::read_pc (regcache
*regcache
)
6437 if (!low_supports_breakpoints ())
6440 return low_get_pc (regcache
);
6444 linux_process_target::write_pc (regcache
*regcache
, CORE_ADDR pc
)
6446 gdb_assert (low_supports_breakpoints ());
6448 low_set_pc (regcache
, pc
);
6452 linux_process_target::supports_thread_stopped ()
6458 linux_process_target::thread_stopped (thread_info
*thread
)
6460 return get_thread_lwp (thread
)->stopped
;
6463 /* This exposes stop-all-threads functionality to other modules. */
6466 linux_process_target::pause_all (bool freeze
)
6468 stop_all_lwps (freeze
, NULL
);
6471 /* This exposes unstop-all-threads functionality to other gdbserver
6475 linux_process_target::unpause_all (bool unfreeze
)
6477 unstop_all_lwps (unfreeze
, NULL
);
6481 linux_process_target::prepare_to_access_memory ()
6483 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6486 target_pause_all (true);
6491 linux_process_target::done_accessing_memory ()
6493 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6496 target_unpause_all (true);
6499 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6502 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
6503 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
6505 char filename
[PATH_MAX
];
6507 const int auxv_size
= is_elf64
6508 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
6509 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
6511 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
6513 fd
= open (filename
, O_RDONLY
);
6519 while (read (fd
, buf
, auxv_size
) == auxv_size
6520 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
6524 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
6526 switch (aux
->a_type
)
6529 *phdr_memaddr
= aux
->a_un
.a_val
;
6532 *num_phdr
= aux
->a_un
.a_val
;
6538 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
6540 switch (aux
->a_type
)
6543 *phdr_memaddr
= aux
->a_un
.a_val
;
6546 *num_phdr
= aux
->a_un
.a_val
;
6554 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
6556 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6557 "phdr_memaddr = %ld, phdr_num = %d",
6558 (long) *phdr_memaddr
, *num_phdr
);
6565 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6568 get_dynamic (const int pid
, const int is_elf64
)
6570 CORE_ADDR phdr_memaddr
, relocation
;
6572 unsigned char *phdr_buf
;
6573 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
6575 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
6578 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
6579 phdr_buf
= (unsigned char *) alloca (num_phdr
* phdr_size
);
6581 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
6584 /* Compute relocation: it is expected to be 0 for "regular" executables,
6585 non-zero for PIE ones. */
6587 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
6590 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6592 if (p
->p_type
== PT_PHDR
)
6593 relocation
= phdr_memaddr
- p
->p_vaddr
;
6597 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6599 if (p
->p_type
== PT_PHDR
)
6600 relocation
= phdr_memaddr
- p
->p_vaddr
;
6603 if (relocation
== -1)
6605 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6606 any real world executables, including PIE executables, have always
6607 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6608 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6609 or present DT_DEBUG anyway (fpc binaries are statically linked).
6611 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6613 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6618 for (i
= 0; i
< num_phdr
; i
++)
6622 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6624 if (p
->p_type
== PT_DYNAMIC
)
6625 return p
->p_vaddr
+ relocation
;
6629 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6631 if (p
->p_type
== PT_DYNAMIC
)
6632 return p
->p_vaddr
+ relocation
;
6639 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6640 can be 0 if the inferior does not yet have the library list initialized.
6641 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6642 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6645 get_r_debug (const int pid
, const int is_elf64
)
6647 CORE_ADDR dynamic_memaddr
;
6648 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
6649 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
6652 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
6653 if (dynamic_memaddr
== 0)
6656 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
6660 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
6661 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6665 unsigned char buf
[sizeof (Elf64_Xword
)];
6669 #ifdef DT_MIPS_RLD_MAP
6670 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6672 if (linux_read_memory (dyn
->d_un
.d_val
,
6673 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6678 #endif /* DT_MIPS_RLD_MAP */
6679 #ifdef DT_MIPS_RLD_MAP_REL
6680 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6682 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6683 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6688 #endif /* DT_MIPS_RLD_MAP_REL */
6690 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6691 map
= dyn
->d_un
.d_val
;
6693 if (dyn
->d_tag
== DT_NULL
)
6698 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
6699 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6703 unsigned char buf
[sizeof (Elf32_Word
)];
6707 #ifdef DT_MIPS_RLD_MAP
6708 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6710 if (linux_read_memory (dyn
->d_un
.d_val
,
6711 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6716 #endif /* DT_MIPS_RLD_MAP */
6717 #ifdef DT_MIPS_RLD_MAP_REL
6718 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6720 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6721 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6726 #endif /* DT_MIPS_RLD_MAP_REL */
6728 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6729 map
= dyn
->d_un
.d_val
;
6731 if (dyn
->d_tag
== DT_NULL
)
6735 dynamic_memaddr
+= dyn_size
;
6741 /* Read one pointer from MEMADDR in the inferior. */
6744 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
6748 /* Go through a union so this works on either big or little endian
6749 hosts, when the inferior's pointer size is smaller than the size
6750 of CORE_ADDR. It is assumed the inferior's endianness is the
6751 same of the superior's. */
6754 CORE_ADDR core_addr
;
6759 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
6762 if (ptr_size
== sizeof (CORE_ADDR
))
6763 *ptr
= addr
.core_addr
;
6764 else if (ptr_size
== sizeof (unsigned int))
6767 gdb_assert_not_reached ("unhandled pointer size");
6773 linux_process_target::supports_qxfer_libraries_svr4 ()
/* Byte offsets of the fields gdbserver needs inside the inferior's
   `struct r_debug' and `struct link_map' (SVR4 dynamic linker
   interface); one instance each for 32-bit and 64-bit layouts.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6802 /* Construct qXfer:libraries-svr4:read reply. */
6805 linux_process_target::qxfer_libraries_svr4 (const char *annex
,
6806 unsigned char *readbuf
,
6807 unsigned const char *writebuf
,
6808 CORE_ADDR offset
, int len
)
6810 struct process_info_private
*const priv
= current_process ()->priv
;
6811 char filename
[PATH_MAX
];
6814 static const struct link_map_offsets lmo_32bit_offsets
=
6816 0, /* r_version offset. */
6817 4, /* r_debug.r_map offset. */
6818 0, /* l_addr offset in link_map. */
6819 4, /* l_name offset in link_map. */
6820 8, /* l_ld offset in link_map. */
6821 12, /* l_next offset in link_map. */
6822 16 /* l_prev offset in link_map. */
6825 static const struct link_map_offsets lmo_64bit_offsets
=
6827 0, /* r_version offset. */
6828 8, /* r_debug.r_map offset. */
6829 0, /* l_addr offset in link_map. */
6830 8, /* l_name offset in link_map. */
6831 16, /* l_ld offset in link_map. */
6832 24, /* l_next offset in link_map. */
6833 32 /* l_prev offset in link_map. */
6835 const struct link_map_offsets
*lmo
;
6836 unsigned int machine
;
6838 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
6839 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
6840 int header_done
= 0;
6842 if (writebuf
!= NULL
)
6844 if (readbuf
== NULL
)
6847 pid
= lwpid_of (current_thread
);
6848 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
6849 is_elf64
= elf_64_file_p (filename
, &machine
);
6850 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
6851 ptr_size
= is_elf64
? 8 : 4;
6853 while (annex
[0] != '\0')
6859 sep
= strchr (annex
, '=');
6863 name_len
= sep
- annex
;
6864 if (name_len
== 5 && startswith (annex
, "start"))
6866 else if (name_len
== 4 && startswith (annex
, "prev"))
6870 annex
= strchr (sep
, ';');
6877 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
6884 if (priv
->r_debug
== 0)
6885 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
6887 /* We failed to find DT_DEBUG. Such situation will not change
6888 for this inferior - do not retry it. Report it to GDB as
6889 E01, see for the reasons at the GDB solib-svr4.c side. */
6890 if (priv
->r_debug
== (CORE_ADDR
) -1)
6893 if (priv
->r_debug
!= 0)
6895 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
6896 (unsigned char *) &r_version
,
6897 sizeof (r_version
)) != 0
6900 warning ("unexpected r_debug version %d", r_version
);
6902 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
6903 &lm_addr
, ptr_size
) != 0)
6905 warning ("unable to read r_map from 0x%lx",
6906 (long) priv
->r_debug
+ lmo
->r_map_offset
);
6911 std::string document
= "<library-list-svr4 version=\"1.0\"";
6914 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
6915 &l_name
, ptr_size
) == 0
6916 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
6917 &l_addr
, ptr_size
) == 0
6918 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
6919 &l_ld
, ptr_size
) == 0
6920 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
6921 &l_prev
, ptr_size
) == 0
6922 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
6923 &l_next
, ptr_size
) == 0)
6925 unsigned char libname
[PATH_MAX
];
6927 if (lm_prev
!= l_prev
)
6929 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6930 (long) lm_prev
, (long) l_prev
);
6934 /* Ignore the first entry even if it has valid name as the first entry
6935 corresponds to the main executable. The first entry should not be
6936 skipped if the dynamic loader was loaded late by a static executable
6937 (see solib-svr4.c parameter ignore_first). But in such case the main
6938 executable does not have PT_DYNAMIC present and this function already
6939 exited above due to failed get_r_debug. */
6941 string_appendf (document
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
6944 /* Not checking for error because reading may stop before
6945 we've got PATH_MAX worth of characters. */
6947 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
6948 libname
[sizeof (libname
) - 1] = '\0';
6949 if (libname
[0] != '\0')
6953 /* Terminate `<library-list-svr4'. */
6958 string_appendf (document
, "<library name=\"");
6959 xml_escape_text_append (&document
, (char *) libname
);
6960 string_appendf (document
, "\" lm=\"0x%lx\" "
6961 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6962 (unsigned long) lm_addr
, (unsigned long) l_addr
,
6963 (unsigned long) l_ld
);
6973 /* Empty list; terminate `<library-list-svr4'. */
6977 document
+= "</library-list-svr4>";
6979 int document_len
= document
.length ();
6980 if (offset
< document_len
)
6981 document_len
-= offset
;
6984 if (len
> document_len
)
6987 memcpy (readbuf
, document
.data () + offset
, len
);
6992 #ifdef HAVE_LINUX_BTRACE
6994 btrace_target_info
*
6995 linux_process_target::enable_btrace (ptid_t ptid
,
6996 const btrace_config
*conf
)
6998 return linux_enable_btrace (ptid
, conf
);
7001 /* See to_disable_btrace target method. */
7004 linux_process_target::disable_btrace (btrace_target_info
*tinfo
)
7006 enum btrace_error err
;
7008 err
= linux_disable_btrace (tinfo
);
7009 return (err
== BTRACE_ERR_NONE
? 0 : -1);
7012 /* Encode an Intel Processor Trace configuration. */
7015 linux_low_encode_pt_config (struct buffer
*buffer
,
7016 const struct btrace_data_pt_config
*config
)
7018 buffer_grow_str (buffer
, "<pt-config>\n");
7020 switch (config
->cpu
.vendor
)
7023 buffer_xml_printf (buffer
, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7024 "model=\"%u\" stepping=\"%u\"/>\n",
7025 config
->cpu
.family
, config
->cpu
.model
,
7026 config
->cpu
.stepping
);
7033 buffer_grow_str (buffer
, "</pt-config>\n");
7036 /* Encode a raw buffer. */
7039 linux_low_encode_raw (struct buffer
*buffer
, const gdb_byte
*data
,
7045 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7046 buffer_grow_str (buffer
, "<raw>\n");
7052 elem
[0] = tohex ((*data
>> 4) & 0xf);
7053 elem
[1] = tohex (*data
++ & 0xf);
7055 buffer_grow (buffer
, elem
, 2);
7058 buffer_grow_str (buffer
, "</raw>\n");
7061 /* See to_read_btrace target method. */
7064 linux_process_target::read_btrace (btrace_target_info
*tinfo
,
7066 enum btrace_read_type type
)
7068 struct btrace_data btrace
;
7069 enum btrace_error err
;
7071 err
= linux_read_btrace (&btrace
, tinfo
, type
);
7072 if (err
!= BTRACE_ERR_NONE
)
7074 if (err
== BTRACE_ERR_OVERFLOW
)
7075 buffer_grow_str0 (buffer
, "E.Overflow.");
7077 buffer_grow_str0 (buffer
, "E.Generic Error.");
7082 switch (btrace
.format
)
7084 case BTRACE_FORMAT_NONE
:
7085 buffer_grow_str0 (buffer
, "E.No Trace.");
7088 case BTRACE_FORMAT_BTS
:
7089 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7090 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7092 for (const btrace_block
&block
: *btrace
.variant
.bts
.blocks
)
7093 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7094 paddress (block
.begin
), paddress (block
.end
));
7096 buffer_grow_str0 (buffer
, "</btrace>\n");
7099 case BTRACE_FORMAT_PT
:
7100 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7101 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7102 buffer_grow_str (buffer
, "<pt>\n");
7104 linux_low_encode_pt_config (buffer
, &btrace
.variant
.pt
.config
);
7106 linux_low_encode_raw (buffer
, btrace
.variant
.pt
.data
,
7107 btrace
.variant
.pt
.size
);
7109 buffer_grow_str (buffer
, "</pt>\n");
7110 buffer_grow_str0 (buffer
, "</btrace>\n");
7114 buffer_grow_str0 (buffer
, "E.Unsupported Trace Format.");
7121 /* See to_btrace_conf target method. */
7124 linux_process_target::read_btrace_conf (const btrace_target_info
*tinfo
,
7127 const struct btrace_config
*conf
;
7129 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7130 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
7132 conf
= linux_btrace_conf (tinfo
);
7135 switch (conf
->format
)
7137 case BTRACE_FORMAT_NONE
:
7140 case BTRACE_FORMAT_BTS
:
7141 buffer_xml_printf (buffer
, "<bts");
7142 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
7143 buffer_xml_printf (buffer
, " />\n");
7146 case BTRACE_FORMAT_PT
:
7147 buffer_xml_printf (buffer
, "<pt");
7148 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->pt
.size
);
7149 buffer_xml_printf (buffer
, "/>\n");
7154 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
7157 #endif /* HAVE_LINUX_BTRACE */
7159 /* See nat/linux-nat.h. */
7162 current_lwp_ptid (void)
7164 return ptid_of (current_thread
);
7168 linux_process_target::thread_name (ptid_t thread
)
7170 return linux_proc_tid_get_name (thread
);
#if USE_THREAD_DB
/* Fetch the thread handle for PTID via libthread_db; see
   thread_db_thread_handle.  NOTE(review): reconstructed as guarded by
   USE_THREAD_DB as upstream gdbserver does - confirm against the full
   file.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7182 /* Default implementation of linux_target_ops method "set_pc" for
7183 32-bit pc register which is literally named "pc". */
7186 linux_set_pc_32bit (struct regcache
*regcache
, CORE_ADDR pc
)
7188 uint32_t newpc
= pc
;
7190 supply_register_by_name (regcache
, "pc", &newpc
);
7193 /* Default implementation of linux_target_ops method "get_pc" for
7194 32-bit pc register which is literally named "pc". */
7197 linux_get_pc_32bit (struct regcache
*regcache
)
7201 collect_register_by_name (regcache
, "pc", &pc
);
7203 debug_printf ("stop pc is 0x%" PRIx32
"\n", pc
);
7207 /* Default implementation of linux_target_ops method "set_pc" for
7208 64-bit pc register which is literally named "pc". */
7211 linux_set_pc_64bit (struct regcache
*regcache
, CORE_ADDR pc
)
7213 uint64_t newpc
= pc
;
7215 supply_register_by_name (regcache
, "pc", &newpc
);
7218 /* Default implementation of linux_target_ops method "get_pc" for
7219 64-bit pc register which is literally named "pc". */
7222 linux_get_pc_64bit (struct regcache
*regcache
)
7226 collect_register_by_name (regcache
, "pc", &pc
);
7228 debug_printf ("stop pc is 0x%" PRIx64
"\n", pc
);
7232 /* See linux-low.h. */
7235 linux_get_auxv (int wordsize
, CORE_ADDR match
, CORE_ADDR
*valp
)
7237 gdb_byte
*data
= (gdb_byte
*) alloca (2 * wordsize
);
7240 gdb_assert (wordsize
== 4 || wordsize
== 8);
7242 while (the_target
->read_auxv (offset
, data
, 2 * wordsize
) == 2 * wordsize
)
7246 uint32_t *data_p
= (uint32_t *) data
;
7247 if (data_p
[0] == match
)
7255 uint64_t *data_p
= (uint64_t *) data
;
7256 if (data_p
[0] == match
)
7263 offset
+= 2 * wordsize
;
7269 /* See linux-low.h. */
7272 linux_get_hwcap (int wordsize
)
7274 CORE_ADDR hwcap
= 0;
7275 linux_get_auxv (wordsize
, AT_HWCAP
, &hwcap
);
7279 /* See linux-low.h. */
7282 linux_get_hwcap2 (int wordsize
)
7284 CORE_ADDR hwcap2
= 0;
7285 linux_get_auxv (wordsize
, AT_HWCAP2
, &hwcap2
);
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's sentinel-terminated (size < 0) table
   and record the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
7301 initialize_low (void)
7303 struct sigaction sigchld_action
;
7305 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
7306 set_target_ops (the_linux_target
);
7308 linux_ptrace_init_warnings ();
7309 linux_proc_init_warnings ();
7311 sigchld_action
.sa_handler
= sigchld_handler
;
7312 sigemptyset (&sigchld_action
.sa_mask
);
7313 sigchld_action
.sa_flags
= SA_RESTART
;
7314 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
7316 initialize_low_arch ();
7318 linux_check_ptrace_features ();