/* Low level interface to ptrace, for the remote server for GDB.
- Copyright (C) 1995-2015 Free Software Foundation, Inc.
+ Copyright (C) 1995-2016 Free Software Foundation, Inc.
This file is part of GDB.
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
+#include <inttypes.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
then ELFMAG0 will have been defined. If it didn't get included by
#define O_LARGEFILE 0
#endif
-#ifndef W_STOPCODE
-#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
-#endif
-
-/* This is the kernel's hard limit. Not to be confused with
- SIGRTMIN. */
-#ifndef __SIGRTMIN
-#define __SIGRTMIN 32
-#endif
-
/* Some targets did not define these ptrace constants from the start,
so gdbserver defines them locally here. In the future, these may
be removed after they are added to asm/ptrace.h. */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
- struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
+ struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
new_pid->pid = pid;
new_pid->status = status;
int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
+static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
+static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
+static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
+static void complete_ongoing_step_over (void);
+static int linux_low_ptrace_options (int attached);
/* When the event-loop is doing a step-over, this points at the thread
being stepped. */
ptid_t step_over_bkpt;
-/* True if the low target can hardware single-step. Such targets
- don't need a BREAKPOINT_REINSERT_ADDR callback. */
+/* True if the low target can hardware single-step. Targets that do
+ not provide the supports_hardware_single_step callback are assumed
+ not to support hardware single-step. */
static int
can_hardware_single_step (void)
{
- return (the_low_target.breakpoint_reinsert_addr == NULL);
+ if (the_low_target.supports_hardware_single_step != NULL)
+ return the_low_target.supports_hardware_single_step ();
+ else
+ return 0;
+}
+
+/* True if the low target can software single-step. Such targets
+ implement the GET_NEXT_PCS callback, which computes the possible
+ next program counters (hence the name), so the core can plant
+ breakpoints there. */
+
+static int
+can_software_single_step (void)
+{
+ return (the_low_target.get_next_pcs != NULL);
}
/* True if the low target supports memory breakpoints. If so, we'll
struct process_info *proc;
proc = add_process (pid, attached);
- proc->priv = xcalloc (1, sizeof (*proc->priv));
+ proc->priv = XCNEW (struct process_info_private);
if (the_low_target.new_process != NULL)
proc->priv->arch_private = the_low_target.new_process ();
static CORE_ADDR get_pc (struct lwp_info *lwp);
-/* Handle a GNU/Linux extended wait response. If we see a clone
- event, we need to add the new LWP to our list (and return 0 so as
- not to report the trap to higher layers). */
+/* Call the target arch_setup function on the current thread.
+ The callback is expected to initialize architecture-specific
+ state for the inferior (presumably including its target
+ description) -- see the_low_target in the port backends. */
+
+static void
+linux_arch_setup (void)
+{
+ the_low_target.arch_setup ();
+}
+
+/* Call the target arch_setup function on THREAD. Temporarily
+ switches the global current_thread to THREAD so the arch_setup
+ callback operates on it, then restores the previous value. */
+
+static void
+linux_arch_setup_thread (struct thread_info *thread)
+{
+ struct thread_info *saved_thread;
+
+ /* Save/switch/restore current_thread around the callback. */
+ saved_thread = current_thread;
+ current_thread = thread;
+
+ linux_arch_setup ();
+
+ current_thread = saved_thread;
+}
+
+/* Handle a GNU/Linux extended wait response. If we see a clone,
+ fork, or vfork event, we need to add the new LWP to our list
+ (and return 0 so as not to report the trap to higher layers).
+ If we see an exec event, we will modify ORIG_EVENT_LWP to point
+ to a new LWP representing the new program. */
static int
-handle_extended_wait (struct lwp_info *event_lwp, int wstat)
+handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
+ struct lwp_info *event_lwp = *orig_event_lwp;
int event = linux_ptrace_get_extended_event (wstat);
struct thread_info *event_thr = get_lwp_thread (event_lwp);
struct lwp_info *new_lwp;
+ gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
+
+ /* All extended events we currently use are mid-syscall. Only
+ PTRACE_EVENT_STOP is delivered more like a signal-stop, but
+ you have to be using PTRACE_SEIZE to get that. */
+ event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
+
if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
|| (event == PTRACE_EVENT_CLONE))
{
child_thr->last_resume_kind = resume_stop;
child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
+ /* If we're suspending all threads, leave this one suspended
+ too. */
+ if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
+ {
+ if (debug_threads)
+ debug_printf ("HEW: leaving child suspended\n");
+ child_lwp->suspended = 1;
+ }
+
parent_proc = get_thread_process (event_thr);
child_proc->attached = parent_proc->attached;
clone_all_breakpoints (&child_proc->breakpoints,
&child_proc->raw_breakpoints,
parent_proc->breakpoints);
- tdesc = xmalloc (sizeof (struct target_desc));
+ tdesc = XNEW (struct target_desc);
copy_target_description (tdesc, parent_proc->tdesc);
child_proc->tdesc = tdesc;
new_lwp->status_pending_p = 1;
new_lwp->status_pending = status;
}
+ else if (report_thread_events)
+ {
+ new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
+ new_lwp->status_pending_p = 1;
+ new_lwp->status_pending = status;
+ }
/* Don't report the event. */
return 1;
/* Report the event. */
return 0;
}
+ else if (event == PTRACE_EVENT_EXEC && report_exec_events)
+ {
+ struct process_info *proc;
+ VEC (int) *syscalls_to_catch;
+ ptid_t event_ptid;
+ pid_t event_pid;
+
+ if (debug_threads)
+ {
+ debug_printf ("HEW: Got exec event from LWP %ld\n",
+ lwpid_of (event_thr));
+ }
+
+ /* Get the event ptid. */
+ event_ptid = ptid_of (event_thr);
+ event_pid = ptid_get_pid (event_ptid);
+
+ /* Save the syscall list from the execing process. */
+ proc = get_thread_process (event_thr);
+ syscalls_to_catch = proc->syscalls_to_catch;
+ proc->syscalls_to_catch = NULL;
+
+ /* Delete the execing process and all its threads. */
+ linux_mourn (proc);
+ current_thread = NULL;
+
+ /* Create a new process/lwp/thread. */
+ proc = linux_add_process (event_pid, 0);
+ event_lwp = add_lwp (event_ptid);
+ event_thr = get_lwp_thread (event_lwp);
+ gdb_assert (current_thread == event_thr);
+ linux_arch_setup_thread (event_thr);
+
+ /* Set the event status. */
+ event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
+ event_lwp->waitstatus.value.execd_pathname
+ = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
+
+ /* Mark the exec status as pending. */
+ event_lwp->stopped = 1;
+ event_lwp->status_pending_p = 1;
+ event_lwp->status_pending = wstat;
+ event_thr->last_resume_kind = resume_continue;
+ event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
+
+ /* Update syscall state in the new lwp, effectively mid-syscall too. */
+ event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
+
+ /* Restore the list to catch. Don't rely on the client, which is free
+ to avoid sending a new list when the architecture doesn't change.
+ Also, for ANY_SYSCALL, the architecture doesn't really matter. */
+ proc->syscalls_to_catch = syscalls_to_catch;
+
+ /* Report the event. */
+ *orig_event_lwp = event_lwp;
+ return 0;
+ }
internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
return pc;
}
-/* This function should only be called if LWP got a SIGTRAP.
- The SIGTRAP could mean several things.
+/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
+ Fill *SYSNO with the syscall nr trapped. Fill *SYSRET with the
+ return code. */
- On i386, where decr_pc_after_break is non-zero:
+static void
+get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
+{
+ struct thread_info *saved_thread;
+ struct regcache *regcache;
- If we were single-stepping this process using PTRACE_SINGLESTEP, we
- will get only the one SIGTRAP. The value of $eip will be the next
- instruction. If the instruction we stepped over was a breakpoint,
- we need to decrement the PC.
+ if (the_low_target.get_syscall_trapinfo == NULL)
+ {
+ /* If we cannot get the syscall trapinfo, report an unknown
+ system call number and -ENOSYS return value. */
+ *sysno = UNKNOWN_SYSCALL;
+ *sysret = -ENOSYS;
+ return;
+ }
+
+ saved_thread = current_thread;
+ current_thread = get_lwp_thread (lwp);
+
+ regcache = get_thread_regcache (current_thread, 1);
+ (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
+
+ if (debug_threads)
+ {
+ debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
+ *sysno, *sysret);
+ }
- If we continue the process using PTRACE_CONT, we will get a
- SIGTRAP when we hit a breakpoint. The value of $eip will be
- the instruction after the breakpoint (i.e. needs to be
- decremented). If we report the SIGTRAP to GDB, we must also
- report the undecremented PC. If the breakpoint is removed, we
- must resume at the decremented PC.
+ current_thread = saved_thread;
+}
- On a non-decr_pc_after_break machine with hardware or kernel
- single-step:
+static int check_stopped_by_watchpoint (struct lwp_info *child);
- If we either single-step a breakpoint instruction, or continue and
- hit a breakpoint instruction, our PC will point at the breakpoint
- instruction. */
+/* Called when the LWP stopped for a signal/trap. If it stopped for a
+ trap check what caused it (breakpoint, watchpoint, trace, etc.),
+ and save the result in the LWP's stop_reason field. If it stopped
+ for a breakpoint, decrement the PC if necessary on the lwp's
+ architecture. Returns true if we now have the LWP's stop PC. */
static int
-check_stopped_by_breakpoint (struct lwp_info *lwp)
+save_stop_reason (struct lwp_info *lwp)
{
CORE_ADDR pc;
CORE_ADDR sw_breakpoint_pc;
{
if (siginfo.si_signo == SIGTRAP)
{
- if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
+ if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
+ && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
{
- if (debug_threads)
- {
- struct thread_info *thr = get_lwp_thread (lwp);
-
- debug_printf ("CSBB: %s stopped by software breakpoint\n",
- target_pid_to_str (ptid_of (thr)));
- }
-
- /* Back up the PC if necessary. */
- if (pc != sw_breakpoint_pc)
- {
- struct regcache *regcache
- = get_thread_regcache (current_thread, 1);
- (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
- }
-
- lwp->stop_pc = sw_breakpoint_pc;
+ /* The si_code is ambiguous on this arch -- check debug
+ registers. */
+ if (!check_stopped_by_watchpoint (lwp))
+ lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
+ }
+ else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
+ {
+ /* If we determine the LWP stopped for a SW breakpoint,
+ trust it. Particularly don't check watchpoint
+ registers, because at least on s390, we'd find
+ stopped-by-watchpoint as long as there's a watchpoint
+ set. */
lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
- current_thread = saved_thread;
- return 1;
}
- else if (siginfo.si_code == TRAP_HWBKPT)
+ else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
{
- if (debug_threads)
- {
- struct thread_info *thr = get_lwp_thread (lwp);
-
- debug_printf ("CSBB: %s stopped by hardware "
- "breakpoint/watchpoint\n",
- target_pid_to_str (ptid_of (thr)));
- }
-
- lwp->stop_pc = pc;
- lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
- current_thread = saved_thread;
- return 1;
+ /* This can indicate either a hardware breakpoint or
+ hardware watchpoint. Check debug registers. */
+ if (!check_stopped_by_watchpoint (lwp))
+ lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
}
else if (siginfo.si_code == TRAP_TRACE)
{
- if (debug_threads)
- {
- struct thread_info *thr = get_lwp_thread (lwp);
-
- debug_printf ("CSBB: %s stopped by trace\n",
- target_pid_to_str (ptid_of (thr)));
- }
+ /* We may have single stepped an instruction that
+ triggered a watchpoint. In that case, on some
+ architectures (such as x86), instead of TRAP_HWBKPT,
+ si_code indicates TRAP_TRACE, and we need to check
+ the debug registers separately. */
+ if (!check_stopped_by_watchpoint (lwp))
+ lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
}
}
}
case we need to report the breakpoint PC. */
if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
&& (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
+ lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
+
+ if (hardware_breakpoint_inserted_here (pc))
+ lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
+
+ if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
+ check_stopped_by_watchpoint (lwp);
+#endif
+
+ if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
{
if (debug_threads)
{
/* Back up the PC if necessary. */
if (pc != sw_breakpoint_pc)
- {
+ {
struct regcache *regcache
= get_thread_regcache (current_thread, 1);
(*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
}
- lwp->stop_pc = sw_breakpoint_pc;
- lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
- current_thread = saved_thread;
- return 1;
+ /* Update this so we record the correct stop PC below. */
+ pc = sw_breakpoint_pc;
}
-
- if (hardware_breakpoint_inserted_here (pc))
+ else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
{
if (debug_threads)
{
debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
target_pid_to_str (ptid_of (thr)));
}
+ }
+ else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
+ {
+ if (debug_threads)
+ {
+ struct thread_info *thr = get_lwp_thread (lwp);
- lwp->stop_pc = pc;
- lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
- current_thread = saved_thread;
- return 1;
+ debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
+ target_pid_to_str (ptid_of (thr)));
+ }
+ }
+ else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
+ {
+ if (debug_threads)
+ {
+ struct thread_info *thr = get_lwp_thread (lwp);
+
+ debug_printf ("CSBB: %s stopped by trace\n",
+ target_pid_to_str (ptid_of (thr)));
+ }
}
-#endif
+ lwp->stop_pc = pc;
current_thread = saved_thread;
- return 0;
+ return 1;
}
static struct lwp_info *
{
struct lwp_info *lwp;
- lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
- memset (lwp, 0, sizeof (*lwp));
+ lwp = XCNEW (struct lwp_info);
+
+ lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
if (the_low_target.new_thread != NULL)
the_low_target.new_thread (lwp);
close_most_fds ();
ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
-#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
- signal (__SIGRTMIN + 1, SIG_DFL);
-#endif
-
setpgid (0, 0);
/* If gdbserver is connected to gdb via stdio, redirect the inferior's
return pid;
}
-/* Implement the arch_setup target_ops method. */
+/* Implement the post_create_inferior target_ops method. Runs the
+ architecture setup and then enables the ptrace event-reporting
+ options that could not be set earlier. */
static void
-linux_arch_setup (void)
+linux_post_create_inferior (void)
{
- the_low_target.arch_setup ();
+ struct lwp_info *lwp = get_thread_lwp (current_thread);
+
+ linux_arch_setup ();
+
+ /* NOTE(review): must_set_ptrace_flags appears to mark LWPs whose
+ PTRACE_SETOPTIONS call was deferred; set the options now that
+ the inferior exists and is stopped -- confirm against the paths
+ that set the flag. */
+ if (lwp->must_set_ptrace_flags)
+ {
+ struct process_info *proc = current_process ();
+ int options = linux_low_ptrace_options (proc->attached);
+
+ linux_enable_event_reporting (lwpid_of (current_thread), options);
+ lwp->must_set_ptrace_flags = 0;
+ }
}
/* Attach to an inferior process. Returns 0 on success, ERRNO on
return 0;
}
+static void async_file_mark (void);
+
/* Attach to PID. If PID is the tgid, attach to it and all
of its threads. */
static int
linux_attach (unsigned long pid)
{
+ struct process_info *proc;
+ struct thread_info *initial_thread;
ptid_t ptid = ptid_build (pid, pid, 0);
int err;
error ("Cannot attach to process %ld: %s",
pid, linux_ptrace_attach_fail_reason_string (ptid, err));
- linux_add_process (pid, 1);
+ proc = linux_add_process (pid, 1);
- if (!non_stop)
- {
- struct thread_info *thread;
-
- /* Don't ignore the initial SIGSTOP if we just attached to this
- process. It will be collected by wait shortly. */
- thread = find_thread_ptid (ptid_build (pid, pid, 0));
- thread->last_resume_kind = resume_stop;
- }
+ /* Don't ignore the initial SIGSTOP if we just attached to this
+ process. It will be collected by wait shortly. */
+ initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
+ initial_thread->last_resume_kind = resume_stop;
/* We must attach to every LWP. If /proc is mounted, use that to
find them now. On the one hand, the inferior may be using raw
that once thread_db is loaded, we'll still use it to list threads
and associate pthread info with each LWP. */
linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
+
+ /* GDB will shortly read the xml target description for this
+ process, to figure out the process' architecture. But the target
+ description is only filled in when the first process/thread in
+ the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
+ that now, otherwise, if GDB is fast enough, it could read the
+ target description _before_ that initial stop. */
+ if (non_stop)
+ {
+ struct lwp_info *lwp;
+ int wstat, lwpid;
+ ptid_t pid_ptid = pid_to_ptid (pid);
+
+ lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
+ &wstat, __WALL);
+ gdb_assert (lwpid > 0);
+
+ lwp = find_lwp_pid (pid_to_ptid (lwpid));
+
+ if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
+ {
+ lwp->status_pending_p = 1;
+ lwp->status_pending = wstat;
+ }
+
+ initial_thread->last_resume_kind = resume_continue;
+
+ async_file_mark ();
+
+ gdb_assert (proc->tdesc != NULL);
+ }
+
return 0;
}
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
- struct counter *counter = args;
+ struct counter *counter = (struct counter *) args;
if (ptid_get_pid (entry->id) == counter->pid)
{
ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
alternative is to kill with SIGKILL. We only need one SIGKILL
per process, not one for each thread. But since we still support
- linuxthreads, and we also support debugging programs using raw
- clone without CLONE_THREAD, we send one for each thread. For
- years, we used PTRACE_KILL only, so we're being a bit paranoid
- about some old kernels where PTRACE_KILL might work better
- (dubious if there are any such, but that's why it's paranoia), so
- we try SIGKILL first, PTRACE_KILL second, and so we're fine
- everywhere. */
+ support debugging programs using raw clone without CLONE_THREAD,
+ we send one for each thread. For years, we used PTRACE_KILL
+ only, so we're being a bit paranoid about some old kernels where
+ PTRACE_KILL might work better (dubious if there are any such, but
+ that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
+ second, and so we're fine everywhere. */
errno = 0;
kill_lwp (pid, SIGKILL);
if (process == NULL)
return -1;
+ /* As there's a step over already in progress, let it finish first,
+ otherwise nesting a stabilize_threads operation on top gets real
+ messy. */
+ complete_ongoing_step_over ();
+
/* Stop all threads before detaching. First, ptrace requires that
the thread is stopped to sucessfully detach. Second, thread_db
may need to uninstall thread event breakpoints from memory, which
{
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- struct process_info *process = proc;
+ struct process_info *process = (struct process_info *) proc;
if (pid_of (thread) == pid_of (process))
delete_lwp (lwp);
exited but we still haven't been able to report it to GDB, we'll
hold on to the last lwp of the dead process. */
if (lwp != NULL)
- return !lwp->dead;
+ return !lwp_is_marked_dead (lwp);
else
return 0;
}
if (!lp->status_pending_p)
return 0;
- /* If we got a `vCont;t', but we haven't reported a stop yet, do
- report any status pending the LWP may have. */
- if (thread->last_resume_kind == resume_stop
- && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
- return 0;
-
if (thread->last_resume_kind != resume_stop
&& (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
|| lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
return 1;
}
+/* Returns true if LWP is resumed from the client's perspective. */
+
+static int
+lwp_resumed (struct lwp_info *lwp)
+{
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ if (thread->last_resume_kind != resume_stop)
+ return 1;
+
+ /* Did gdb send us a `vCont;t', but we haven't reported the
+ corresponding stop to gdb yet? If so, the thread is still
+ resumed/running from gdb's perspective. */
+ /* NOTE(review): the first conjunct below is always true here (the
+ early return above fires otherwise); it is kept for readability
+ only. */
+ if (thread->last_resume_kind == resume_stop
+ && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
+ return 1;
+
+ return 0;
+}
+
/* Return 1 if this lwp has an interesting status pending. */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
if (!ptid_match (ptid_of (thread), ptid))
return 0;
+ if (!lwp_resumed (lp))
+ return 0;
+
if (lp->status_pending_p
&& !thread_still_has_status_pending_p (thread))
{
leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
linux_proc_pid_is_zombie (leader_pid));
- if (leader_lp != NULL
+ if (leader_lp != NULL && !leader_lp->stopped
/* Check if there are other threads in the group, as we may
have raced with the inferior simply exiting. */
&& !last_thread_of_process_p (leader_pid)
return 0;
}
+/* Increment LWP's suspend count. Pairs with lwp_suspended_decr. */
+
+static void
+lwp_suspended_inc (struct lwp_info *lwp)
+{
+ lwp->suspended++;
+
+ /* 4 is an arbitrary debugging threshold; a count this high usually
+ means an increment is missing its matching decrement. */
+ if (debug_threads && lwp->suspended > 4)
+ {
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ debug_printf ("LWP %ld has a suspiciously high suspend count,"
+ " suspended=%d\n", lwpid_of (thread), lwp->suspended);
+ }
+}
+
+/* Decrement LWP's suspend count. A negative count indicates an
+ unbalanced lwp_suspended_inc/lwp_suspended_decr pair and is
+ treated as an internal error. */
+
+static void
+lwp_suspended_decr (struct lwp_info *lwp)
+{
+ lwp->suspended--;
+
+ if (lwp->suspended < 0)
+ {
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ internal_error (__FILE__, __LINE__,
+ "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
+ lwp->suspended);
+ }
+}
+
/* This function should only be called if the LWP got a SIGTRAP.
Handle any tracepoint steps or hits. Return true if a tracepoint
uninsert tracepoints. To do this, we temporarily pause all
threads, unpatch away, and then unpause threads. We need to make
sure the unpausing doesn't resume LWP too. */
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
/* And we need to be sure that any all-threads-stopping doesn't try
to move threads out of the jump pads, as it could deadlock the
actions. */
tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
- lwp->suspended--;
+ lwp_suspended_decr (lwp);
gdb_assert (lwp->suspended == 0);
gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
}
}
- p_sig = xmalloc (sizeof (*p_sig));
+ p_sig = XCNEW (struct pending_signals);
p_sig->prev = lwp->pending_signals_to_report;
p_sig->signal = WSTOPSIG (*wstat);
- memset (&p_sig->info, 0, sizeof (siginfo_t));
+
ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
&p_sig->info);
if (report_vfork_events)
options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
+ if (report_exec_events)
+ options |= PTRACE_O_TRACEEXEC;
+
+ options |= PTRACE_O_TRACESYSGOOD;
+
return options;
}
child = find_lwp_pid (pid_to_ptid (lwpid));
+ /* Check for stop events reported by a process we didn't already
+ know about - anything not already in our LWP list.
+
+ If we're expecting to receive stopped processes after
+ fork, vfork, and clone events, then we'll just add the
+ new one to our list and go back to waiting for the event
+ to be reported - the stopped process might be returned
+ from waitpid before or after the event is.
+
+ But note the case of a non-leader thread exec'ing after the
+ leader having exited, and gone from our lists (because
+ check_zombie_leaders deleted it). The non-leader thread
+ changes its tid to the tgid. */
+
+ if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
+ && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
+ {
+ ptid_t child_ptid;
+
+ /* A multi-thread exec after we had seen the leader exiting. */
+ if (debug_threads)
+ {
+ debug_printf ("LLW: Re-adding thread group leader LWP %d"
+ " after exec.\n", lwpid);
+ }
+
+ child_ptid = ptid_build (lwpid, lwpid, 0);
+ child = add_lwp (child_ptid);
+ child->stopped = 1;
+ current_thread = child->thread;
+ }
+
/* If we didn't find a process, one of two things presumably happened:
- A process we started and then detached from has exited. Ignore it.
- A process we are controlling has forked and the new child's stop
{
if (debug_threads)
debug_printf ("LLFE: %d exited.\n", lwpid);
- if (num_lwps (pid_of (thread)) > 1)
+ /* If there is at least one more LWP, then the exit signal was
+ not the end of the debugged application and should be
+ ignored, unless GDB wants to hear about thread exits. */
+ if (report_thread_events
+ || last_thread_of_process_p (pid_of (thread)))
{
-
- /* If there is at least one more LWP, then the exit signal was
- not the end of the debugged application and should be
- ignored. */
- delete_lwp (child);
- return NULL;
+ /* Since events are serialized to GDB core, and we can't
+ report this one right now. Leave the status pending for
+ the next time we're able to report it. */
+ mark_lwp_dead (child, wstat);
+ return child;
}
else
{
- /* This was the last lwp in the process. Since events are
- serialized to GDB core, and we can't report this one
- right now, but GDB core and the other target layers will
- want to be notified about the exit code/signal, leave the
- status pending for the next time we're able to report
- it. */
- mark_lwp_dead (child, wstat);
- return child;
+ delete_lwp (child);
+ return NULL;
}
}
{
if (proc->attached)
{
- struct thread_info *saved_thread;
-
/* This needs to happen after we have attached to the
inferior and it is stopped for the first time, but
before we access any inferior registers. */
- saved_thread = current_thread;
- current_thread = thread;
-
- the_low_target.arch_setup ();
-
- current_thread = saved_thread;
+ linux_arch_setup_thread (thread);
}
else
{
child->must_set_ptrace_flags = 0;
}
- /* Be careful to not overwrite stop_pc until
- check_stopped_by_breakpoint is called. */
+ /* Always update syscall_state, even if it will be filtered later. */
+ if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
+ {
+ child->syscall_state
+ = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
+ ? TARGET_WAITKIND_SYSCALL_RETURN
+ : TARGET_WAITKIND_SYSCALL_ENTRY);
+ }
+ else
+ {
+ /* Almost all other ptrace-stops are known to be outside of system
+ calls, with further exceptions in handle_extended_wait. */
+ child->syscall_state = TARGET_WAITKIND_IGNORE;
+ }
+
+ /* Be careful to not overwrite stop_pc until save_stop_reason is
+ called. */
if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
&& linux_is_extended_waitstatus (wstat))
{
child->stop_pc = get_pc (child);
- if (handle_extended_wait (child, wstat))
+ if (handle_extended_wait (&child, wstat))
{
/* The event has been handled, so just return without
reporting it. */
}
}
- /* Check first whether this was a SW/HW breakpoint before checking
- watchpoints, because at least s390 can't tell the data address of
- hardware watchpoint hits, and returns stopped-by-watchpoint as
- long as there's a watchpoint set. */
if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
{
- if (check_stopped_by_breakpoint (child))
+ if (save_stop_reason (child))
have_stop_pc = 1;
}
- /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
- or hardware watchpoint. Check which is which if we got
- TARGET_STOPPED_BY_HW_BREAKPOINT. */
- if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
- && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
- || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
- check_stopped_by_watchpoint (child);
-
if (!have_stop_pc)
child->stop_pc = get_pc (child);
struct lwp_info *lp = get_thread_lwp (thread);
if (lp->stopped
+ && !lp->suspended
&& !lp->status_pending_p
- && thread->last_resume_kind != resume_stop
&& thread->last_status.kind == TARGET_WAITKIND_IGNORE)
{
int step = thread->last_resume_kind == resume_step;
- When a non-leader thread execs, that thread just vanishes
without reporting an exit (so we'd hang if we waited for it
explicitly in that case). The exec event is reported to
- the TGID pid (although we don't currently enable exec
- events). */
+ the TGID pid. */
errno = 0;
ret = my_waitpid (-1, wstatp, options | WNOHANG);
current_thread = event_thread;
- /* Check for thread exit. */
- if (! WIFSTOPPED (*wstatp))
- {
- gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
-
- if (debug_threads)
- debug_printf ("LWP %d is the last lwp of process. "
- "Process %ld exiting.\n",
- pid_of (event_thread), lwpid_of (event_thread));
- return lwpid_of (event_thread);
- }
-
return lwpid_of (event_thread);
}
{
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lp = get_thread_lwp (thread);
- int *count = data;
+ int *count = (int *) data;
gdb_assert (count != NULL);
{
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lp = get_thread_lwp (thread);
- int *selector = data;
+ int *selector = (int *) data;
gdb_assert (selector != NULL);
if (lwp == except)
return 0;
- lwp->suspended--;
-
- gdb_assert (lwp->suspended >= 0);
+ lwp_suspended_decr (lwp);
return 0;
}
lwp = get_thread_lwp (current_thread);
/* Lock it. */
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
if (ourstatus.value.sig != GDB_SIGNAL_0
|| current_thread->last_resume_kind == resume_stop)
}
}
-static void async_file_mark (void);
-
/* Convenience function that is called when the kernel reports an
event that is not passed out to GDB. */
return null_ptid;
}
-/* Return non-zero if WAITSTATUS reflects an extended linux
- event. Otherwise, return zero. */
+/* Convenience function that is called when the kernel reports an exit
+ event. This decides whether to report the event to GDB as a
+ process exit event, a thread exit event, or to suppress the
+ event. Returns the ptid of the exiting thread either way. */
+
+static ptid_t
+filter_exit_event (struct lwp_info *event_child,
+ struct target_waitstatus *ourstatus)
+{
+ struct thread_info *thread = get_lwp_thread (event_child);
+ ptid_t ptid = ptid_of (thread);
+
+ /* If this is the last thread of the process, OURSTATUS is left
+ untouched: the caller's process-exit status stands. Otherwise
+ downgrade to a thread-exit event (or suppress it entirely when
+ GDB has not asked for thread events) and reap the LWP. */
+ if (!last_thread_of_process_p (pid_of (thread)))
+ {
+ if (report_thread_events)
+ ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
+ else
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+
+ delete_lwp (event_child);
+ }
+ return ptid;
+}
+
+/* Returns 1 if GDB is interested in any event_child syscalls. */
static int
-extended_event_reported (const struct target_waitstatus *waitstatus)
+gdb_catching_syscalls_p (struct lwp_info *event_child)
{
- if (waitstatus == NULL)
+ struct thread_info *thread = get_lwp_thread (event_child);
+ struct process_info *proc = get_thread_process (thread);
+
+ return !VEC_empty (int, proc->syscalls_to_catch);
+}
+
+/* Returns 1 if GDB is interested in the event_child syscall.
+ Only to be called when stopped reason is SYSCALL_SIGTRAP. */
+
+static int
+gdb_catch_this_syscall_p (struct lwp_info *event_child)
+{
+ int i, iter;
+ int sysno, sysret;
+ struct thread_info *thread = get_lwp_thread (event_child);
+ struct process_info *proc = get_thread_process (thread);
+
+ if (VEC_empty (int, proc->syscalls_to_catch))
return 0;
- return (waitstatus->kind == TARGET_WAITKIND_FORKED
- || waitstatus->kind == TARGET_WAITKIND_VFORKED
- || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
+ /* The ANY_SYSCALL sentinel, when present, is stored at index 0 and
+ means "catch every syscall", so we can skip decoding which
+ syscall actually trapped. */
+ if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
+ return 1;
+
+ /* Otherwise decode the trapped syscall number and scan the list. */
+ get_syscall_trapinfo (event_child, &sysno, &sysret);
+ for (i = 0;
+ VEC_iterate (int, proc->syscalls_to_catch, i, iter);
+ i++)
+ if (iter == sysno)
+ return 1;
+
+ return 0;
}
/* Wait for process, returns status. */
int report_to_gdb;
int trace_event;
int in_step_range;
+ int any_resumed;
if (debug_threads)
{
in_step_range = 0;
ourstatus->kind = TARGET_WAITKIND_IGNORE;
+ /* Find a resumed LWP, if any. */
+ if (find_inferior (&all_threads,
+ status_pending_p_callback,
+ &minus_one_ptid) != NULL)
+ any_resumed = 1;
+ else if ((find_inferior (&all_threads,
+ not_stopped_callback,
+ &minus_one_ptid) != NULL))
+ any_resumed = 1;
+ else
+ any_resumed = 0;
+
if (ptid_equal (step_over_bkpt, null_ptid))
pid = linux_wait_for_event (ptid, &w, options);
else
pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
}
- if (pid == 0)
+ if (pid == 0 || (pid == -1 && !any_resumed))
{
gdb_assert (target_options & TARGET_WNOHANG);
}
}
+ if (ourstatus->kind == TARGET_WAITKIND_EXITED)
+ return filter_exit_event (event_child, ourstatus);
+
return ptid_of (current_thread);
}
- /* If step-over executes a breakpoint instruction, it means a
- gdb/gdbserver breakpoint had been planted on top of a permanent
- breakpoint. The PC has been adjusted by
- check_stopped_by_breakpoint to point at the breakpoint address.
- Advance the PC manually past the breakpoint, otherwise the
- program would keep trapping the permanent breakpoint forever. */
+ /* If step-over executes a breakpoint instruction: with hardware
+ single-step it means a gdb/gdbserver breakpoint had been planted
+ on top of a permanent breakpoint; with software single-step it
+ may just mean that gdbserver hit the reinsert breakpoint. The
+ PC has been adjusted by save_stop_reason to point at the
+ breakpoint address. With hardware single-step, advance the PC
+ manually past the breakpoint; with software single-step, advance
+ it only if we are not hitting the reinsert breakpoint. This
+ keeps the program from trapping the permanent breakpoint
+ forever. */
if (!ptid_equal (step_over_bkpt, null_ptid)
- && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
+ && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ && (event_child->stepping
+ || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
{
- unsigned int increment_pc = the_low_target.breakpoint_len;
+ int increment_pc = 0;
+ int breakpoint_kind = 0;
+ CORE_ADDR stop_pc = event_child->stop_pc;
+
+ breakpoint_kind =
+ the_target->breakpoint_kind_from_current_state (&stop_pc);
+ the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
if (debug_threads)
{
/* Check whether GDB would be interested in this event. */
+ /* Check if GDB is interested in this syscall. */
+ if (WIFSTOPPED (w)
+ && WSTOPSIG (w) == SYSCALL_SIGTRAP
+ && !gdb_catch_this_syscall_p (event_child))
+ {
+ if (debug_threads)
+ {
+ debug_printf ("Ignored syscall for LWP %ld.\n",
+ lwpid_of (current_thread));
+ }
+
+ linux_resume_one_lwp (event_child, event_child->stepping,
+ 0, NULL);
+ return ignore_event (ourstatus);
+ }
+
/* If GDB is not interested in this signal, don't stop other
threads, and don't report it to GDB. Just resume the inferior
right away. We do this for threading-related signals as well as
stepping - they may require special handling to skip the signal
handler. Also never ignore signals that could be caused by a
breakpoint. */
- /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
- thread library? */
if (WIFSTOPPED (w)
&& current_thread->last_resume_kind != resume_step
&& (
debug_printf ("Ignored signal %d for LWP %ld.\n",
WSTOPSIG (w), lwpid_of (current_thread));
- if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
- (PTRACE_TYPE_ARG3) 0, &info) == 0)
- info_p = &info;
+ if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
+ (PTRACE_TYPE_ARG3) 0, &info) == 0)
+ info_p = &info;
+ else
+ info_p = NULL;
+
+ if (step_over_finished)
+ {
+ /* We cancelled this thread's step-over above. We still
+ need to unsuspend all other LWPs, and set them back
+ running again while the signal handler runs. */
+ unsuspend_all_lwps (event_child);
+
+ /* Enqueue the pending signal info so that proceed_all_lwps
+ doesn't lose it. */
+ enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
+
+ proceed_all_lwps ();
+ }
else
- info_p = NULL;
- linux_resume_one_lwp (event_child, event_child->stepping,
- WSTOPSIG (w), info_p);
+ {
+ linux_resume_one_lwp (event_child, event_child->stepping,
+ WSTOPSIG (w), info_p);
+ }
return ignore_event (ourstatus);
}
do, we're be able to handle GDB breakpoints on top of internal
breakpoints, by handling the internal breakpoint and still
reporting the event to GDB. If we don't, we're out of luck, GDB
- won't see the breakpoint hit. */
+ won't see the breakpoint hit. If we see a single-step event but
+ the thread should be continuing, don't pass the trap to gdb.
+ That indicates that we had previously finished a single-step but
+ left the single-step pending -- see
+ complete_ongoing_step_over. */
report_to_gdb = (!maybe_internal_trap
|| (current_thread->last_resume_kind == resume_step
&& !in_step_range)
|| event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
- || (!step_over_finished && !in_step_range
- && !bp_explains_trap && !trace_event)
+ || (!in_step_range
+ && !bp_explains_trap
+ && !trace_event
+ && !step_over_finished
+ && !(current_thread->last_resume_kind == resume_continue
+ && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
|| (gdb_breakpoint_here (event_child->stop_pc)
&& gdb_condition_true_at_breakpoint (event_child->stop_pc)
&& gdb_no_commands_at_breakpoint (event_child->stop_pc))
- || extended_event_reported (&event_child->waitstatus));
+ || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
run_breakpoint_commands (event_child->stop_pc);
paddress (event_child->stop_pc),
paddress (event_child->step_range_start),
paddress (event_child->step_range_end));
- if (extended_event_reported (&event_child->waitstatus))
- {
- char *str = target_waitstatus_to_string (ourstatus);
- debug_printf ("LWP %ld: extended event with waitstatus %s\n",
- lwpid_of (get_lwp_thread (event_child)), str);
- xfree (str);
- }
}
/* We're not reporting this breakpoint to GDB, so apply the
if (debug_threads)
{
+ if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ {
+ char *str;
+
+ str = target_waitstatus_to_string (&event_child->waitstatus);
+ debug_printf ("LWP %ld: extended event with waitstatus %s\n",
+ lwpid_of (get_lwp_thread (event_child)), str);
+ xfree (str);
+ }
if (current_thread->last_resume_kind == resume_step)
{
if (event_child->step_range_start == event_child->step_range_end)
unstop_all_lwps (1, event_child);
}
- if (extended_event_reported (&event_child->waitstatus))
+ if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
{
- /* If the reported event is a fork, vfork or exec, let GDB know. */
- ourstatus->kind = event_child->waitstatus.kind;
- ourstatus->value = event_child->waitstatus.value;
-
+ /* If the reported event is an exit, fork, vfork or exec, let
+ GDB know. */
+ *ourstatus = event_child->waitstatus;
/* Clear the event lwp's waitstatus since we handled it already. */
event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
}
}
}
- if (current_thread->last_resume_kind == resume_stop
- && WSTOPSIG (w) == SIGSTOP)
+ if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
+ {
+ int sysret;
+
+ get_syscall_trapinfo (event_child,
+ &ourstatus->value.syscall_number, &sysret);
+ ourstatus->kind = event_child->syscall_state;
+ }
+ else if (current_thread->last_resume_kind == resume_stop
+ && WSTOPSIG (w) == SIGSTOP)
{
/* A thread that has been requested to stop by GDB with vCont;t,
and it stopped cleanly, so report as SIG0. The use of
debug_exit ();
}
+ if (ourstatus->kind == TARGET_WAITKIND_EXITED)
+ return filter_exit_event (event_child, ourstatus);
+
return ptid_of (current_thread);
}
 static int
 kill_lwp (unsigned long lwpid, int signo)
 {
- /* Use tkill, if possible, in case we are using nptl threads. If tkill
- fails, then we are not using nptl threads and we should be using kill. */
-
-#ifdef __NR_tkill
- {
- static int tkill_failed;
-
- if (!tkill_failed)
- {
- int ret;
-
- errno = 0;
- ret = syscall (__NR_tkill, lwpid, signo);
- if (errno != ENOSYS)
- return ret;
- tkill_failed = 1;
- }
- }
-#endif
+ int ret;
- return kill (lwpid, signo);
+ /* Deliver SIGNO with tkill so it targets the single thread LWPID
+ rather than the whole thread group. */
+ errno = 0;
+ ret = syscall (__NR_tkill, lwpid, signo);
+ if (errno == ENOSYS)
+ {
+ /* If tkill fails, then we are not using nptl threads, a
+ configuration we no longer support. */
+ perror_with_name (("tkill"));
+ }
+ return ret;
 }
void
if (lwp == except)
return 0;
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
return send_sigstop_callback (entry, except);
}
 static void
 mark_lwp_dead (struct lwp_info *lwp, int wstat)
 {
- /* It's dead, really. */
- lwp->dead = 1;
-
+ /* Deadness is now inferred from the pending exit status (see
+ lwp_is_marked_dead) instead of a dedicated flag. */
+
 /* Store the exit status for later. */
 lwp->status_pending_p = 1;
 lwp->status_pending = wstat;
+ /* Store in waitstatus as well, as there's nothing else to process
+ for this event. */
+ if (WIFEXITED (wstat))
+ {
+ lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
+ lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
+ }
+ else if (WIFSIGNALED (wstat))
+ {
+ lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
+ lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
+ }
+
 /* Prevent trying to stop it. */
 lwp->stopped = 1;
 lwp->stop_expected = 0;
 }
+/* Return true if LWP has exited already, and has a pending exit event
+ to report to GDB. */
+
+static int
+lwp_is_marked_dead (struct lwp_info *lwp)
+{
+ /* Deadness is encoded as a pending WIFEXITED/WIFSIGNALED wait
+ status (set by mark_lwp_dead) rather than a separate flag. */
+ return (lwp->status_pending_p
+ && (WIFEXITED (lwp->status_pending)
+ || WIFSIGNALED (lwp->status_pending)));
+}
+
/* Wait for all children to stop for the SIGSTOPs we just queued. */
static void
if (debug_threads)
debug_printf ("Previously current thread died.\n");
- if (non_stop)
- {
- /* We can't change the current inferior behind GDB's back,
- otherwise, a subsequent command may apply to the wrong
- process. */
- current_thread = NULL;
- }
- else
- {
- /* Set a valid thread as current. */
- set_desired_thread (0);
- }
+ /* We can't change the current inferior behind GDB's back,
+ otherwise, a subsequent command may apply to the wrong
+ process. */
+ current_thread = NULL;
}
}
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- gdb_assert (lwp->suspended == 0);
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld is suspended, suspended=%d\n",
+ lwpid_of (thread), lwp->suspended);
+ }
gdb_assert (lwp->stopped);
/* Allow debugging the jump pad, gdb_collect, etc.. */
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
struct thread_info *thread = (struct thread_info *) entry;
+ struct thread_info *saved_thread;
struct lwp_info *lwp = get_thread_lwp (thread);
int *wstat;
- gdb_assert (lwp->suspended == 0);
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld is suspended, suspended=%d\n",
+ lwpid_of (thread), lwp->suspended);
+ }
gdb_assert (lwp->stopped);
+ /* For gdb_breakpoint_here. */
+ saved_thread = current_thread;
+ current_thread = thread;
+
wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
/* Allow debugging the jump pad, gdb_collect, etc. */
linux_resume_one_lwp (lwp, 0, 0, NULL);
}
else
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
+
+ current_thread = saved_thread;
}
static int
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- if (lwp->dead)
+ if (lwp_is_marked_dead (lwp))
return 0;
if (lwp->stopped)
return 0;
}
}
+/* Enqueue one signal in the chain of signals which need to be
+ delivered to this process on next resume. */
+
+static void
+enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
+{
+ struct pending_signals *p_sig = XNEW (struct pending_signals);
+
+ /* The chain is a LIFO list threaded through `prev'. */
+ p_sig->prev = lwp->pending_signals;
+ p_sig->signal = signal;
+ /* A NULL INFO means no siginfo is available; record zeros. */
+ if (info == NULL)
+ memset (&p_sig->info, 0, sizeof (siginfo_t));
+ else
+ memcpy (&p_sig->info, info, sizeof (siginfo_t));
+ lwp->pending_signals = p_sig;
+}
+
+/* Install breakpoints for software single stepping. */
+
+static void
+install_software_single_step_breakpoints (struct lwp_info *lwp)
+{
+ int i;
+ CORE_ADDR pc;
+ struct regcache *regcache = get_thread_regcache (current_thread, 1);
+ VEC (CORE_ADDR) *next_pcs = NULL;
+ struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
+
+ /* NOTE(review): LWP is unused; the regcache is read from
+ current_thread — presumably callers switch current_thread to
+ LWP's thread first; confirm. */
+
+ /* Ask the low target for every possible next PC and plant a
+ reinsert breakpoint at each one. */
+ next_pcs = (*the_low_target.get_next_pcs) (regcache);
+
+ for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
+ set_reinsert_breakpoint (pc);
+
+ do_cleanups (old_chain);
+}
+
+/* Single step via hardware or software single step.
+ Return 1 if hardware single stepping, 0 if software single stepping
+ or can't single step. */
+
+static int
+single_step (struct lwp_info* lwp)
+{
+ int step = 0;
+
+ if (can_hardware_single_step ())
+ {
+ step = 1;
+ }
+ else if (can_software_single_step ())
+ {
+ /* Software single-step works by planting reinsert breakpoints
+ at the possible next PCs, so STEP stays 0 (the thread is
+ continued, not hardware-stepped). */
+ install_software_single_step_breakpoints (lwp);
+ step = 0;
+ }
+ else
+ {
+ /* Neither flavor is available; the caller will just continue. */
+ if (debug_threads)
+ debug_printf ("stepping is not implemented on this target");
+ }
+
+ return step;
+}
+
/* Resume execution of LWP. If STEP is nonzero, single-step it. If
SIGNAL is nonzero, give it that signal. */
struct thread_info *thread = get_lwp_thread (lwp);
struct thread_info *saved_thread;
int fast_tp_collecting;
+ int ptrace_request;
struct process_info *proc = get_thread_process (thread);
/* Note that target description may not be initialised
if (lwp->stopped == 0)
return;
+ gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
+
fast_tp_collecting = lwp->collecting_fast_tracepoint;
gdb_assert (!stabilizing_threads || fast_tp_collecting);
|| lwp->pending_signals != NULL
|| lwp->bp_reinsert != 0
|| fast_tp_collecting))
- {
- struct pending_signals *p_sig;
- p_sig = xmalloc (sizeof (*p_sig));
- p_sig->prev = lwp->pending_signals;
- p_sig->signal = signal;
- if (info == NULL)
- memset (&p_sig->info, 0, sizeof (siginfo_t));
- else
- memcpy (&p_sig->info, info, sizeof (siginfo_t));
- lwp->pending_signals = p_sig;
- }
+ enqueue_pending_signal (lwp, signal, info);
if (lwp->status_pending_p)
{
address, continue, and carry on catching this while-stepping
action only when that breakpoint is hit. A future
enhancement. */
- if (thread->while_stepping != NULL
- && can_hardware_single_step ())
+ if (thread->while_stepping != NULL)
{
if (debug_threads)
debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
lwpid_of (thread));
- step = 1;
+
+ step = single_step (lwp);
}
if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
regcache_invalidate_thread (thread);
errno = 0;
lwp->stepping = step;
- ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
+ if (step)
+ ptrace_request = PTRACE_SINGLESTEP;
+ else if (gdb_catching_syscalls_p (lwp))
+ ptrace_request = PTRACE_SYSCALL;
+ else
+ ptrace_request = PTRACE_CONT;
+ ptrace (ptrace_request,
+ lwpid_of (thread),
(PTRACE_TYPE_ARG3) 0,
/* Coerce to a uintptr_t first to avoid potential gcc warning
of coercing an 8 byte integer to a 4 byte pointer. */
int ndx;
struct thread_resume_array *r;
- r = arg;
+ r = (struct thread_resume_array *) arg;
for (ndx = 0; ndx < r->n; ndx++)
{
lwpid_of (thread));
stop_all_lwps (1, lwp);
- gdb_assert (lwp->suspended == 0);
+
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld suspended=%d\n", lwpid_of (thread),
+ lwp->suspended);
+ }
if (debug_threads)
debug_printf ("Done stopping all threads for step-over.\n");
uninsert_breakpoints_at (pc);
uninsert_fast_tracepoint_jumps_at (pc);
- if (can_hardware_single_step ())
- {
- step = 1;
- }
- else
- {
- CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
- set_reinsert_breakpoint (raddr);
- step = 0;
- }
+ step = single_step (lwp);
current_thread = saved_thread;
return 0;
}
+/* If there's a step over in progress, wait until all threads stop
+ (that is, until the stepping thread finishes its step), and
+ unsuspend all lwps. The stepping thread ends with its status
+ pending, which is processed later when we get back to processing
+ events. */
+
+static void
+complete_ongoing_step_over (void)
+{
+ if (!ptid_equal (step_over_bkpt, null_ptid))
+ {
+ struct lwp_info *lwp;
+ int wstat;
+ int ret;
+
+ if (debug_threads)
+ debug_printf ("detach: step over in progress, finish it first\n");
+
+ /* Passing NULL_PTID as filter indicates we want all events to
+ be left pending. Eventually this returns when there are no
+ unwaited-for children left. */
+ ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
+ &wstat, __WALL);
+ gdb_assert (ret == -1);
+
+ /* NOTE(review): LWP may be NULL here if the stepping thread has
+ exited; presumably unsuspend_all_lwps treats a NULL "except"
+ argument as unsuspending every LWP -- confirm. */
+ lwp = find_lwp_pid (step_over_bkpt);
+ if (lwp != NULL)
+ finish_step_over (lwp);
+ step_over_bkpt = null_ptid;
+ unsuspend_all_lwps (lwp);
+ }
+}
+
/* This function is called once per thread. We check the thread's resume
request, which will tell us whether to resume, step, or leave the thread
stopped; and what signal, if any, it should be sent.
}
/* If this thread which is about to be resumed has a pending status,
- then don't resume any threads - we can just report the pending
- status. Make sure to queue any signals that would otherwise be
- sent. In all-stop mode, we do this decision based on if *any*
- thread has a pending status. If there's a thread that needs the
- step-over-breakpoint dance, then don't resume any other thread
- but that particular one. */
- leave_pending = (lwp->status_pending_p || leave_all_stopped);
+ then don't resume it - we can just report the pending status.
+ Likewise if it is suspended, because e.g., another thread is
+ stepping past a breakpoint. Make sure to queue any signals that
+ would otherwise be sent. In all-stop mode, we do this decision
+ based on if *any* thread has a pending status. If there's a
+ thread that needs the step-over-breakpoint dance, then don't
+ resume any other thread but that particular one. */
+ leave_pending = (lwp->suspended
+ || lwp->status_pending_p
+ || leave_all_stopped);
if (!leave_pending)
{
/* If we have a new signal, enqueue the signal. */
if (lwp->resume->sig != 0)
{
- struct pending_signals *p_sig;
- p_sig = xmalloc (sizeof (*p_sig));
+ struct pending_signals *p_sig = XCNEW (struct pending_signals);
+
p_sig->prev = lwp->pending_signals;
p_sig->signal = lwp->resume->sig;
- memset (&p_sig->info, 0, sizeof (siginfo_t));
/* If this is the same signal we were previously stopped by,
make sure to queue its siginfo. We can ignore the return
debug_printf ("linux_resume done\n");
debug_exit ();
}
+
+ /* We may have events that were pending that can/should be sent to
+ the client now. Trigger a linux_wait call. */
+ if (target_is_async_p ())
+ async_file_mark ();
}
/* This function is called once per thread. We check the thread's
send_sigstop (lwp);
}
- step = thread->last_resume_kind == resume_step;
+ if (thread->last_resume_kind == resume_step)
+ {
+ if (debug_threads)
+ debug_printf (" stepping LWP %ld, client wants it stepping\n",
+ lwpid_of (thread));
+ step = 1;
+ }
+ else if (lwp->bp_reinsert != 0)
+ {
+ if (debug_threads)
+ debug_printf (" stepping LWP %ld, reinsert set\n",
+ lwpid_of (thread));
+ step = 1;
+ }
+ else
+ step = 0;
+
linux_resume_one_lwp (lwp, step, 0, NULL);
return 0;
}
if (lwp == except)
return 0;
- lwp->suspended--;
- gdb_assert (lwp->suspended >= 0);
+ lwp_suspended_decr (lwp);
return proceed_one_lwp (entry, except);
}
dr_offset = regset - info->regsets;
if (info->disabled_regsets == NULL)
- info->disabled_regsets = xcalloc (1, info->num_regsets);
+ info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
info->disabled_regsets[dr_offset] = 1;
}
#ifdef HAVE_LINUX_USRREGS
-int
+static int
register_addr (const struct usrregs_info *usrregs, int regnum)
{
int addr;
size = ((register_size (regcache->tdesc, regno)
+ sizeof (PTRACE_XFER_TYPE) - 1)
& -sizeof (PTRACE_XFER_TYPE));
- buf = alloca (size);
+ buf = (char *) alloca (size);
pid = lwpid_of (current_thread);
for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
size = ((register_size (regcache->tdesc, regno)
+ sizeof (PTRACE_XFER_TYPE) - 1)
& -sizeof (PTRACE_XFER_TYPE));
- buf = alloca (size);
+ buf = (char *) alloca (size);
memset (buf, 0, size);
if (the_low_target.collect_ptrace_register)
#endif
-void
+static void
linux_fetch_registers (struct regcache *regcache, int regno)
{
int use_regsets;
}
}
-void
+static void
linux_store_registers (struct regcache *regcache, int regno)
{
int use_regsets;
count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
/ sizeof (PTRACE_XFER_TYPE));
/* Allocate buffer of that many longwords. */
- buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
+ buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
/* Read all the longwords */
errno = 0;
/ sizeof (PTRACE_XFER_TYPE);
/* Allocate buffer of that many longwords. */
- register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
- alloca (count * sizeof (PTRACE_XFER_TYPE));
+ register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
int pid = lwpid_of (current_thread);
if (debug_threads)
{
/* Dump up to four bytes. */
- unsigned int val = * (unsigned int *) myaddr;
- if (len == 1)
- val = val & 0xff;
- else if (len == 2)
- val = val & 0xffff;
- else if (len == 3)
- val = val & 0xffffff;
- debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
- 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
+ char str[4 * 2 + 1];
+ char *p = str;
+ int dump = len < 4 ? len : 4;
+
+ for (i = 0; i < dump; i++)
+ {
+ sprintf (p, "%02x", myaddr[i]);
+ p += 2;
+ }
+ *p = '\0';
+
+ debug_printf ("Writing %s to 0x%08lx in process %d\n",
+ str, (long) memaddr, pid);
}
/* Fill start and end extra bytes of buffer with existing memory data. */
if (proc->priv->thread_db != NULL)
return;
- /* If the kernel supports tracing clones, then we don't need to
- use the magic thread event breakpoint to learn about
- threads. */
- thread_db_init (!linux_supports_traceclone ());
+ thread_db_init ();
#endif
}
return USE_SIGTRAP_SIGINFO;
}
-/* Implement the supports_conditional_breakpoints target_ops
- method. */
+/* Implement the supports_hardware_single_step target_ops method. */
 static int
-linux_supports_conditional_breakpoints (void)
+linux_supports_hardware_single_step (void)
 {
- /* GDBserver needs to step over the breakpoint if the condition is
- false. GDBserver software single step is too simple, so disable
- conditional breakpoints if the target doesn't have hardware single
- step. */
 return can_hardware_single_step ();
 }
+/* Implement the supports_software_single_step target_ops method. */
+
+static int
+linux_supports_software_single_step (void)
+{
+ return can_software_single_step ();
+}
+
static int
linux_stopped_by_watchpoint (void)
{
layout of the inferiors' architecture. */
static void
-siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
+siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
{
int done = 0;
{
int pid;
siginfo_t siginfo;
- char inf_siginfo[sizeof (siginfo_t)];
+ gdb_byte inf_siginfo[sizeof (siginfo_t)];
if (current_thread == NULL)
return -1;
return linux_supports_tracefork ();
}
+/* Check if exec events are supported. Implements the
+ supports_exec_events target_ops method. */
+
+static int
+linux_supports_exec_events (void)
+{
+ return linux_supports_traceexec ();
+}
+
/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
options for the specified lwp. */
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
static void
-linux_process_qsupported (const char *query)
+linux_process_qsupported (char **features, int count)
{
if (the_low_target.process_qsupported != NULL)
- the_low_target.process_qsupported (query);
+ the_low_target.process_qsupported (features, count);
+}
+
+/* Implement the supports_catch_syscall target_ops method. Syscall
+ catchpoints require the low target to decode the trap info and the
+ kernel to support tracesysgood (see linux_supports_tracesysgood). */
+
+static int
+linux_supports_catch_syscall (void)
+{
+ return (the_low_target.get_syscall_trapinfo != NULL
+ && linux_supports_tracesysgood ());
+}
+
+/* Implement the get_ipa_tdesc_idx target_ops method. Returns 0 when
+ the low target does not provide an in-process-agent tdesc index. */
+
+static int
+linux_get_ipa_tdesc_idx (void)
+{
+ if (the_low_target.get_ipa_tdesc_idx == NULL)
+ return 0;
+
+ return (*the_low_target.get_ipa_tdesc_idx) ();
+}
static int
return 0;
gdb_assert (num_phdr < 100); /* Basic sanity check. */
- phdr_buf = alloca (num_phdr * phdr_size);
+ phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
return 0;
if (is_elf64)
{
Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
-#ifdef DT_MIPS_RLD_MAP
+#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
union
{
Elf64_Xword map;
unsigned char buf[sizeof (Elf64_Xword)];
}
rld_map;
-
+#endif
+#ifdef DT_MIPS_RLD_MAP
if (dyn->d_tag == DT_MIPS_RLD_MAP)
{
if (linux_read_memory (dyn->d_un.d_val,
break;
}
#endif /* DT_MIPS_RLD_MAP */
+#ifdef DT_MIPS_RLD_MAP_REL
+ if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
+ {
+ if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP_REL */
if (dyn->d_tag == DT_DEBUG && map == -1)
map = dyn->d_un.d_val;
else
{
Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
-#ifdef DT_MIPS_RLD_MAP
+#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
union
{
Elf32_Word map;
unsigned char buf[sizeof (Elf32_Word)];
}
rld_map;
-
+#endif
+#ifdef DT_MIPS_RLD_MAP
if (dyn->d_tag == DT_MIPS_RLD_MAP)
{
if (linux_read_memory (dyn->d_un.d_val,
break;
}
#endif /* DT_MIPS_RLD_MAP */
+#ifdef DT_MIPS_RLD_MAP_REL
+ if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
+ {
+ if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP_REL */
if (dyn->d_tag == DT_DEBUG && map == -1)
map = dyn->d_un.d_val;
}
}
- document = xmalloc (allocated);
+ document = (char *) xmalloc (allocated);
strcpy (document, "<library-list-svr4 version=\"1.0\"");
p = document + strlen (document);
/* Expand to guarantee sufficient storage. */
uintptr_t document_len = p - document;
- document = xrealloc (document, 2 * allocated);
+ document = (char *) xrealloc (document, 2 * allocated);
allocated *= 2;
p = document + document_len;
}
#ifdef HAVE_LINUX_BTRACE
-/* See to_enable_btrace target method. */
-
-static struct btrace_target_info *
-linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
-{
- struct btrace_target_info *tinfo;
-
- tinfo = linux_enable_btrace (ptid, conf);
-
- if (tinfo != NULL && tinfo->ptr_bits == 0)
- {
- struct thread_info *thread = find_thread_ptid (ptid);
- struct regcache *regcache = get_thread_regcache (thread, 0);
-
- tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
- }
-
- return tinfo;
-}
-
/* See to_disable_btrace target method. */
static int
return (err == BTRACE_ERR_NONE ? 0 : -1);
}
-/* Encode an Intel(R) Processor Trace configuration. */
+/* Encode an Intel Processor Trace configuration. */
static void
linux_low_encode_pt_config (struct buffer *buffer,
static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
- int type)
+ enum btrace_read_type type)
{
struct btrace_data btrace;
struct btrace_block *block;
return ptid_of (current_thread);
}
+/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
+
+static int
+linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
+{
+ /* Fall back to the generic default when the low target does not
+ define a hook. */
+ if (the_low_target.breakpoint_kind_from_pc != NULL)
+ return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
+ else
+ return default_breakpoint_kind_from_pc (pcptr);
+}
+
+/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
+
+static const gdb_byte *
+linux_sw_breakpoint_from_kind (int kind, int *size)
+{
+ /* This hook is mandatory; there is no generic fallback. */
+ gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
+
+ return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
+}
+
+/* Implementation of the target_ops method
+ "breakpoint_kind_from_current_state". */
+
+static int
+linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
+{
+ /* Fall back to the PC-only variant when the low target does not
+ define the state-aware hook. */
+ if (the_low_target.breakpoint_kind_from_current_state != NULL)
+ return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
+ else
+ return linux_breakpoint_kind_from_pc (pcptr);
+}
+
+/* Default implementation of linux_target_ops method "set_pc" for
+ 32-bit pc register which is literally named "pc". */
+
+void
+linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
+{
+ /* PC is truncated to 32 bits for the register write. */
+ uint32_t newpc = pc;
+
+ supply_register_by_name (regcache, "pc", &newpc);
+}
+
+/* Default implementation of linux_target_ops method "get_pc" for
+ 32-bit pc register which is literally named "pc". */
+
+CORE_ADDR
+linux_get_pc_32bit (struct regcache *regcache)
+{
+ uint32_t pc;
+
+ collect_register_by_name (regcache, "pc", &pc);
+ if (debug_threads)
+ debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
+ return pc;
+}
+
+/* Default implementation of linux_target_ops method "set_pc" for
+ 64-bit pc register which is literally named "pc". */
+
+void
+linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
+{
+ uint64_t newpc = pc;
+
+ supply_register_by_name (regcache, "pc", &newpc);
+}
+
+/* Default implementation of linux_target_ops method "get_pc" for
+ 64-bit pc register which is literally named "pc". */
+
+CORE_ADDR
+linux_get_pc_64bit (struct regcache *regcache)
+{
+ uint64_t pc;
+
+ collect_register_by_name (regcache, "pc", &pc);
+ if (debug_threads)
+ debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
+ return pc;
+}
+
+
static struct target_ops linux_target_ops = {
linux_create_inferior,
- linux_arch_setup,
+ linux_post_create_inferior,
linux_attach,
linux_kill,
linux_detach,
linux_supports_stopped_by_sw_breakpoint,
linux_stopped_by_hw_breakpoint,
linux_supports_stopped_by_hw_breakpoint,
- linux_supports_conditional_breakpoints,
+ linux_supports_hardware_single_step,
linux_stopped_by_watchpoint,
linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
linux_supports_multi_process,
linux_supports_fork_events,
linux_supports_vfork_events,
+ linux_supports_exec_events,
linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
thread_db_handle_monitor_command,
linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
linux_supports_btrace,
- linux_low_enable_btrace,
+ linux_enable_btrace,
linux_low_disable_btrace,
linux_low_read_btrace,
linux_low_btrace_conf,
linux_mntns_open_cloexec,
linux_mntns_unlink,
linux_mntns_readlink,
+ linux_breakpoint_kind_from_pc,
+ linux_sw_breakpoint_from_kind,
+ linux_proc_tid_get_name,
+ linux_breakpoint_kind_from_current_state,
+ linux_supports_software_single_step,
+ linux_supports_catch_syscall,
+ linux_get_ipa_tdesc_idx,
};
-static void
-linux_init_signals ()
-{
- /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
- to find what the cancel signal actually is. */
-#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
- signal (__SIGRTMIN+1, SIG_IGN);
-#endif
-}
-
#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
initialize_low (void)
{
struct sigaction sigchld_action;
+
memset (&sigchld_action, 0, sizeof (sigchld_action));
set_target_ops (&linux_target_ops);
- set_breakpoint_data (the_low_target.breakpoint,
- the_low_target.breakpoint_len);
- linux_init_signals ();
+
linux_ptrace_init_warnings ();
sigchld_action.sa_handler = sigchld_handler;