/* Low level interface to ptrace, for the remote server for GDB.
- Copyright (C) 1995-2015 Free Software Foundation, Inc.
+ Copyright (C) 1995-2016 Free Software Foundation, Inc.
This file is part of GDB.
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
+#include "tdesc.h"
+#include "rsp-low.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
-#include <sys/ptrace.h>
+#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
+#include <inttypes.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
then ELFMAG0 will have been defined. If it didn't get included by
definition of elf_fpregset_t. */
#include <elf.h>
#endif
+#include "nat/linux-namespaces.h"
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#define O_LARGEFILE 0
#endif
-#ifndef W_STOPCODE
-#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
-#endif
-
-/* This is the kernel's hard limit. Not to be confused with
- SIGRTMIN. */
-#ifndef __SIGRTMIN
-#define __SIGRTMIN 32
-#endif
-
/* Some targets did not define these ptrace constants from the start,
so gdbserver defines them locally here. In the future, these may
be removed after they are added to asm/ptrace.h. */
} Elf64_auxv_t;
#endif
+/* Does the current host support PTRACE_GETREGSET? */
+int have_ptrace_getregset = -1;
+
+/* LWP accessors. */
+
+/* See nat/linux-nat.h. */
+
+ptid_t
+ptid_of_lwp (struct lwp_info *lwp)
+{
+ return ptid_of (get_lwp_thread (lwp));
+}
+
+/* See nat/linux-nat.h. */
+
+void
+lwp_set_arch_private_info (struct lwp_info *lwp,
+ struct arch_lwp_info *info)
+{
+ lwp->arch_private = info;
+}
+
+/* See nat/linux-nat.h. */
+
+struct arch_lwp_info *
+lwp_arch_private_info (struct lwp_info *lwp)
+{
+ return lwp->arch_private;
+}
+
+/* See nat/linux-nat.h. */
+
+int
+lwp_is_stopped (struct lwp_info *lwp)
+{
+ return lwp->stopped;
+}
+
+/* See nat/linux-nat.h. */
+
+enum target_stop_reason
+lwp_stop_reason (struct lwp_info *lwp)
+{
+ return lwp->stop_reason;
+}
+
/* A list of all unknown processes which receive stop signals. Some
other process will presumably claim each of these as forked
children momentarily. */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
- struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
+ struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
new_pid->pid = pid;
new_pid->status = status;
int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
+static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
+static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
+static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
+static void complete_ongoing_step_over (void);
+static int linux_low_ptrace_options (int attached);
/* When the event-loop is doing a step-over, this points at the thread
being stepped. */
ptid_t step_over_bkpt;
-/* True if the low target can hardware single-step. Such targets
- don't need a BREAKPOINT_REINSERT_ADDR callback. */
+/* True if the low target can hardware single-step. */
static int
can_hardware_single_step (void)
{
- return (the_low_target.breakpoint_reinsert_addr == NULL);
+ if (the_low_target.supports_hardware_single_step != NULL)
+ return the_low_target.supports_hardware_single_step ();
+ else
+ return 0;
+}
+
+/* True if the low target can software single-step. Such targets
+ implement the GET_NEXT_PCS callback. */
+
+static int
+can_software_single_step (void)
+{
+ return (the_low_target.get_next_pcs != NULL);
}
/* True if the low target supports memory breakpoints. If so, we'll
struct process_info *proc;
proc = add_process (pid, attached);
- proc->private = xcalloc (1, sizeof (*proc->private));
-
- /* Set the arch when the first LWP stops. */
- proc->private->new_inferior = 1;
+ proc->priv = XCNEW (struct process_info_private);
if (the_low_target.new_process != NULL)
- proc->private->arch_private = the_low_target.new_process ();
+ proc->priv->arch_private = the_low_target.new_process ();
return proc;
}
static CORE_ADDR get_pc (struct lwp_info *lwp);
-/* Handle a GNU/Linux extended wait response. If we see a clone
- event, we need to add the new LWP to our list (and not report the
- trap to higher layers). */
+/* Call the target arch_setup function on the current thread. */
static void
-handle_extended_wait (struct lwp_info *event_child, int wstat)
+linux_arch_setup (void)
{
+ the_low_target.arch_setup ();
+}
+
+/* Call the target arch_setup function on THREAD. */
+
+static void
+linux_arch_setup_thread (struct thread_info *thread)
+{
+ struct thread_info *saved_thread;
+
+ saved_thread = current_thread;
+ current_thread = thread;
+
+ linux_arch_setup ();
+
+ current_thread = saved_thread;
+}
+
+/* Handle a GNU/Linux extended wait response. If we see a clone,
+ fork, or vfork event, we need to add the new LWP to our list
+ (and return 0 so as not to report the trap to higher layers).
+ If we see an exec event, we will modify ORIG_EVENT_LWP to point
+ to a new LWP representing the new program. */
+
+static int
+handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
+{
+ struct lwp_info *event_lwp = *orig_event_lwp;
int event = linux_ptrace_get_extended_event (wstat);
- struct thread_info *event_thr = get_lwp_thread (event_child);
+ struct thread_info *event_thr = get_lwp_thread (event_lwp);
struct lwp_info *new_lwp;
- if (event == PTRACE_EVENT_CLONE)
+ gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
+
+ /* All extended events we currently use are mid-syscall. Only
+ PTRACE_EVENT_STOP is delivered more like a signal-stop, but
+ you have to be using PTRACE_SEIZE to get that. */
+ event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
+
+ if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
+ || (event == PTRACE_EVENT_CLONE))
{
ptid_t ptid;
unsigned long new_pid;
int ret, status;
+ /* Get the pid of the new lwp. */
ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
&new_pid);
warning ("wait returned unexpected status 0x%x", status);
}
+ if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
+ {
+ struct process_info *parent_proc;
+ struct process_info *child_proc;
+ struct lwp_info *child_lwp;
+ struct thread_info *child_thr;
+ struct target_desc *tdesc;
+
+ ptid = ptid_build (new_pid, new_pid, 0);
+
+ if (debug_threads)
+ {
+ debug_printf ("HEW: Got fork event from LWP %ld, "
+ "new child is %d\n",
+ ptid_get_lwp (ptid_of (event_thr)),
+ ptid_get_pid (ptid));
+ }
+
+ /* Add the new process to the tables and clone the breakpoint
+ lists of the parent. We need to do this even if the new process
+ will be detached, since we will need the process object and the
+ breakpoints to remove any breakpoints from memory when we
+ detach, and the client side will access registers. */
+ child_proc = linux_add_process (new_pid, 0);
+ gdb_assert (child_proc != NULL);
+ child_lwp = add_lwp (ptid);
+ gdb_assert (child_lwp != NULL);
+ child_lwp->stopped = 1;
+ child_lwp->must_set_ptrace_flags = 1;
+ child_lwp->status_pending_p = 0;
+ child_thr = get_lwp_thread (child_lwp);
+ child_thr->last_resume_kind = resume_stop;
+ child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
+
+ /* If we're suspending all threads, leave this one suspended
+ too. */
+ if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
+ {
+ if (debug_threads)
+ debug_printf ("HEW: leaving child suspended\n");
+ child_lwp->suspended = 1;
+ }
+
+ parent_proc = get_thread_process (event_thr);
+ child_proc->attached = parent_proc->attached;
+ clone_all_breakpoints (&child_proc->breakpoints,
+ &child_proc->raw_breakpoints,
+ parent_proc->breakpoints);
+
+ tdesc = XNEW (struct target_desc);
+ copy_target_description (tdesc, parent_proc->tdesc);
+ child_proc->tdesc = tdesc;
+
+ /* Clone arch-specific process data. */
+ if (the_low_target.new_fork != NULL)
+ the_low_target.new_fork (parent_proc, child_proc);
+
+ /* Save fork info in the parent thread. */
+ if (event == PTRACE_EVENT_FORK)
+ event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
+ else if (event == PTRACE_EVENT_VFORK)
+ event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
+
+ event_lwp->waitstatus.value.related_pid = ptid;
+
+ /* The status_pending field contains bits denoting the
+ extended event, so when the pending event is handled,
+ the handler will look at lwp->waitstatus. */
+ event_lwp->status_pending_p = 1;
+ event_lwp->status_pending = wstat;
+
+ /* Report the event. */
+ return 0;
+ }
+
if (debug_threads)
debug_printf ("HEW: Got clone event "
"from LWP %ld, new child is LWP %ld\n",
new_lwp->status_pending_p = 1;
new_lwp->status_pending = status;
}
+ else if (report_thread_events)
+ {
+ new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
+ new_lwp->status_pending_p = 1;
+ new_lwp->status_pending = status;
+ }
+
+ /* Don't report the event. */
+ return 1;
}
+ else if (event == PTRACE_EVENT_VFORK_DONE)
+ {
+ event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
+
+ /* Report the event. */
+ return 0;
+ }
+ else if (event == PTRACE_EVENT_EXEC && report_exec_events)
+ {
+ struct process_info *proc;
+ VEC (int) *syscalls_to_catch;
+ ptid_t event_ptid;
+ pid_t event_pid;
+
+ if (debug_threads)
+ {
+ debug_printf ("HEW: Got exec event from LWP %ld\n",
+ lwpid_of (event_thr));
+ }
+
+ /* Get the event ptid. */
+ event_ptid = ptid_of (event_thr);
+ event_pid = ptid_get_pid (event_ptid);
+
+ /* Save the syscall list from the execing process. */
+ proc = get_thread_process (event_thr);
+ syscalls_to_catch = proc->syscalls_to_catch;
+ proc->syscalls_to_catch = NULL;
+
+ /* Delete the execing process and all its threads. */
+ linux_mourn (proc);
+ current_thread = NULL;
+
+ /* Create a new process/lwp/thread. */
+ proc = linux_add_process (event_pid, 0);
+ event_lwp = add_lwp (event_ptid);
+ event_thr = get_lwp_thread (event_lwp);
+ gdb_assert (current_thread == event_thr);
+ linux_arch_setup_thread (event_thr);
+
+ /* Set the event status. */
+ event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
+ event_lwp->waitstatus.value.execd_pathname
+ = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
+
+ /* Mark the exec status as pending. */
+ event_lwp->stopped = 1;
+ event_lwp->status_pending_p = 1;
+ event_lwp->status_pending = wstat;
+ event_thr->last_resume_kind = resume_continue;
+ event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
+
+ /* Update syscall state in the new lwp, effectively mid-syscall too. */
+ event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
+
+ /* Restore the list to catch. Don't rely on the client, which is free
+ to avoid sending a new list when the architecture doesn't change.
+ Also, for ANY_SYSCALL, the architecture doesn't really matter. */
+ proc->syscalls_to_catch = syscalls_to_catch;
+
+ /* Report the event. */
+ *orig_event_lwp = event_lwp;
+ return 0;
+ }
+
+ internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
/* Return the PC as read from the regcache of LWP, without any
return pc;
}
+/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
+ Fill *SYSNO with the syscall nr trapped. Fill *SYSRET with the
+ return code. */
+
+static void
+get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
+{
+ struct thread_info *saved_thread;
+ struct regcache *regcache;
+
+ if (the_low_target.get_syscall_trapinfo == NULL)
+ {
+ /* If we cannot get the syscall trapinfo, report an unknown
+ system call number and -ENOSYS return value. */
+ *sysno = UNKNOWN_SYSCALL;
+ *sysret = -ENOSYS;
+ return;
+ }
+
+ saved_thread = current_thread;
+ current_thread = get_lwp_thread (lwp);
+
+ regcache = get_thread_regcache (current_thread, 1);
+ (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
+
+ if (debug_threads)
+ {
+ debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
+ *sysno, *sysret);
+ }
+
+ current_thread = saved_thread;
+}
+
/* This function should only be called if LWP got a SIGTRAP.
The SIGTRAP could mean several things.
CORE_ADDR pc;
CORE_ADDR sw_breakpoint_pc;
struct thread_info *saved_thread;
+#if USE_SIGTRAP_SIGINFO
+ siginfo_t siginfo;
+#endif
if (the_low_target.get_pc == NULL)
return 0;
saved_thread = current_thread;
current_thread = get_lwp_thread (lwp);
+#if USE_SIGTRAP_SIGINFO
+ if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
+ (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
+ {
+ if (siginfo.si_signo == SIGTRAP)
+ {
+ if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
+ {
+ if (debug_threads)
+ {
+ struct thread_info *thr = get_lwp_thread (lwp);
+
+ debug_printf ("CSBB: %s stopped by software breakpoint\n",
+ target_pid_to_str (ptid_of (thr)));
+ }
+
+ /* Back up the PC if necessary. */
+ if (pc != sw_breakpoint_pc)
+ {
+ struct regcache *regcache
+ = get_thread_regcache (current_thread, 1);
+ (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
+ }
+
+ lwp->stop_pc = sw_breakpoint_pc;
+ lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
+ current_thread = saved_thread;
+ return 1;
+ }
+ else if (siginfo.si_code == TRAP_HWBKPT)
+ {
+ if (debug_threads)
+ {
+ struct thread_info *thr = get_lwp_thread (lwp);
+
+ debug_printf ("CSBB: %s stopped by hardware "
+ "breakpoint/watchpoint\n",
+ target_pid_to_str (ptid_of (thr)));
+ }
+
+ lwp->stop_pc = pc;
+ lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
+ current_thread = saved_thread;
+ return 1;
+ }
+ else if (siginfo.si_code == TRAP_TRACE)
+ {
+ if (debug_threads)
+ {
+ struct thread_info *thr = get_lwp_thread (lwp);
+
+ debug_printf ("CSBB: %s stopped by trace\n",
+ target_pid_to_str (ptid_of (thr)));
+ }
+
+ lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
+ }
+ }
+ }
+#else
/* We may have just stepped a breakpoint instruction. E.g., in
non-stop mode, GDB first tells the thread A to step a range, and
then the user inserts a breakpoint inside the range. In that
- case, we need to report the breakpoint PC. But, when we're
- trying to step past one of our own breakpoints, that happens to
- have been placed on top of a permanent breakpoint instruction, we
- shouldn't adjust the PC, otherwise the program would keep
- trapping the permanent breakpoint forever. */
- if ((!lwp->stepping
- || (!ptid_equal (ptid_of (current_thread), step_over_bkpt)
- && lwp->stop_pc == sw_breakpoint_pc))
+ case, we need to report the breakpoint PC. */
+ if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
&& (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
{
if (debug_threads)
}
lwp->stop_pc = sw_breakpoint_pc;
- lwp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
+ lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
current_thread = saved_thread;
return 1;
}
}
lwp->stop_pc = pc;
- lwp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
+ lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
current_thread = saved_thread;
return 1;
}
+#endif
current_thread = saved_thread;
return 0;
{
struct lwp_info *lwp;
- lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
- memset (lwp, 0, sizeof (*lwp));
+ lwp = XCNEW (struct lwp_info);
+
+ lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
if (the_low_target.new_thread != NULL)
- lwp->arch_private = the_low_target.new_thread ();
+ the_low_target.new_thread (lwp);
lwp->thread = add_thread (ptid, lwp);
close_most_fds ();
ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
-#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
- signal (__SIGRTMIN + 1, SIG_DFL);
-#endif
-
setpgid (0, 0);
/* If gdbserver is connected to gdb via stdio, redirect the inferior's
return pid;
}
+/* Implement the post_create_inferior target_ops method. */
+
+static void
+linux_post_create_inferior (void)
+{
+ struct lwp_info *lwp = get_thread_lwp (current_thread);
+
+ linux_arch_setup ();
+
+ if (lwp->must_set_ptrace_flags)
+ {
+ struct process_info *proc = current_process ();
+ int options = linux_low_ptrace_options (proc->attached);
+
+ linux_enable_event_reporting (lwpid_of (current_thread), options);
+ lwp->must_set_ptrace_flags = 0;
+ }
+}
+
/* Attach to an inferior process. Returns 0 on success, ERRNO on
error. */
return 0;
}
+static void async_file_mark (void);
+
/* Attach to PID. If PID is the tgid, attach to it and all
of its threads. */
static int
linux_attach (unsigned long pid)
{
+ struct process_info *proc;
+ struct thread_info *initial_thread;
ptid_t ptid = ptid_build (pid, pid, 0);
int err;
error ("Cannot attach to process %ld: %s",
pid, linux_ptrace_attach_fail_reason_string (ptid, err));
- linux_add_process (pid, 1);
+ proc = linux_add_process (pid, 1);
- if (!non_stop)
- {
- struct thread_info *thread;
-
- /* Don't ignore the initial SIGSTOP if we just attached to this
- process. It will be collected by wait shortly. */
- thread = find_thread_ptid (ptid_build (pid, pid, 0));
- thread->last_resume_kind = resume_stop;
- }
+ /* Don't ignore the initial SIGSTOP if we just attached to this
+ process. It will be collected by wait shortly. */
+ initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
+ initial_thread->last_resume_kind = resume_stop;
/* We must attach to every LWP. If /proc is mounted, use that to
find them now. On the one hand, the inferior may be using raw
that once thread_db is loaded, we'll still use it to list threads
and associate pthread info with each LWP. */
linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
+
+ /* GDB will shortly read the xml target description for this
+ process, to figure out the process' architecture. But the target
+ description is only filled in when the first process/thread in
+ the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
+ that now, otherwise, if GDB is fast enough, it could read the
+ target description _before_ that initial stop. */
+ if (non_stop)
+ {
+ struct lwp_info *lwp;
+ int wstat, lwpid;
+ ptid_t pid_ptid = pid_to_ptid (pid);
+
+ lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
+ &wstat, __WALL);
+ gdb_assert (lwpid > 0);
+
+ lwp = find_lwp_pid (pid_to_ptid (lwpid));
+
+ if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
+ {
+ lwp->status_pending_p = 1;
+ lwp->status_pending = wstat;
+ }
+
+ initial_thread->last_resume_kind = resume_continue;
+
+ async_file_mark ();
+
+ gdb_assert (proc->tdesc != NULL);
+ }
+
return 0;
}
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
- struct counter *counter = args;
+ struct counter *counter = (struct counter *) args;
if (ptid_get_pid (entry->id) == counter->pid)
{
ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
alternative is to kill with SIGKILL. We only need one SIGKILL
per process, not one for each thread. But since we still support
- linuxthreads, and we also support debugging programs using raw
- clone without CLONE_THREAD, we send one for each thread. For
- years, we used PTRACE_KILL only, so we're being a bit paranoid
- about some old kernels where PTRACE_KILL might work better
- (dubious if there are any such, but that's why it's paranoia), so
- we try SIGKILL first, PTRACE_KILL second, and so we're fine
- everywhere. */
+ support debugging programs using raw clone without CLONE_THREAD,
+ we send one for each thread. For years, we used PTRACE_KILL
+ only, so we're being a bit paranoid about some old kernels where
+ PTRACE_KILL might work better (dubious if there are any such, but
+ that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
+ second, and so we're fine everywhere. */
errno = 0;
kill_lwp (pid, SIGKILL);
res = my_waitpid (lwpid, &wstat, __WCLONE);
} while (res > 0 && WIFSTOPPED (wstat));
- gdb_assert (res > 0);
+ /* Even if it was stopped, the child may have already disappeared.
+ E.g., if it was killed by SIGKILL. */
+ if (res < 0 && errno != ECHILD)
+ perror_with_name ("kill_wait_lwp");
}
/* Callback for `find_inferior'. Kills an lwp of a given process,
if (process == NULL)
return -1;
+ /* As there's a step over already in progress, let it finish first,
+ otherwise nesting a stabilize_threads operation on top gets real
+ messy. */
+ complete_ongoing_step_over ();
+
/* Stop all threads before detaching. First, ptrace requires that
 the thread is stopped to successfully detach. Second, thread_db
may need to uninstall thread event breakpoints from memory, which
{
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- struct process_info *process = proc;
+ struct process_info *process = (struct process_info *) proc;
if (pid_of (thread) == pid_of (process))
delete_lwp (lwp);
find_inferior (&all_threads, delete_lwp_callback, process);
/* Freeing all private data. */
- priv = process->private;
+ priv = process->priv;
free (priv->arch_private);
free (priv);
- process->private = NULL;
+ process->priv = NULL;
remove_process (process);
}
exited but we still haven't been able to report it to GDB, we'll
hold on to the last lwp of the dead process. */
if (lwp != NULL)
- return !lwp->dead;
+ return !lwp_is_marked_dead (lwp);
else
return 0;
}
if (!lp->status_pending_p)
return 0;
- /* If we got a `vCont;t', but we haven't reported a stop yet, do
- report any status pending the LWP may have. */
- if (thread->last_resume_kind == resume_stop
- && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
- return 0;
-
if (thread->last_resume_kind != resume_stop
- && (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
- || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT))
+ && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
{
struct thread_info *saved_thread;
CORE_ADDR pc;
lwpid_of (thread));
discard = 1;
}
- else if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
+
+#if !USE_SIGTRAP_SIGINFO
+ else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
&& !(*the_low_target.breakpoint_at) (pc))
{
if (debug_threads)
lwpid_of (thread));
discard = 1;
}
- else if (lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT
+ else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
&& !hardware_breakpoint_inserted_here (pc))
{
if (debug_threads)
lwpid_of (thread));
discard = 1;
}
+#endif
current_thread = saved_thread;
return 1;
}
+/* Returns true if LWP is resumed from the client's perspective. */
+
+static int
+lwp_resumed (struct lwp_info *lwp)
+{
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ if (thread->last_resume_kind != resume_stop)
+ return 1;
+
+ /* Did gdb send us a `vCont;t', but we haven't reported the
+ corresponding stop to gdb yet? If so, the thread is still
+ resumed/running from gdb's perspective. */
+ if (thread->last_resume_kind == resume_stop
+ && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
+ return 1;
+
+ return 0;
+}
+
/* Return 1 if this lwp has an interesting status pending. */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
ptid_t ptid = * (ptid_t *) arg;
/* Check if we're only interested in events from a specific process
- or its lwps. */
- if (!ptid_equal (minus_one_ptid, ptid)
- && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
+ or a specific LWP. */
+ if (!ptid_match (ptid_of (thread), ptid))
+ return 0;
+
+ if (!lwp_resumed (lp))
return 0;
if (lp->status_pending_p
return count;
}
+/* The arguments passed to iterate_over_lwps. */
+
+struct iterate_over_lwps_args
+{
+ /* The FILTER argument passed to iterate_over_lwps. */
+ ptid_t filter;
+
+ /* The CALLBACK argument passed to iterate_over_lwps. */
+ iterate_over_lwps_ftype *callback;
+
+ /* The DATA argument passed to iterate_over_lwps. */
+ void *data;
+};
+
+/* Callback for find_inferior used by iterate_over_lwps to filter
+ calls to the callback supplied to that function. Returning a
+ nonzero value causes find_inferiors to stop iterating and return
+ the current inferior_list_entry. Returning zero indicates that
+ find_inferiors should continue iterating. */
+
+static int
+iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
+{
+ struct iterate_over_lwps_args *args
+ = (struct iterate_over_lwps_args *) args_p;
+
+ if (ptid_match (entry->id, args->filter))
+ {
+ struct thread_info *thr = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thr);
+
+ return (*args->callback) (lwp, args->data);
+ }
+
+ return 0;
+}
+
+/* See nat/linux-nat.h. */
+
+struct lwp_info *
+iterate_over_lwps (ptid_t filter,
+ iterate_over_lwps_ftype callback,
+ void *data)
+{
+ struct iterate_over_lwps_args args = {filter, callback, data};
+ struct inferior_list_entry *entry;
+
+ entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
+ if (entry == NULL)
+ return NULL;
+
+ return get_thread_lwp ((struct thread_info *) entry);
+}
+
/* Detect zombie thread group leaders, and "exit" them. We can't reap
their exits until all other threads in the group have exited. */
leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
linux_proc_pid_is_zombie (leader_pid));
- if (leader_lp != NULL
+ if (leader_lp != NULL && !leader_lp->stopped
/* Check if there are other threads in the group, as we may
have raced with the inferior simply exiting. */
&& !last_thread_of_process_p (leader_pid)
return 0;
}
+/* Increment LWP's suspend count. */
+
+static void
+lwp_suspended_inc (struct lwp_info *lwp)
+{
+ lwp->suspended++;
+
+ if (debug_threads && lwp->suspended > 4)
+ {
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ debug_printf ("LWP %ld has a suspiciously high suspend count,"
+ " suspended=%d\n", lwpid_of (thread), lwp->suspended);
+ }
+}
+
+/* Decrement LWP's suspend count. */
+
+static void
+lwp_suspended_decr (struct lwp_info *lwp)
+{
+ lwp->suspended--;
+
+ if (lwp->suspended < 0)
+ {
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ internal_error (__FILE__, __LINE__,
+ "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
+ lwp->suspended);
+ }
+}
+
/* This function should only be called if the LWP got a SIGTRAP.
Handle any tracepoint steps or hits. Return true if a tracepoint
uninsert tracepoints. To do this, we temporarily pause all
threads, unpatch away, and then unpause threads. We need to make
sure the unpausing doesn't resume LWP too. */
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
/* And we need to be sure that any all-threads-stopping doesn't try
to move threads out of the jump pads, as it could deadlock the
actions. */
tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
- lwp->suspended--;
+ lwp_suspended_decr (lwp);
gdb_assert (lwp->suspended == 0);
gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
}
}
- p_sig = xmalloc (sizeof (*p_sig));
+ p_sig = XCNEW (struct pending_signals);
p_sig->prev = lwp->pending_signals_to_report;
p_sig->signal = WSTOPSIG (*wstat);
- memset (&p_sig->info, 0, sizeof (siginfo_t));
+
ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
&p_sig->info);
return 0;
}
-/* Return true if the event in LP may be caused by breakpoint. */
-
-static int
-wstatus_maybe_breakpoint (int wstatus)
-{
- return (WIFSTOPPED (wstatus)
- && (WSTOPSIG (wstatus) == SIGTRAP
- /* SIGILL and SIGSEGV are also treated as traps in case a
- breakpoint is inserted at the current PC. */
- || WSTOPSIG (wstatus) == SIGILL
- || WSTOPSIG (wstatus) == SIGSEGV));
-}
-
/* Fetch the possibly triggered data watchpoint info and store it in
CHILD.
if (the_low_target.stopped_by_watchpoint ())
{
- child->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
+ child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
if (the_low_target.stopped_data_address != NULL)
child->stopped_data_address
current_thread = saved_thread;
}
- return child->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
+ return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
+}
+
+/* Return the ptrace options that we want to try to enable. */
+
+static int
+linux_low_ptrace_options (int attached)
+{
+ int options = 0;
+
+ if (!attached)
+ options |= PTRACE_O_EXITKILL;
+
+ if (report_fork_events)
+ options |= PTRACE_O_TRACEFORK;
+
+ if (report_vfork_events)
+ options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
+
+ if (report_exec_events)
+ options |= PTRACE_O_TRACEEXEC;
+
+ options |= PTRACE_O_TRACESYSGOOD;
+
+ return options;
}
/* Do low-level handling of the event, and check if we should go on
child = find_lwp_pid (pid_to_ptid (lwpid));
+ /* Check for stop events reported by a process we didn't already
+ know about - anything not already in our LWP list.
+
+ If we're expecting to receive stopped processes after
+ fork, vfork, and clone events, then we'll just add the
+ new one to our list and go back to waiting for the event
+ to be reported - the stopped process might be returned
+ from waitpid before or after the event is.
+
+ But note the case of a non-leader thread exec'ing after the
+ leader having exited, and gone from our lists (because
+ check_zombie_leaders deleted it). The non-leader thread
+ changes its tid to the tgid. */
+
+ if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
+ && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
+ {
+ ptid_t child_ptid;
+
+ /* A multi-thread exec after we had seen the leader exiting. */
+ if (debug_threads)
+ {
+ debug_printf ("LLW: Re-adding thread group leader LWP %d"
+ "after exec.\n", lwpid);
+ }
+
+ child_ptid = ptid_build (lwpid, lwpid, 0);
+ child = add_lwp (child_ptid);
+ child->stopped = 1;
+ current_thread = child->thread;
+ }
+
/* If we didn't find a process, one of two things presumably happened:
- A process we started and then detached from has exited. Ignore it.
- A process we are controlling has forked and the new child's stop
{
if (debug_threads)
debug_printf ("LLFE: %d exited.\n", lwpid);
- if (num_lwps (pid_of (thread)) > 1)
- {
-
- /* If there is at least one more LWP, then the exit signal was
- not the end of the debugged application and should be
- ignored. */
- delete_lwp (child);
- return NULL;
- }
- else
+ /* If there is at least one more LWP, then the exit signal was
+ not the end of the debugged application and should be
+ ignored, unless GDB wants to hear about thread exits. */
+ if (report_thread_events
+ || last_thread_of_process_p (pid_of (thread)))
{
- /* This was the last lwp in the process. Since events are
- serialized to GDB core, and we can't report this one
- right now, but GDB core and the other target layers will
- want to be notified about the exit code/signal, leave the
- status pending for the next time we're able to report
- it. */
+ /* Since events are serialized to GDB core and we can't
+ report this one right now, leave the status pending for
+ the next time we're able to report it. */
mark_lwp_dead (child, wstat);
return child;
}
+ else
+ {
+ delete_lwp (child);
+ return NULL;
+ }
}
gdb_assert (WIFSTOPPED (wstat));
{
struct process_info *proc;
- /* Architecture-specific setup after inferior is running. This
- needs to happen after we have attached to the inferior and it
- is stopped for the first time, but before we access any
- inferior registers. */
+ /* Architecture-specific setup after inferior is running. */
proc = find_process_pid (pid_of (thread));
- if (proc->private->new_inferior)
+ if (proc->tdesc == NULL)
{
- struct thread_info *saved_thread;
-
- saved_thread = current_thread;
- current_thread = thread;
-
- the_low_target.arch_setup ();
-
- current_thread = saved_thread;
-
- proc->private->new_inferior = 0;
+ if (proc->attached)
+ {
+ /* This needs to happen after we have attached to the
+ inferior and it is stopped for the first time, but
+ before we access any inferior registers. */
+ linux_arch_setup_thread (thread);
+ }
+ else
+ {
+ /* The process is started, but GDBserver will do
+ architecture-specific setup after the program stops at
+ the first instruction. */
+ child->status_pending_p = 1;
+ child->status_pending = wstat;
+ return child;
+ }
}
}
if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
{
struct process_info *proc = find_process_pid (pid_of (thread));
+ int options = linux_low_ptrace_options (proc->attached);
- linux_enable_event_reporting (lwpid, proc->attached);
+ linux_enable_event_reporting (lwpid, options);
child->must_set_ptrace_flags = 0;
}
+ /* Always update syscall_state, even if it will be filtered later. */
+ if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
+ {
+ child->syscall_state
+ = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
+ ? TARGET_WAITKIND_SYSCALL_RETURN
+ : TARGET_WAITKIND_SYSCALL_ENTRY);
+ }
+ else
+ {
+ /* Almost all other ptrace-stops are known to be outside of system
+ calls, with further exceptions in handle_extended_wait. */
+ child->syscall_state = TARGET_WAITKIND_IGNORE;
+ }
+
/* Be careful to not overwrite stop_pc until
check_stopped_by_breakpoint is called. */
if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
&& linux_is_extended_waitstatus (wstat))
{
child->stop_pc = get_pc (child);
- handle_extended_wait (child, wstat);
- return NULL;
+ if (handle_extended_wait (&child, wstat))
+ {
+ /* The event has been handled, so just return without
+ reporting it. */
+ return NULL;
+ }
}
- if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
- && check_stopped_by_watchpoint (child))
- ;
- else if (WIFSTOPPED (wstat) && wstatus_maybe_breakpoint (wstat))
+ /* Check first whether this was a SW/HW breakpoint before checking
+ watchpoints, because at least s390 can't tell the data address of
+ hardware watchpoint hits, and returns stopped-by-watchpoint as
+ long as there's a watchpoint set. */
+ if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
{
if (check_stopped_by_breakpoint (child))
have_stop_pc = 1;
}
+ /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
+ or hardware watchpoint. Check which is which if we got
+ TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
+ stepped an instruction that triggered a watchpoint. In that
+ case, on some architectures (such as x86), instead of
+ TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
+ the debug registers separately. */
+ if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
+ && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
+ check_stopped_by_watchpoint (child);
+
if (!have_stop_pc)
child->stop_pc = get_pc (child);
{
/* We want to report the stop to the core. Treat the
SIGSTOP as a normal event. */
+ if (debug_threads)
+ debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
+ target_pid_to_str (ptid_of (thread)));
}
else if (stopping_threads != NOT_STOPPING_THREADS)
{
/* Stopping threads. We don't want this SIGSTOP to end up
pending. */
+ if (debug_threads)
+ debug_printf ("LLW: SIGSTOP caught for %s "
+ "while stopping threads.\n",
+ target_pid_to_str (ptid_of (thread)));
return NULL;
}
else
{
- /* Filter out the event. */
+ /* This is a delayed SIGSTOP. Filter out the event. */
+ if (debug_threads)
+ debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
+ child->stepping ? "step" : "continue",
+ target_pid_to_str (ptid_of (thread)));
+
linux_resume_one_lwp (child, child->stepping, 0, NULL);
return NULL;
}
struct lwp_info *lp = get_thread_lwp (thread);
if (lp->stopped
+ && !lp->suspended
&& !lp->status_pending_p
- && thread->last_resume_kind != resume_stop
&& thread->last_status.kind == TARGET_WAITKIND_IGNORE)
{
int step = thread->last_resume_kind == resume_step;
- When a non-leader thread execs, that thread just vanishes
without reporting an exit (so we'd hang if we waited for it
explicitly in that case). The exec event is reported to
- the TGID pid (although we don't currently enable exec
- events). */
+ the TGID pid. */
errno = 0;
ret = my_waitpid (-1, wstatp, options | WNOHANG);
current_thread = event_thread;
- /* Check for thread exit. */
- if (! WIFSTOPPED (*wstatp))
- {
- gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
-
- if (debug_threads)
- debug_printf ("LWP %d is the last lwp of process. "
- "Process %ld exiting.\n",
- pid_of (event_thread), lwpid_of (event_thread));
- return lwpid_of (event_thread);
- }
-
return lwpid_of (event_thread);
}
count_events_callback (struct inferior_list_entry *entry, void *data)
{
struct thread_info *thread = (struct thread_info *) entry;
- int *count = data;
+ struct lwp_info *lp = get_thread_lwp (thread);
+ int *count = (int *) data;
gdb_assert (count != NULL);
/* Count only resumed LWPs that have an event pending. */
if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
- && thread->last_resume_kind != resume_stop
- && thread->status_pending_p)
+ && lp->status_pending_p)
(*count)++;
return 0;
return 0;
}
-/* Select the Nth LWP that has had a SIGTRAP event that should be
- reported to GDB. */
+/* Select the Nth LWP that has had an event. */
static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
struct thread_info *thread = (struct thread_info *) entry;
- int *selector = data;
+ struct lwp_info *lp = get_thread_lwp (thread);
+ int *selector = (int *) data;
gdb_assert (selector != NULL);
/* Select only resumed LWPs that have an event pending. */
- if (thread->last_resume_kind != resume_stop
- && thread->last_status.kind == TARGET_WAITKIND_IGNORE
- && thread->status_pending_p)
+ if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
+ && lp->status_pending_p)
if ((*selector)-- == 0)
return 1;
if (event_thread == NULL)
{
/* No single-stepping LWP. Select one at random, out of those
- which have had SIGTRAP events. */
+ which have had events. */
- /* First see how many SIGTRAP events we have. */
+ /* First see how many events we have. */
find_inferior (&all_threads, count_events_callback, &num_events);
+ gdb_assert (num_events > 0);
- /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
+ /* Now randomly pick a LWP out of those that have had
+ events. */
random_selector = (int)
((num_events * (double) rand ()) / (RAND_MAX + 1.0));
if (lwp == except)
return 0;
- lwp->suspended--;
-
- gdb_assert (lwp->suspended >= 0);
+ lwp_suspended_decr (lwp);
return 0;
}
lwp = get_thread_lwp (current_thread);
/* Lock it. */
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
if (ourstatus.value.sig != GDB_SIGNAL_0
|| current_thread->last_resume_kind == resume_stop)
}
}
-static void async_file_mark (void);
-
/* Convenience function that is called when the kernel reports an
event that is not passed out to GDB. */
return null_ptid;
}
+/* Convenience function that is called when the kernel reports an exit
+ event. This decides whether to report the event to GDB as a
+ process exit event, a thread exit event, or to suppress the
+ event. */
+
+static ptid_t
+filter_exit_event (struct lwp_info *event_child,
+ struct target_waitstatus *ourstatus)
+{
+ struct thread_info *thread = get_lwp_thread (event_child);
+ ptid_t ptid = ptid_of (thread);
+
+ if (!last_thread_of_process_p (pid_of (thread)))
+ {
+ if (report_thread_events)
+ ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
+ else
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+
+ delete_lwp (event_child);
+ }
+ return ptid;
+}
+
+/* Returns 1 if GDB is interested in any event_child syscalls. */
+
+static int
+gdb_catching_syscalls_p (struct lwp_info *event_child)
+{
+ struct thread_info *thread = get_lwp_thread (event_child);
+ struct process_info *proc = get_thread_process (thread);
+
+ return !VEC_empty (int, proc->syscalls_to_catch);
+}
+
+/* Returns 1 if GDB is interested in the event_child syscall.
+ Only to be called when stopped reason is SYSCALL_SIGTRAP. */
+
+static int
+gdb_catch_this_syscall_p (struct lwp_info *event_child)
+{
+ int i, iter;
+ int sysno, sysret;
+ struct thread_info *thread = get_lwp_thread (event_child);
+ struct process_info *proc = get_thread_process (thread);
+
+ if (VEC_empty (int, proc->syscalls_to_catch))
+ return 0;
+
+ if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
+ return 1;
+
+ get_syscall_trapinfo (event_child, &sysno, &sysret);
+ for (i = 0;
+ VEC_iterate (int, proc->syscalls_to_catch, i, iter);
+ i++)
+ if (iter == sysno)
+ return 1;
+
+ return 0;
+}
+
/* Wait for process, returns status. */
static ptid_t
int report_to_gdb;
int trace_event;
int in_step_range;
+ int any_resumed;
if (debug_threads)
{
in_step_range = 0;
ourstatus->kind = TARGET_WAITKIND_IGNORE;
+ /* Find a resumed LWP, if any. */
+ if (find_inferior (&all_threads,
+ status_pending_p_callback,
+ &minus_one_ptid) != NULL)
+ any_resumed = 1;
+ else if ((find_inferior (&all_threads,
+ not_stopped_callback,
+ &minus_one_ptid) != NULL))
+ any_resumed = 1;
+ else
+ any_resumed = 0;
+
if (ptid_equal (step_over_bkpt, null_ptid))
pid = linux_wait_for_event (ptid, &w, options);
else
pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
}
- if (pid == 0)
+ if (pid == 0 || (pid == -1 && !any_resumed))
{
gdb_assert (target_options & TARGET_WNOHANG);
}
}
+ if (ourstatus->kind == TARGET_WAITKIND_EXITED)
+ return filter_exit_event (event_child, ourstatus);
+
return ptid_of (current_thread);
}
+ /* If a step-over executes a breakpoint instruction: in the case of a
+ hardware single step it means a gdb/gdbserver breakpoint had been
+ planted on top of a permanent breakpoint; in the case of a software
+ single step it may just mean that gdbserver hit the reinsert breakpoint.
+ The PC has been adjusted by check_stopped_by_breakpoint to point at
+ the breakpoint address.
+ So in the case of hardware single step, advance the PC manually past
+ the breakpoint; in the case of software single step, advance it only
+ if it is not the reinsert breakpoint we are hitting.
+ This avoids the program getting stuck repeatedly trapping on a
+ permanent breakpoint forever. */
+ if (!ptid_equal (step_over_bkpt, null_ptid)
+ && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ && (event_child->stepping
+ || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
+ {
+ int increment_pc = 0;
+ int breakpoint_kind = 0;
+ CORE_ADDR stop_pc = event_child->stop_pc;
+
+ breakpoint_kind =
+ the_target->breakpoint_kind_from_current_state (&stop_pc);
+ the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
+
+ if (debug_threads)
+ {
+ debug_printf ("step-over for %s executed software breakpoint\n",
+ target_pid_to_str (ptid_of (current_thread)));
+ }
+
+ if (increment_pc != 0)
+ {
+ struct regcache *regcache
+ = get_thread_regcache (current_thread, 1);
+
+ event_child->stop_pc += increment_pc;
+ (*the_low_target.set_pc) (regcache, event_child->stop_pc);
+
+ if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
+ event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
+ }
+ }
+
/* If this event was not handled before, and is not a SIGTRAP, we
report it. SIGILL and SIGSEGV are also treated as traps in case
a breakpoint is inserted at the current PC. If this target does
/* Check whether GDB would be interested in this event. */
+ /* Check if GDB is interested in this syscall. */
+ if (WIFSTOPPED (w)
+ && WSTOPSIG (w) == SYSCALL_SIGTRAP
+ && !gdb_catch_this_syscall_p (event_child))
+ {
+ if (debug_threads)
+ {
+ debug_printf ("Ignored syscall for LWP %ld.\n",
+ lwpid_of (current_thread));
+ }
+
+ linux_resume_one_lwp (event_child, event_child->stepping,
+ 0, NULL);
+ return ignore_event (ourstatus);
+ }
+
/* If GDB is not interested in this signal, don't stop other
threads, and don't report it to GDB. Just resume the inferior
right away. We do this for threading-related signals as well as
any that GDB specifically requested we ignore. But never ignore
SIGSTOP if we sent it ourselves, and do not ignore signals when
stepping - they may require special handling to skip the signal
- handler. */
- /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
- thread library? */
+ handler. Also never ignore signals that could be caused by a
+ breakpoint. */
if (WIFSTOPPED (w)
&& current_thread->last_resume_kind != resume_step
&& (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
- (current_process ()->private->thread_db != NULL
+ (current_process ()->priv->thread_db != NULL
&& (WSTOPSIG (w) == __SIGRTMIN
|| WSTOPSIG (w) == __SIGRTMIN + 1))
||
#endif
(pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
&& !(WSTOPSIG (w) == SIGSTOP
- && current_thread->last_resume_kind == resume_stop))))
+ && current_thread->last_resume_kind == resume_stop)
+ && !linux_wstatus_maybe_breakpoint (w))))
{
siginfo_t info, *info_p;
info_p = &info;
else
info_p = NULL;
- linux_resume_one_lwp (event_child, event_child->stepping,
- WSTOPSIG (w), info_p);
+
+ if (step_over_finished)
+ {
+ /* We cancelled this thread's step-over above. We still
+ need to unsuspend all other LWPs, and set them back
+ running again while the signal handler runs. */
+ unsuspend_all_lwps (event_child);
+
+ /* Enqueue the pending signal info so that proceed_all_lwps
+ doesn't lose it. */
+ enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
+
+ proceed_all_lwps ();
+ }
+ else
+ {
+ linux_resume_one_lwp (event_child, event_child->stepping,
+ WSTOPSIG (w), info_p);
+ }
return ignore_event (ourstatus);
}
do, we're be able to handle GDB breakpoints on top of internal
breakpoints, by handling the internal breakpoint and still
reporting the event to GDB. If we don't, we're out of luck, GDB
- won't see the breakpoint hit. */
+ won't see the breakpoint hit. If we see a single-step event but
+ the thread should be continuing, don't pass the trap to gdb.
+ That indicates that we had previously finished a single-step but
+ left the single-step pending -- see
+ complete_ongoing_step_over. */
report_to_gdb = (!maybe_internal_trap
|| (current_thread->last_resume_kind == resume_step
&& !in_step_range)
- || event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT
- || (!step_over_finished && !in_step_range
- && !bp_explains_trap && !trace_event)
+ || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
+ || (!in_step_range
+ && !bp_explains_trap
+ && !trace_event
+ && !step_over_finished
+ && !(current_thread->last_resume_kind == resume_continue
+ && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
|| (gdb_breakpoint_here (event_child->stop_pc)
&& gdb_condition_true_at_breakpoint (event_child->stop_pc)
- && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
+ && gdb_no_commands_at_breakpoint (event_child->stop_pc))
+ || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
run_breakpoint_commands (event_child->stop_pc);
if (debug_threads)
{
+ if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ {
+ char *str;
+
+ str = target_waitstatus_to_string (&event_child->waitstatus);
+ debug_printf ("LWP %ld: extended event with waitstatus %s\n",
+ lwpid_of (get_lwp_thread (event_child)), str);
+ xfree (str);
+ }
if (current_thread->last_resume_kind == resume_step)
{
if (event_child->step_range_start == event_child->step_range_end)
else if (!lwp_in_step_range (event_child))
debug_printf ("Out of step range, reporting event.\n");
}
- if (event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
+ if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
debug_printf ("Stopped by watchpoint.\n");
else if (gdb_breakpoint_here (event_child->stop_pc))
debug_printf ("Stopped by GDB breakpoint.\n");
unstop_all_lwps (1, event_child);
}
- ourstatus->kind = TARGET_WAITKIND_STOPPED;
+ if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ {
+ /* If the reported event is an exit, fork, vfork or exec, let
+ GDB know. */
+ *ourstatus = event_child->waitstatus;
+ /* Clear the event lwp's waitstatus since we handled it already. */
+ event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
+ }
+ else
+ ourstatus->kind = TARGET_WAITKIND_STOPPED;
/* Now that we've selected our final event LWP, un-adjust its PC if
- it was a software breakpoint. */
- if (event_child->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
+ it was a software breakpoint, and the client doesn't know we can
+ adjust the breakpoint ourselves. */
+ if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ && !swbreak_feature)
{
int decr_pc = the_low_target.decr_pc_after_break;
}
}
- if (current_thread->last_resume_kind == resume_stop
- && WSTOPSIG (w) == SIGSTOP)
+ if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
+ {
+ int sysret;
+
+ get_syscall_trapinfo (event_child,
+ &ourstatus->value.syscall_number, &sysret);
+ ourstatus->kind = event_child->syscall_state;
+ }
+ else if (current_thread->last_resume_kind == resume_stop
+ && WSTOPSIG (w) == SIGSTOP)
{
/* A thread that has been requested to stop by GDB with vCont;t,
and it stopped cleanly, so report as SIG0. The use of
but, it stopped for other reasons. */
ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
}
- else
+ else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
{
ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
}
debug_exit ();
}
+ if (ourstatus->kind == TARGET_WAITKIND_EXITED)
+ return filter_exit_event (event_child, ourstatus);
+
return ptid_of (current_thread);
}
static int
kill_lwp (unsigned long lwpid, int signo)
{
- /* Use tkill, if possible, in case we are using nptl threads. If tkill
- fails, then we are not using nptl threads and we should be using kill. */
-
-#ifdef __NR_tkill
- {
- static int tkill_failed;
-
- if (!tkill_failed)
- {
- int ret;
-
- errno = 0;
- ret = syscall (__NR_tkill, lwpid, signo);
- if (errno != ENOSYS)
- return ret;
- tkill_failed = 1;
- }
- }
-#endif
+ int ret;
- return kill (lwpid, signo);
+ errno = 0;
+ ret = syscall (__NR_tkill, lwpid, signo);
+ if (errno == ENOSYS)
+ {
+ /* If tkill fails, then we are not using nptl threads, a
+ configuration we no longer support. */
+ perror_with_name (("tkill"));
+ }
+ return ret;
}
void
if (lwp == except)
return 0;
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
return send_sigstop_callback (entry, except);
}
static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
- /* It's dead, really. */
- lwp->dead = 1;
-
/* Store the exit status for later. */
lwp->status_pending_p = 1;
lwp->status_pending = wstat;
+ /* Store in waitstatus as well, as there's nothing else to process
+ for this event. */
+ if (WIFEXITED (wstat))
+ {
+ lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
+ lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
+ }
+ else if (WIFSIGNALED (wstat))
+ {
+ lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
+ lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
+ }
+
/* Prevent trying to stop it. */
lwp->stopped = 1;
lwp->stop_expected = 0;
}
+/* Return true if LWP has exited already, and has a pending exit event
+ to report to GDB. */
+
+static int
+lwp_is_marked_dead (struct lwp_info *lwp)
+{
+ return (lwp->status_pending_p
+ && (WIFEXITED (lwp->status_pending)
+ || WIFSIGNALED (lwp->status_pending)));
+}
+
/* Wait for all children to stop for the SIGSTOPs we just queued. */
static void
if (debug_threads)
debug_printf ("Previously current thread died.\n");
- if (non_stop)
- {
- /* We can't change the current inferior behind GDB's back,
- otherwise, a subsequent command may apply to the wrong
- process. */
- current_thread = NULL;
- }
- else
- {
- /* Set a valid thread as current. */
- set_desired_thread (0);
- }
+ /* We can't change the current inferior behind GDB's back,
+ otherwise, a subsequent command may apply to the wrong
+ process. */
+ current_thread = NULL;
}
}
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- gdb_assert (lwp->suspended == 0);
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld is suspended, suspended=%d\n",
+ lwpid_of (thread), lwp->suspended);
+ }
gdb_assert (lwp->stopped);
/* Allow debugging the jump pad, gdb_collect, etc.. */
return (supports_fast_tracepoints ()
&& agent_loaded_p ()
&& (gdb_breakpoint_here (lwp->stop_pc)
- || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT
+ || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
|| thread->last_resume_kind == resume_step)
&& linux_fast_tracepoint_collecting (lwp, NULL));
}
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
struct thread_info *thread = (struct thread_info *) entry;
+ struct thread_info *saved_thread;
struct lwp_info *lwp = get_thread_lwp (thread);
int *wstat;
- gdb_assert (lwp->suspended == 0);
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld is suspended, suspended=%d\n",
+ lwpid_of (thread), lwp->suspended);
+ }
gdb_assert (lwp->stopped);
+ /* For gdb_breakpoint_here. */
+ saved_thread = current_thread;
+ current_thread = thread;
+
wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
/* Allow debugging the jump pad, gdb_collect, etc. */
if (!gdb_breakpoint_here (lwp->stop_pc)
- && lwp->stop_reason != LWP_STOPPED_BY_WATCHPOINT
+ && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
&& thread->last_resume_kind != resume_step
&& maybe_move_out_of_jump_pad (lwp, wstat))
{
linux_resume_one_lwp (lwp, 0, 0, NULL);
}
else
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
+
+ current_thread = saved_thread;
}
static int
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- if (lwp->dead)
+ if (lwp_is_marked_dead (lwp))
return 0;
if (lwp->stopped)
return 0;
}
}
-/* Resume execution of the inferior process.
- If STEP is nonzero, single-step it.
- If SIGNAL is nonzero, give it that signal. */
+/* Enqueue one signal in the chain of signals which need to be
+ delivered to this process on next resume. */
static void
-linux_resume_one_lwp (struct lwp_info *lwp,
- int step, int signal, siginfo_t *info)
+enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
+{
+ struct pending_signals *p_sig = XNEW (struct pending_signals);
+
+ p_sig->prev = lwp->pending_signals;
+ p_sig->signal = signal;
+ if (info == NULL)
+ memset (&p_sig->info, 0, sizeof (siginfo_t));
+ else
+ memcpy (&p_sig->info, info, sizeof (siginfo_t));
+ lwp->pending_signals = p_sig;
+}
+
+/* Install breakpoints for software single stepping. */
+
+static void
+install_software_single_step_breakpoints (struct lwp_info *lwp)
+{
+ int i;
+ CORE_ADDR pc;
+ struct regcache *regcache = get_thread_regcache (current_thread, 1);
+ VEC (CORE_ADDR) *next_pcs = NULL;
+ struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
+
+ pc = get_pc (lwp);
+ next_pcs = (*the_low_target.get_next_pcs) (pc, regcache);
+
+ for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
+ set_reinsert_breakpoint (pc);
+
+ do_cleanups (old_chain);
+}
+
+/* Single step via hardware or software single step.
+ Return 1 if hardware single stepping, 0 if software single stepping
+ or can't single step. */
+
+static int
+single_step (struct lwp_info* lwp)
+{
+ int step = 0;
+
+ if (can_hardware_single_step ())
+ {
+ step = 1;
+ }
+ else if (can_software_single_step ())
+ {
+ install_software_single_step_breakpoints (lwp);
+ step = 0;
+ }
+ else
+ {
+ if (debug_threads)
+ debug_printf ("stepping is not implemented on this target");
+ }
+
+ return step;
+}
+
+/* Resume execution of LWP. If STEP is nonzero, single-step it. If
+ SIGNAL is nonzero, give it that signal. */
+
+static void
+linux_resume_one_lwp_throw (struct lwp_info *lwp,
+ int step, int signal, siginfo_t *info)
{
struct thread_info *thread = get_lwp_thread (lwp);
struct thread_info *saved_thread;
int fast_tp_collecting;
+ int ptrace_request;
+ struct process_info *proc = get_thread_process (thread);
+
+ /* Note that target description may not be initialised
+ (proc->tdesc == NULL) at this point because the program hasn't
+ stopped at the first instruction yet. It means GDBserver skips
+ the extra traps from the wrapper program (see option --wrapper).
+ Code in this function that requires register access should be
+ guarded by proc->tdesc == NULL or something else. */
if (lwp->stopped == 0)
return;
+ gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
+
fast_tp_collecting = lwp->collecting_fast_tracepoint;
gdb_assert (!stabilizing_threads || fast_tp_collecting);
/* Cancel actions that rely on GDB not changing the PC (e.g., the
user used the "jump" command, or "set $pc = foo"). */
- if (lwp->stop_pc != get_pc (lwp))
+ if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
{
/* Collecting 'while-stepping' actions doesn't make sense
anymore. */
|| lwp->bp_reinsert != 0
|| fast_tp_collecting))
{
- struct pending_signals *p_sig;
- p_sig = xmalloc (sizeof (*p_sig));
+ struct pending_signals *p_sig = XNEW (struct pending_signals);
+
p_sig->prev = lwp->pending_signals;
p_sig->signal = signal;
if (info == NULL)
address, continue, and carry on catching this while-stepping
action only when that breakpoint is hit. A future
enhancement. */
- if (thread->while_stepping != NULL
- && can_hardware_single_step ())
+ if (thread->while_stepping != NULL)
{
if (debug_threads)
debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
lwpid_of (thread));
- step = 1;
+
+ step = single_step (lwp);
}
- if (the_low_target.get_pc != NULL)
+ if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
{
struct regcache *regcache = get_thread_regcache (current_thread, 1);
regcache_invalidate_thread (thread);
errno = 0;
- lwp->stopped = 0;
- lwp->stop_reason = LWP_STOPPED_BY_NO_REASON;
lwp->stepping = step;
- ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
+ if (step)
+ ptrace_request = PTRACE_SINGLESTEP;
+ else if (gdb_catching_syscalls_p (lwp))
+ ptrace_request = PTRACE_SYSCALL;
+ else
+ ptrace_request = PTRACE_CONT;
+ ptrace (ptrace_request,
+ lwpid_of (thread),
(PTRACE_TYPE_ARG3) 0,
/* Coerce to a uintptr_t first to avoid potential gcc warning
of coercing an 8 byte integer to a 4 byte pointer. */
current_thread = saved_thread;
if (errno)
+ perror_with_name ("resuming thread");
+
+ /* Successfully resumed. Clear state that no longer makes sense,
+ and mark the LWP as running. Must not do this before resuming
+ otherwise if that fails other code will be confused. E.g., we'd
+ later try to stop the LWP and hang forever waiting for a stop
+ status. Note that we must not throw after this is cleared,
+ otherwise handle_zombie_lwp_error would get confused. */
+ lwp->stopped = 0;
+ lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
+}
+
+/* Called when we try to resume a stopped LWP and that errors out. If
+ the LWP is no longer in ptrace-stopped state (meaning it's zombie,
+ or about to become), discard the error, clear any pending status
+ the LWP may have, and return true (we'll collect the exit status
+ soon enough). Otherwise, return false. */
+
+static int
+check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
+{
+ struct thread_info *thread = get_lwp_thread (lp);
+
+ /* If we get an error after resuming the LWP successfully, we'd
+ confuse !T state for the LWP being gone. */
+ gdb_assert (lp->stopped);
+
+ /* We can't just check whether the LWP is in 'Z (Zombie)' state,
+ because even if ptrace failed with ESRCH, the tracee may be "not
+ yet fully dead", but already refusing ptrace requests. In that
+ case the tracee has 'R (Running)' state for a little bit
+ (observed in Linux 3.18). See also the note on ESRCH in the
+ ptrace(2) man page. Instead, check whether the LWP has any state
+ other than ptrace-stopped. */
+
+ /* Don't assume anything if /proc/PID/status can't be read. */
+ if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
{
- /* ESRCH from ptrace either means that the thread was already
- running (an error) or that it is gone (a race condition). If
- it's gone, we will get a notification the next time we wait,
- so we can ignore the error. We could differentiate these
- two, but it's tricky without waiting; the thread still exists
- as a zombie, so sending it signal 0 would succeed. So just
- ignore ESRCH. */
- if (errno == ESRCH)
- return;
+ lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
+ lp->status_pending_p = 0;
+ return 1;
+ }
+ return 0;
+}
- perror_with_name ("ptrace");
+/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
+ disappears while we try to resume it. */
+
+static void
+linux_resume_one_lwp (struct lwp_info *lwp,
+ int step, int signal, siginfo_t *info)
+{
+ TRY
+ {
+ linux_resume_one_lwp_throw (lwp, step, signal, info);
+ }
+ CATCH (ex, RETURN_MASK_ERROR)
+ {
+ if (!check_ptrace_stopped_lwp_gone (lwp))
+ throw_exception (ex);
}
+ END_CATCH
}
struct thread_resume_array
int ndx;
struct thread_resume_array *r;
- r = arg;
+ r = (struct thread_resume_array *) arg;
for (ndx = 0; ndx < r->n; ndx++)
{
struct lwp_info *lwp = get_thread_lwp (thread);
struct thread_info *saved_thread;
CORE_ADDR pc;
+ struct process_info *proc = get_thread_process (thread);
+
+ /* GDBserver is skipping the extra traps from the wrapper program,
+ so there is no need to do a step-over. */
+ if (proc->tdesc == NULL)
+ return 0;
/* LWPs which will not be resumed are not interesting, because we
might not wait for them next time through linux_wait. */
lwpid_of (thread));
stop_all_lwps (1, lwp);
- gdb_assert (lwp->suspended == 0);
+
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld suspended=%d\n", lwpid_of (thread),
+ lwp->suspended);
+ }
if (debug_threads)
debug_printf ("Done stopping all threads for step-over.\n");
lwp->bp_reinsert = pc;
uninsert_breakpoints_at (pc);
- uninsert_fast_tracepoint_jumps_at (pc);
-
- if (can_hardware_single_step ())
- {
- step = 1;
- }
- else
- {
- CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
- set_reinsert_breakpoint (raddr);
- step = 0;
- }
+ uninsert_fast_tracepoint_jumps_at (pc);
+
+ step = single_step (lwp);
current_thread = saved_thread;
return 0;
}
+/* If there's a step over in progress, wait until all threads stop
+ (that is, until the stepping thread finishes its step), and
+ unsuspend all lwps. The stepping thread ends with its status
+ pending, which is processed later when we get back to processing
+ events. */
+
+static void
+complete_ongoing_step_over (void)
+{
+ if (!ptid_equal (step_over_bkpt, null_ptid))
+ {
+ struct lwp_info *lwp;
+ int wstat;
+ int ret;
+
+ if (debug_threads)
+ debug_printf ("detach: step over in progress, finish it first\n");
+
+ /* Passing NULL_PTID as filter indicates we want all events to
+ be left pending. Eventually this returns when there are no
+ unwaited-for children left. */
+ ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
+ &wstat, __WALL);
+ gdb_assert (ret == -1);
+
+ lwp = find_lwp_pid (step_over_bkpt);
+ if (lwp != NULL)
+ finish_step_over (lwp);
+ step_over_bkpt = null_ptid;
+ unsuspend_all_lwps (lwp);
+ }
+}
+
/* This function is called once per thread. We check the thread's resume
request, which will tell us whether to resume, step, or leave the thread
stopped; and what signal, if any, it should be sent.
}
/* If this thread which is about to be resumed has a pending status,
- then don't resume any threads - we can just report the pending
- status. Make sure to queue any signals that would otherwise be
- sent. In all-stop mode, we do this decision based on if *any*
- thread has a pending status. If there's a thread that needs the
- step-over-breakpoint dance, then don't resume any other thread
- but that particular one. */
- leave_pending = (lwp->status_pending_p || leave_all_stopped);
+ then don't resume it - we can just report the pending status.
+ Likewise if it is suspended, because e.g., another thread is
+ stepping past a breakpoint. Make sure to queue any signals that
+ would otherwise be sent. In all-stop mode, we do this decision
+ based on if *any* thread has a pending status. If there's a
+ thread that needs the step-over-breakpoint dance, then don't
+ resume any other thread but that particular one. */
+ leave_pending = (lwp->suspended
+ || lwp->status_pending_p
+ || leave_all_stopped);
if (!leave_pending)
{
/* If we have a new signal, enqueue the signal. */
if (lwp->resume->sig != 0)
{
- struct pending_signals *p_sig;
- p_sig = xmalloc (sizeof (*p_sig));
+ struct pending_signals *p_sig = XCNEW (struct pending_signals);
+
p_sig->prev = lwp->pending_signals;
p_sig->signal = lwp->resume->sig;
- memset (&p_sig->info, 0, sizeof (siginfo_t));
/* If this is the same signal we were previously stopped by,
make sure to queue its siginfo. We can ignore the return
debug_printf ("linux_resume done\n");
debug_exit ();
}
+
+ /* We may have events that were pending that can/should be sent to
+ the client now. Trigger a linux_wait call. */
+ if (target_is_async_p ())
+ async_file_mark ();
}
/* This function is called once per thread. We check the thread's
send_sigstop (lwp);
}
- step = thread->last_resume_kind == resume_step;
+ if (thread->last_resume_kind == resume_step)
+ {
+ if (debug_threads)
+ debug_printf (" stepping LWP %ld, client wants it stepping\n",
+ lwpid_of (thread));
+ step = 1;
+ }
+ else if (lwp->bp_reinsert != 0)
+ {
+ if (debug_threads)
+ debug_printf (" stepping LWP %ld, reinsert set\n",
+ lwpid_of (thread));
+ step = 1;
+ }
+ else
+ step = 0;
+
linux_resume_one_lwp (lwp, step, 0, NULL);
return 0;
}
if (lwp == except)
return 0;
- lwp->suspended--;
- gdb_assert (lwp->suspended >= 0);
+ lwp_suspended_decr (lwp);
return proceed_one_lwp (entry, except);
}
dr_offset = regset - info->regsets;
if (info->disabled_regsets == NULL)
- info->disabled_regsets = xcalloc (1, info->num_regsets);
+ info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
info->disabled_regsets[dr_offset] = 1;
}
#ifdef HAVE_LINUX_USRREGS
-int
+static int
register_addr (const struct usrregs_info *usrregs, int regnum)
{
int addr;
size = ((register_size (regcache->tdesc, regno)
+ sizeof (PTRACE_XFER_TYPE) - 1)
& -sizeof (PTRACE_XFER_TYPE));
- buf = alloca (size);
+ buf = (char *) alloca (size);
pid = lwpid_of (current_thread);
for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
size = ((register_size (regcache->tdesc, regno)
+ sizeof (PTRACE_XFER_TYPE) - 1)
& -sizeof (PTRACE_XFER_TYPE));
- buf = alloca (size);
+ buf = (char *) alloca (size);
memset (buf, 0, size);
if (the_low_target.collect_ptrace_register)
#endif
-void
+static void
linux_fetch_registers (struct regcache *regcache, int regno)
{
int use_regsets;
}
}
-void
+static void
linux_store_registers (struct regcache *regcache, int regno)
{
int use_regsets;
count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
/ sizeof (PTRACE_XFER_TYPE));
/* Allocate buffer of that many longwords. */
- buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
+ buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
/* Read all the longwords */
errno = 0;
/ sizeof (PTRACE_XFER_TYPE);
/* Allocate buffer of that many longwords. */
- register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
- alloca (count * sizeof (PTRACE_XFER_TYPE));
+ register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
int pid = lwpid_of (current_thread);
if (debug_threads)
{
/* Dump up to four bytes. */
- unsigned int val = * (unsigned int *) myaddr;
- if (len == 1)
- val = val & 0xff;
- else if (len == 2)
- val = val & 0xffff;
- else if (len == 3)
- val = val & 0xffffff;
- debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
- val, (long)memaddr);
+ char str[4 * 2 + 1];
+ char *p = str;
+ int dump = len < 4 ? len : 4;
+
+ for (i = 0; i < dump; i++)
+ {
+ sprintf (p, "%02x", myaddr[i]);
+ p += 2;
+ }
+ *p = '\0';
+
+ debug_printf ("Writing %s to 0x%08lx in process %d\n",
+ str, (long) memaddr, pid);
}
/* Fill start and end extra bytes of buffer with existing memory data. */
#ifdef USE_THREAD_DB
struct process_info *proc = current_process ();
- if (proc->private->thread_db != NULL)
+ if (proc->priv->thread_db != NULL)
return;
- /* If the kernel supports tracing clones, then we don't need to
- use the magic thread event breakpoint to learn about
- threads. */
- thread_db_init (!linux_supports_traceclone ());
+ thread_db_init ();
#endif
}
linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
int size, struct raw_breakpoint *bp)
{
- if (the_low_target.insert_point != NULL)
+ if (type == raw_bkpt_type_sw)
+ return insert_memory_breakpoint (bp);
+ else if (the_low_target.insert_point != NULL)
return the_low_target.insert_point (type, addr, size, bp);
else
/* Unsupported (see target.h). */
linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
int size, struct raw_breakpoint *bp)
{
- if (the_low_target.remove_point != NULL)
+ if (type == raw_bkpt_type_sw)
+ return remove_memory_breakpoint (bp);
+ else if (the_low_target.remove_point != NULL)
return the_low_target.remove_point (type, addr, size, bp);
else
/* Unsupported (see target.h). */
return 1;
}
+/* Implement the to_stopped_by_sw_breakpoint target_ops
+ method. Nonzero iff the current LWP's last recorded stop
+ reason was a software breakpoint hit. */
+
+static int
+linux_stopped_by_sw_breakpoint (void)
+{
+ struct lwp_info *lwp = get_thread_lwp (current_thread);
+
+ return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
+}
+
+/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
+ method. True when USE_SIGTRAP_SIGINFO is nonzero, i.e. when
+ siginfo-based stop-reason detection is available. */
+
+static int
+linux_supports_stopped_by_sw_breakpoint (void)
+{
+ return USE_SIGTRAP_SIGINFO;
+}
+
+/* Implement the to_stopped_by_hw_breakpoint target_ops
+ method. Nonzero iff the current LWP's last recorded stop
+ reason was a hardware breakpoint hit. */
+
+static int
+linux_stopped_by_hw_breakpoint (void)
+{
+ struct lwp_info *lwp = get_thread_lwp (current_thread);
+
+ return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
+}
+
+/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
+ method. True when USE_SIGTRAP_SIGINFO is nonzero, i.e. when
+ siginfo-based stop-reason detection is available. */
+
+static int
+linux_supports_stopped_by_hw_breakpoint (void)
+{
+ return USE_SIGTRAP_SIGINFO;
+}
+
+/* Implement the supports_hardware_single_step target_ops method.
+ Delegates to can_hardware_single_step. */
+
+static int
+linux_supports_hardware_single_step (void)
+{
+ return can_hardware_single_step ();
+}
+
+/* Implement the supports_software_single_step target_ops method.
+ Delegates to can_software_single_step. */
+
+static int
+linux_supports_software_single_step (void)
+{
+ return can_software_single_step ();
+}
+
+/* Implement the to_stopped_by_watchpoint target_ops method.
+ Nonzero iff the current LWP's last recorded stop reason was a
+ watchpoint hit. */
static int
linux_stopped_by_watchpoint (void)
{
struct lwp_info *lwp = get_thread_lwp (current_thread);
- return lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
+ return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
static CORE_ADDR
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
unsigned long text, text_end, data;
- int pid = lwpid_of (get_thread_lwp (current_thread));
+ int pid = lwpid_of (current_thread);
errno = 0;
return 1;
}
+/* Check if fork events are supported. Delegates to the
+ linux_supports_tracefork probe. */
+
+static int
+linux_supports_fork_events (void)
+{
+ return linux_supports_tracefork ();
+}
+
+/* Check if vfork events are supported. Delegates to the same
+ linux_supports_tracefork probe as fork events. */
+
+static int
+linux_supports_vfork_events (void)
+{
+ return linux_supports_tracefork ();
+}
+
+/* Check if exec events are supported. Delegates to the
+ linux_supports_traceexec probe. */
+
+static int
+linux_supports_exec_events (void)
+{
+ return linux_supports_traceexec ();
+}
+
+/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
+ options for the specified lwp. ARGS is unused. Always returns
+ 0. */
+
+static int
+reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
+ void *args)
+{
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
+
+ if (!lwp->stopped)
+ {
+ /* Stop the lwp so we can modify its ptrace options. NOTE(review):
+ presumably the must_set_ptrace_flags flag causes the options to
+ be applied when the stop is later processed — confirm in the
+ wait/stop handling code. */
+ lwp->must_set_ptrace_flags = 1;
+ linux_stop_lwp (lwp);
+ }
+ else
+ {
+ /* Already stopped; go ahead and set the ptrace options. */
+ struct process_info *proc = find_process_pid (pid_of (thread));
+ int options = linux_low_ptrace_options (proc->attached);
+
+ linux_enable_event_reporting (lwpid_of (thread), options);
+ lwp->must_set_ptrace_flags = 0;
+ }
+
+ return 0;
+}
+
+/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
+ ptrace flags for all inferiors. This is in case the new GDB connection
+ doesn't support the same set of events that the previous one did. */
+
+static void
+linux_handle_new_gdb_connection (void)
+{
+ pid_t pid;
+
+ /* Request that all the lwps reset their ptrace options. PID is
+ only a dummy argument here; the callback ignores its ARGS
+ parameter. */
+ find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
+}
+
static int
linux_supports_disable_randomization (void)
{
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
+/* Target hook for qSupported processing: forward the FEATURES array
+ of COUNT entries to the low target, if it implements
+ process_qsupported. */
static void
-linux_process_qsupported (const char *query)
+linux_process_qsupported (char **features, int count)
{
if (the_low_target.process_qsupported != NULL)
- the_low_target.process_qsupported (query);
+ the_low_target.process_qsupported (features, count);
+}
+
+/* Check if syscall catchpoints are supported: the low target must
+ provide get_syscall_trapinfo and the linux_supports_tracesysgood
+ probe must succeed. */
+static int
+linux_supports_catch_syscall (void)
+{
+ return (the_low_target.get_syscall_trapinfo != NULL
+ && linux_supports_tracesysgood ());
+}
static int
return 0;
gdb_assert (num_phdr < 100); /* Basic sanity check. */
- phdr_buf = alloca (num_phdr * phdr_size);
+ phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
return 0;
if (is_elf64)
{
Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
-#ifdef DT_MIPS_RLD_MAP
+#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
union
{
Elf64_Xword map;
unsigned char buf[sizeof (Elf64_Xword)];
}
rld_map;
-
+#endif
+#ifdef DT_MIPS_RLD_MAP
if (dyn->d_tag == DT_MIPS_RLD_MAP)
{
if (linux_read_memory (dyn->d_un.d_val,
break;
}
#endif /* DT_MIPS_RLD_MAP */
+#ifdef DT_MIPS_RLD_MAP_REL
+ if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
+ {
+ if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP_REL */
if (dyn->d_tag == DT_DEBUG && map == -1)
map = dyn->d_un.d_val;
else
{
Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
-#ifdef DT_MIPS_RLD_MAP
+#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
union
{
Elf32_Word map;
unsigned char buf[sizeof (Elf32_Word)];
}
rld_map;
-
+#endif
+#ifdef DT_MIPS_RLD_MAP
if (dyn->d_tag == DT_MIPS_RLD_MAP)
{
if (linux_read_memory (dyn->d_un.d_val,
break;
}
#endif /* DT_MIPS_RLD_MAP */
+#ifdef DT_MIPS_RLD_MAP_REL
+ if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
+ {
+ if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP_REL */
if (dyn->d_tag == DT_DEBUG && map == -1)
map = dyn->d_un.d_val;
{
char *document;
unsigned document_len;
- struct process_info_private *const priv = current_process ()->private;
+ struct process_info_private *const priv = current_process ()->priv;
char filename[PATH_MAX];
int pid, is_elf64;
break;
len = sep - annex;
- if (len == 5 && strncmp (annex, "start", 5) == 0)
+ if (len == 5 && startswith (annex, "start"))
addrp = &lm_addr;
- else if (len == 4 && strncmp (annex, "prev", 4) == 0)
+ else if (len == 4 && startswith (annex, "prev"))
addrp = &lm_prev;
else
{
}
}
- document = xmalloc (allocated);
+ document = (char *) xmalloc (allocated);
strcpy (document, "<library-list-svr4 version=\"1.0\"");
p = document + strlen (document);
/* Expand to guarantee sufficient storage. */
uintptr_t document_len = p - document;
- document = xrealloc (document, 2 * allocated);
+ document = (char *) xrealloc (document, 2 * allocated);
allocated *= 2;
p = document + document_len;
}
#ifdef HAVE_LINUX_BTRACE
-/* See to_enable_btrace target method. */
+/* See to_disable_btrace target method. */
-static struct btrace_target_info *
-linux_low_enable_btrace (ptid_t ptid)
+static int
+linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
- struct btrace_target_info *tinfo;
+ enum btrace_error err;
+
+ err = linux_disable_btrace (tinfo);
+ return (err == BTRACE_ERR_NONE ? 0 : -1);
+}
+
+/* Encode an Intel Processor Trace configuration. */
- tinfo = linux_enable_btrace (ptid);
+static void
+linux_low_encode_pt_config (struct buffer *buffer,
+ const struct btrace_data_pt_config *config)
+{
+ buffer_grow_str (buffer, "<pt-config>\n");
- if (tinfo != NULL)
+ switch (config->cpu.vendor)
{
- struct thread_info *thread = find_thread_ptid (ptid);
- struct regcache *regcache = get_thread_regcache (thread, 0);
+ case CV_INTEL:
+ buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
+ "model=\"%u\" stepping=\"%u\"/>\n",
+ config->cpu.family, config->cpu.model,
+ config->cpu.stepping);
+ break;
- tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
+ default:
+ break;
}
- return tinfo;
+ buffer_grow_str (buffer, "</pt-config>\n");
}
-/* See to_disable_btrace target method. */
+/* Encode a raw buffer. */
-static int
-linux_low_disable_btrace (struct btrace_target_info *tinfo)
+static void
+linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
+ unsigned int size)
{
- enum btrace_error err;
+ if (size == 0)
+ return;
- err = linux_disable_btrace (tinfo);
- return (err == BTRACE_ERR_NONE ? 0 : -1);
+ /* We use hex encoding - see common/rsp-low.h. */
+ buffer_grow_str (buffer, "<raw>\n");
+
+ while (size-- > 0)
+ {
+ char elem[2];
+
+ elem[0] = tohex ((*data >> 4) & 0xf);
+ elem[1] = tohex (*data++ & 0xf);
+
+ buffer_grow (buffer, elem, 2);
+ }
+
+ buffer_grow_str (buffer, "</raw>\n");
}
/* See to_read_btrace target method. */
static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
- int type)
+ enum btrace_read_type type)
{
struct btrace_data btrace;
struct btrace_block *block;
else
buffer_grow_str0 (buffer, "E.Generic Error.");
- btrace_data_fini (&btrace);
- return -1;
+ goto err;
}
switch (btrace.format)
{
case BTRACE_FORMAT_NONE:
buffer_grow_str0 (buffer, "E.No Trace.");
- break;
+ goto err;
case BTRACE_FORMAT_BTS:
buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
buffer_grow_str0 (buffer, "</btrace>\n");
break;
- default:
- buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
+ case BTRACE_FORMAT_PT:
+ buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
+ buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
+ buffer_grow_str (buffer, "<pt>\n");
+
+ linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
+
+ linux_low_encode_raw (buffer, btrace.variant.pt.data,
+ btrace.variant.pt.size);
+
+ buffer_grow_str (buffer, "</pt>\n");
+ buffer_grow_str0 (buffer, "</btrace>\n");
+ break;
- btrace_data_fini (&btrace);
- return -1;
+ default:
+ buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
+ goto err;
}
btrace_data_fini (&btrace);
return 0;
+
+err:
+ btrace_data_fini (&btrace);
+ return -1;
+}
+
+/* See to_btrace_conf target method. Write the branch trace
+ configuration of TINFO as a <btrace-conf> XML document into
+ BUFFER. Always returns 0. */
+
+static int
+linux_low_btrace_conf (const struct btrace_target_info *tinfo,
+ struct buffer *buffer)
+{
+ const struct btrace_config *conf;
+
+ buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
+ buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
+
+ conf = linux_btrace_conf (tinfo);
+ if (conf != NULL)
+ {
+ switch (conf->format)
+ {
+ /* An unknown or NONE format simply produces an empty
+ configuration element. */
+ case BTRACE_FORMAT_NONE:
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ buffer_xml_printf (buffer, "<bts");
+ buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
+ buffer_xml_printf (buffer, " />\n");
+ break;
+
+ case BTRACE_FORMAT_PT:
+ buffer_xml_printf (buffer, "<pt");
+ buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
+ buffer_xml_printf (buffer, "/>\n");
+ break;
+ }
+ }
+
+ buffer_grow_str0 (buffer, "</btrace-conf>\n");
+ return 0;
+}
#endif /* HAVE_LINUX_BTRACE */
+/* See nat/linux-nat.h. Return the ptid of the current thread. */
+
+ptid_t
+current_lwp_ptid (void)
+{
+ return ptid_of (current_thread);
+}
+
+/* Implementation of the target_ops method "breakpoint_kind_from_pc".
+ Prefer the low target's hook; otherwise fall back to the generic
+ default implementation. */
+
+static int
+linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
+{
+ if (the_low_target.breakpoint_kind_from_pc != NULL)
+ return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
+ else
+ return default_breakpoint_kind_from_pc (pcptr);
+}
+
+/* Implementation of the target_ops method "sw_breakpoint_from_kind".
+ The low target must provide this hook (asserted). SIZE is an out
+ parameter filled in by the hook. */
+
+static const gdb_byte *
+linux_sw_breakpoint_from_kind (int kind, int *size)
+{
+ gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
+
+ return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
+}
+
+/* Implementation of the target_ops method
+ "breakpoint_kind_from_current_state". Falls back to
+ linux_breakpoint_kind_from_pc when the low target does not
+ provide the hook. */
+
+static int
+linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
+{
+ if (the_low_target.breakpoint_kind_from_current_state != NULL)
+ return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
+ else
+ return linux_breakpoint_kind_from_pc (pcptr);
+}
+
+/* Default implementation of linux_target_ops method "set_pc" for
+ 32-bit pc register which is literally named "pc". PC is
+ truncated to 32 bits before being written. */
+
+void
+linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
+{
+ uint32_t newpc = pc;
+
+ supply_register_by_name (regcache, "pc", &newpc);
+}
+
+/* Default implementation of linux_target_ops method "get_pc" for
+ 32-bit pc register which is literally named "pc". Returns the
+ register value zero-extended to CORE_ADDR. */
+
+CORE_ADDR
+linux_get_pc_32bit (struct regcache *regcache)
+{
+ uint32_t pc;
+
+ collect_register_by_name (regcache, "pc", &pc);
+ if (debug_threads)
+ debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
+ return pc;
+}
+
+/* Default implementation of linux_target_ops method "set_pc" for
+ 64-bit pc register which is literally named "pc". */
+
+void
+linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
+{
+ uint64_t newpc = pc;
+
+ supply_register_by_name (regcache, "pc", &newpc);
+}
+
+/* Default implementation of linux_target_ops method "get_pc" for
+ 64-bit pc register which is literally named "pc". */
+
+CORE_ADDR
+linux_get_pc_64bit (struct regcache *regcache)
+{
+ uint64_t pc;
+
+ collect_register_by_name (regcache, "pc", &pc);
+ if (debug_threads)
+ debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
+ return pc;
+}
+
+
static struct target_ops linux_target_ops = {
linux_create_inferior,
+ linux_post_create_inferior,
linux_attach,
linux_kill,
linux_detach,
linux_supports_z_point_type,
linux_insert_point,
linux_remove_point,
+ linux_stopped_by_sw_breakpoint,
+ linux_supports_stopped_by_sw_breakpoint,
+ linux_stopped_by_hw_breakpoint,
+ linux_supports_stopped_by_hw_breakpoint,
+ linux_supports_hardware_single_step,
linux_stopped_by_watchpoint,
linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
linux_async,
linux_start_non_stop,
linux_supports_multi_process,
+ linux_supports_fork_events,
+ linux_supports_vfork_events,
+ linux_supports_exec_events,
+ linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
thread_db_handle_monitor_command,
#else
linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
linux_supports_btrace,
- linux_low_enable_btrace,
+ linux_enable_btrace,
linux_low_disable_btrace,
linux_low_read_btrace,
+ linux_low_btrace_conf,
#else
NULL,
NULL,
NULL,
NULL,
+ NULL,
#endif
linux_supports_range_stepping,
+ linux_proc_pid_to_exec_file,
+ linux_mntns_open_cloexec,
+ linux_mntns_unlink,
+ linux_mntns_readlink,
+ linux_breakpoint_kind_from_pc,
+ linux_sw_breakpoint_from_kind,
+ linux_proc_tid_get_name,
+ linux_breakpoint_kind_from_current_state,
+ linux_supports_software_single_step,
+ linux_supports_catch_syscall,
};
-static void
-linux_init_signals ()
-{
- /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
- to find what the cancel signal actually is. */
-#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
- signal (__SIGRTMIN+1, SIG_IGN);
-#endif
-}
-
#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
initialize_low (void)
{
struct sigaction sigchld_action;
+
memset (&sigchld_action, 0, sizeof (sigchld_action));
set_target_ops (&linux_target_ops);
- set_breakpoint_data (the_low_target.breakpoint,
- the_low_target.breakpoint_len);
- linux_init_signals ();
+
linux_ptrace_init_warnings ();
sigchld_action.sa_handler = sigchld_handler;
sigaction (SIGCHLD, &sigchld_action, NULL);
initialize_low_arch ();
+
+ linux_check_ptrace_features ();
}