event loop. */
static int linux_nat_event_pipe[2] = { -1, -1 };
+/* True if we're currently in async mode. */
+#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
+
/* Flush the event pipe. */
static void
static int kill_lwp (int lwpid, int signo);
static int stop_callback (struct lwp_info *lp, void *data);
+static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);
/* If we're in async mode, need to tell the event loop
there's something here to process. */
- if (target_can_async_p ())
+ if (target_is_async_p ())
async_file_mark ();
}
}
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
+static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
\f
lp = find_lwp_pid (ptid);
lwpid = ptid_get_lwp (ptid);
- /* We assume that we're already attached to any LWP that has an id
- equal to the overall process id, and to any LWP that is already
+ /* We assume that we're already attached to any LWP that is already
in our list of LWPs. If we're not seeing exit events from threads
and we've had PID wraparound since we last tried to stop all threads,
this assumption might be wrong; fortunately, this is very unlikely
to happen. */
- if (lwpid != ptid_get_pid (ptid) && lp == NULL)
+ if (lp == NULL)
{
int status, cloned = 0, signalled = 0;
/* We've already seen this thread stop, but we
haven't seen the PTRACE_EVENT_CLONE extended
event yet. */
- return 0;
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLAL: attach failed, but already seen "
+ "this thread %s stop\n",
+ target_pid_to_str (ptid));
+ return 1;
}
else
{
int new_pid;
int status;
- /* See if we've got a stop for this new child
- pending. If so, we're already attached. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLAL: attach failed, and haven't seen "
+ "this thread %s stop yet\n",
+ target_pid_to_str (ptid));
+
+ /* We may or may not be attached to the LWP already.
+ Try waitpid on it. If that errors, we're not
+ attached to the LWP yet. Otherwise, we're
+ already attached. */
gdb_assert (lwpid > 0);
new_pid = my_waitpid (lwpid, &status, WNOHANG);
if (new_pid == -1 && errno == ECHILD)
new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
if (new_pid != -1)
{
- if (WIFSTOPPED (status))
- add_to_pid_list (&stopped_pids, lwpid, status);
+ if (new_pid == 0)
+ {
+ /* The child hasn't stopped for its initial
+ SIGSTOP stop yet. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLAL: child hasn't "
+ "stopped yet\n");
+ }
+ else if (WIFSTOPPED (status))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLAL: adding to stopped_pids\n");
+ add_to_pid_list (&stopped_pids, lwpid, status);
+ }
return 1;
}
}
lp = add_lwp (ptid);
lp->stopped = 1;
+ lp->last_resume_kind = resume_stop;
lp->cloned = cloned;
lp->signalled = signalled;
if (WSTOPSIG (status) != SIGSTOP)
status_to_str (status));
}
}
- else
- {
- /* We assume that the LWP representing the original process is
- already stopped. Mark it as stopped in the data structure
- that the GNU/linux ptrace layer uses to keep track of
- threads. Note that this won't have already been done since
- the main thread will have, we assume, been stopped by an
- attach from a different layer. */
- if (lp == NULL)
- lp = add_lwp (ptid);
- lp->stopped = 1;
- }
- lp->last_resume_kind = resume_stop;
return 0;
}
status = 0;
}
- if (non_stop)
+ /* If the thread_db layer is active, let it record the user
+ level thread id and status, and add the thread to GDB's
+ list. */
+ if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
{
- /* Add the new thread to GDB's lists as soon as possible
- so that:
-
- 1) the frontend doesn't have to wait for a stop to
- display them, and,
-
- 2) we tag it with the correct running state. */
-
- /* If the thread_db layer is active, let it know about
- this new thread, and add it to GDB's list. */
- if (!thread_db_attach_lwp (new_lp->ptid))
- {
- /* We're not using thread_db. Add it to GDB's
- list. */
- target_post_attach (ptid_get_lwp (new_lp->ptid));
- add_thread (new_lp->ptid);
- }
+ /* The process is not using thread_db. Add the LWP to
+ GDB's list. */
+ target_post_attach (ptid_get_lwp (new_lp->ptid));
+ add_thread (new_lp->ptid);
+ }
- if (!stopping)
- {
- set_running (new_lp->ptid, 1);
- set_executing (new_lp->ptid, 1);
- /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
- resume_stop. */
- new_lp->last_resume_kind = resume_continue;
- }
+ if (!stopping)
+ {
+ set_running (new_lp->ptid, 1);
+ set_executing (new_lp->ptid, 1);
+ /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
+ resume_stop. */
+ new_lp->last_resume_kind = resume_continue;
}
if (status != 0)
new_lp->status = status;
}
- /* Note the need to use the low target ops to resume, to
- handle resuming with PT_SYSCALL if we have syscall
- catchpoints. */
- if (!stopping)
- {
- new_lp->resumed = 1;
-
- if (status == 0)
- {
- gdb_assert (new_lp->last_resume_kind == resume_continue);
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "LHEW: resuming new LWP %ld\n",
- ptid_get_lwp (new_lp->ptid));
- linux_resume_one_lwp (new_lp, 0, GDB_SIGNAL_0);
- }
- }
-
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "LHEW: resuming parent LWP %d\n", pid);
- linux_resume_one_lwp (lp, 0, GDB_SIGNAL_0);
+ new_lp->resumed = !stopping;
return 1;
}
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"LHEW: Got PTRACE_EVENT_VFORK_DONE "
- "from LWP %ld: resuming\n",
+ "from LWP %ld: ignoring\n",
ptid_get_lwp (lp->ptid));
- ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
return 1;
}
fprintf_unfiltered (gdb_stdlog,
"WL: Handling extended status 0x%06x\n",
status);
- if (linux_handle_extended_wait (lp, status, 1))
- return wait_lwp (lp);
+ linux_handle_extended_wait (lp, status, 1);
+ return 0;
}
return status;
stop_callback (lwp, NULL);
}
+/* See linux-nat.h. */
+
+void
+linux_stop_and_wait_all_lwps (void)
+{
+ /* Stop all LWP's ... */
+ iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
+
+ /* ... and wait until all of them have reported back that
+ they're no longer running. */
+ iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
+}
+
+/* See linux-nat.h. */
+
+void
+linux_unstop_all_lwps (void)
+{
+ iterate_over_lwps (minus_one_ptid,
+ resume_stopped_resumed_lwps, &minus_one_ptid);
+}
+
/* Return non-zero if LWP PID has a pending SIGINT. */
static int
if (WIFSTOPPED (status) && !lp)
{
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHEW: saving LWP %ld status %s in stopped_pids list\n",
+ (long) lwpid, status_to_str (status));
add_to_pid_list (&stopped_pids, lwpid, status);
return NULL;
}
}
/* When using hardware single-step, we need to report every signal.
- Otherwise, signals in pass_mask may be short-circuited. */
+ Otherwise, signals in pass_mask may be short-circuited
+ except signals that might be caused by a breakpoint. */
if (!lp->step
- && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
+ && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
+ && !linux_wstatus_maybe_breakpoint (status))
{
linux_resume_one_lwp (lp, lp->step, signo);
if (debug_linux_nat)
target_pid_to_str (lp->ptid));
}
- if (!target_can_async_p ())
+ if (!target_is_async_p ())
{
/* Causes SIGINT to be passed on to the attached process. */
set_sigint_trap ();
continue;
}
- /* Now that we've pulled all events out of the kernel, check if
- there's any LWP with a status to report to the core. */
+ /* Now that we've pulled all events out of the kernel, resume
+ LWPs that don't have an interesting event to report. */
+ iterate_over_lwps (minus_one_ptid,
+ resume_stopped_resumed_lwps, &minus_one_ptid);
+
+ /* ... and find an LWP with a status to report to the core, if
+ any. */
lp = iterate_over_lwps (ptid, status_callback, NULL);
if (lp != NULL)
break;
ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
- if (!target_can_async_p ())
+ if (!target_is_async_p ())
clear_sigint_trap ();
restore_child_signals_mask (&prev_mask);
sigsuspend (&suspend_mask);
}
- if (!target_can_async_p ())
+ if (!target_is_async_p ())
clear_sigint_trap ();
gdb_assert (lp);
struct gdbarch *gdbarch = get_regcache_arch (regcache);
CORE_ADDR pc = regcache_read_pc (regcache);
- gdb_assert (is_executing (lp->ptid));
-
/* Don't bother if there's a breakpoint at PC that we'd hit
immediately, and we're not waiting for this LWP. */
if (!ptid_match (lp->ptid, *wait_ptid_p))
}
/* Flush the async file first. */
- if (target_can_async_p ())
+ if (target_is_async_p ())
async_file_flush ();
/* Resume LWPs that are currently stopped without any pending status
/* If we requested any event, and something came out, assume there
may be more. If we requested a specific lwp or process, also
assume there may be more. */
- if (target_can_async_p ()
+ if (target_is_async_p ()
&& ((ourstatus->kind != TARGET_WAITKIND_IGNORE
&& ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
|| !ptid_equal (ptid, minus_one_ptid)))
async_file_mark ();
- /* Get ready for the next event. */
- if (target_can_async_p ())
- target_async (inferior_event_handler, 0);
-
return event_ptid;
}
static int
linux_nat_is_async_p (struct target_ops *ops)
{
- /* NOTE: palves 2008-03-21: We're only async when the user requests
- it explicitly with the "set target-async" command.
- Someday, linux will always be async. */
- return target_async_permitted;
+ return linux_is_async_p ();
}
/* target_can_async_p implementation. */
static void
linux_nat_terminal_inferior (struct target_ops *self)
{
- if (!target_is_async_p ())
+ /* Like target_terminal_inferior, use target_can_async_p, not
+ target_is_async_p, since at this point the target is not async
+ yet. If it can async, then we know it will become async prior to
+ resume. */
+ if (!target_can_async_p ())
{
/* Async mode is disabled. */
child_terminal_inferior (self);
static void
linux_nat_terminal_ours (struct target_ops *self)
{
- if (!target_is_async_p ())
- {
- /* Async mode is disabled. */
- child_terminal_ours (self);
- return;
- }
-
/* GDB should never give the terminal to the inferior if the
inferior is running in the background (run&, continue&, etc.),
but claiming it sure should. */
static int
linux_async_pipe (int enable)
{
- int previous = (linux_nat_event_pipe[0] != -1);
+ int previous = linux_is_async_p ();
if (previous != enable)
{