static void wait_for_inferior (inferior *inf);
+static void restart_threads (struct thread_info *event_thread, inferior *inf);
+
+static bool start_step_over (void);
+
+static bool step_over_info_valid_p (void);
+
/* Asynchronous signal handler registered as event loop source for
when we have pending events ready to be passed to the core. */
static struct async_event_handler *infrun_async_inferior_event_token;
	 insert breakpoints, so that we can debug it.  A
	 subsequent child exec or exit is enough to know when
	 the child stops using the parent's address space.  */
- parent_inf->waiting_for_vfork_done = detach_fork;
+ gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
+ parent_inf->thread_waiting_for_vfork_done
+ = detach_fork ? inferior_thread () : nullptr;
parent_inf->pspace->breakpoints_not_allowed = detach_fork;
}
}
child_inf->pending_detach = 0;
parent_inf->vfork_child = child_inf;
parent_inf->pending_detach = detach_fork;
- parent_inf->waiting_for_vfork_done = 0;
+ parent_inf->thread_waiting_for_vfork_done = nullptr;
}
else if (detach_fork)
{
parent = inferior_ptid;
child = tp->pending_follow.value.related_pid;
+ if (tp->pending_follow.kind == TARGET_WAITKIND_VFORKED
+ && target_is_non_stop_p ())
+ stop_all_threads ("handling vfork", tp->inf);
+
process_stratum_target *parent_targ = tp->inf->process_target ();
/* Set up inferior(s) as specified by the caller, and tell the
target to do whatever is necessary to follow either parent
}
}
+/* Handle a TARGET_WAITKIND_VFORK_DONE event reported for EVENT_THREAD.
+
+   This is only of interest if EVENT_THREAD's inferior is waiting for a
+   vfork child that is not under GDB's control (because it was or is
+   being detached) to exec or exit.  Otherwise, the event is ignored.  */
+
+static void
+handle_vfork_done (thread_info *event_thread)
+{
+  INFRUN_SCOPED_DEBUG_ENTER_EXIT;
+
+  /* Nothing to do unless this inferior has a thread blocked waiting for
+     its detached vfork child to release the shared address space.  */
+  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
+    return;
+
+  /* Only the thread that called vfork (the one recorded in
+     thread_waiting_for_vfork_done) can report this event; all other
+     threads of the inferior were kept stopped while waiting.  */
+  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done
+	      == event_thread);
+
+  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
+
+  /* The shared address space is ours again; breakpoints may be inserted.
+     Go through EVENT_THREAD's inferior rather than current_inferior ()
+     so the function does not depend on the caller having already
+     switched context to EVENT_THREAD.  */
+  event_thread->inf->pspace->breakpoints_not_allowed = 0;
+
+  if (target_is_non_stop_p ())
+    {
+      /* restart_threads and start_step_over may change the current
+	 thread; restore it on exit.  */
+      scoped_restore_current_thread restore_thread;
+
+      insert_breakpoints ();
+      restart_threads (event_thread, event_thread->inf);
+      start_step_over ();
+    }
+}
+
/* Enum strings for "set|show follow-exec-mode". */
static const char follow_exec_mode_new[] = "new";
|| stepping_past_nonsteppable_watchpoint ());
}
-\f
/* Displaced stepping. */
/* In non-stop debugging mode, we must take special care to manage
continue;
}
+ if (tp->inf->thread_waiting_for_vfork_done)
+ {
+	  /* When we stop all threads to handle a vfork, any thread that
+	     was in the step-over chain remains there.  No thread of this
+	     inferior can be resumed until the vfork is done, so don't
+	     attempt to start a step over for it now.  */
+ continue;
+ }
+
/* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
while we try to prepare the displaced step, we don't add it back to
the global step over chain. This is to avoid a thread staying in the
return a wildcard ptid. */
if (target_is_non_stop_p ())
return inferior_ptid;
- else
- return user_visible_resume_ptid (user_step);
+
+ /* The rest of the function assumes non-stop==off and
+ target-non-stop==off. */
+
+ /* If a thread in the resumption set is waiting for a vfork-done event (the
+ vfork child is not under GDB's control), resume just that thread.
+
+ If the target_resume interface was more flexible, we could be smarter
+     here when schedule-multiple is on.  For example, imagine 3 inferiors with
+ 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads 2.1 and 3.2 are
+ both waiting for a vfork-done event. Then we could ask the target(s) to
+ resume:
+
+ - All threads of inferior 1
+ - Thread 2.1
+ - Thread 3.2
+
+ Since we don't have that flexibility, just resume the first thread waiting
+ for a vfork-done event we find (e.g. thread 2.1). */
+ if (sched_multi)
+ {
+ for (inferior *inf : all_non_exited_inferiors ())
+ if (inf->thread_waiting_for_vfork_done != nullptr)
+ return inf->thread_waiting_for_vfork_done->ptid;
+ }
+ else if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
+ return current_inferior ()->thread_waiting_for_vfork_done->ptid;
+
+  /* If an inferior (which is therefore under GDB's control) is the vfork
+     child of another inferior, continue just that inferior.  */
+ if (sched_multi)
+ {
+ for (inferior *inf : all_non_exited_inferiors ())
+ if (inf->vfork_parent != nullptr)
+ return ptid_t (inf->pid);
+ }
+ else if (current_inferior ()->vfork_parent != nullptr)
+ return ptid_t (current_inferior ()->pid);
+
+ return user_visible_resume_ptid (user_step);
}
/* Wrapper for target_resume, that handles infrun-specific
else
target_pass_signals (signal_pass);
+ infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
+ resume_ptid.to_string ().c_str (),
+ step, gdb_signal_to_symbol_string (sig));
target_resume (resume_ptid, step, sig);
if (target_can_async_p ())
struct gdbarch *gdbarch = regcache->arch ();
struct thread_info *tp = inferior_thread ();
const address_space *aspace = regcache->aspace ();
- ptid_t resume_ptid;
/* This represents the user's step vs continue request. When
deciding whether "set scheduler-locking step" applies, it's the
user's intention that counts. */
gdb_assert (!tp->stop_requested);
gdb_assert (!thread_is_in_step_over_chain (tp));
+ gdb_assert (tp->inf->thread_waiting_for_vfork_done == nullptr
+ || tp->inf->thread_waiting_for_vfork_done == tp);
if (tp->suspend.waitstatus_pending_p)
{
/* Depends on stepped_breakpoint. */
step = currently_stepping (tp);
- if (current_inferior ()->waiting_for_vfork_done)
+ if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
{
/* Don't try to single-step a vfork parent that is waiting for
the child to get out of the shared memory region (by exec'ing
insert_single_step_breakpoint (gdbarch, aspace, pc);
insert_breakpoints ();
- resume_ptid = internal_resume_ptid (user_step);
+ ptid_t resume_ptid = internal_resume_ptid (user_step);
do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
tp->resumed = true;
return;
&& use_displaced_stepping (tp)
&& !step_over_info_valid_p ()
&& sig == GDB_SIGNAL_0
- && !current_inferior ()->waiting_for_vfork_done)
+ && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
{
displaced_step_prepare_status prepare_status
= displaced_step_prepare (tp);
/* Fallback to stepping over the breakpoint in-line. */
if (target_is_non_stop_p ())
- stop_all_threads ();
+ stop_all_threads ("displaced stepping falling back on inline stepping");
set_step_over_info (regcache->aspace (),
regcache_read_pc (regcache), 0, tp->global_num);
gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
/* Decide the set of threads to ask the target to resume. */
+ ptid_t resume_ptid;
if (tp->control.trap_expected)
{
/* We're allowing a thread to run past a breakpoint it has
CORE_ADDR pc;
struct execution_control_state ecss;
struct execution_control_state *ecs = &ecss;
- bool started;
/* If we're stopped at a fork/vfork, follow the branch set by the
"set follow-fork-mode" command; otherwise, we'll just proceed
{
scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
- started = start_step_over ();
+ bool displaced_step_started = start_step_over ();
if (step_over_info_valid_p ())
{
other thread was already doing one. In either case, don't
resume anything else until the step-over is finished. */
}
- else if (started && !target_is_non_stop_p ())
+ else if (displaced_step_started && !target_is_non_stop_p ())
{
/* A new displaced stepping sequence was started. In all-stop,
we can't talk to the target anymore until it next stops. */
continue;
}
+ if (tp->inf->thread_waiting_for_vfork_done != nullptr
+ && tp != tp->inf->thread_waiting_for_vfork_done)
+ {
+ infrun_debug_printf ("[%s] a thread of this inferior is waiting for vfork-done",
+ tp->ptid.to_string ().c_str ());
+ continue;
+ }
+
+ //if (tp->inf->pending_detach)
+ // {
+ //infrun_debug_printf ("[%s] inferior pending detach",
+ // tp->ptid.to_string ().c_str ());
+ //continue;
+ // }
+
infrun_debug_printf ("resuming %s",
target_pid_to_str (tp->ptid).c_str ());
error (_("Command aborted."));
}
}
- else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
+ else if (!cur_thr->resumed
+ && !thread_is_in_step_over_chain (cur_thr)
+ && !(non_stop && cur_thr->inf->thread_waiting_for_vfork_done))
{
/* The thread wasn't started, and isn't queued, run it now. */
reset_ecs (ecs, cur_thr);
};
static bool handle_one (const wait_one_event &event);
-static void restart_threads (struct thread_info *event_thread);
/* Prepare and stabilize the inferior for detaching it. E.g.,
detaching while a thread is displaced stepping is a recipe for
previously-stepping thread, since that one is still
running). */
if (!step_over_info_valid_p ())
- restart_threads (thr);
+ restart_threads (thr, nullptr);
}
}
}
/* See infrun.h. */
void
-stop_all_threads (void)
+stop_all_threads (const char *reason, inferior *inf)
{
/* We may need multiple passes to discover all threads. */
int pass;
gdb_assert (exists_non_stop_target ());
- infrun_debug_printf ("starting");
+ INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason, inf != nullptr ? inf->num : -1);
scoped_restore_current_thread restore_thread;
/* Enable thread events of all targets. */
for (auto *target : all_non_exited_process_targets ())
{
+ if (inf != nullptr && inf->process_target () != target)
+ continue;
+
switch_to_target_no_thread (target);
target_thread_events (true);
}
/* Disable thread events of all targets. */
for (auto *target : all_non_exited_process_targets ())
{
+ if (inf != nullptr && inf->process_target () != target)
+ continue;
+
switch_to_target_no_thread (target);
target_thread_events (false);
}
for (auto *target : all_non_exited_process_targets ())
{
+ if (inf != nullptr && inf->process_target () != target)
+ continue;
+
switch_to_target_no_thread (target);
update_thread_list ();
}
to tell the target to stop. */
for (thread_info *t : all_non_exited_threads ())
{
+ if (inf != nullptr && t->inf != inf)
+ continue;
+
/* For a single-target setting with an all-stop target,
we would not even arrive here. For a multi-target
setting, until GDB is able to handle a mixture of
child->set_running (true);
/* In non-stop mode, also resume the other branch. */
- if (!detach_fork && (non_stop
- || (sched_multi && target_is_non_stop_p ())))
+ if (!detach_fork
+ && (non_stop || (sched_multi && target_is_non_stop_p ())))
{
if (follow_child)
switch_to_thread (parent);
ecs->event_thread = inferior_thread ();
ecs->ptid = inferior_ptid;
- keep_going (ecs);
+ if (current_inferior ()->vfork_child == nullptr)
+ keep_going (ecs);
}
if (follow_child)
ecs->ptid = inferior_ptid;
if (should_resume)
- keep_going (ecs);
+ {
+ if (ecs->ws.kind == TARGET_WAITKIND_VFORKED)
+ {
+ if (current_inferior ()->vfork_child != nullptr
+ && target_is_non_stop_p ())
+ prepare_to_wait (ecs);
+ else
+ keep_going (ecs);
+ }
+ else
+ if (!switch_back_to_stepped_thread (ecs))
+ keep_going (ecs);
+ }
else
stop_waiting (ecs);
return;
context_switch (ecs);
- current_inferior ()->waiting_for_vfork_done = 0;
- current_inferior ()->pspace->breakpoints_not_allowed = 0;
+ handle_vfork_done (ecs->event_thread);
+
+ gdb_assert (inferior_thread () == ecs->event_thread);
if (handle_stop_requested (ecs))
return;
/* This also takes care of reinserting breakpoints in the
previously locked inferior. */
- keep_going (ecs);
+ if (!switch_back_to_stepped_thread (ecs))
+ {
+ gdb_assert (inferior_thread () == ecs->event_thread);
+ keep_going (ecs);
+ }
return;
case TARGET_WAITKIND_EXECD:
ignored. */
static void
-restart_threads (struct thread_info *event_thread)
+restart_threads (struct thread_info *event_thread, inferior *inf)
{
+ INFRUN_SCOPED_DEBUG_ENTER_EXIT;
+
+ gdb_assert (target_is_non_stop_p ());
+
+ scoped_restore_current_thread restore_thread;
+
/* In case the instruction just stepped spawned a new thread. */
update_thread_list ();
for (thread_info *tp : all_non_exited_threads ())
{
+ if (inf != nullptr && tp->inf != inf)
+ continue;
+
if (tp->inf->detaching)
{
infrun_debug_printf ("restart threads: [%s] inferior detaching",
context_switch (ecs);
insert_breakpoints ();
- restart_threads (ecs->event_thread);
+ restart_threads (ecs->event_thread, nullptr);
/* If we have events pending, go through handle_inferior_event
again, picking up a pending event at random. This avoids
/* If all-stop, but there exists a non-stop target, stop all
threads now that we're presenting the stop to the user. */
if (!non_stop && exists_non_stop_target ())
- stop_all_threads ();
+ stop_all_threads ("presenting stop to user in all-stop");
}
/* Like keep_going, but passes the signal to the inferior, even if the
we're about to step over, otherwise other threads could miss
it. */
if (step_over_info_valid_p () && target_is_non_stop_p ())
- stop_all_threads ();
+ stop_all_threads ("starting in-line step-over");
/* Stop stepping if inserting breakpoints fails. */
try