+ return;
+ }
+
+ if (debug_threads)
+ fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
+
+ lwp->stop_expected = 1;
+ kill_lwp (pid, SIGSTOP);
+}
+
+static int
+send_sigstop_callback (struct inferior_list_entry *entry, void *except)
+{
+ struct lwp_info *lwp = (struct lwp_info *) entry;
+
+ /* Ignore EXCEPT. */
+ if (lwp == except)
+ return 0;
+
+ if (lwp->stopped)
+ return 0;
+
+ send_sigstop (lwp);
+ return 0;
+}
+
+/* Increment the suspend count of an LWP, and stop it, if not stopped
+ yet. */
+static int
+suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
+ void *except)
+{
+ struct lwp_info *lwp = (struct lwp_info *) entry;
+
+ /* Ignore EXCEPT. */
+ if (lwp == except)
+ return 0;
+
+ lwp->suspended++;
+
+ return send_sigstop_callback (entry, except);
+}
+
+static void
+mark_lwp_dead (struct lwp_info *lwp, int wstat)
+{
+ /* It's dead, really. */
+ lwp->dead = 1;
+
+ /* Store the exit status for later. */
+ lwp->status_pending_p = 1;
+ lwp->status_pending = wstat;
+
+ /* Prevent trying to stop it. */
+ lwp->stopped = 1;
+
+ /* No further stops are expected from a dead lwp. */
+ lwp->stop_expected = 0;
+}
+
/* Wait until ENTRY's LWP reports the SIGSTOP we requested for it
   (see send_sigstop), pulling one wait event.  If the LWP stops with
   some other signal, record that status as pending so it can be
   reported to GDB later; if the process exited instead, mark the LWP
   dead.  Restores current_inferior on exit, unless the previously
   current thread died meanwhile.  */

static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  /* Nothing to wait for if it is already stopped.  */
  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  /* Remember the current thread by id rather than by pointer, since
     waiting below may delete it if it exits.  */
  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  /* Keep the non-SIGSTOP status pending for later reporting;
	     the SIGSTOP we requested will be collected by a later
	     wait.  */
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      /* Re-look up the LWP by PID; linux_wait_for_event may already
	 have removed it from the thread tables.  */
      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  /* Put the previously current thread back, if it survived.  */
  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
+
/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
   move it out, because we need to report the stop event to GDB.  For
   example, if the user puts a breakpoint in the jump pad, it's
   because she wants to debug it.  DATA is unused.  */

static int
stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Only meaningful for a stopped, unsuspended LWP.  */
  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  The LWP is
     "stuck" only if GDB needs to see this stop (GDB breakpoint,
     watchpoint hit, or pending step request) AND the LWP is inside
     the jump pad collecting; note the && short-circuits, so
     linux_fast_tracepoint_collecting is only consulted last.  */
  return (supports_fast_tracepoints ()
	  && in_process_agent_loaded ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stopped_by_watchpoint
	      || thread->last_resume_kind == resume_step)
	  && linux_fast_tracepoint_collecting (lwp, NULL));
}
+
/* for_each_inferior callback: if ENTRY's LWP is stopped inside a
   fast tracepoint jump pad for a reason GDB need not see (no GDB
   breakpoint, no watchpoint hit, no step request), defer its pending
   signal (if any) and resume it so it can finish collecting and exit
   the pad; otherwise leave it stopped and bump its suspend count so
   it stays put during stabilization.  */

static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);
  int *wstat;

  /* Only valid on a stopped, unsuspended LWP.  */
  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  /* Point WSTAT at the pending status, if there is one.  */
  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && !lwp->stopped_by_watchpoint
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	fprintf (stderr,
		 "LWP %ld needs stabilizing (in jump pad)\n",
		 lwpid_of (lwp));

      /* Park the pending signal on the deferred queue; it will be
	 reported once the LWP is out of the jump pad.  */
      if (wstat)
	{
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    fprintf (stderr,
		     "Signal %d for LWP %ld deferred "
		     "(in jump pad)\n",
		     WSTOPSIG (*wstat), lwpid_of (lwp));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}
+
+static int
+lwp_running (struct inferior_list_entry *entry, void *data)
+{
+ struct lwp_info *lwp = (struct lwp_info *) entry;
+
+ if (lwp->dead)
+ return 0;
+ if (lwp->stopped)
+ return 0;
+ return 1;
+}
+
+/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
+ If SUSPEND, then also increase the suspend count of every LWP,
+ except EXCEPT. */
+
+static void
+stop_all_lwps (int suspend, struct lwp_info *except)
+{
+ stopping_threads = 1;
+
+ if (suspend)
+ find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
+ else
+ find_inferior (&all_lwps, send_sigstop_callback, except);
+ for_each_inferior (&all_lwps, wait_for_sigstop);
+ stopping_threads = 0;
+}
+
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal; INFO, if non-NULL, is
   the siginfo to deliver along with SIGNAL.  No-op if the LWP is not
   stopped.  If the LWP has a pending status, the signal is queued
   and the LWP is left stopped so the status can be reported first.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;
  int fast_tp_collecting;

  /* Only a stopped LWP can be resumed.  */
  if (lwp->stopped == 0)
    return;

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  /* While stabilizing, the only LWPs resumed are those being moved
     out of a jump pad.  */
  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (get_lwp_thread (lwp));
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0
	  || fast_tp_collecting))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A pending status must be reported before the LWP runs again.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
		 " has pending status\n",
		 lwpid_of (lwp), step ? "step" : "continue", signal,
		 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, " pending reinsert at 0x%s\n",
		 paddress (lwp->bp_reinsert));

      if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
	{
	  /* Sanity checks: a reinsert step should be a step, and
	     should not happen on a suspended LWP.  */
	  if (fast_tp_collecting == 0)
	    {
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* fast_tp_collecting == 1 means the LWP is headed for the
     exit-jump-pad breakpoint; == 2 means it is single-stepping off
     the pad — presumably encoded by lwp->collecting_fast_tracepoint;
     TODO confirm against its setter.  */
  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
		 lwpid_of (lwp));

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
		 lwpid_of (lwp));

      if (can_hardware_single_step ())
	step = 1;
      else
	fatal ("moving out of jump pad single-stepping"
	       " not implemented on this target");

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (get_lwp_thread (lwp)->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "lwp %ld has a while-stepping action -> forcing step.\n",
		 lwpid_of (lwp));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to
     reinsert a breakpoint or we're trying to finish a fast tracepoint
     collect.  */
  if (lwp->pending_signals != NULL
      && lwp->bp_reinsert == 0
      && fast_tp_collecting == 0)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest queued signal (tail of the list) so
	 signals are delivered in FIFO order.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  /* Give the port a chance to prepare (e.g., update debug registers
     — TODO confirm what ports do here).  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* Registers will be stale once the LWP runs.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_ARG4_TYPE) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
+
/* Bundle of resume requests, used to pass both the array and its
   length through a single find_inferior callback argument.  */

struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of elements in RESUME.  */
};
+
+/* This function is called once per thread. We look up the thread
+ in RESUME_PTR, and mark the thread with a pointer to the appropriate
+ resume request.
+
+ This algorithm is O(threads * resume elements), but resume elements
+ is small (and will remain small at least until GDB supports thread
+ suspension). */
+static int
+linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
+{
+ struct lwp_info *lwp;
+ struct thread_info *thread;
+ int ndx;
+ struct thread_resume_array *r;
+
+ thread = (struct thread_info *) entry;
+ lwp = get_thread_lwp (thread);
+ r = arg;
+
+ for (ndx = 0; ndx < r->n; ndx++)
+ {
+ ptid_t ptid = r->resume[ndx].thread;
+ if (ptid_equal (ptid, minus_one_ptid)
+ || ptid_equal (ptid, entry->id)
+ || (ptid_is_pid (ptid)
+ && (ptid_get_pid (ptid) == pid_of (lwp)))
+ || (ptid_get_lwp (ptid) == -1
+ && (ptid_get_pid (ptid) == pid_of (lwp))))
+ {
+ if (r->resume[ndx].kind == resume_stop
+ && thread->last_resume_kind == resume_stop)
+ {
+ if (debug_threads)
+ fprintf (stderr, "already %s LWP %ld at GDB's request\n",
+ thread->last_status.kind == TARGET_WAITKIND_STOPPED
+ ? "stopped"
+ : "stopping",
+ lwpid_of (lwp));
+
+ continue;
+ }
+
+ lwp->resume = &r->resume[ndx];
+ thread->last_resume_kind = lwp->resume->kind;
+
+ /* If we had a deferred signal to report, dequeue one now.
+ This can happen if LWP gets more than one signal while
+ trying to get out of a jump pad. */
+ if (lwp->stopped
+ && !lwp->status_pending_p
+ && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
+ {
+ lwp->status_pending_p = 1;
+
+ if (debug_threads)
+ fprintf (stderr,
+ "Dequeueing deferred signal %d for LWP %ld, "
+ "leaving status pending.\n",
+ WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
+ }
+
+ return 0;
+ }
+ }
+
+ /* No resume action for this thread. */
+ lwp->resume = NULL;
+
+ return 0;
+}
+
+
+/* Set *FLAG_P if this lwp has an interesting status pending. */
+static int
+resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
+{
+ struct lwp_info *lwp = (struct lwp_info *) entry;
+
+ /* LWPs which will not be resumed are not interesting, because
+ we might not wait for them next time through linux_wait. */
+ if (lwp->resume == NULL)
+ return 0;
+
+ if (lwp->status_pending_p)
+ * (int *) flag_p = 1;
+
+ return 0;
+}
+
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  DUMMY is unused.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, not stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  /* A thread GDB wants stopped should stay where it is.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, suspended\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* NOTE(review): this branch logs "No" but does not return, so the
     PC/breakpoint checks below still run even when NEED_STEP_OVER is
     clear -- confirm this fall-through is intentional.  */
  if (!lwp->need_step_over)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Cancelling, PC was changed. "
		 "Old stop_pc was 0x%s, PC is now 0x%s\n",
		 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* Breakpoint queries below are relative to this thread.  */
  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc))
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, but found"
		     " GDB breakpoint at 0x%s; skipping step over\n",
		     lwpid_of (lwp), paddress (pc));

	  current_inferior = saved_inferior;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, "
		     "found breakpoint at 0x%s\n",
		     lwpid_of (lwp), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_inferior = saved_inferior;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
	     "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
	     lwpid_of (lwp), paddress (pc));

  return 0;
}
+
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.

   Returns 1 (a step-over was started).  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
	     "Starting step-over on LWP %ld. Stopping all threads\n",
	     lwpid_of (lwp));

  /* Suspend-stop everyone but LWP; LWP itself must not be
     suspended.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  /* Remember where the breakpoint goes back; uninsert it (and any
     fast tracepoint jump) so the step can make progress.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests, and continue instead.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}
+
+/* Finish a step-over. Reinsert the breakpoint we had uninserted in
+ start_step_over, if still there, and delete any reinsert
+ breakpoints we've set, on non hardware single-step targets. */
+
+static int
+finish_step_over (struct lwp_info *lwp)
+{
+ if (lwp->bp_reinsert != 0)
+ {
+ if (debug_threads)
+ fprintf (stderr, "Finished step over.\n");
+
+ /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
+ may be no breakpoint to reinsert there by now. */
+ reinsert_breakpoints_at (lwp->bp_reinsert);
+ reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
+
+ lwp->bp_reinsert = 0;
+
+ /* Delete any software-single-step reinsert breakpoints. No
+ longer needed. We don't have to worry about other threads
+ hitting this trap, and later not being able to explain it,
+ because we were stepping over a breakpoint, and we hold all
+ threads but LWP stopped while doing that. */
+ if (!can_hardware_single_step ())
+ delete_reinsert_breakpoints ();
+
+ step_over_bkpt = null_ptid;
+ return 1;
+ }
+ else
+ return 0;
+}
+
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.

   ENTRY is the thread; ARG points to the leave_all_stopped flag
   computed by linux_resume.  Always returns 0.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No request matched this thread (see linux_set_resume_request).  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
+
/* Act on the N resume requests in RESUME_INFO (the target's resume
   method).  Distributes the requests to all threads, then either
   resumes them, leaves everything stopped (when some to-be-resumed
   thread already has a status to report, in all-stop), or starts a
   step-over for a thread stopped at an internal breakpoint.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  /* Attach each thread's matching resume request, if any.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}
+
+/* This function is called once per thread. We check the thread's
+ last resume request, which will tell us whether to resume, step, or
+ leave the thread stopped. Any signal the client requested to be
+ delivered has already been enqueued at this point.
+
+ If any thread that GDB wants running is stopped at an internal
+ breakpoint that needs stepping over, we start a step-over operation
+ on that particular thread, and leave all others stopped. */
+
+static int
+proceed_one_lwp (struct inferior_list_entry *entry, void *except)
+{
+ struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread;
+ int step;
+
+ if (lwp == except)
+ return 0;
+
+ if (debug_threads)
+ fprintf (stderr,
+ "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
+
+ if (!lwp->stopped)
+ {
+ if (debug_threads)
+ fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
+ return 0;
+ }
+
+ thread = get_lwp_thread (lwp);
+
+ if (thread->last_resume_kind == resume_stop
+ && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
+ {
+ if (debug_threads)
+ fprintf (stderr, " client wants LWP to remain %ld stopped\n",
+ lwpid_of (lwp));
+ return 0;
+ }
+
+ if (lwp->status_pending_p)
+ {
+ if (debug_threads)
+ fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
+ lwpid_of (lwp));
+ return 0;
+ }
+
+ gdb_assert (lwp->suspended >= 0);
+
+ if (lwp->suspended)
+ {
+ if (debug_threads)
+ fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
+ return 0;
+ }
+
+ if (thread->last_resume_kind == resume_stop
+ && lwp->pending_signals_to_report == NULL
+ && lwp->collecting_fast_tracepoint == 0)
+ {
+ /* We haven't reported this LWP as stopped yet (otherwise, the
+ last_status.kind check above would catch it, and we wouldn't
+ reach here. This LWP may have been momentarily paused by a
+ stop_all_lwps call while handling for example, another LWP's
+ step-over. In that case, the pending expected SIGSTOP signal
+ that was queued at vCont;t handling time will have already
+ been consumed by wait_for_sigstop, and so we need to requeue
+ another one here. Note that if the LWP already has a SIGSTOP
+ pending, this is a no-op. */
+
+ if (debug_threads)
+ fprintf (stderr,
+ "Client wants LWP %ld to stop. "
+ "Making sure it has a SIGSTOP pending\n",
+ lwpid_of (lwp));
+
+ send_sigstop (lwp);
+ }
+
+ step = thread->last_resume_kind == resume_step;
+ linux_resume_one_lwp (lwp, step, 0, NULL);
+ return 0;
+}
+
+static int
+unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
+{
+ struct lwp_info *lwp = (struct lwp_info *) entry;
+
+ if (lwp == except)
+ return 0;
+
+ lwp->suspended--;
+ gdb_assert (lwp->suspended >= 0);
+
+ return proceed_one_lwp (entry, except);
+}
+
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct lwp_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct lwp_info *) find_inferior (&all_lwps,
					     need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "proceed_all_lwps: found "
		     "thread %ld needing a step-over\n",
		     lwpid_of (need_step_over));

	  /* start_step_over leaves all other threads stopped; they
	     will be proceeded once the step-over finishes.  */
	  start_step_over (need_step_over);
	  return;
	}
    }

  if (debug_threads)
    fprintf (stderr, "Proceeding, no step-over needed\n");

  find_inferior (&all_lwps, proceed_one_lwp, NULL);
}
+
+/* Stopped LWPs that the client wanted to be running, that don't have
+ pending statuses, are set to run again, except for EXCEPT, if not
+ NULL. This undoes a stop_all_lwps call. */
+
+static void
+unstop_all_lwps (int unsuspend, struct lwp_info *except)
+{
+ if (debug_threads)
+ {
+ if (except)
+ fprintf (stderr,
+ "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
+ else
+ fprintf (stderr,
+ "unstopping all lwps\n");
+ }
+
+ if (unsuspend)
+ find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
+ else
+ find_inferior (&all_lwps, proceed_one_lwp, except);
+}
+
+#ifdef HAVE_LINUX_USRREGS
+
+int
+register_addr (int regnum)
+{
+ int addr;
+
+ if (regnum < 0 || regnum >= the_low_target.num_regs)
+ error ("Invalid register number %d.", regnum);
+
+ addr = the_low_target.regmap[regnum];
+
+ return addr;
+}
+
/* Fetch one register, REGNO, from the current inferior via
   PTRACE_PEEKUSER and supply it to REGCACHE.  Registers outside the
   USRREGS map, that the port cannot fetch, or with no ptrace offset
   are silently skipped.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (get_thread_lwp (current_inferior));
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* ptrace returns -1 on error as well as on a legitimate
	 all-ones word, so errno is the only reliable error check.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
+
+/* Store one register into the current inferior with PTRACE_POKEUSER.
+ Silently does nothing for register numbers out of range, registers
+ the low target says cannot be stored, or registers with no USER
+ area address (regmap entry of -1). A write failure is an error,
+ except when the process has already exited (ESRCH), or when the low
+ target reports the register as not storable after all. */
+static void
+store_register (struct regcache *regcache, int regno)
+{
+ CORE_ADDR regaddr;
+ int i, size;
+ char *buf;
+ int pid;
+
+ if (regno >= the_low_target.num_regs)
+ return;
+ if ((*the_low_target.cannot_store_register) (regno))
+ return;
+
+ regaddr = register_addr (regno);
+ if (regaddr == -1)
+ return;
+
+ /* Round up to whole ptrace transfer words and zero-fill, so the
+ trailing pad bytes written by POKEUSER are deterministic. */
+ size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
+ & -sizeof (PTRACE_XFER_TYPE));
+ buf = alloca (size);
+ memset (buf, 0, size);
+
+ if (the_low_target.collect_ptrace_register)
+ the_low_target.collect_ptrace_register (regcache, regno, buf);
+ else
+ collect_register (regcache, regno, buf);
+
+ pid = lwpid_of (get_thread_lwp (current_inferior));
+ for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
+ {
+ errno = 0;
+ ptrace (PTRACE_POKEUSER, pid,
+ /* Coerce to a uintptr_t first to avoid potential gcc warning
+ about coercing an 8 byte integer to a 4 byte pointer. */
+ (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
+ (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
+ if (errno != 0)
+ {
+ /* At this point, ESRCH should mean the process is
+ already gone, in which case we simply ignore attempts
+ to change its registers. See also the related
+ comment in linux_resume_one_lwp. */
+ if (errno == ESRCH)
+ return;
+
+ if ((*the_low_target.cannot_store_register) (regno) == 0)
+ error ("writing register %d: %s", regno, strerror (errno));
+ }
+ regaddr += sizeof (PTRACE_XFER_TYPE);
+ }
+}
+
+/* Fetch all registers, or just one, from the child process.
+ REGNO of -1 means all registers known to the low target. */
+static void
+usr_fetch_inferior_registers (struct regcache *regcache, int regno)
+{
+ if (regno == -1)
+ for (regno = 0; regno < the_low_target.num_regs; regno++)
+ fetch_register (regcache, regno);
+ else
+ fetch_register (regcache, regno);
+}
+
+/* Store our register values back into the inferior.
+ If REGNO is -1, do this for all registers.
+ Otherwise, REGNO specifies which register (so we can save time). */
+static void
+usr_store_inferior_registers (struct regcache *regcache, int regno)
+{
+ if (regno == -1)
+ for (regno = 0; regno < the_low_target.num_regs; regno++)
+ store_register (regcache, regno);
+ else
+ store_register (regcache, regno);
+}
+#endif /* HAVE_LINUX_USRREGS */
+
+
+
+#ifdef HAVE_LINUX_REGSETS
+
+/* Fetch the inferior's registers via the regset (PTRACE_GETREGS /
+ PTRACE_GETREGSET etc.) interface, iterating over every enabled
+ entry of target_regsets. A regset that fails with EIO is disabled
+ for the rest of this process's lifetime. Returns 0 if the general
+ registers were among the regsets fetched, 1 otherwise (so the
+ caller knows whether to fall back to another method for them). */
+static int
+regsets_fetch_inferior_registers (struct regcache *regcache)
+{
+ struct regset_info *regset;
+ int saw_general_regs = 0;
+ int pid;
+ struct iovec iov;
+
+ regset = target_regsets;
+
+ pid = lwpid_of (get_thread_lwp (current_inferior));
+ while (regset->size >= 0)
+ {
+ void *buf, *data;
+ int nt_type, res;
+
+ if (regset->size == 0 || disabled_regsets[regset - target_regsets])
+ {
+ regset ++;
+ continue;
+ }
+
+ buf = xmalloc (regset->size);
+
+ /* NT_* typed regsets (PTRACE_GETREGSET) take an iovec describing
+ the buffer; the older requests take the buffer directly. */
+ nt_type = regset->nt_type;
+ if (nt_type)
+ {
+ iov.iov_base = buf;
+ iov.iov_len = regset->size;
+ data = (void *) &iov;
+ }
+ else
+ data = buf;
+
+#ifndef __sparc__
+ res = ptrace (regset->get_request, pid, nt_type, data);
+#else
+ res = ptrace (regset->get_request, pid, data, nt_type);
+#endif
+ if (res < 0)
+ {
+ if (errno == EIO)
+ {
+ /* If we get EIO on a regset, do not try it again for
+ this process. */
+ disabled_regsets[regset - target_regsets] = 1;
+ }
+ else
+ {
+ char s[256];
+ snprintf (s, sizeof (s),
+ "ptrace(regsets_fetch_inferior_registers) PID=%d",
+ pid);
+ perror (s);
+ }
+
+ /* The kernel did not fill BUF; feeding it to the store
+ function would supply garbage register values to the
+ regcache. Skip to the next regset instead. */
+ free (buf);
+ regset ++;
+ continue;
+ }
+
+ if (regset->type == GENERAL_REGS)
+ saw_general_regs = 1;
+ regset->store_function (regcache, buf);
+ free (buf);
+ regset ++;
+ }
+ if (saw_general_regs)
+ return 0;
+ else
+ return 1;
+}
+
+static int
+regsets_store_inferior_registers (struct regcache *regcache)
+{
+ struct regset_info *regset;
+ int saw_general_regs = 0;
+ int pid;
+ struct iovec iov;
+
+ regset = target_regsets;
+
+ pid = lwpid_of (get_thread_lwp (current_inferior));
+ while (regset->size >= 0)
+ {
+ void *buf, *data;
+ int nt_type, res;
+
+ if (regset->size == 0 || disabled_regsets[regset - target_regsets])
+ {