/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

static void restart_threads (struct thread_info *event_thread,
                             inferior *inf = nullptr);

static bool start_step_over (void);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such a function.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

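/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): STEP_STOP_IF_NO_DEBUG above backs a user-visible
   setting; to the best of my recollection it is the "step-mode" knob:

       (gdb) set step-mode on     # "step" stops in functions without line info
       (gdb) show step-mode

   Treat the exact command name as an assumption; the authoritative
   binding is made where the commands are registered.  */
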
/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

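/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): DETACH_FORK and the follow_fork_mode_* strings
   further below are driven by the standard fork-following commands:

       (gdb) set follow-fork-mode child    # or "parent" (the default)
       (gdb) set detach-on-fork off        # keep debugging both processes
       (gdb) catch vfork                   # stop when the inferior vforks

   The commands themselves are well-known GDB CLI commands; pairing them
   with these particular variables is offered here as a reading aid.  */
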
bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}

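/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): DEBUG_INFRUN gates the infrun_debug_printf calls
   used throughout this file.  It is normally toggled from the CLI:

       (gdb) set debug infrun on
       (gdb) show debug infrun  */
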
/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

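/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): the setting above is the usual ASLR control:

       (gdb) set disable-randomization off   # let the OS randomize the inferior
       (gdb) show disable-randomization  */
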
/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

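/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): as the setter above enforces, non-stop mode must
   be selected before the inferior is running, e.g.:

       (gdb) set non-stop on
       (gdb) run  */
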
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

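/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): observer mode and the permission flags read by
   update_observer_mode are driven from the CLI, e.g.:

       (gdb) set observer on
       (gdb) set may-insert-breakpoints off

   The "set may-*" commands are registered elsewhere in GDB; they are
   mentioned here only to connect them to the code above.  */
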
/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

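/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): the tables above are what the signal-related
   commands ultimately populate, e.g.:

       (gdb) info signals
       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) catch signal SIGUSR2

   The exact command-to-table wiring lives with the command
   implementations; it is summarized here only as a reading aid.  */
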
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

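/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): the flag above is controlled with:

       (gdb) set stop-on-solib-events 1
       (gdb) show stop-on-solib-events  */
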
/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}

/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          child_inf->push_target (parent_inf->process_target ());
          thread_info *child_thr
            = add_thread_silent (child_inf->process_target (), child_ptid);

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;

              /* Now that the inferiors and program spaces are all
                 wired up, we can switch to the child thread (which
                 switches inferior and program space too).  */
              switch_to_thread (child_thr);
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* solib_create_inferior_hook relies on the current
                 thread.  */
              switch_to_thread (child_thr);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              scoped_restore restore_in_initial_library_scan
                = make_scoped_restore (&child_inf->in_initial_library_scan,
                                       true);
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->thread_waiting_for_vfork_done
            = detach_fork ? inferior_thread () : nullptr;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        child_inf->push_target (target);
      }

      thread_info *child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          scoped_restore restore_in_initial_library_scan
            = make_scoped_restore (&child_inf->in_initial_library_scan, true);
          solib_create_inferior_hook (0);
        }

      switch_to_thread (child_thr);
    }

  target_follow_fork (follow_child, detach_fork);

  return false;
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return true;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        /* If handling a vfork, stop all the inferior's threads, they will be
           restarted when the vfork shared region is complete.  */
        if (tp->pending_follow.kind == TARGET_WAITKIND_VFORKED
            && target_is_non_stop_p ())
          stop_all_threads ("handling vfork", tp->inf);

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = false;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previous selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent_targ, parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
                           target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          scoped_restore_current_pspace_and_thread restore_thread;

          /* We're letting loose of the parent.  */
          thread_info *tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to no-thread while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore_current_thread restore_thread;
          switch_to_no_thread ();

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          infrun_debug_printf ("resuming vfork parent process %d",
                               resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Handle TARGET_WAITKIND_VFORK_DONE.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
         sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      restart_threads (event_thread, event_thread->inf);
      start_step_over ();
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

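/* Illustrative usage note (added for this cleaned-up listing, not part of
   the original source): the mode above corresponds to the exec-following
   setting:

       (gdb) set follow-exec-mode new    # create a fresh inferior on exec
       (gdb) set follow-exec-mode same   # reuse the current inferior (default)  */
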
/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  struct inferior *inf = current_inferior ();

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inferior *new_inferior = add_inferior_with_spaces ();

      swap_terminal_info (new_inferior, inf);
      exit_inferior_silent (inf);

      new_inferior->pid = pid;
      target_follow_exec (new_inferior, ptid, exec_file_target);

      /* We continue with the new inferior.  */
      inf = new_inferior;
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
      target_follow_exec (inf, ptid, exec_file_target);
    }

  gdb_assert (current_inferior () == inf);
  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple of threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *global_thread_step_over_chain_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The thread's global number.  */
  int thread = -1;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given that threads which can't run code in the same address space as
   the breakpoint's can't really miss the breakpoint, GDB could be
   taught to step-over at most one breakpoint per address space (so this
   info could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  infrun_debug_printf ("clearing step over info");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}

/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}

/* See infrun.h.  */

int
thread_is_stepping_over_breakpoint (int thread)
{
  return (step_over_info.thread != -1
          && thread == step_over_info.thread);
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static bool
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}

c906108c 1437\f
237fc4c9
PA
1438/* Displaced stepping. */
1439
1440/* In non-stop debugging mode, we must take special care to manage
1441 breakpoints properly; in particular, the traditional strategy for
1442 stepping a thread past a breakpoint it has hit is unsuitable.
1443 'Displaced stepping' is a tactic for stepping one thread past a
1444 breakpoint it has hit while ensuring that other threads running
1445 concurrently will hit the breakpoint as they should.
1446
1447 The traditional way to step a thread T off a breakpoint in a
1448 multi-threaded program in all-stop mode is as follows:
1449
1450 a0) Initially, all threads are stopped, and breakpoints are not
1451 inserted.
1452 a1) We single-step T, leaving breakpoints uninserted.
1453 a2) We insert breakpoints, and resume all threads.
1454
1455 In non-stop debugging, however, this strategy is unsuitable: we
1456 don't want to have to stop all threads in the system in order to
1457 continue or step T past a breakpoint. Instead, we use displaced
1458 stepping:
1459
1460 n0) Initially, T is stopped, other threads are running, and
1461 breakpoints are inserted.
1462 n1) We copy the instruction "under" the breakpoint to a separate
1463 location, outside the main code stream, making any adjustments
1464 to the instruction, register, and memory state as directed by
1465 T's architecture.
1466 n2) We single-step T over the instruction at its new location.
1467 n3) We adjust the resulting register and memory state as directed
1468 by T's architecture. This includes resetting T's PC to point
1469 back into the main instruction stream.
1470 n4) We resume T.
1471
1472 This approach depends on the following gdbarch methods:
1473
1474 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1475 indicate where to copy the instruction, and how much space must
1476 be reserved there. We use these in step n1.
1477
1478 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1479 address, and makes any necessary adjustments to the instruction,
1480 register contents, and memory. We use this in step n1.
1481
1482 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1483 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1484 same effect the instruction would have had if we had executed it
1485 at its original address. We use this in step n3.
1486
237fc4c9
PA
1487 The gdbarch_displaced_step_copy_insn and
1488 gdbarch_displaced_step_fixup functions must be written so that
1489 copying an instruction with gdbarch_displaced_step_copy_insn,
1490 single-stepping across the copied instruction, and then applying
1491 gdbarch_displaced_step_fixup should have the same effects on the
1492 thread's memory and registers as stepping the instruction in place
1493 would have. Exactly which responsibilities fall to the copy and
1494 which fall to the fixup is up to the author of those functions.
1495
1496 See the comments in gdbarch.sh for details.
1497
1498 Note that displaced stepping and software single-step cannot
1499 currently be used in combination, although with some care I think
1500 they could be made to. Software single-step works by placing
1501 breakpoints on all possible subsequent instructions; if the
1502 displaced instruction is a PC-relative jump, those breakpoints
1503 could fall in very strange places --- on pages that aren't
1504 executable, or at addresses that are not proper instruction
1505 boundaries. (We do generally let other threads run while we wait
1506 to hit the software single-step breakpoint, and they might
1507 encounter such a corrupted instruction.) One way to work around
1508 this would be to have gdbarch_displaced_step_copy_insn fully
1509 simulate the effect of PC-relative instructions (and return NULL)
1510 on architectures that use software single-stepping.
1511
1512 In non-stop mode, we can have independent and simultaneous step
1513 requests, so more than one thread may need to simultaneously step
1514 over a breakpoint. The current implementation assumes there is
1515 only one scratch space per process. In this case, we have to
1516 serialize access to the scratch space. If thread A wants to step
1517 over a breakpoint, but we are currently waiting for some other
1518 thread to complete a displaced step, we leave thread A stopped and
1519 place it in the displaced_step_request_queue. Whenever a displaced
1520 step finishes, we pick the next thread in the queue and start a new
1521 displaced step operation on it. See displaced_step_prepare and
7def77a1 1522 displaced_step_finish for details. */
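/* The sketch below is only an illustration of the n0-n4 sequence
   described above; it is not part of GDB.  Every name it uses
   (insn_t, read_insn_at, adjust_insn_for_copy, write_insn_at, set_pc,
   single_step_and_wait, fixup_after_copy, resume_normally) is a
   hypothetical stand-in for work actually performed by the
   gdbarch_displaced_step_* hooks and the code further down.  */

#if 0   /* Illustration only -- never compiled.  */
static void
displaced_step_sketch (thread_info *t, CORE_ADDR pc, CORE_ADDR scratch)
{
  /* n1: copy the instruction "under" the breakpoint to the scratch
     pad, adjusting it as the architecture requires.  */
  insn_t copy = adjust_insn_for_copy (read_insn_at (t, pc), pc, scratch);
  write_insn_at (t, scratch, copy);

  /* n2: single-step the copy, with all breakpoints left inserted.  */
  set_pc (t, scratch);
  single_step_and_wait (t);

  /* n3: fix up registers and memory as if the instruction had run at
     PC, including pointing the PC back into the mainline code.  */
  fixup_after_copy (t, pc, scratch);

  /* n4: resume T normally.  */
  resume_normally (t);
}
#endif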
237fc4c9 1523
a46d1843 1524/* Return true if THREAD is doing a displaced step. */
c0987663 1525
c4464ade 1526static bool
00431a78 1527displaced_step_in_progress_thread (thread_info *thread)
c0987663 1528{
00431a78 1529 gdb_assert (thread != NULL);
c0987663 1530
187b041e 1531 return thread->displaced_step_state.in_progress ();
c0987663
YQ
1532}
1533
a46d1843 1534/* Return true if INF has a thread doing a displaced step. */
8f572e5c 1535
c4464ade 1536static bool
00431a78 1537displaced_step_in_progress (inferior *inf)
8f572e5c 1538{
187b041e 1539 return inf->displaced_step_state.in_progress_count > 0;
fc1cf338
PA
1540}
1541
187b041e 1542/* Return true if any thread is doing a displaced step. */
a42244db 1543
187b041e
SM
1544static bool
1545displaced_step_in_progress_any_thread ()
a42244db 1546{
187b041e
SM
1547 for (inferior *inf : all_non_exited_inferiors ())
1548 {
1549 if (displaced_step_in_progress (inf))
1550 return true;
1551 }
a42244db 1552
187b041e 1553 return false;
a42244db
YQ
1554}
1555
fc1cf338
PA
1556static void
1557infrun_inferior_exit (struct inferior *inf)
1558{
d20172fc 1559 inf->displaced_step_state.reset ();
060f2ef8 1560 inf->thread_waiting_for_vfork_done = nullptr;
fc1cf338 1561}
237fc4c9 1562
3b7a962d
SM
1563static void
1564infrun_inferior_execd (inferior *inf)
1565{
187b041e
SM
1566 /* If some threads were doing a displaced step in this inferior at the
1567 moment of the exec, they no longer exist. Even if the exec'ing thread was
3b7a962d
SM
1568 doing a displaced step, we don't want to do any fixup nor restore displaced
1569 stepping buffer bytes. */
1570 inf->displaced_step_state.reset ();
1571
187b041e
SM
1572 for (thread_info *thread : inf->threads ())
1573 thread->displaced_step_state.reset ();
1574
3b7a962d
SM
1575 /* Since an in-line step is done with everything else stopped, if there was
1576 one in progress at the time of the exec, it must have been the exec'ing
1577 thread. */
1578 clear_step_over_info ();
060f2ef8
SM
1579
1580 inf->thread_waiting_for_vfork_done = nullptr;
3b7a962d
SM
1581}
1582
fff08868
HZ
1583/* If ON, and the architecture supports it, GDB will use displaced
1584 stepping to step over breakpoints. If OFF, or if the architecture
1585 doesn't support it, GDB will instead use the traditional
1586 hold-and-step approach. If AUTO (which is the default), GDB will
1587 decide which technique to use to step over breakpoints depending on
9822cb57 1588 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1589
72d0e2c5 1590static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
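/* Illustrative CLI usage of the setting above, assuming the usual
   "set/show displaced-stepping" registration of this variable:

     (gdb) set displaced-stepping auto    (the default; follows non-stop)
     (gdb) set displaced-stepping on
     (gdb) set displaced-stepping off
     (gdb) show displaced-stepping
*/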
fff08868 1591
237fc4c9
PA
1592static void
1593show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1594 struct cmd_list_element *c,
1595 const char *value)
1596{
72d0e2c5 1597 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1598 fprintf_filtered (file,
1599 _("Debugger's willingness to use displaced stepping "
1600 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1601 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1602 else
3e43a32a
MS
1603 fprintf_filtered (file,
1604 _("Debugger's willingness to use displaced stepping "
1605 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1606}
1607
9822cb57
SM
1608/* Return true if the gdbarch implements the required methods to use
1609 displaced stepping. */
1610
1611static bool
1612gdbarch_supports_displaced_stepping (gdbarch *arch)
1613{
187b041e
SM
1614 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1615 that if `prepare` is provided, so is `finish`. */
1616 return gdbarch_displaced_step_prepare_p (arch);
9822cb57
SM
1617}
1618
fff08868 1619/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1620 over breakpoints of thread TP. */
fff08868 1621
9822cb57
SM
1622static bool
1623use_displaced_stepping (thread_info *tp)
237fc4c9 1624{
9822cb57
SM
1625 /* If the user disabled it explicitly, don't use displaced stepping. */
1626 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1627 return false;
1628
1629 /* If "auto", only use displaced stepping if the target operates in a non-stop
1630 way. */
1631 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1632 && !target_is_non_stop_p ())
1633 return false;
1634
1635 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1636
1637 /* If the architecture doesn't implement displaced stepping, don't use
1638 it. */
1639 if (!gdbarch_supports_displaced_stepping (gdbarch))
1640 return false;
1641
1642 /* If recording, don't use displaced stepping. */
1643 if (find_record_target () != nullptr)
1644 return false;
1645
9822cb57
SM
1646 /* If displaced stepping failed before for this inferior, don't bother trying
1647 again. */
f5f01699 1648 if (tp->inf->displaced_step_state.failed_before)
9822cb57
SM
1649 return false;
1650
1651 return true;
237fc4c9
PA
1652}
1653
187b041e 1654/* Simple function wrapper around displaced_step_thread_state::reset. */
d8d83535 1655
237fc4c9 1656static void
187b041e 1657displaced_step_reset (displaced_step_thread_state *displaced)
237fc4c9 1658{
d8d83535 1659 displaced->reset ();
237fc4c9
PA
1660}
1661
d8d83535
SM
1662/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1663 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1664
1665using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9 1666
136821d9
SM
1667/* See infrun.h. */
1668
1669std::string
1670displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
237fc4c9 1671{
136821d9 1672 std::string ret;
237fc4c9 1673
136821d9
SM
1674 for (size_t i = 0; i < len; i++)
1675 {
1676 if (i == 0)
1677 ret += string_printf ("%02x", buf[i]);
1678 else
1679 ret += string_printf (" %02x", buf[i]);
1680 }
1681
1682 return ret;
237fc4c9
PA
1683}
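/* A minimal usage sketch of the helper above; the byte values are made
   up and dump_copied_insn is a hypothetical caller, not existing code.  */

#if 0   /* Illustration only -- never compiled.  */
static void
dump_copied_insn ()
{
  const gdb_byte insn[] = { 0x55, 0x48, 0x89, 0xe5 };
  std::string hex = displaced_step_dump_bytes (insn, sizeof (insn));
  /* HEX is now "55 48 89 e5".  */
  displaced_debug_printf ("copied insn bytes: %s", hex.c_str ());
}
#endif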
1684
1685/* Prepare to single-step, using displaced stepping.
1686
1687 Note that we cannot use displaced stepping when we have a signal to
1688 deliver. If we have a signal to deliver and an instruction to step
1689 over, then after the step, there will be no indication from the
1690 target whether the thread entered a signal handler or ignored the
1691 signal and stepped over the instruction successfully --- both cases
1692 result in a simple SIGTRAP. In the first case we mustn't do a
1693 fixup, and in the second case we must --- but we can't tell which.
1694 Comments in the code for 'random signals' in handle_inferior_event
1695 explain how we handle this case instead.
1696
bab37966
SM
1697 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1698 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1699 if displaced stepping this thread got queued; or
1700 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1701 stepped. */
7f03bd92 1702
bab37966 1703static displaced_step_prepare_status
00431a78 1704displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1705{
00431a78 1706 regcache *regcache = get_thread_regcache (tp);
ac7936df 1707 struct gdbarch *gdbarch = regcache->arch ();
187b041e
SM
1708 displaced_step_thread_state &disp_step_thread_state
1709 = tp->displaced_step_state;
237fc4c9
PA
1710
1711 /* We should never reach this function if the architecture does not
1712 support displaced stepping. */
9822cb57 1713 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1714
c2829269
PA
1715 /* Nor if the thread isn't meant to step over a breakpoint. */
1716 gdb_assert (tp->control.trap_expected);
1717
c1e36e3e
PA
1718 /* Disable range stepping while executing in the scratch pad. We
1719 want a single-step even if executing the displaced instruction in
1720 the scratch buffer lands within the stepping range (e.g., a
1721 jump/branch). */
1722 tp->control.may_range_step = 0;
1723
187b041e
SM
1724 /* We are about to start a displaced step for this thread. If one is already
1725 in progress, something's wrong. */
1726 gdb_assert (!disp_step_thread_state.in_progress ());
237fc4c9 1727
187b041e 1728 if (tp->inf->displaced_step_state.unavailable)
237fc4c9 1729 {
187b041e
SM
1730 /* The gdbarch tells us it's not worth asking to try a prepare because
1731 it is likely that it will return unavailable, so don't bother asking. */
237fc4c9 1732
136821d9
SM
1733 displaced_debug_printf ("deferring step of %s",
1734 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1735
28d5518b 1736 global_thread_step_over_chain_enqueue (tp);
bab37966 1737 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
237fc4c9 1738 }
237fc4c9 1739
187b041e
SM
1740 displaced_debug_printf ("displaced-stepping %s now",
1741 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1742
00431a78
PA
1743 scoped_restore_current_thread restore_thread;
1744
1745 switch_to_thread (tp);
ad53cd71 1746
187b041e
SM
1747 CORE_ADDR original_pc = regcache_read_pc (regcache);
1748 CORE_ADDR displaced_pc;
237fc4c9 1749
187b041e
SM
1750 displaced_step_prepare_status status
1751 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
237fc4c9 1752
187b041e 1753 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
d35ae833 1754 {
187b041e
SM
1755 displaced_debug_printf ("failed to prepare (%s)",
1756 target_pid_to_str (tp->ptid).c_str ());
d35ae833 1757
bab37966 1758 return DISPLACED_STEP_PREPARE_STATUS_CANT;
d35ae833 1759 }
187b041e 1760 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
7f03bd92 1761 {
187b041e
SM
1762 /* Not enough displaced stepping resources available, defer this
1763 request by placing it in the queue. */
1764
1765 displaced_debug_printf ("not enough resources available, "
1766 "deferring step of %s",
1767 target_pid_to_str (tp->ptid).c_str ());
1768
1769 global_thread_step_over_chain_enqueue (tp);
1770
1771 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
7f03bd92 1772 }
237fc4c9 1773
187b041e
SM
1774 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1775
9f5a595d
UW
1776 /* Save the information we need to fix things up if the step
1777 succeeds. */
187b041e 1778 disp_step_thread_state.set (gdbarch);
9f5a595d 1779
187b041e 1780 tp->inf->displaced_step_state.in_progress_count++;
ad53cd71 1781
187b041e
SM
1782 displaced_debug_printf ("prepared successfully thread=%s, "
1783 "original_pc=%s, displaced_pc=%s",
1784 target_pid_to_str (tp->ptid).c_str (),
1785 paddress (gdbarch, original_pc),
1786 paddress (gdbarch, displaced_pc));
237fc4c9 1787
bab37966 1788 return DISPLACED_STEP_PREPARE_STATUS_OK;
237fc4c9
PA
1789}
1790
3fc8eb30
PA
1791/* Wrapper for displaced_step_prepare_throw that disables further
1792 attempts at displaced stepping if we get a memory error. */
1793
bab37966 1794static displaced_step_prepare_status
00431a78 1795displaced_step_prepare (thread_info *thread)
3fc8eb30 1796{
bab37966
SM
1797 displaced_step_prepare_status status
1798 = DISPLACED_STEP_PREPARE_STATUS_CANT;
3fc8eb30 1799
a70b8144 1800 try
3fc8eb30 1801 {
bab37966 1802 status = displaced_step_prepare_throw (thread);
3fc8eb30 1803 }
230d2906 1804 catch (const gdb_exception_error &ex)
3fc8eb30 1805 {
16b41842
PA
1806 if (ex.error != MEMORY_ERROR
1807 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1808 throw;
3fc8eb30 1809
1eb8556f
SM
1810 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1811 ex.what ());
3fc8eb30
PA
1812
1813 /* Be verbose if "set displaced-stepping" is "on", silent if
1814 "auto". */
1815 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1816 {
fd7dcb94 1817 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1818 ex.what ());
3fc8eb30
PA
1819 }
1820
1821 /* Disable further displaced stepping attempts. */
f5f01699 1822 thread->inf->displaced_step_state.failed_before = 1;
3fc8eb30 1823 }
3fc8eb30 1824
bab37966 1825 return status;
3fc8eb30
PA
1826}
1827
bab37966
SM
1828/* If we displaced stepped an instruction successfully, adjust registers and
1829 memory to yield the same effect the instruction would have had if we had
1830 executed it at its original address, and return
1831 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1832 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
372316f1 1833
bab37966
SM
1834 If the thread wasn't displaced stepping, return
1835 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1836
1837static displaced_step_finish_status
7def77a1 1838displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1839{
187b041e 1840 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
fc1cf338 1841
187b041e
SM
1842 /* Was this thread performing a displaced step? */
1843 if (!displaced->in_progress ())
bab37966 1844 return DISPLACED_STEP_FINISH_STATUS_OK;
237fc4c9 1845
187b041e
SM
1846 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
1847 event_thread->inf->displaced_step_state.in_progress_count--;
1848
cb71640d
PA
1849 /* Fixup may need to read memory/registers. Switch to the thread
1850 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d 1851 the current thread, and displaced_step_restore performs ptid-dependent
328d42d8 1852 memory accesses using current_inferior(). */
00431a78 1853 switch_to_thread (event_thread);
cb71640d 1854
d43b7a2d
TBA
1855 displaced_step_reset_cleanup cleanup (displaced);
1856
187b041e
SM
1857 /* Do the fixup, and release the resources acquired to do the displaced
1858 step. */
1859 return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1860 event_thread, signal);
c2829269 1861}
1c5cfe86 1862
4d9d9d04
PA
1863/* Data to be passed around while handling an event. This data is
1864 discarded between events. */
1865struct execution_control_state
1866{
5b6d1e4f 1867 process_stratum_target *target;
4d9d9d04
PA
1868 ptid_t ptid;
1869 /* The thread that got the event, if this was a thread event; NULL
1870 otherwise. */
1871 struct thread_info *event_thread;
1872
1873 struct target_waitstatus ws;
1874 int stop_func_filled_in;
1875 CORE_ADDR stop_func_start;
1876 CORE_ADDR stop_func_end;
1877 const char *stop_func_name;
1878 int wait_some_more;
1879
1880 /* True if the event thread hit the single-step breakpoint of
1881 another thread. Thus the event doesn't cause a stop, the thread
1882 needs to be single-stepped past the single-step breakpoint before
1883 we can switch back to the original stepping thread. */
1884 int hit_singlestep_breakpoint;
1885};
1886
1887/* Clear ECS and set it to point at TP. */
c2829269
PA
1888
1889static void
4d9d9d04
PA
1890reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1891{
1892 memset (ecs, 0, sizeof (*ecs));
1893 ecs->event_thread = tp;
1894 ecs->ptid = tp->ptid;
1895}
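/* An illustrative sketch of how this struct is driven -- essentially
   the pattern start_step_over uses below.  The function name
   drive_thread_sketch is hypothetical.  */

#if 0   /* Illustration only -- never compiled.  */
static void
drive_thread_sketch (thread_info *tp)
{
  struct execution_control_state ecss;

  reset_ecs (&ecss, tp);           /* Zero it and bind it to TP.  */
  keep_going_pass_signal (&ecss);  /* Try to resume TP.  */
  if (!ecss.wait_some_more)
    error (_("Command aborted."));
}
#endif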
1896
1897static void keep_going_pass_signal (struct execution_control_state *ecs);
1898static void prepare_to_wait (struct execution_control_state *ecs);
c4464ade 1899static bool keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1900static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1901
1902/* Are there any pending step-over requests? If so, run all we can
1903 now and return true. Otherwise, return false. */
1904
c4464ade 1905static bool
c2829269
PA
1906start_step_over (void)
1907{
3ec3145c
SM
1908 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1909
187b041e 1910 thread_info *next;
c2829269 1911
372316f1
PA
1912 /* Don't start a new step-over if we already have an in-line
1913 step-over operation ongoing. */
1914 if (step_over_info_valid_p ())
c4464ade 1915 return false;
372316f1 1916
187b041e
SM
1917 /* Steal the global thread step over chain. As we try to initiate displaced
1918 steps, threads will be enqueued in the global chain if no buffers are
1919 available. If we iterated on the global chain directly, we might iterate
1920 indefinitely. */
1921 thread_info *threads_to_step = global_thread_step_over_chain_head;
1922 global_thread_step_over_chain_head = NULL;
1923
1924 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
1925 thread_step_over_chain_length (threads_to_step));
1926
1927 bool started = false;
1928
1929 /* On scope exit (whatever the reason, return or exception), if there are
1930 threads left in the THREADS_TO_STEP chain, put back these threads in the
1931 global list. */
1932 SCOPE_EXIT
1933 {
1934 if (threads_to_step == nullptr)
1935 infrun_debug_printf ("step-over queue now empty");
1936 else
1937 {
1938 infrun_debug_printf ("putting back %d threads to step in global queue",
1939 thread_step_over_chain_length (threads_to_step));
1940
1941 global_thread_step_over_chain_enqueue_chain (threads_to_step);
1942 }
1943 };
1944
1945 for (thread_info *tp = threads_to_step; tp != NULL; tp = next)
237fc4c9 1946 {
4d9d9d04
PA
1947 struct execution_control_state ecss;
1948 struct execution_control_state *ecs = &ecss;
8d297bbf 1949 step_over_what step_what;
372316f1 1950 int must_be_in_line;
c2829269 1951
c65d6b55
PA
1952 gdb_assert (!tp->stop_requested);
1953
187b041e 1954 next = thread_step_over_chain_next (threads_to_step, tp);
237fc4c9 1955
187b041e
SM
1956 if (tp->inf->displaced_step_state.unavailable)
1957 {
1958 /* The arch told us to not even try preparing another displaced step
1959 for this inferior. Just leave the thread in THREADS_TO_STEP, it
1960 will get moved to the global chain on scope exit. */
1961 continue;
1962 }
1963
81d92403
SM
1964 if (tp->inf->thread_waiting_for_vfork_done)
1965 {
1966 /* When we stop all threads, handling a vfork, any thread in the step
1967 over chain remains there. A user could also try to continue a
1968 thread stopped at a breakpoint while another thread is waiting for
1969 a vfork-done event. In any case, we don't want to start a step
1970 over right now. */
1971 continue;
1972 }
1973
187b041e
SM
1974 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
1975 while we try to prepare the displaced step, we don't add it back to
1976 the global step over chain. This is to avoid a thread staying in the
1977 step over chain indefinitely if something goes wrong when resuming it.
1978 If the error is intermittent and it still needs a step over, it will
1979 get enqueued again when we try to resume it normally. */
1980 thread_step_over_chain_remove (&threads_to_step, tp);
c2829269 1981
372316f1
PA
1982 step_what = thread_still_needs_step_over (tp);
1983 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1984 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1985 && !use_displaced_stepping (tp)));
372316f1
PA
1986
1987 /* We currently stop all threads of all processes to step-over
1988 in-line. If we need to start a new in-line step-over, let
1989 any pending displaced steps finish first. */
187b041e
SM
1990 if (must_be_in_line && displaced_step_in_progress_any_thread ())
1991 {
1992 global_thread_step_over_chain_enqueue (tp);
1993 continue;
1994 }
c2829269 1995
372316f1
PA
1996 if (tp->control.trap_expected
1997 || tp->resumed
1998 || tp->executing)
ad53cd71 1999 {
4d9d9d04
PA
2000 internal_error (__FILE__, __LINE__,
2001 "[%s] has inconsistent state: "
372316f1 2002 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 2003 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 2004 tp->control.trap_expected,
372316f1 2005 tp->resumed,
4d9d9d04 2006 tp->executing);
ad53cd71 2007 }
1c5cfe86 2008
1eb8556f
SM
2009 infrun_debug_printf ("resuming [%s] for step-over",
2010 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
2011
2012 /* keep_going_pass_signal skips the step-over if the breakpoint
2013 is no longer inserted. In all-stop, we want to keep looking
2014 for a thread that needs a step-over instead of resuming TP,
2015 because we wouldn't be able to resume anything else until the
2016 target stops again. In non-stop, the resume always resumes
2017 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2018 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2019 continue;
8550d3b3 2020
00431a78 2021 switch_to_thread (tp);
4d9d9d04
PA
2022 reset_ecs (ecs, tp);
2023 keep_going_pass_signal (ecs);
1c5cfe86 2024
4d9d9d04
PA
2025 if (!ecs->wait_some_more)
2026 error (_("Command aborted."));
1c5cfe86 2027
187b041e
SM
2028 /* If the thread's step over could not be initiated because no buffers
2029 were available, it was re-added to the global step over chain. */
2030 if (tp->resumed)
2031 {
2032 infrun_debug_printf ("[%s] was resumed.",
2033 target_pid_to_str (tp->ptid).c_str ());
2034 gdb_assert (!thread_is_in_step_over_chain (tp));
2035 }
2036 else
2037 {
2038 infrun_debug_printf ("[%s] was NOT resumed.",
2039 target_pid_to_str (tp->ptid).c_str ());
2040 gdb_assert (thread_is_in_step_over_chain (tp));
2041 }
372316f1
PA
2042
2043 /* If we started a new in-line step-over, we're done. */
2044 if (step_over_info_valid_p ())
2045 {
2046 gdb_assert (tp->control.trap_expected);
187b041e
SM
2047 started = true;
2048 break;
372316f1
PA
2049 }
2050
fbea99ea 2051 if (!target_is_non_stop_p ())
4d9d9d04
PA
2052 {
2053 /* On all-stop, shouldn't have resumed unless we needed a
2054 step over. */
2055 gdb_assert (tp->control.trap_expected
2056 || tp->step_after_step_resume_breakpoint);
2057
2058 /* With remote targets (at least), in all-stop, we can't
2059 issue any further remote commands until the program stops
2060 again. */
187b041e
SM
2061 started = true;
2062 break;
1c5cfe86 2063 }
c2829269 2064
4d9d9d04
PA
2065 /* Either the thread no longer needed a step-over, or a new
2066 displaced stepping sequence started. Even in the latter
2067 case, continue looking. Maybe we can also start another
2068 displaced step on a thread of another process. */
237fc4c9 2069 }
4d9d9d04 2070
187b041e 2071 return started;
237fc4c9
PA
2072}
2073
5231c1fd
PA
2074/* Update global variables holding ptids to hold NEW_PTID if they were
2075 holding OLD_PTID. */
2076static void
b161a60d
SM
2077infrun_thread_ptid_changed (process_stratum_target *target,
2078 ptid_t old_ptid, ptid_t new_ptid)
5231c1fd 2079{
b161a60d
SM
2080 if (inferior_ptid == old_ptid
2081 && current_inferior ()->process_target () == target)
5231c1fd 2082 inferior_ptid = new_ptid;
5231c1fd
PA
2083}
2084
237fc4c9 2085\f
c906108c 2086
53904c9e
AC
2087static const char schedlock_off[] = "off";
2088static const char schedlock_on[] = "on";
2089static const char schedlock_step[] = "step";
f2665db5 2090static const char schedlock_replay[] = "replay";
40478521 2091static const char *const scheduler_enums[] = {
ef346e04
AC
2092 schedlock_off,
2093 schedlock_on,
2094 schedlock_step,
f2665db5 2095 schedlock_replay,
ef346e04
AC
2096 NULL
2097};
f2665db5 2098static const char *scheduler_mode = schedlock_replay;
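/* Illustrative CLI usage of the modes above, assuming the usual
   "set/show scheduler-locking" registration of this enum:

     (gdb) set scheduler-locking off      (all threads run when resuming)
     (gdb) set scheduler-locking on       (only the current thread runs)
     (gdb) set scheduler-locking step     (lock others only while stepping)
     (gdb) set scheduler-locking replay   (lock others while replaying; default)
     (gdb) show scheduler-locking
*/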
920d2a44
AC
2099static void
2100show_scheduler_mode (struct ui_file *file, int from_tty,
2101 struct cmd_list_element *c, const char *value)
2102{
3e43a32a
MS
2103 fprintf_filtered (file,
2104 _("Mode for locking scheduler "
2105 "during execution is \"%s\".\n"),
920d2a44
AC
2106 value);
2107}
c906108c
SS
2108
2109static void
eb4c3f4a 2110set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2111{
8a3ecb79 2112 if (!target_can_lock_scheduler ())
eefe576e
AC
2113 {
2114 scheduler_mode = schedlock_off;
d777bf0d
SM
2115 error (_("Target '%s' cannot support this command."),
2116 target_shortname ());
eefe576e 2117 }
c906108c
SS
2118}
2119
d4db2f36
PA
2120/* True if execution commands resume all threads of all processes by
2121 default; otherwise, resume only threads of the current inferior
2122 process. */
491144b5 2123bool sched_multi = false;
d4db2f36 2124
2facfe5c 2125/* Try to setup for software single stepping over the specified location.
c4464ade 2126 Return true if target_resume() should use hardware single step.
2facfe5c
DD
2127
2128 GDBARCH the current gdbarch.
2129 PC the location to step over. */
2130
c4464ade 2131static bool
2facfe5c
DD
2132maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2133{
c4464ade 2134 bool hw_step = true;
2facfe5c 2135
f02253f1 2136 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2137 && gdbarch_software_single_step_p (gdbarch))
2138 hw_step = !insert_single_step_breakpoints (gdbarch);
2139
2facfe5c
DD
2140 return hw_step;
2141}
c906108c 2142
f3263aa4
PA
2143/* See infrun.h. */
2144
09cee04b
PA
2145ptid_t
2146user_visible_resume_ptid (int step)
2147{
f3263aa4 2148 ptid_t resume_ptid;
09cee04b 2149
09cee04b
PA
2150 if (non_stop)
2151 {
2152 /* With non-stop mode on, threads are always handled
2153 individually. */
2154 resume_ptid = inferior_ptid;
2155 }
2156 else if ((scheduler_mode == schedlock_on)
03d46957 2157 || (scheduler_mode == schedlock_step && step))
09cee04b 2158 {
f3263aa4
PA
2159 /* User-settable 'scheduler' mode requires solo thread
2160 resume. */
09cee04b
PA
2161 resume_ptid = inferior_ptid;
2162 }
f2665db5
MM
2163 else if ((scheduler_mode == schedlock_replay)
2164 && target_record_will_replay (minus_one_ptid, execution_direction))
2165 {
2166 /* User-settable 'scheduler' mode requires solo thread resume in replay
2167 mode. */
2168 resume_ptid = inferior_ptid;
2169 }
f3263aa4
PA
2170 else if (!sched_multi && target_supports_multi_process ())
2171 {
2172 /* Resume all threads of the current process (and none of other
2173 processes). */
e99b03dc 2174 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2175 }
2176 else
2177 {
2178 /* Resume all threads of all processes. */
2179 resume_ptid = RESUME_ALL;
2180 }
09cee04b
PA
2181
2182 return resume_ptid;
2183}
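/* Illustrative outcomes of the selection above, for an all-stop
   session debugging a single process with pid 1234:

     non-stop on, scheduler-locking on,
       or "step" while stepping            -> inferior_ptid (one thread)
     default ("set schedule-multiple off") -> ptid_t (1234) (whole process)
     "set schedule-multiple on"            -> RESUME_ALL (all threads of
                                              all processes)
*/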
2184
5b6d1e4f
PA
2185/* See infrun.h. */
2186
2187process_stratum_target *
2188user_visible_resume_target (ptid_t resume_ptid)
2189{
2190 return (resume_ptid == minus_one_ptid && sched_multi
2191 ? NULL
2192 : current_inferior ()->process_target ());
2193}
2194
fbea99ea
PA
2195/* Return a ptid representing the set of threads that we will resume,
2196 from the perspective of the target, assuming run control handling
2197 does not require leaving some threads stopped (e.g., stepping past
2198 a breakpoint). USER_STEP indicates whether we're about to start the
2199 target for a stepping command. */
2200
2201static ptid_t
2202internal_resume_ptid (int user_step)
2203{
2204 /* In non-stop, we always control threads individually. Note that
2205 the target may always work in non-stop mode even with "set
2206 non-stop off", in which case user_visible_resume_ptid could
2207 return a wildcard ptid. */
2208 if (target_is_non_stop_p ())
2209 return inferior_ptid;
81d92403
SM
2210
2211 /* The rest of the function assumes non-stop==off and
2212 target-non-stop==off.
2213
2214 If a thread is waiting for a vfork-done event, it means breakpoints are out
2215 for this inferior (well, program space in fact). We don't want to resume
2216 any thread other than the one waiting for vfork done, otherwise these other
2217 threads could miss breakpoints. So if a thread in the resumption set is
2218 waiting for a vfork-done event, resume only that thread.
2219
2220 The resumption set width depends on whether schedule-multiple is on or off.
2221
2222 Note that if the target_resume interface was more flexible, we could be
2223 smarter here when schedule-multiple is on. For example, imagine 3
2224 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2225 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2226 target(s) to resume:
2227
2228 - All threads of inferior 1
2229 - Thread 2.1
2230 - Thread 3.2
2231
2232 Since we don't have that flexibility (we can only pass one ptid), just
2233 resume the first thread waiting for a vfork-done event we find (e.g. thread
2234 2.1). */
2235 if (sched_multi)
2236 {
2237 for (inferior *inf : all_non_exited_inferiors ())
2238 if (inf->thread_waiting_for_vfork_done != nullptr)
2239 return inf->thread_waiting_for_vfork_done->ptid;
2240 }
2241 else if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2242 return current_inferior ()->thread_waiting_for_vfork_done->ptid;
2243
2244 return user_visible_resume_ptid (user_step);
fbea99ea
PA
2245}
2246
64ce06e4
PA
2247/* Wrapper for target_resume, that handles infrun-specific
2248 bookkeeping. */
2249
2250static void
c4464ade 2251do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
64ce06e4
PA
2252{
2253 struct thread_info *tp = inferior_thread ();
2254
c65d6b55
PA
2255 gdb_assert (!tp->stop_requested);
2256
64ce06e4 2257 /* Install inferior's terminal modes. */
223ffa71 2258 target_terminal::inferior ();
64ce06e4
PA
2259
2260 /* Avoid confusing the next resume, if the next stop/resume
2261 happens to apply to another thread. */
2262 tp->suspend.stop_signal = GDB_SIGNAL_0;
2263
8f572e5c
PA
2264 /* Advise target which signals may be handled silently.
2265
2266 If we have removed breakpoints because we are stepping over one
2267 in-line (in any thread), we need to receive all signals to avoid
2268 accidentally skipping a breakpoint during execution of a signal
2269 handler.
2270
2271 Likewise if we're displaced stepping, otherwise a trap for a
2272 breakpoint in a signal handler might be confused with the
7def77a1 2273 displaced step finishing. We don't make the displaced_step_finish
8f572e5c
PA
2274 step distinguish the cases instead, because:
2275
2276 - a backtrace while stopped in the signal handler would show the
2277 scratch pad as frame older than the signal handler, instead of
2278 the real mainline code.
2279
2280 - when the thread is later resumed, the signal handler would
2281 return to the scratch pad area, which would no longer be
2282 valid. */
2283 if (step_over_info_valid_p ()
00431a78 2284 || displaced_step_in_progress (tp->inf))
adc6a863 2285 target_pass_signals ({});
64ce06e4 2286 else
adc6a863 2287 target_pass_signals (signal_pass);
64ce06e4 2288
17543e57
SM
2289 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2290 resume_ptid.to_string ().c_str (),
2291 step, gdb_signal_to_symbol_string (sig));
2292
64ce06e4 2293 target_resume (resume_ptid, step, sig);
85ad3aaf 2294
5b6d1e4f
PA
2295 if (target_can_async_p ())
2296 target_async (1);
64ce06e4
PA
2297}
2298
d930703d 2299/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2300 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2301 call 'resume', which handles exceptions. */
c906108c 2302
71d378ae
PA
2303static void
2304resume_1 (enum gdb_signal sig)
c906108c 2305{
515630c5 2306 struct regcache *regcache = get_current_regcache ();
ac7936df 2307 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2308 struct thread_info *tp = inferior_thread ();
8b86c959 2309 const address_space *aspace = regcache->aspace ();
b0f16a3e 2310 ptid_t resume_ptid;
856e7dd6
PA
2311 /* This represents the user's step vs continue request. When
2312 deciding whether "set scheduler-locking step" applies, it's the
2313 user's intention that counts. */
2314 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2315 /* This represents what we'll actually request the target to do.
2316 This can decay from a step to a continue, if e.g., we need to
2317 implement single-stepping with breakpoints (software
2318 single-step). */
c4464ade 2319 bool step;
c7e8a53c 2320
c65d6b55 2321 gdb_assert (!tp->stop_requested);
c2829269
PA
2322 gdb_assert (!thread_is_in_step_over_chain (tp));
2323
372316f1
PA
2324 if (tp->suspend.waitstatus_pending_p)
2325 {
1eb8556f
SM
2326 infrun_debug_printf
2327 ("thread %s has pending wait "
2328 "status %s (currently_stepping=%d).",
2329 target_pid_to_str (tp->ptid).c_str (),
2330 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2331 currently_stepping (tp));
372316f1 2332
5b6d1e4f 2333 tp->inf->process_target ()->threads_executing = true;
719546c4 2334 tp->resumed = true;
372316f1
PA
2335
2336 /* FIXME: What should we do if we are supposed to resume this
2337 thread with a signal? Maybe we should maintain a queue of
2338 pending signals to deliver. */
2339 if (sig != GDB_SIGNAL_0)
2340 {
fd7dcb94 2341 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2342 gdb_signal_to_name (sig),
2343 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2344 }
2345
2346 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2347
2348 if (target_can_async_p ())
9516f85a
AB
2349 {
2350 target_async (1);
2351 /* Tell the event loop we have an event to process. */
2352 mark_async_event_handler (infrun_async_inferior_event_token);
2353 }
372316f1
PA
2354 return;
2355 }
2356
2357 tp->stepped_breakpoint = 0;
2358
6b403daa
PA
2359 /* Depends on stepped_breakpoint. */
2360 step = currently_stepping (tp);
2361
060f2ef8 2362 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
74609e71 2363 {
48f9886d
PA
2364 /* Don't try to single-step a vfork parent that is waiting for
2365 the child to get out of the shared memory region (by exec'ing
2366 or exiting). This is particularly important on software
2367 single-step archs, as the child process would trip on the
2368 software single step breakpoint inserted for the parent
2369 process. Since the parent will not actually execute any
2370 instruction until the child is out of the shared region (such
2371 are vfork's semantics), it is safe to simply continue it.
2372 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2373 the parent, and tell it to `keep_going', which automatically
2374 re-sets it to stepping. */
1eb8556f 2375 infrun_debug_printf ("resume : clear step");
c4464ade 2376 step = false;
74609e71
YQ
2377 }
2378
7ca9b62a
TBA
2379 CORE_ADDR pc = regcache_read_pc (regcache);
2380
1eb8556f
SM
2381 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2382 "current thread [%s] at %s",
2383 step, gdb_signal_to_symbol_string (sig),
2384 tp->control.trap_expected,
2385 target_pid_to_str (inferior_ptid).c_str (),
2386 paddress (gdbarch, pc));
c906108c 2387
c2c6d25f
JM
2388 /* Normally, by the time we reach `resume', the breakpoints are either
2389 removed or inserted, as appropriate. The exception is if we're sitting
2390 at a permanent breakpoint; we need to step over it, but permanent
2391 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2392 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2393 {
af48d08f
PA
2394 if (sig != GDB_SIGNAL_0)
2395 {
2396 /* We have a signal to pass to the inferior. The resume
2397 may or may not take us to the signal handler. If this
2398 is a step, we'll need to stop in the signal handler, if
2399 there's one (if the target supports stepping into
2400 handlers), or in the next mainline instruction, if
2401 there's no handler. If this is a continue, we need to be
2402 sure to run the handler with all breakpoints inserted.
2403 In all cases, set a breakpoint at the current address
2404 (where the handler returns to), and once that breakpoint
2405 is hit, resume skipping the permanent breakpoint. If
2406 that breakpoint isn't hit, then we've stepped into the
2407 signal handler (or hit some other event). We'll delete
2408 the step-resume breakpoint then. */
2409
1eb8556f
SM
2410 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2411 "deliver signal first");
af48d08f
PA
2412
2413 clear_step_over_info ();
2414 tp->control.trap_expected = 0;
2415
2416 if (tp->control.step_resume_breakpoint == NULL)
2417 {
2418 /* Set a "high-priority" step-resume, as we don't want
2419 user breakpoints at PC to trigger (again) when this
2420 hits. */
2421 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2422 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2423
2424 tp->step_after_step_resume_breakpoint = step;
2425 }
2426
2427 insert_breakpoints ();
2428 }
2429 else
2430 {
2431 /* There's no signal to pass, we can go ahead and skip the
2432 permanent breakpoint manually. */
1eb8556f 2433 infrun_debug_printf ("skipping permanent breakpoint");
af48d08f
PA
2434 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2435 /* Update pc to reflect the new address from which we will
2436 execute instructions. */
2437 pc = regcache_read_pc (regcache);
2438
2439 if (step)
2440 {
2441 /* We've already advanced the PC, so the stepping part
2442 is done. Now we need to arrange for a trap to be
2443 reported to handle_inferior_event. Set a breakpoint
2444 at the current PC, and run to it. Don't update
2445 prev_pc, because if we end in
44a1ee51
PA
2446 switch_back_to_stepped_thread, we want the "expected
2447 thread advanced also" branch to be taken. IOW, we
2448 don't want this thread to step further from PC
af48d08f 2449 (overstep). */
1ac806b8 2450 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2451 insert_single_step_breakpoint (gdbarch, aspace, pc);
2452 insert_breakpoints ();
2453
fbea99ea 2454 resume_ptid = internal_resume_ptid (user_step);
c4464ade 2455 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
719546c4 2456 tp->resumed = true;
af48d08f
PA
2457 return;
2458 }
2459 }
6d350bb5 2460 }
c2c6d25f 2461
c1e36e3e
PA
2462 /* If we have a breakpoint to step over, make sure to do a single
2463 step only. Same if we have software watchpoints. */
2464 if (tp->control.trap_expected || bpstat_should_step ())
2465 tp->control.may_range_step = 0;
2466
7da6a5b9
LM
2467 /* If displaced stepping is enabled, step over breakpoints by executing a
2468 copy of the instruction at a different address.
237fc4c9
PA
2469
2470 We can't use displaced stepping when we have a signal to deliver;
2471 the comments for displaced_step_prepare explain why. The
2472 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2473 signals' explain what we do instead.
2474
2475 We can't use displaced stepping when we are waiting for a vfork_done
2476 event; displaced stepping breaks the vfork child similarly to a
2477 software single-step breakpoint. */
3fc8eb30
PA
2478 if (tp->control.trap_expected
2479 && use_displaced_stepping (tp)
cb71640d 2480 && !step_over_info_valid_p ()
a493e3e2 2481 && sig == GDB_SIGNAL_0
060f2ef8 2482 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
237fc4c9 2483 {
bab37966
SM
2484 displaced_step_prepare_status prepare_status
2485 = displaced_step_prepare (tp);
fc1cf338 2486
bab37966 2487 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
d56b7306 2488 {
1eb8556f 2489 infrun_debug_printf ("Got placed in step-over queue");
4d9d9d04
PA
2490
2491 tp->control.trap_expected = 0;
d56b7306
VP
2492 return;
2493 }
bab37966 2494 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
3fc8eb30
PA
2495 {
2496 /* Fallback to stepping over the breakpoint in-line. */
2497
2498 if (target_is_non_stop_p ())
3cebef98 2499 stop_all_threads ("displaced stepping falling back on inline stepping");
3fc8eb30 2500
a01bda52 2501 set_step_over_info (regcache->aspace (),
21edc42f 2502 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2503
2504 step = maybe_software_singlestep (gdbarch, pc);
2505
2506 insert_breakpoints ();
2507 }
bab37966 2508 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
3fc8eb30 2509 {
3fc8eb30
PA
2510 /* Update pc to reflect the new address from which we will
2511 execute instructions due to displaced stepping. */
00431a78 2512 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2513
40a53766 2514 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
3fc8eb30 2515 }
bab37966
SM
2516 else
2517 gdb_assert_not_reached (_("Invalid displaced_step_prepare_status "
2518 "value."));
237fc4c9
PA
2519 }
2520
2facfe5c 2521 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2522 else if (step)
2facfe5c 2523 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2524
30852783
UW
2525 /* Currently, our software single-step implementation leads to different
2526 results than hardware single-stepping in one situation: when stepping
2527 into delivering a signal which has an associated signal handler,
2528 hardware single-step will stop at the first instruction of the handler,
2529 while software single-step will simply skip execution of the handler.
2530
2531 For now, this difference in behavior is accepted since there is no
2532 easy way to actually implement single-stepping into a signal handler
2533 without kernel support.
2534
2535 However, there is one scenario where this difference leads to follow-on
2536 problems: if we're stepping off a breakpoint by removing all breakpoints
2537 and then single-stepping. In this case, the software single-step
2538 behavior means that even if there is a *breakpoint* in the signal
2539 handler, GDB still would not stop.
2540
2541 Fortunately, we can at least fix this particular issue. We detect
2542 here the case where we are about to deliver a signal while software
2543 single-stepping with breakpoints removed. In this situation, we
2544 revert the decisions to remove all breakpoints and insert single-
2545 step breakpoints, and instead we install a step-resume breakpoint
2546 at the current address, deliver the signal without stepping, and
2547 once we arrive back at the step-resume breakpoint, actually step
2548 over the breakpoint we originally wanted to step over. */
34b7e8a6 2549 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2550 && sig != GDB_SIGNAL_0
2551 && step_over_info_valid_p ())
30852783
UW
2552 {
2553 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2554 immediately after a handler returns, we might already have
30852783
UW
2555 a step-resume breakpoint set on the earlier handler. We cannot
2556 set another step-resume breakpoint; just continue on until the
2557 original breakpoint is hit. */
2558 if (tp->control.step_resume_breakpoint == NULL)
2559 {
2c03e5be 2560 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2561 tp->step_after_step_resume_breakpoint = 1;
2562 }
2563
34b7e8a6 2564 delete_single_step_breakpoints (tp);
30852783 2565
31e77af2 2566 clear_step_over_info ();
30852783 2567 tp->control.trap_expected = 0;
31e77af2
PA
2568
2569 insert_breakpoints ();
30852783
UW
2570 }
2571
b0f16a3e
SM
2572 /* If STEP is set, it's a request to use hardware stepping
2573 facilities. But in that case, we should never
2574 use a single-step breakpoint. */
34b7e8a6 2575 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2576
fbea99ea 2577 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2578 if (tp->control.trap_expected)
b0f16a3e
SM
2579 {
2580 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2581 hit, either by single-stepping the thread with the breakpoint
2582 removed, or by displaced stepping, with the breakpoint inserted.
2583 In the former case, we need to single-step only this thread,
2584 and keep others stopped, as they can miss this breakpoint if
2585 allowed to run. That's not really a problem for displaced
2586 stepping, but, we still keep other threads stopped, in case
2587 another thread is also stopped for a breakpoint waiting for
2588 its turn in the displaced stepping queue. */
b0f16a3e
SM
2589 resume_ptid = inferior_ptid;
2590 }
fbea99ea
PA
2591 else
2592 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2593
7f5ef605
PA
2594 if (execution_direction != EXEC_REVERSE
2595 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2596 {
372316f1
PA
2597 /* There are two cases where we currently need to step a
2598 breakpoint instruction when we have a signal to deliver:
2599
2600 - See handle_signal_stop where we handle random signals that
2601 could take us out of the stepping range. Normally, in
2602 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2603 signal handler with a breakpoint at PC, but there are cases
2604 where we should _always_ single-step, even if we have a
2605 step-resume breakpoint, like when a software watchpoint is
2606 set. Assuming single-stepping and delivering a signal at the
2607 same time would take us to the signal handler, then we could
2608 have removed the breakpoint at PC to step over it. However,
2609 some hardware step targets (e.g., Mac OS) can't step
2610 into signal handlers, and for those, we need to leave the
2611 breakpoint at PC inserted, as otherwise if the handler
2612 recurses and executes PC again, it'll miss the breakpoint.
2613 So we leave the breakpoint inserted anyway, but we need to
2614 record that we tried to step a breakpoint instruction, so
372316f1
PA
2615 that adjust_pc_after_break doesn't end up confused.
2616
dda83cd7 2617 - In non-stop if we insert a breakpoint (e.g., a step-resume)
372316f1
PA
2618 in one thread after another thread that was stepping had been
2619 momentarily paused for a step-over. When we re-resume the
2620 stepping thread, it may be resumed from that address with a
2621 breakpoint that hasn't trapped yet. Seen with
2622 gdb.threads/non-stop-fair-events.exp, on targets that don't
2623 do displaced stepping. */
2624
1eb8556f
SM
2625 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2626 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2627
2628 tp->stepped_breakpoint = 1;
2629
b0f16a3e
SM
2630 /* Most targets can step a breakpoint instruction, thus
2631 executing it normally. But if this one cannot, just
2632 continue and we will hit it anyway. */
7f5ef605 2633 if (gdbarch_cannot_step_breakpoint (gdbarch))
c4464ade 2634 step = false;
b0f16a3e 2635 }
ef5cf84e 2636
b0f16a3e 2637 if (debug_displaced
cb71640d 2638 && tp->control.trap_expected
3fc8eb30 2639 && use_displaced_stepping (tp)
cb71640d 2640 && !step_over_info_valid_p ())
b0f16a3e 2641 {
00431a78 2642 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2643 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2644 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2645 gdb_byte buf[4];
2646
b0f16a3e 2647 read_memory (actual_pc, buf, sizeof (buf));
136821d9
SM
2648 displaced_debug_printf ("run %s: %s",
2649 paddress (resume_gdbarch, actual_pc),
2650 displaced_step_dump_bytes
2651 (buf, sizeof (buf)).c_str ());
b0f16a3e 2652 }
237fc4c9 2653
b0f16a3e
SM
2654 if (tp->control.may_range_step)
2655 {
2656 /* If we're resuming a thread with the PC out of the step
2657 range, then we're doing some nested/finer run control
2658 operation, like stepping the thread out of the dynamic
2659 linker or the displaced stepping scratch pad. We
2660 shouldn't have allowed a range step then. */
2661 gdb_assert (pc_in_thread_step_range (pc, tp));
2662 }
c1e36e3e 2663
64ce06e4 2664 do_target_resume (resume_ptid, step, sig);
719546c4 2665 tp->resumed = true;
c906108c 2666}
71d378ae
PA
2667
2668/* Resume the inferior. SIG is the signal to give the inferior
2669 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2670 rolls back state on error. */
2671
aff4e175 2672static void
71d378ae
PA
2673resume (gdb_signal sig)
2674{
a70b8144 2675 try
71d378ae
PA
2676 {
2677 resume_1 (sig);
2678 }
230d2906 2679 catch (const gdb_exception &ex)
71d378ae
PA
2680 {
2681 /* If resuming is being aborted for any reason, delete any
2682 single-step breakpoint resume_1 may have created, to avoid
2683 confusing the following resumption, and to avoid leaving
2684 single-step breakpoints perturbing other threads, in case
2685 we're running in non-stop mode. */
2686 if (inferior_ptid != null_ptid)
2687 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2688 throw;
71d378ae 2689 }
71d378ae
PA
2690}
2691
c906108c 2692\f
237fc4c9 2693/* Proceeding. */
c906108c 2694
4c2f2a79
PA
2695/* See infrun.h. */
2696
2697/* Counter that tracks the number of user visible stops. This can be used
2698 to tell whether a command has proceeded the inferior past the
2699 current location. This allows e.g., inferior function calls in
2700 breakpoint commands to not interrupt the command list. When the
2701 call finishes successfully, the inferior is standing at the same
2702 breakpoint as if nothing happened (and so we don't call
2703 normal_stop). */
2704static ULONGEST current_stop_id;
2705
2706/* See infrun.h. */
2707
2708ULONGEST
2709get_stop_id (void)
2710{
2711 return current_stop_id;
2712}
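/* A minimal sketch of how a caller can use the stop id to tell whether
   running a command made the inferior report a new user-visible stop.
   run_and_check_sketch and the use of execute_command here are
   illustrative assumptions, not existing infrun code.  */

#if 0   /* Illustration only -- never compiled.  */
static void
run_and_check_sketch (const char *cmd)
{
  ULONGEST before = get_stop_id ();

  execute_command (cmd, 0 /* from_tty */);

  if (get_stop_id () != before)
    printf_unfiltered ("the command proceeded the inferior\n");
}
#endif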
2713
2714/* Called when we report a user visible stop. */
2715
2716static void
2717new_stop_id (void)
2718{
2719 current_stop_id++;
2720}
2721
c906108c
SS
2722/* Clear out all variables saying what to do when the inferior is continued.
2723 First do this, then set the ones you want, then call `proceed'. */
2724
a7212384
UW
2725static void
2726clear_proceed_status_thread (struct thread_info *tp)
c906108c 2727{
1eb8556f 2728 infrun_debug_printf ("%s", target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2729
372316f1
PA
2730 /* If we're starting a new sequence, then the previous finished
2731 single-step is no longer relevant. */
2732 if (tp->suspend.waitstatus_pending_p)
2733 {
2734 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2735 {
1eb8556f
SM
2736 infrun_debug_printf ("pending event of %s was a finished step. "
2737 "Discarding.",
2738 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2739
2740 tp->suspend.waitstatus_pending_p = 0;
2741 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2742 }
1eb8556f 2743 else
372316f1 2744 {
1eb8556f
SM
2745 infrun_debug_printf
2746 ("thread %s has pending wait status %s (currently_stepping=%d).",
2747 target_pid_to_str (tp->ptid).c_str (),
2748 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2749 currently_stepping (tp));
372316f1
PA
2750 }
2751 }
2752
70509625
PA
2753 /* If this signal should not be seen by the program, give it zero.
2754 Used for debugging signals. */
2755 if (!signal_pass_state (tp->suspend.stop_signal))
2756 tp->suspend.stop_signal = GDB_SIGNAL_0;
2757
46e3ed7f 2758 delete tp->thread_fsm;
243a9253
PA
2759 tp->thread_fsm = NULL;
2760
16c381f0
JK
2761 tp->control.trap_expected = 0;
2762 tp->control.step_range_start = 0;
2763 tp->control.step_range_end = 0;
c1e36e3e 2764 tp->control.may_range_step = 0;
16c381f0
JK
2765 tp->control.step_frame_id = null_frame_id;
2766 tp->control.step_stack_frame_id = null_frame_id;
2767 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2768 tp->control.step_start_function = NULL;
a7212384 2769 tp->stop_requested = 0;
4e1c45ea 2770
16c381f0 2771 tp->control.stop_step = 0;
32400beb 2772
16c381f0 2773 tp->control.proceed_to_finish = 0;
414c69f7 2774
856e7dd6 2775 tp->control.stepping_command = 0;
17b2616c 2776
a7212384 2777 /* Discard any remaining commands or status from previous stop. */
16c381f0 2778 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2779}
32400beb 2780
a7212384 2781void
70509625 2782clear_proceed_status (int step)
a7212384 2783{
f2665db5
MM
2784 /* With scheduler-locking replay, stop replaying other threads if we're
2785 not replaying the user-visible resume ptid.
2786
2787 This is a convenience feature to not require the user to explicitly
2788 stop replaying the other threads. We're assuming that the user's
2789 intent is to resume tracing the recorded process. */
2790 if (!non_stop && scheduler_mode == schedlock_replay
2791 && target_record_is_replaying (minus_one_ptid)
2792 && !target_record_will_replay (user_visible_resume_ptid (step),
2793 execution_direction))
2794 target_record_stop_replaying ();
2795
08036331 2796 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2797 {
08036331 2798 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2799 process_stratum_target *resume_target
2800 = user_visible_resume_target (resume_ptid);
70509625
PA
2801
2802 /* In all-stop mode, delete the per-thread status of all threads
2803 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2804 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2805 clear_proceed_status_thread (tp);
6c95b8df
PA
2806 }
2807
d7e15655 2808 if (inferior_ptid != null_ptid)
a7212384
UW
2809 {
2810 struct inferior *inferior;
2811
2812 if (non_stop)
2813 {
6c95b8df
PA
2814 /* If in non-stop mode, only delete the per-thread status of
2815 the current thread. */
a7212384
UW
2816 clear_proceed_status_thread (inferior_thread ());
2817 }
6c95b8df 2818
d6b48e9c 2819 inferior = current_inferior ();
16c381f0 2820 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2821 }
2822
76727919 2823 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2824}
2825
99619bea
PA
2826/* Returns true if TP is still stopped at a breakpoint that needs
2827 stepping-over in order to make progress. If the breakpoint is gone
2828 meanwhile, we can skip the whole step-over dance. */
ea67f13b 2829
c4464ade 2830static bool
6c4cfb24 2831thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2832{
2833 if (tp->stepping_over_breakpoint)
2834 {
00431a78 2835 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2836
a01bda52 2837 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2838 regcache_read_pc (regcache))
2839 == ordinary_breakpoint_here)
c4464ade 2840 return true;
99619bea
PA
2841
2842 tp->stepping_over_breakpoint = 0;
2843 }
2844
c4464ade 2845 return false;
99619bea
PA
2846}
2847
6c4cfb24
PA
2848/* Check whether thread TP still needs to start a step-over in order
2849 to make progress when resumed. Returns a bitwise OR of enum
2850 step_over_what bits, indicating what needs to be stepped over. */
2851
8d297bbf 2852static step_over_what
6c4cfb24
PA
2853thread_still_needs_step_over (struct thread_info *tp)
2854{
8d297bbf 2855 step_over_what what = 0;
6c4cfb24
PA
2856
2857 if (thread_still_needs_step_over_bp (tp))
2858 what |= STEP_OVER_BREAKPOINT;
2859
2860 if (tp->stepping_over_watchpoint
9aed480c 2861 && !target_have_steppable_watchpoint ())
6c4cfb24
PA
2862 what |= STEP_OVER_WATCHPOINT;
2863
2864 return what;
2865}
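/* The value returned above is a bit mask; an illustrative check,
   mirroring the test made in start_step_over earlier in this file:

     step_over_what what = thread_still_needs_step_over (tp);
     if (what & STEP_OVER_BREAKPOINT)
       ...step over the breakpoint (in-line or displaced)...
     if (what & STEP_OVER_WATCHPOINT)
       ...step over the non-steppable watchpoint...
*/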
2866
483805cf
PA
2867/* Returns true if scheduler locking applies to thread TP, which is
2868 about to be resumed by a step/next-like command or a continue. */
2869
c4464ade 2870static bool
856e7dd6 2871schedlock_applies (struct thread_info *tp)
483805cf
PA
2872{
2873 return (scheduler_mode == schedlock_on
2874 || (scheduler_mode == schedlock_step
f2665db5
MM
2875 && tp->control.stepping_command)
2876 || (scheduler_mode == schedlock_replay
2877 && target_record_will_replay (minus_one_ptid,
2878 execution_direction)));
483805cf
PA
2879}
2880
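/* For illustration, the checks above amount to the following (sketch;
   schedlock_off is the remaining mode of "set scheduler-locking"):

     schedlock_on     -> always hold back the other threads
     schedlock_step   -> hold them back only for stepping commands,
                         i.e. when TP->control.stepping_command is set
     schedlock_replay -> hold them back while replaying a recording in
                         the current execution direction
     schedlock_off    -> never applies; all threads may run. */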
1192f124
SM
2881/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
2882 stacks that have threads executing and don't have threads with
2883 pending events. */
5b6d1e4f
PA
2884
2885static void
1192f124
SM
2886maybe_set_commit_resumed_all_targets ()
2887{
b4b1a226
SM
2888 scoped_restore_current_thread restore_thread;
2889
1192f124
SM
2890 for (inferior *inf : all_non_exited_inferiors ())
2891 {
2892 process_stratum_target *proc_target = inf->process_target ();
2893
2894 if (proc_target->commit_resumed_state)
2895 {
2896 /* We already set this in a previous iteration, via another
2897 inferior sharing the process_stratum target. */
2898 continue;
2899 }
2900
2901 /* If the target has no resumed threads, it would be useless to
2902 ask it to commit the resumed threads. */
2903 if (!proc_target->threads_executing)
2904 {
2905 infrun_debug_printf ("not requesting commit-resumed for target "
2906 "%s, no resumed threads",
2907 proc_target->shortname ());
2908 continue;
2909 }
2910
2911 /* As an optimization, if a thread from this target has some
2912 status to report, handle it before requiring the target to
2913 commit its resumed threads: handling the status might lead to
2914 resuming more threads. */
2915 bool has_thread_with_pending_status = false;
2916 for (thread_info *thread : all_non_exited_threads (proc_target))
2917 if (thread->resumed && thread->suspend.waitstatus_pending_p)
2918 {
2919 has_thread_with_pending_status = true;
2920 break;
2921 }
2922
2923 if (has_thread_with_pending_status)
2924 {
2925 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
2926 " thread has a pending waitstatus",
2927 proc_target->shortname ());
2928 continue;
2929 }
2930
b4b1a226
SM
2931 switch_to_inferior_no_thread (inf);
2932
2933 if (target_has_pending_events ())
2934 {
2935 infrun_debug_printf ("not requesting commit-resumed for target %s, "
2936 "target has pending events",
2937 proc_target->shortname ());
2938 continue;
2939 }
2940
1192f124
SM
2941 infrun_debug_printf ("enabling commit-resumed for target %s",
2942 proc_target->shortname ());
2943
2944 proc_target->commit_resumed_state = true;
2945 }
2946}
2947
2948/* See infrun.h. */
2949
2950void
2951maybe_call_commit_resumed_all_targets ()
5b6d1e4f
PA
2952{
2953 scoped_restore_current_thread restore_thread;
2954
1192f124
SM
2955 for (inferior *inf : all_non_exited_inferiors ())
2956 {
2957 process_stratum_target *proc_target = inf->process_target ();
2958
2959 if (!proc_target->commit_resumed_state)
2960 continue;
2961
2962 switch_to_inferior_no_thread (inf);
2963
2964 infrun_debug_printf ("calling commit_resumed for target %s",
 2965 proc_target->shortname ());
2966
2967 target_commit_resumed ();
2968 }
2969}
2970
2971/* To track nesting of scoped_disable_commit_resumed objects, ensuring
2972 that only the outermost one attempts to re-enable
2973 commit-resumed. */
2974static bool enable_commit_resumed = true;
2975
2976/* See infrun.h. */
2977
2978scoped_disable_commit_resumed::scoped_disable_commit_resumed
2979 (const char *reason)
2980 : m_reason (reason),
2981 m_prev_enable_commit_resumed (enable_commit_resumed)
2982{
2983 infrun_debug_printf ("reason=%s", m_reason);
2984
2985 enable_commit_resumed = false;
5b6d1e4f
PA
2986
2987 for (inferior *inf : all_non_exited_inferiors ())
1192f124
SM
2988 {
2989 process_stratum_target *proc_target = inf->process_target ();
5b6d1e4f 2990
1192f124
SM
2991 if (m_prev_enable_commit_resumed)
2992 {
2993 /* This is the outermost instance: force all
2994 COMMIT_RESUMED_STATE to false. */
2995 proc_target->commit_resumed_state = false;
2996 }
2997 else
2998 {
2999 /* This is not the outermost instance, we expect
3000 COMMIT_RESUMED_STATE to have been cleared by the
3001 outermost instance. */
3002 gdb_assert (!proc_target->commit_resumed_state);
3003 }
3004 }
3005}
3006
3007/* See infrun.h. */
3008
3009void
3010scoped_disable_commit_resumed::reset ()
3011{
3012 if (m_reset)
3013 return;
3014 m_reset = true;
3015
3016 infrun_debug_printf ("reason=%s", m_reason);
3017
3018 gdb_assert (!enable_commit_resumed);
3019
3020 enable_commit_resumed = m_prev_enable_commit_resumed;
3021
3022 if (m_prev_enable_commit_resumed)
5b6d1e4f 3023 {
1192f124
SM
3024 /* This is the outermost instance, re-enable
3025 COMMIT_RESUMED_STATE on the targets where it's possible. */
3026 maybe_set_commit_resumed_all_targets ();
3027 }
3028 else
3029 {
3030 /* This is not the outermost instance, we expect
3031 COMMIT_RESUMED_STATE to still be false. */
3032 for (inferior *inf : all_non_exited_inferiors ())
3033 {
3034 process_stratum_target *proc_target = inf->process_target ();
3035 gdb_assert (!proc_target->commit_resumed_state);
3036 }
3037 }
3038}
3039
3040/* See infrun.h. */
3041
3042scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3043{
3044 reset ();
3045}
3046
3047/* See infrun.h. */
3048
3049void
3050scoped_disable_commit_resumed::reset_and_commit ()
3051{
3052 reset ();
3053 maybe_call_commit_resumed_all_targets ();
3054}
3055
3056/* See infrun.h. */
3057
3058scoped_enable_commit_resumed::scoped_enable_commit_resumed
3059 (const char *reason)
3060 : m_reason (reason),
3061 m_prev_enable_commit_resumed (enable_commit_resumed)
3062{
3063 infrun_debug_printf ("reason=%s", m_reason);
3064
3065 if (!enable_commit_resumed)
3066 {
3067 enable_commit_resumed = true;
3068
3069 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3070 possible. */
3071 maybe_set_commit_resumed_all_targets ();
3072
3073 maybe_call_commit_resumed_all_targets ();
3074 }
3075}
3076
3077/* See infrun.h. */
3078
3079scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3080{
3081 infrun_debug_printf ("reason=%s", m_reason);
3082
3083 gdb_assert (enable_commit_resumed);
3084
3085 enable_commit_resumed = m_prev_enable_commit_resumed;
3086
3087 if (!enable_commit_resumed)
3088 {
3089 /* Force all COMMIT_RESUMED_STATE back to false. */
3090 for (inferior *inf : all_non_exited_inferiors ())
3091 {
3092 process_stratum_target *proc_target = inf->process_target ();
3093 proc_target->commit_resumed_state = false;
3094 }
5b6d1e4f
PA
3095 }
3096}
3097
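/* A minimal usage sketch of the two RAII classes above (illustrative
   only; proceed and fetch_inferior_event below are real callers):

     {
       scoped_disable_commit_resumed disable_commit_resumed ("reason");

       ... resume or queue threads on one or more targets ...

       disable_commit_resumed.reset_and_commit ();
     }

   While the object is live, commit_resumed_state stays false on every
   target, so nothing gets committed mid-operation. reset_and_commit
   re-enables the state where appropriate and asks those targets to
   commit their resumed threads; the destructor alone (e.g. on an error
   path) only restores the enable state. */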
2f4fcf00
PA
3098/* Check that all the targets we're about to resume are in non-stop
3099 mode. Ideally, we'd only care whether all targets support
3100 target-async, but we're not there yet. E.g., stop_all_threads
3101 doesn't know how to handle all-stop targets. Also, the remote
3102 protocol in all-stop mode is synchronous, irrespective of
3103 target-async, which means that things like a breakpoint re-set
3104 triggered by one target would try to read memory from all targets
3105 and fail. */
3106
3107static void
3108check_multi_target_resumption (process_stratum_target *resume_target)
3109{
3110 if (!non_stop && resume_target == nullptr)
3111 {
3112 scoped_restore_current_thread restore_thread;
3113
3114 /* This is used to track whether we're resuming more than one
3115 target. */
3116 process_stratum_target *first_connection = nullptr;
3117
3118 /* The first inferior we see with a target that does not work in
3119 always-non-stop mode. */
3120 inferior *first_not_non_stop = nullptr;
3121
f058c521 3122 for (inferior *inf : all_non_exited_inferiors ())
2f4fcf00
PA
3123 {
3124 switch_to_inferior_no_thread (inf);
3125
55f6301a 3126 if (!target_has_execution ())
2f4fcf00
PA
3127 continue;
3128
3129 process_stratum_target *proc_target
 3130 = current_inferior ()->process_target ();
3131
3132 if (!target_is_non_stop_p ())
3133 first_not_non_stop = inf;
3134
3135 if (first_connection == nullptr)
3136 first_connection = proc_target;
3137 else if (first_connection != proc_target
3138 && first_not_non_stop != nullptr)
3139 {
3140 switch_to_inferior_no_thread (first_not_non_stop);
3141
 3142 proc_target = current_inferior ()->process_target ();
3143
3144 error (_("Connection %d (%s) does not support "
3145 "multi-target resumption."),
3146 proc_target->connection_number,
3147 make_target_connection_string (proc_target).c_str ());
3148 }
3149 }
3150 }
3151}
3152
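/* Illustrative failure case for the check above (hypothetical
   connection number and address): in all-stop mode, a wildcard resume
   spanning both a native inferior and an inferior on an all-stop
   remote stub reaches the error path, printing something like:

     Connection 2 (remote 192.168.0.1:3000) does not support
     multi-target resumption.

   Resuming several connections at once is only allowed when every
   target involved works in always-non-stop mode. */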
c906108c
SS
3153/* Basic routine for continuing the program in various fashions.
3154
3155 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
3156 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
 3157 or GDB_SIGNAL_DEFAULT to act according to how it stopped.
c906108c
SS
3158
3159 You should call clear_proceed_status before calling proceed. */
3160
3161void
64ce06e4 3162proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 3163{
3ec3145c
SM
3164 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3165
e58b0e63
PA
3166 struct regcache *regcache;
3167 struct gdbarch *gdbarch;
e58b0e63 3168 CORE_ADDR pc;
4d9d9d04
PA
3169 struct execution_control_state ecss;
3170 struct execution_control_state *ecs = &ecss;
c4464ade 3171 bool started;
c906108c 3172
e58b0e63
PA
3173 /* If we're stopped at a fork/vfork, follow the branch set by the
3174 "set follow-fork-mode" command; otherwise, we'll just proceed
3175 resuming the current thread. */
3176 if (!follow_fork ())
3177 {
3178 /* The target for some reason decided not to resume. */
3179 normal_stop ();
f148b27e 3180 if (target_can_async_p ())
b1a35af2 3181 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
3182 return;
3183 }
3184
842951eb
PA
3185 /* We'll update this if & when we switch to a new thread. */
3186 previous_inferior_ptid = inferior_ptid;
3187
e58b0e63 3188 regcache = get_current_regcache ();
ac7936df 3189 gdbarch = regcache->arch ();
8b86c959
YQ
3190 const address_space *aspace = regcache->aspace ();
3191
fc75c28b
TBA
3192 pc = regcache_read_pc_protected (regcache);
3193
08036331 3194 thread_info *cur_thr = inferior_thread ();
e58b0e63 3195
99619bea 3196 /* Fill in with reasonable starting values. */
08036331 3197 init_thread_stepping_state (cur_thr);
99619bea 3198
08036331 3199 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 3200
5b6d1e4f
PA
3201 ptid_t resume_ptid
3202 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3203 process_stratum_target *resume_target
3204 = user_visible_resume_target (resume_ptid);
3205
2f4fcf00
PA
3206 check_multi_target_resumption (resume_target);
3207
2acceee2 3208 if (addr == (CORE_ADDR) -1)
c906108c 3209 {
08036331 3210 if (pc == cur_thr->suspend.stop_pc
af48d08f 3211 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 3212 && execution_direction != EXEC_REVERSE)
3352ef37
AC
3213 /* There is a breakpoint at the address we will resume at,
3214 step one instruction before inserting breakpoints so that
3215 we do not stop right away (and report a second hit at this
b2175913
MS
3216 breakpoint).
3217
3218 Note, we don't do this in reverse, because we won't
3219 actually be executing the breakpoint insn anyway.
3220 We'll be (un-)executing the previous instruction. */
08036331 3221 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
3222 else if (gdbarch_single_step_through_delay_p (gdbarch)
3223 && gdbarch_single_step_through_delay (gdbarch,
3224 get_current_frame ()))
3352ef37
AC
3225 /* We stepped onto an instruction that needs to be stepped
3226 again before re-inserting the breakpoint, do so. */
08036331 3227 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
3228 }
3229 else
3230 {
515630c5 3231 regcache_write_pc (regcache, addr);
c906108c
SS
3232 }
3233
70509625 3234 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 3235 cur_thr->suspend.stop_signal = siggnal;
70509625 3236
4d9d9d04
PA
3237 /* If an exception is thrown from this point on, make sure to
3238 propagate GDB's knowledge of the executing state to the
3239 frontend/user running state. */
5b6d1e4f 3240 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
3241
3242 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3243 threads (e.g., we might need to set threads stepping over
3244 breakpoints first), from the user/frontend's point of view, all
3245 threads in RESUME_PTID are now running. Unless we're calling an
3246 inferior function, as in that case we pretend the inferior
3247 doesn't run at all. */
08036331 3248 if (!cur_thr->control.in_infcall)
719546c4 3249 set_running (resume_target, resume_ptid, true);
17b2616c 3250
1eb8556f
SM
3251 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
3252 gdb_signal_to_symbol_string (siggnal));
527159b7 3253
4d9d9d04
PA
3254 annotate_starting ();
3255
3256 /* Make sure that output from GDB appears before output from the
3257 inferior. */
3258 gdb_flush (gdb_stdout);
3259
d930703d
PA
3260 /* Since we've marked the inferior running, give it the terminal. A
3261 QUIT/Ctrl-C from here on is forwarded to the target (which can
3262 still detect attempts to unblock a stuck connection with repeated
3263 Ctrl-C from within target_pass_ctrlc). */
3264 target_terminal::inferior ();
3265
4d9d9d04
PA
3266 /* In a multi-threaded task we may select another thread and
3267 then continue or step.
3268
3269 But if a thread that we're resuming had stopped at a breakpoint,
3270 it will immediately cause another breakpoint stop without any
3271 execution (i.e. it will report a breakpoint hit incorrectly). So
3272 we must step over it first.
3273
3274 Look for threads other than the current (TP) that reported a
3275 breakpoint hit and haven't been resumed yet since. */
3276
3277 /* If scheduler locking applies, we can avoid iterating over all
3278 threads. */
08036331 3279 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3280 {
5b6d1e4f
PA
3281 for (thread_info *tp : all_non_exited_threads (resume_target,
3282 resume_ptid))
08036331 3283 {
f3f8ece4
PA
3284 switch_to_thread_no_regs (tp);
3285
4d9d9d04
PA
3286 /* Ignore the current thread here. It's handled
3287 afterwards. */
08036331 3288 if (tp == cur_thr)
4d9d9d04 3289 continue;
c906108c 3290
4d9d9d04
PA
3291 if (!thread_still_needs_step_over (tp))
3292 continue;
3293
3294 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3295
1eb8556f
SM
3296 infrun_debug_printf ("need to step-over [%s] first",
3297 target_pid_to_str (tp->ptid).c_str ());
99619bea 3298
28d5518b 3299 global_thread_step_over_chain_enqueue (tp);
2adfaa28 3300 }
f3f8ece4
PA
3301
3302 switch_to_thread (cur_thr);
30852783
UW
3303 }
3304
4d9d9d04
PA
3305 /* Enqueue the current thread last, so that we move all other
3306 threads over their breakpoints first. */
08036331 3307 if (cur_thr->stepping_over_breakpoint)
28d5518b 3308 global_thread_step_over_chain_enqueue (cur_thr);
30852783 3309
4d9d9d04
PA
3310 /* If the thread isn't started, we'll still need to set its prev_pc,
3311 so that switch_back_to_stepped_thread knows the thread hasn't
3312 advanced. Must do this before resuming any thread, as in
3313 all-stop/remote, once we resume we can't send any other packet
3314 until the target stops again. */
fc75c28b 3315 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3316
a9bc57b9 3317 {
1192f124 3318 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
85ad3aaf 3319
a9bc57b9 3320 started = start_step_over ();
c906108c 3321
a9bc57b9
TT
3322 if (step_over_info_valid_p ())
3323 {
3324 /* Either this thread started a new in-line step over, or some
3325 other thread was already doing one. In either case, don't
3326 resume anything else until the step-over is finished. */
3327 }
3328 else if (started && !target_is_non_stop_p ())
3329 {
3330 /* A new displaced stepping sequence was started. In all-stop,
3331 we can't talk to the target anymore until it next stops. */
3332 }
3333 else if (!non_stop && target_is_non_stop_p ())
3334 {
3ec3145c
SM
3335 INFRUN_SCOPED_DEBUG_START_END
3336 ("resuming threads, all-stop-on-top-of-non-stop");
3337
a9bc57b9
TT
3338 /* In all-stop, but the target is always in non-stop mode.
3339 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3340 for (thread_info *tp : all_non_exited_threads (resume_target,
3341 resume_ptid))
3342 {
3343 switch_to_thread_no_regs (tp);
3344
f9fac3c8
SM
3345 if (!tp->inf->has_execution ())
3346 {
1eb8556f
SM
3347 infrun_debug_printf ("[%s] target has no execution",
3348 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3349 continue;
3350 }
f3f8ece4 3351
f9fac3c8
SM
3352 if (tp->resumed)
3353 {
1eb8556f
SM
3354 infrun_debug_printf ("[%s] resumed",
3355 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3356 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3357 continue;
3358 }
fbea99ea 3359
f9fac3c8
SM
3360 if (thread_is_in_step_over_chain (tp))
3361 {
1eb8556f
SM
3362 infrun_debug_printf ("[%s] needs step-over",
3363 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3364 continue;
3365 }
fbea99ea 3366
81d92403
SM
3367 /* If a thread of that inferior is waiting for a vfork-done
3368 (for a detached vfork child to exec or exit), breakpoints are
3369 removed. We must not resume any thread of that inferior, other
3370 than the one waiting for the vfork-done. */
3371 if (tp->inf->thread_waiting_for_vfork_done != nullptr
3372 && tp != tp->inf->thread_waiting_for_vfork_done)
3373 {
3374 infrun_debug_printf ("[%s] another thread of this inferior is "
3375 "waiting for vfork-done",
3376 tp->ptid.to_string ().c_str ());
3377 continue;
3378 }
3379
1eb8556f 3380 infrun_debug_printf ("resuming %s",
dda83cd7 3381 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3382
f9fac3c8
SM
3383 reset_ecs (ecs, tp);
3384 switch_to_thread (tp);
3385 keep_going_pass_signal (ecs);
3386 if (!ecs->wait_some_more)
3387 error (_("Command aborted."));
3388 }
a9bc57b9 3389 }
81d92403
SM
3390 else if (!cur_thr->resumed
3391 && !thread_is_in_step_over_chain (cur_thr)
3392 /* In non-stop, forbid resume a thread if some other thread of
3393 that inferior is waiting for a vfork-done event (this means
3394 breakpoints are out for this inferior). */
3395 && !(non_stop && cur_thr->inf->thread_waiting_for_vfork_done))
a9bc57b9
TT
3396 {
3397 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3398 reset_ecs (ecs, cur_thr);
3399 switch_to_thread (cur_thr);
a9bc57b9
TT
3400 keep_going_pass_signal (ecs);
3401 if (!ecs->wait_some_more)
3402 error (_("Command aborted."));
3403 }
c906108c 3404
1192f124
SM
3405 disable_commit_resumed.reset_and_commit ();
3406 }
85ad3aaf 3407
731f534f 3408 finish_state.release ();
c906108c 3409
873657b9
PA
3410 /* If we've switched threads above, switch back to the previously
3411 current thread. We don't want the user to see a different
3412 selected thread. */
3413 switch_to_thread (cur_thr);
3414
0b333c5e
PA
3415 /* Tell the event loop to wait for it to stop. If the target
3416 supports asynchronous execution, it'll do this from within
3417 target_resume. */
362646f5 3418 if (!target_can_async_p ())
0b333c5e 3419 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3420}
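/* A minimal sketch of the canonical calling sequence for the function
   above, as used by execution commands (illustrative; see the comment
   before proceed about calling clear_proceed_status first):

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   i.e. resume where the thread stopped, deliver a signal according to
   how it stopped, and then wait for the next event via
   wait_for_inferior or fetch_inferior_event. */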
c906108c
SS
3421\f
3422
3423/* Start remote-debugging of a machine over a serial link. */
96baa820 3424
c906108c 3425void
8621d6a9 3426start_remote (int from_tty)
c906108c 3427{
5b6d1e4f
PA
3428 inferior *inf = current_inferior ();
3429 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3430
1777feb0 3431 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3432 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3433 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3434 nothing is returned (instead of just blocking). Because of this,
3435 targets expecting an immediate response need to, internally, set
3436 things up so that the target_wait() is forced to eventually
1777feb0 3437 timeout. */
6426a772
JM
3438 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3439 differentiate to its caller what the state of the target is after
3440 the initial open has been performed. Here we're assuming that
3441 the target has stopped. It should be possible to eventually have
3442 target_open() return to the caller an indication that the target
3443 is currently running and GDB state should be set to the same as
1777feb0 3444 for an async run. */
5b6d1e4f 3445 wait_for_inferior (inf);
8621d6a9
DJ
3446
3447 /* Now that the inferior has stopped, do any bookkeeping like
3448 loading shared libraries. We want to do this before normal_stop,
3449 so that the displayed frame is up to date. */
a7aba266 3450 post_create_inferior (from_tty);
8621d6a9 3451
6426a772 3452 normal_stop ();
c906108c
SS
3453}
3454
3455/* Initialize static vars when a new inferior begins. */
3456
3457void
96baa820 3458init_wait_for_inferior (void)
c906108c
SS
3459{
3460 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3461
c906108c
SS
3462 breakpoint_init_inferior (inf_starting);
3463
70509625 3464 clear_proceed_status (0);
9f976b41 3465
ab1ddbcf 3466 nullify_last_target_wait_ptid ();
237fc4c9 3467
842951eb 3468 previous_inferior_ptid = inferior_ptid;
c906108c 3469}
237fc4c9 3470
c906108c 3471\f
488f131b 3472
ec9499be 3473static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3474
568d6575
UW
3475static void handle_step_into_function (struct gdbarch *gdbarch,
3476 struct execution_control_state *ecs);
3477static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3478 struct execution_control_state *ecs);
4f5d7f63 3479static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3480static void check_exception_resume (struct execution_control_state *,
28106bc2 3481 struct frame_info *);
611c83ae 3482
bdc36728 3483static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3484static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3485static void keep_going (struct execution_control_state *ecs);
94c57d6a 3486static void process_event_stop_test (struct execution_control_state *ecs);
c4464ade 3487static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3488
252fbfc8
PA
3489/* This function is attached as a "thread_stop_requested" observer.
3490 Cleanup local state that assumed the PTID was to be resumed, and
3491 report the stop to the frontend. */
3492
2c0b251b 3493static void
252fbfc8
PA
3494infrun_thread_stop_requested (ptid_t ptid)
3495{
5b6d1e4f
PA
3496 process_stratum_target *curr_target = current_inferior ()->process_target ();
3497
c65d6b55
PA
3498 /* PTID was requested to stop. If the thread was already stopped,
3499 but the user/frontend doesn't know about that yet (e.g., the
3500 thread had been temporarily paused for some step-over), set up
3501 for reporting the stop now. */
5b6d1e4f 3502 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3503 {
3504 if (tp->state != THREAD_RUNNING)
3505 continue;
3506 if (tp->executing)
3507 continue;
c65d6b55 3508
08036331
PA
3509 /* Remove matching threads from the step-over queue, so
3510 start_step_over doesn't try to resume them
3511 automatically. */
3512 if (thread_is_in_step_over_chain (tp))
28d5518b 3513 global_thread_step_over_chain_remove (tp);
c65d6b55 3514
08036331
PA
3515 /* If the thread is stopped, but the user/frontend doesn't
3516 know about that yet, queue a pending event, as if the
3517 thread had just stopped now. Unless the thread already had
3518 a pending event. */
3519 if (!tp->suspend.waitstatus_pending_p)
3520 {
3521 tp->suspend.waitstatus_pending_p = 1;
3522 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3523 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3524 }
c65d6b55 3525
08036331
PA
3526 /* Clear the inline-frame state, since we're re-processing the
3527 stop. */
5b6d1e4f 3528 clear_inline_frame_state (tp);
c65d6b55 3529
08036331
PA
3530 /* If this thread was paused because some other thread was
3531 doing an inline-step over, let that finish first. Once
3532 that happens, we'll restart all threads and consume pending
3533 stop events then. */
3534 if (step_over_info_valid_p ())
3535 continue;
3536
3537 /* Otherwise we can process the (new) pending event now. Set
3538 it so this pending event is considered by
3539 do_target_wait. */
719546c4 3540 tp->resumed = true;
08036331 3541 }
252fbfc8
PA
3542}
3543
a07daef3
PA
3544static void
3545infrun_thread_thread_exit (struct thread_info *tp, int silent)
3546{
5b6d1e4f
PA
3547 if (target_last_proc_target == tp->inf->process_target ()
3548 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3549 nullify_last_target_wait_ptid ();
3550}
3551
0cbcdb96
PA
3552/* Delete the step resume, single-step and longjmp/exception resume
3553 breakpoints of TP. */
4e1c45ea 3554
0cbcdb96
PA
3555static void
3556delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3557{
0cbcdb96
PA
3558 delete_step_resume_breakpoint (tp);
3559 delete_exception_resume_breakpoint (tp);
34b7e8a6 3560 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3561}
3562
0cbcdb96
PA
3563/* If the target still has execution, call FUNC for each thread that
3564 just stopped. In all-stop, that's all the non-exited threads; in
3565 non-stop, that's the current thread, only. */
3566
3567typedef void (*for_each_just_stopped_thread_callback_func)
3568 (struct thread_info *tp);
4e1c45ea
PA
3569
3570static void
0cbcdb96 3571for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3572{
55f6301a 3573 if (!target_has_execution () || inferior_ptid == null_ptid)
4e1c45ea
PA
3574 return;
3575
fbea99ea 3576 if (target_is_non_stop_p ())
4e1c45ea 3577 {
0cbcdb96
PA
3578 /* If in non-stop mode, only the current thread stopped. */
3579 func (inferior_thread ());
4e1c45ea
PA
3580 }
3581 else
0cbcdb96 3582 {
0cbcdb96 3583 /* In all-stop mode, all threads have stopped. */
08036331
PA
3584 for (thread_info *tp : all_non_exited_threads ())
3585 func (tp);
0cbcdb96
PA
3586 }
3587}
3588
3589/* Delete the step resume and longjmp/exception resume breakpoints of
3590 the threads that just stopped. */
3591
3592static void
3593delete_just_stopped_threads_infrun_breakpoints (void)
3594{
3595 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3596}
3597
3598/* Delete the single-step breakpoints of the threads that just
3599 stopped. */
7c16b83e 3600
34b7e8a6
PA
3601static void
3602delete_just_stopped_threads_single_step_breakpoints (void)
3603{
3604 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3605}
3606
221e1a37 3607/* See infrun.h. */
223698f8 3608
221e1a37 3609void
223698f8
DE
3610print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3611 const struct target_waitstatus *ws)
3612{
e71daf80
SM
3613 infrun_debug_printf ("target_wait (%d.%ld.%ld [%s], status) =",
3614 waiton_ptid.pid (),
3615 waiton_ptid.lwp (),
3616 waiton_ptid.tid (),
3617 target_pid_to_str (waiton_ptid).c_str ());
3618 infrun_debug_printf (" %d.%ld.%ld [%s],",
3619 result_ptid.pid (),
3620 result_ptid.lwp (),
3621 result_ptid.tid (),
3622 target_pid_to_str (result_ptid).c_str ());
3623 infrun_debug_printf (" %s", target_waitstatus_to_string (ws).c_str ());
223698f8
DE
3624}
3625
372316f1
PA
3626/* Select a thread at random, out of those which are resumed and have
3627 had events. */
3628
3629static struct thread_info *
5b6d1e4f 3630random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3631{
372316f1 3632 int num_events = 0;
08036331 3633
5b6d1e4f 3634 auto has_event = [&] (thread_info *tp)
08036331 3635 {
5b6d1e4f
PA
3636 return (tp->ptid.matches (waiton_ptid)
3637 && tp->resumed
08036331
PA
3638 && tp->suspend.waitstatus_pending_p);
3639 };
372316f1
PA
3640
3641 /* First see how many events we have. Count only resumed threads
3642 that have an event pending. */
5b6d1e4f 3643 for (thread_info *tp : inf->non_exited_threads ())
08036331 3644 if (has_event (tp))
372316f1
PA
3645 num_events++;
3646
3647 if (num_events == 0)
3648 return NULL;
3649
3650 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3651 int random_selector = (int) ((num_events * (double) rand ())
3652 / (RAND_MAX + 1.0));
372316f1 3653
1eb8556f
SM
3654 if (num_events > 1)
3655 infrun_debug_printf ("Found %d events, selecting #%d",
3656 num_events, random_selector);
372316f1
PA
3657
3658 /* Select the Nth thread that has had an event. */
5b6d1e4f 3659 for (thread_info *tp : inf->non_exited_threads ())
08036331 3660 if (has_event (tp))
372316f1 3661 if (random_selector-- == 0)
08036331 3662 return tp;
372316f1 3663
08036331 3664 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3665}
3666
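/* Worked example for the selection above (illustrative numbers): with
   num_events == 3, the expression

     (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0))

   maps rand () uniformly onto {0, 1, 2}, because rand () lies in
   [0, RAND_MAX] and dividing by RAND_MAX + 1.0 keeps the scaled value
   strictly below num_events. The loop then returns the 0th, 1st or 2nd
   thread (in iteration order) that has a pending event. */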
3667/* Wrapper for target_wait that first checks whether threads have
3668 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3669 more events. INF is the inferior we're using to call target_wait
3670 on. */
372316f1
PA
3671
3672static ptid_t
5b6d1e4f 3673do_target_wait_1 (inferior *inf, ptid_t ptid,
b60cea74 3674 target_waitstatus *status, target_wait_flags options)
372316f1
PA
3675{
3676 ptid_t event_ptid;
3677 struct thread_info *tp;
3678
24ed6739
AB
3679 /* We know that we are looking for an event in the target of inferior
3680 INF, but we don't know which thread the event might come from. As
3681 such we want to make sure that INFERIOR_PTID is reset so that none of
3682 the wait code relies on it - doing so is always a mistake. */
3683 switch_to_inferior_no_thread (inf);
3684
372316f1
PA
3685 /* First check if there is a resumed thread with a wait status
3686 pending. */
d7e15655 3687 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3688 {
5b6d1e4f 3689 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3690 }
3691 else
3692 {
1eb8556f
SM
3693 infrun_debug_printf ("Waiting for specific thread %s.",
3694 target_pid_to_str (ptid).c_str ());
372316f1
PA
3695
3696 /* We have a specific thread to check. */
5b6d1e4f 3697 tp = find_thread_ptid (inf, ptid);
372316f1
PA
3698 gdb_assert (tp != NULL);
3699 if (!tp->suspend.waitstatus_pending_p)
3700 tp = NULL;
3701 }
3702
3703 if (tp != NULL
3704 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3705 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3706 {
00431a78 3707 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3708 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3709 CORE_ADDR pc;
3710 int discard = 0;
3711
3712 pc = regcache_read_pc (regcache);
3713
3714 if (pc != tp->suspend.stop_pc)
3715 {
1eb8556f
SM
3716 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3717 target_pid_to_str (tp->ptid).c_str (),
3718 paddress (gdbarch, tp->suspend.stop_pc),
3719 paddress (gdbarch, pc));
372316f1
PA
3720 discard = 1;
3721 }
a01bda52 3722 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1 3723 {
1eb8556f
SM
3724 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3725 target_pid_to_str (tp->ptid).c_str (),
3726 paddress (gdbarch, pc));
372316f1
PA
3727
3728 discard = 1;
3729 }
3730
3731 if (discard)
3732 {
1eb8556f
SM
3733 infrun_debug_printf ("pending event of %s cancelled.",
3734 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3735
3736 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3737 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3738 }
3739 }
3740
3741 if (tp != NULL)
3742 {
1eb8556f
SM
3743 infrun_debug_printf ("Using pending wait status %s for %s.",
3744 target_waitstatus_to_string
3745 (&tp->suspend.waitstatus).c_str (),
3746 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3747
3748 /* Now that we've selected our final event LWP, un-adjust its PC
3749 if it was a software breakpoint (and the target doesn't
3750 always adjust the PC itself). */
3751 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3752 && !target_supports_stopped_by_sw_breakpoint ())
3753 {
3754 struct regcache *regcache;
3755 struct gdbarch *gdbarch;
3756 int decr_pc;
3757
00431a78 3758 regcache = get_thread_regcache (tp);
ac7936df 3759 gdbarch = regcache->arch ();
372316f1
PA
3760
3761 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3762 if (decr_pc != 0)
3763 {
3764 CORE_ADDR pc;
3765
3766 pc = regcache_read_pc (regcache);
3767 regcache_write_pc (regcache, pc + decr_pc);
3768 }
3769 }
3770
3771 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3772 *status = tp->suspend.waitstatus;
3773 tp->suspend.waitstatus_pending_p = 0;
3774
3775 /* Wake up the event loop again, until all pending events are
3776 processed. */
3777 if (target_is_async_p ())
3778 mark_async_event_handler (infrun_async_inferior_event_token);
3779 return tp->ptid;
3780 }
3781
3782 /* But if we don't find one, we'll have to wait. */
3783
d3a07122
SM
3784 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3785 a blocking wait. */
3786 if (!target_can_async_p ())
3787 options &= ~TARGET_WNOHANG;
3788
372316f1
PA
3789 if (deprecated_target_wait_hook)
3790 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3791 else
3792 event_ptid = target_wait (ptid, status, options);
3793
3794 return event_ptid;
3795}
3796
5b6d1e4f
PA
3797/* Wrapper for target_wait that first checks whether threads have
3798 pending statuses to report before actually asking the target for
b3e3a4c1 3799 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
3800
3801static bool
ac0d67ed 3802do_target_wait (execution_control_state *ecs, target_wait_flags options)
5b6d1e4f
PA
3803{
3804 int num_inferiors = 0;
3805 int random_selector;
3806
b3e3a4c1
SM
3807 /* For fairness, we pick the first inferior/target to poll at random
3808 out of all inferiors that may report events, and then continue
3809 polling the rest of the inferior list starting from that one in a
3810 circular fashion until the whole list is polled once. */
5b6d1e4f 3811
ac0d67ed 3812 auto inferior_matches = [] (inferior *inf)
5b6d1e4f 3813 {
ac0d67ed 3814 return inf->process_target () != nullptr;
5b6d1e4f
PA
3815 };
3816
b3e3a4c1 3817 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
3818 for (inferior *inf : all_inferiors ())
3819 if (inferior_matches (inf))
3820 num_inferiors++;
3821
3822 if (num_inferiors == 0)
3823 {
3824 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3825 return false;
3826 }
3827
b3e3a4c1 3828 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
3829 random_selector = (int)
3830 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3831
1eb8556f
SM
3832 if (num_inferiors > 1)
3833 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3834 num_inferiors, random_selector);
5b6d1e4f 3835
b3e3a4c1 3836 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
3837
3838 inferior *selected = nullptr;
3839
3840 for (inferior *inf : all_inferiors ())
3841 if (inferior_matches (inf))
3842 if (random_selector-- == 0)
3843 {
3844 selected = inf;
3845 break;
3846 }
3847
b3e3a4c1 3848 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
3849 targets, starting from the selected one. */
3850
3851 auto do_wait = [&] (inferior *inf)
3852 {
ac0d67ed 3853 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
5b6d1e4f
PA
3854 ecs->target = inf->process_target ();
3855 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3856 };
3857
b3e3a4c1
SM
3858 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3859 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
3860 reported the stop to the user, polling for events. */
3861 scoped_restore_current_thread restore_thread;
3862
3863 int inf_num = selected->num;
3864 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3865 if (inferior_matches (inf))
3866 if (do_wait (inf))
3867 return true;
3868
3869 for (inferior *inf = inferior_list;
3870 inf != NULL && inf->num < inf_num;
3871 inf = inf->next)
3872 if (inferior_matches (inf))
3873 if (do_wait (inf))
3874 return true;
3875
3876 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3877 return false;
3878}
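/* Illustrative example of the polling order above (hypothetical
   inferior numbers): with matching inferiors 1, 2, 3 and 4, and
   SELECTED randomly landing on inferior 3, the two loops poll in the
   order 3, 4 (from SELECTED to the end of the inferior list) and then
   1, 2 (inferiors numbered below SELECTED), returning as soon as one
   do_wait call reports something other than TARGET_WAITKIND_IGNORE. */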
3879
8ff53139
PA
3880/* An event reported by wait_one. */
3881
3882struct wait_one_event
3883{
3884 /* The target the event came out of. */
3885 process_stratum_target *target;
3886
3887 /* The PTID the event was for. */
3888 ptid_t ptid;
3889
3890 /* The waitstatus. */
3891 target_waitstatus ws;
3892};
3893
3894static bool handle_one (const wait_one_event &event);
3895
24291992
PA
3896/* Prepare and stabilize the inferior for detaching it. E.g.,
3897 detaching while a thread is displaced stepping is a recipe for
3898 crashing it, as nothing would readjust the PC out of the scratch
3899 pad. */
3900
3901void
3902prepare_for_detach (void)
3903{
3904 struct inferior *inf = current_inferior ();
f2907e49 3905 ptid_t pid_ptid = ptid_t (inf->pid);
8ff53139 3906 scoped_restore_current_thread restore_thread;
24291992 3907
9bcb1f16 3908 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3909
8ff53139
PA
3910 /* Remove all threads of INF from the global step-over chain. We
3911 want to stop any ongoing step-over, not start any new one. */
3912 thread_info *next;
3913 for (thread_info *tp = global_thread_step_over_chain_head;
3914 tp != nullptr;
3915 tp = next)
24291992 3916 {
8ff53139
PA
3917 next = global_thread_step_over_chain_next (tp);
3918 if (tp->inf == inf)
3919 global_thread_step_over_chain_remove (tp);
3920 }
24291992 3921
ac7d717c
PA
3922 /* If we were already in the middle of an inline step-over, and the
3923 thread stepping belongs to the inferior we're detaching, we need
3924 to restart the threads of other inferiors. */
3925 if (step_over_info.thread != -1)
3926 {
3927 infrun_debug_printf ("inline step-over in-process while detaching");
3928
3929 thread_info *thr = find_thread_global_id (step_over_info.thread);
3930 if (thr->inf == inf)
3931 {
3932 /* Since we removed threads of INF from the step-over chain,
3933 we know this won't start a step-over for INF. */
3934 clear_step_over_info ();
3935
3936 if (target_is_non_stop_p ())
3937 {
3938 /* Start a new step-over in another thread if there's
3939 one that needs it. */
3940 start_step_over ();
3941
3942 /* Restart all other threads (except the
3943 previously-stepping thread, since that one is still
3944 running). */
3945 if (!step_over_info_valid_p ())
3946 restart_threads (thr);
3947 }
3948 }
3949 }
3950
8ff53139
PA
3951 if (displaced_step_in_progress (inf))
3952 {
3953 infrun_debug_printf ("displaced-stepping in-process while detaching");
24291992 3954
8ff53139 3955 /* Stop threads currently displaced stepping, aborting it. */
24291992 3956
8ff53139
PA
3957 for (thread_info *thr : inf->non_exited_threads ())
3958 {
3959 if (thr->displaced_step_state.in_progress ())
3960 {
3961 if (thr->executing)
3962 {
3963 if (!thr->stop_requested)
3964 {
3965 target_stop (thr->ptid);
3966 thr->stop_requested = true;
3967 }
3968 }
3969 else
3970 thr->resumed = false;
3971 }
3972 }
24291992 3973
8ff53139
PA
3974 while (displaced_step_in_progress (inf))
3975 {
3976 wait_one_event event;
24291992 3977
8ff53139
PA
3978 event.target = inf->process_target ();
3979 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
24291992 3980
8ff53139
PA
3981 if (debug_infrun)
3982 print_target_wait_results (pid_ptid, event.ptid, &event.ws);
24291992 3983
8ff53139
PA
3984 handle_one (event);
3985 }
24291992 3986
8ff53139
PA
3987 /* It's OK to leave some of the threads of INF stopped, since
3988 they'll be detached shortly. */
24291992 3989 }
24291992
PA
3990}
3991
cd0fc7c3 3992/* Wait for control to return from inferior to debugger.
ae123ec6 3993
cd0fc7c3
SS
3994 If inferior gets a signal, we may decide to start it up again
3995 instead of returning. That is why there is a loop in this function.
3996 When this function actually returns it means the inferior
3997 should be left stopped and GDB should read more commands. */
3998
5b6d1e4f
PA
3999static void
4000wait_for_inferior (inferior *inf)
cd0fc7c3 4001{
1eb8556f 4002 infrun_debug_printf ("wait_for_inferior ()");
527159b7 4003
4c41382a 4004 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 4005
e6f5c25b
PA
4006 /* If an error happens while handling the event, propagate GDB's
4007 knowledge of the executing state to the frontend/user running
4008 state. */
5b6d1e4f
PA
4009 scoped_finish_thread_state finish_state
4010 (inf->process_target (), minus_one_ptid);
e6f5c25b 4011
c906108c
SS
4012 while (1)
4013 {
ae25568b
PA
4014 struct execution_control_state ecss;
4015 struct execution_control_state *ecs = &ecss;
29f49a6a 4016
ae25568b
PA
4017 memset (ecs, 0, sizeof (*ecs));
4018
ec9499be 4019 overlay_cache_invalid = 1;
ec9499be 4020
f15cb84a
YQ
4021 /* Flush target cache before starting to handle each event.
4022 Target was running and cache could be stale. This is just a
4023 heuristic. Running threads may modify target memory, but we
4024 don't get any event. */
4025 target_dcache_invalidate ();
4026
5b6d1e4f
PA
4027 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
4028 ecs->target = inf->process_target ();
c906108c 4029
f00150c9 4030 if (debug_infrun)
5b6d1e4f 4031 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 4032
cd0fc7c3
SS
 4033 /* Now figure out what to do with the result of the wait. */
4034 handle_inferior_event (ecs);
c906108c 4035
cd0fc7c3
SS
4036 if (!ecs->wait_some_more)
4037 break;
4038 }
4e1c45ea 4039
e6f5c25b 4040 /* No error, don't finish the state yet. */
731f534f 4041 finish_state.release ();
cd0fc7c3 4042}
c906108c 4043
d3d4baed
PA
4044/* Cleanup that reinstalls the readline callback handler, if the
4045 target is running in the background. If while handling the target
4046 event something triggered a secondary prompt, like e.g., a
4047 pagination prompt, we'll have removed the callback handler (see
4048 gdb_readline_wrapper_line). Need to do this as we go back to the
4049 event loop, ready to process further input. Note this has no
4050 effect if the handler hasn't actually been removed, because calling
4051 rl_callback_handler_install resets the line buffer, thus losing
4052 input. */
4053
4054static void
d238133d 4055reinstall_readline_callback_handler_cleanup ()
d3d4baed 4056{
3b12939d
PA
4057 struct ui *ui = current_ui;
4058
4059 if (!ui->async)
6c400b59
PA
4060 {
4061 /* We're not going back to the top level event loop yet. Don't
4062 install the readline callback, as it'd prep the terminal,
4063 readline-style (raw, noecho) (e.g., --batch). We'll install
4064 it the next time the prompt is displayed, when we're ready
4065 for input. */
4066 return;
4067 }
4068
3b12939d 4069 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
4070 gdb_rl_callback_handler_reinstall ();
4071}
4072
243a9253
PA
4073/* Clean up the FSMs of threads that are now stopped. In non-stop,
4074 that's just the event thread. In all-stop, that's all threads. */
4075
4076static void
4077clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4078{
08036331
PA
4079 if (ecs->event_thread != NULL
4080 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 4081 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
4082
4083 if (!non_stop)
4084 {
08036331 4085 for (thread_info *thr : all_non_exited_threads ())
dda83cd7 4086 {
243a9253
PA
4087 if (thr->thread_fsm == NULL)
4088 continue;
4089 if (thr == ecs->event_thread)
4090 continue;
4091
00431a78 4092 switch_to_thread (thr);
46e3ed7f 4093 thr->thread_fsm->clean_up (thr);
243a9253
PA
4094 }
4095
4096 if (ecs->event_thread != NULL)
00431a78 4097 switch_to_thread (ecs->event_thread);
243a9253
PA
4098 }
4099}
4100
3b12939d
PA
4101/* Helper for all_uis_check_sync_execution_done that works on the
4102 current UI. */
4103
4104static void
4105check_curr_ui_sync_execution_done (void)
4106{
4107 struct ui *ui = current_ui;
4108
4109 if (ui->prompt_state == PROMPT_NEEDED
4110 && ui->async
4111 && !gdb_in_secondary_prompt_p (ui))
4112 {
223ffa71 4113 target_terminal::ours ();
76727919 4114 gdb::observers::sync_execution_done.notify ();
3eb7562a 4115 ui_register_input_event_handler (ui);
3b12939d
PA
4116 }
4117}
4118
4119/* See infrun.h. */
4120
4121void
4122all_uis_check_sync_execution_done (void)
4123{
0e454242 4124 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
4125 {
4126 check_curr_ui_sync_execution_done ();
4127 }
4128}
4129
a8836c93
PA
4130/* See infrun.h. */
4131
4132void
4133all_uis_on_sync_execution_starting (void)
4134{
0e454242 4135 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
4136 {
4137 if (current_ui->prompt_state == PROMPT_NEEDED)
4138 async_disable_stdin ();
4139 }
4140}
4141
1777feb0 4142/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 4143 event loop whenever a change of state is detected on the file
1777feb0
MS
4144 descriptor corresponding to the target. It can be called more than
 4145 once to complete a single execution command, handling one target
 4146 event per call. If it is the last time
a474d7c2
PA
4147 that this function is called for a single execution command, then
4148 report to the user that the inferior has stopped, and do the
1777feb0 4149 necessary cleanups. */
43ff13b4
JM
4150
4151void
b1a35af2 4152fetch_inferior_event ()
43ff13b4 4153{
3ec3145c
SM
4154 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4155
0d1e5fa7 4156 struct execution_control_state ecss;
a474d7c2 4157 struct execution_control_state *ecs = &ecss;
0f641c01 4158 int cmd_done = 0;
43ff13b4 4159
0d1e5fa7
PA
4160 memset (ecs, 0, sizeof (*ecs));
4161
c61db772
PA
4162 /* Events are always processed with the main UI as current UI. This
4163 way, warnings, debug output, etc. are always consistently sent to
4164 the main console. */
4b6749b9 4165 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 4166
b78b3a29
TBA
4167 /* Temporarily disable pagination. Otherwise, the user would be
4168 given an option to press 'q' to quit, which would cause an early
4169 exit and could leave GDB in a half-baked state. */
4170 scoped_restore save_pagination
4171 = make_scoped_restore (&pagination_enabled, false);
4172
d3d4baed 4173 /* End up with readline processing input, if necessary. */
d238133d
TT
4174 {
4175 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4176
4177 /* We're handling a live event, so make sure we're doing live
4178 debugging. If we're looking at traceframes while the target is
4179 running, we're going to need to get back to that mode after
4180 handling the event. */
4181 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4182 if (non_stop)
4183 {
4184 maybe_restore_traceframe.emplace ();
4185 set_current_traceframe (-1);
4186 }
43ff13b4 4187
873657b9
PA
4188 /* The user/frontend should not notice a thread switch due to
4189 internal events. Make sure we revert to the user selected
4190 thread and frame after handling the event and running any
4191 breakpoint commands. */
4192 scoped_restore_current_thread restore_thread;
d238133d
TT
4193
4194 overlay_cache_invalid = 1;
4195 /* Flush target cache before starting to handle each event. Target
4196 was running and cache could be stale. This is just a heuristic.
4197 Running threads may modify target memory, but we don't get any
4198 event. */
4199 target_dcache_invalidate ();
4200
4201 scoped_restore save_exec_dir
4202 = make_scoped_restore (&execution_direction,
4203 target_execution_direction ());
4204
1192f124
SM
4205 /* Allow targets to pause their resumed threads while we handle
4206 the event. */
4207 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4208
ac0d67ed 4209 if (!do_target_wait (ecs, TARGET_WNOHANG))
1192f124
SM
4210 {
4211 infrun_debug_printf ("do_target_wait returned no event");
4212 disable_commit_resumed.reset_and_commit ();
4213 return;
4214 }
5b6d1e4f
PA
4215
4216 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
4217
4218 /* Switch to the target that generated the event, so we can do
7f08fd51
TBA
4219 target calls. */
4220 switch_to_target_no_thread (ecs->target);
d238133d
TT
4221
4222 if (debug_infrun)
5b6d1e4f 4223 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
4224
4225 /* If an error happens while handling the event, propagate GDB's
4226 knowledge of the executing state to the frontend/user running
4227 state. */
4228 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 4229 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 4230
979a0d13 4231 /* These run before the scoped_restore_current_thread above, so they
d238133d
TT
 4232 still apply to the thread which threw the exception. */
4233 auto defer_bpstat_clear
4234 = make_scope_exit (bpstat_clear_actions);
4235 auto defer_delete_threads
4236 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4237
 4238 /* Now figure out what to do with the result of the wait. */
4239 handle_inferior_event (ecs);
4240
4241 if (!ecs->wait_some_more)
4242 {
5b6d1e4f 4243 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
758cb810 4244 bool should_stop = true;
d238133d 4245 struct thread_info *thr = ecs->event_thread;
d6b48e9c 4246
d238133d 4247 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 4248
d238133d
TT
4249 if (thr != NULL)
4250 {
4251 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 4252
d238133d 4253 if (thread_fsm != NULL)
46e3ed7f 4254 should_stop = thread_fsm->should_stop (thr);
d238133d 4255 }
243a9253 4256
d238133d
TT
4257 if (!should_stop)
4258 {
4259 keep_going (ecs);
4260 }
4261 else
4262 {
46e3ed7f 4263 bool should_notify_stop = true;
d238133d 4264 int proceeded = 0;
1840d81a 4265
d238133d 4266 clean_up_just_stopped_threads_fsms (ecs);
243a9253 4267
d238133d 4268 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 4269 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 4270
d238133d
TT
4271 if (should_notify_stop)
4272 {
4273 /* We may not find an inferior if this was a process exit. */
4274 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
4275 proceeded = normal_stop ();
4276 }
243a9253 4277
d238133d
TT
4278 if (!proceeded)
4279 {
b1a35af2 4280 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
4281 cmd_done = 1;
4282 }
873657b9
PA
4283
4284 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4285 previously selected thread is gone. We have two
4286 choices - switch to no thread selected, or restore the
4287 previously selected thread (now exited). We chose the
 4288 latter, just because that's what GDB used to do. After
4289 this, "info threads" says "The current thread <Thread
4290 ID 2> has terminated." instead of "No thread
4291 selected.". */
4292 if (!non_stop
4293 && cmd_done
4294 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
4295 restore_thread.dont_restore ();
d238133d
TT
4296 }
4297 }
4f8d22e3 4298
d238133d
TT
4299 defer_delete_threads.release ();
4300 defer_bpstat_clear.release ();
29f49a6a 4301
d238133d
TT
4302 /* No error, don't finish the thread states yet. */
4303 finish_state.release ();
731f534f 4304
1192f124
SM
4305 disable_commit_resumed.reset_and_commit ();
4306
d238133d
TT
4307 /* This scope is used to ensure that readline callbacks are
4308 reinstalled here. */
4309 }
4f8d22e3 4310
3b12939d
PA
4311 /* If a UI was in sync execution mode, and now isn't, restore its
4312 prompt (a synchronous execution command has finished, and we're
4313 ready for input). */
4314 all_uis_check_sync_execution_done ();
0f641c01
PA
4315
4316 if (cmd_done
0f641c01 4317 && exec_done_display_p
00431a78
PA
4318 && (inferior_ptid == null_ptid
4319 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 4320 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
4321}
4322
29734269
SM
4323/* See infrun.h. */
4324
edb3359d 4325void
29734269
SM
4326set_step_info (thread_info *tp, struct frame_info *frame,
4327 struct symtab_and_line sal)
edb3359d 4328{
29734269
SM
4329 /* This can be removed once this function no longer implicitly relies on the
4330 inferior_ptid value. */
4331 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4332
16c381f0
JK
4333 tp->control.step_frame_id = get_frame_id (frame);
4334 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4335
4336 tp->current_symtab = sal.symtab;
4337 tp->current_line = sal.line;
4338}
4339
0d1e5fa7
PA
4340/* Clear context switchable stepping state. */
4341
4342void
4e1c45ea 4343init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4344{
7f5ef605 4345 tss->stepped_breakpoint = 0;
0d1e5fa7 4346 tss->stepping_over_breakpoint = 0;
963f9c80 4347 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4348 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4349}
4350
ab1ddbcf 4351/* See infrun.h. */
c32c64b7 4352
6efcd9a8 4353void
5b6d1e4f
PA
4354set_last_target_status (process_stratum_target *target, ptid_t ptid,
4355 target_waitstatus status)
c32c64b7 4356{
5b6d1e4f 4357 target_last_proc_target = target;
c32c64b7
DE
4358 target_last_wait_ptid = ptid;
4359 target_last_waitstatus = status;
4360}
4361
ab1ddbcf 4362/* See infrun.h. */
e02bc4cc
DS
4363
4364void
5b6d1e4f
PA
4365get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4366 target_waitstatus *status)
e02bc4cc 4367{
5b6d1e4f
PA
4368 if (target != nullptr)
4369 *target = target_last_proc_target;
ab1ddbcf
PA
4370 if (ptid != nullptr)
4371 *ptid = target_last_wait_ptid;
4372 if (status != nullptr)
4373 *status = target_last_waitstatus;
e02bc4cc
DS
4374}
4375
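/* Minimal usage sketch for the accessors above (illustrative only):

     process_stratum_target *target;
     ptid_t ptid;
     struct target_waitstatus ws;

     get_last_target_status (&target, &ptid, &ws);

   Any of the three pointers may be NULL when the caller only cares
   about a subset, since each out-parameter is checked against nullptr
   before being written. */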
ab1ddbcf
PA
4376/* See infrun.h. */
4377
ac264b3b
MS
4378void
4379nullify_last_target_wait_ptid (void)
4380{
5b6d1e4f 4381 target_last_proc_target = nullptr;
ac264b3b 4382 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4383 target_last_waitstatus = {};
ac264b3b
MS
4384}
4385
dcf4fbde 4386/* Switch thread contexts. */
dd80620e
MS
4387
4388static void
00431a78 4389context_switch (execution_control_state *ecs)
dd80620e 4390{
1eb8556f 4391 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4392 && (inferior_ptid == null_ptid
4393 || ecs->event_thread != inferior_thread ()))
fd48f117 4394 {
1eb8556f
SM
4395 infrun_debug_printf ("Switching context from %s to %s",
4396 target_pid_to_str (inferior_ptid).c_str (),
4397 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4398 }
4399
00431a78 4400 switch_to_thread (ecs->event_thread);
dd80620e
MS
4401}
4402
d8dd4d5f
PA
4403/* If the target can't tell whether we've hit breakpoints
4404 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4405 check whether that could have been caused by a breakpoint. If so,
4406 adjust the PC, per gdbarch_decr_pc_after_break. */
4407
4fa8626c 4408static void
d8dd4d5f
PA
4409adjust_pc_after_break (struct thread_info *thread,
4410 struct target_waitstatus *ws)
4fa8626c 4411{
24a73cce
UW
4412 struct regcache *regcache;
4413 struct gdbarch *gdbarch;
118e6252 4414 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4415
4fa8626c
DJ
4416 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4417 we aren't, just return.
9709f61c
DJ
4418
4419 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4420 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4421 implemented by software breakpoints should be handled through the normal
4422 breakpoint layer.
8fb3e588 4423
4fa8626c
DJ
4424 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4425 different signals (SIGILL or SIGEMT for instance), but it is less
4426 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4427 gdbarch_decr_pc_after_break. I don't know any specific target that
4428 generates these signals at breakpoints (the code has been in GDB since at
4429 least 1992) so I can not guess how to handle them here.
8fb3e588 4430
e6cf7916
UW
4431 In earlier versions of GDB, a target with
4432 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4433 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4434 target with both of these set in GDB history, and it seems unlikely to be
4435 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4436
d8dd4d5f 4437 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4438 return;
4439
d8dd4d5f 4440 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4441 return;
4442
4058b839
PA
4443 /* In reverse execution, when a breakpoint is hit, the instruction
4444 under it has already been de-executed. The reported PC always
4445 points at the breakpoint address, so adjusting it further would
4446 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4447 architecture:
4448
4449 B1 0x08000000 : INSN1
4450 B2 0x08000001 : INSN2
4451 0x08000002 : INSN3
4452 PC -> 0x08000003 : INSN4
4453
4454 Say you're stopped at 0x08000003 as above. Reverse continuing
4455 from that point should hit B2 as below. Reading the PC when the
4456 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4457 been de-executed already.
4458
4459 B1 0x08000000 : INSN1
4460 B2 PC -> 0x08000001 : INSN2
4461 0x08000002 : INSN3
4462 0x08000003 : INSN4
4463
4464 We can't apply the same logic as for forward execution, because
4465 we would wrongly adjust the PC to 0x08000000, since there's a
4466 breakpoint at PC - 1. We'd then report a hit on B1, although
4467 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4468 behaviour. */
4469 if (execution_direction == EXEC_REVERSE)
4470 return;
4471
1cf4d951
PA
4472 /* If the target can tell whether the thread hit a SW breakpoint,
4473 trust it. Targets that can tell also adjust the PC
4474 themselves. */
4475 if (target_supports_stopped_by_sw_breakpoint ())
4476 return;
4477
4478 /* Note that relying on whether a breakpoint is planted in memory to
4479 determine this can fail. E.g., the breakpoint could have been
4480 removed since. Or the thread could have been told to step an
4481 instruction the size of a breakpoint instruction, and only
4482 _after_ was a breakpoint inserted at its address. */
4483
24a73cce
UW
4484 /* If this target does not decrement the PC after breakpoints, then
4485 we have nothing to do. */
00431a78 4486 regcache = get_thread_regcache (thread);
ac7936df 4487 gdbarch = regcache->arch ();
118e6252 4488
527a273a 4489 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4490 if (decr_pc == 0)
24a73cce
UW
4491 return;
4492
8b86c959 4493 const address_space *aspace = regcache->aspace ();
6c95b8df 4494
8aad930b
AC
4495 /* Find the location where (if we've hit a breakpoint) the
4496 breakpoint would be. */
118e6252 4497 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4498
1cf4d951
PA
4499 /* If the target can't tell whether a software breakpoint triggered,
4500 fallback to figuring it out based on breakpoints we think were
4501 inserted in the target, and on whether the thread was stepped or
4502 continued. */
4503
1c5cfe86
PA
4504 /* Check whether there actually is a software breakpoint inserted at
4505 that location.
4506
4507 If in non-stop mode, a race condition is possible where we've
4508 removed a breakpoint, but stop events for that breakpoint were
4509 already queued and arrive later. To suppress those spurious
4510 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4511 and retire them after a number of stop events are reported. Note
4512 this is a heuristic and can thus get confused. The real fix is
4513 to get the "stopped by SW BP and needs adjustment" info out of
4514 the target/kernel (and thus never reach here; see above). */
6c95b8df 4515 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4516 || (target_is_non_stop_p ()
4517 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4518 {
07036511 4519 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4520
8213266a 4521 if (record_full_is_used ())
07036511
TT
4522 restore_operation_disable.emplace
4523 (record_full_gdb_operation_disable_set ());
96429cc8 4524
1c0fdd0e
UW
4525 /* When using hardware single-step, a SIGTRAP is reported for both
4526 a completed single-step and a software breakpoint. Need to
4527 differentiate between the two, as the latter needs adjusting
4528 but the former does not.
4529
4530 The SIGTRAP can be due to a completed hardware single-step only if
4531 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4532 - this thread is currently being stepped
4533
4534 If either of these conditions does not hold, we must have stopped due
4535 to hitting a software breakpoint, and have to back up to the
4536 breakpoint address.
4537
4538 As a special case, we could have hardware single-stepped a
4539 software breakpoint. In this case (prev_pc == breakpoint_pc),
4540 we also need to back up to the breakpoint address. */
4541
d8dd4d5f
PA
4542 if (thread_has_single_step_breakpoints_set (thread)
4543 || !currently_stepping (thread)
4544 || (thread->stepped_breakpoint
4545 && thread->prev_pc == breakpoint_pc))
515630c5 4546 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4547 }
4fa8626c
DJ
4548}
4549
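/* Illustrative sketch, not part of infrun.c: the PC-rewind decision made
   by adjust_pc_after_break above, reduced to a standalone helper.  The
   type and field names below are hypothetical stand-ins for GDB's
   thread and regcache state; only the decision logic is mirrored.  */

#include <cstdint>

struct toy_trap_thread
{
  uint64_t prev_pc;                  /* PC before the last resume.  */
  bool stepping;                     /* Thread was being stepped.  */
  bool has_single_step_breakpoints;  /* SW single-step bps were planted.  */
  bool stepped_breakpoint;           /* We hardware-stepped a SW bp.  */
};

/* Given the PC reported with a SIGTRAP, return the PC GDB should use.
   SW_BP_AT_BREAKPOINT_PC says whether a software breakpoint is known to
   be inserted at REPORTED_PC - DECR_PC.  */

static uint64_t
adjusted_pc (const toy_trap_thread &t, uint64_t reported_pc,
             uint64_t decr_pc, bool sw_bp_at_breakpoint_pc)
{
  if (decr_pc == 0 || !sw_bp_at_breakpoint_pc)
    return reported_pc;                /* Nothing to undo.  */

  uint64_t breakpoint_pc = reported_pc - decr_pc;

  /* Rewind unless this SIGTRAP can only be a completed hardware
     single-step: the thread was being stepped, no software single-step
     breakpoints were in use, and we did not just hardware-step the
     breakpoint instruction itself.  */
  if (t.has_single_step_breakpoints
      || !t.stepping
      || (t.stepped_breakpoint && t.prev_pc == breakpoint_pc))
    return breakpoint_pc;

  return reported_pc;
}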
c4464ade 4550static bool
edb3359d
DJ
4551stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4552{
4553 for (frame = get_prev_frame (frame);
4554 frame != NULL;
4555 frame = get_prev_frame (frame))
4556 {
4557 if (frame_id_eq (get_frame_id (frame), step_frame_id))
c4464ade
SM
4558 return true;
4559
edb3359d
DJ
4560 if (get_frame_type (frame) != INLINE_FRAME)
4561 break;
4562 }
4563
c4464ade 4564 return false;
edb3359d
DJ
4565}
4566
4a4c04f1
BE
4567/* Look for an inline frame that is marked for skip.
4568 If PREV_FRAME is TRUE start at the previous frame,
4569 otherwise start at the current frame. Stop at the
4570 first non-inline frame, or at the frame where the
4571 step started. */
4572
4573static bool
4574inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4575{
4576 struct frame_info *frame = get_current_frame ();
4577
4578 if (prev_frame)
4579 frame = get_prev_frame (frame);
4580
4581 for (; frame != NULL; frame = get_prev_frame (frame))
4582 {
4583 const char *fn = NULL;
4584 symtab_and_line sal;
4585 struct symbol *sym;
4586
4587 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4588 break;
4589 if (get_frame_type (frame) != INLINE_FRAME)
4590 break;
4591
4592 sal = find_frame_sal (frame);
4593 sym = get_frame_function (frame);
4594
4595 if (sym != NULL)
4596 fn = sym->print_name ();
4597
4598 if (sal.line != 0
4599 && function_name_is_marked_for_skip (fn, sal))
4600 return true;
4601 }
4602
4603 return false;
4604}
4605
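/* Illustrative sketch, not part of infrun.c: the outer-frame walk done by
   inline_frame_is_marked_for_skip above, over a hypothetical minimal
   frame list.  The walk stops at the frame where the step started or at
   the first non-inline frame, and reports whether any inline frame seen
   on the way is marked for skipping.  All names are made up.  */

struct toy_frame
{
  const toy_frame *outer;   /* Caller frame, or nullptr for the outermost.  */
  int id;                   /* Stands in for a frame id.  */
  bool is_inline;           /* Synthesized from inlined-function info?  */
  bool marked_for_skip;     /* Would the "skip" machinery skip it?  */
};

static bool
any_inline_frame_marked_for_skip (const toy_frame *frame, int step_frame_id)
{
  for (; frame != nullptr; frame = frame->outer)
    {
      if (frame->id == step_frame_id)
        break;                         /* Reached where the step started.  */
      if (!frame->is_inline)
        break;                         /* First real (non-inline) frame.  */
      if (frame->marked_for_skip)
        return true;
    }
  return false;
}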
c65d6b55
PA
4606/* If the event thread has the stop requested flag set, pretend it
4607 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4608 target_stop). */
4609
4610static bool
4611handle_stop_requested (struct execution_control_state *ecs)
4612{
4613 if (ecs->event_thread->stop_requested)
4614 {
4615 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4616 ecs->ws.value.sig = GDB_SIGNAL_0;
4617 handle_signal_stop (ecs);
4618 return true;
4619 }
4620 return false;
4621}
4622
a96d9b2e 4623/* Auxiliary function that handles syscall entry/return events.
c4464ade
SM
4624 It returns true if the inferior should keep going (and GDB
4625 should ignore the event), or false if the event deserves to be
a96d9b2e 4626 processed. */
ca2163eb 4627
c4464ade 4628static bool
ca2163eb 4629handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4630{
ca2163eb 4631 struct regcache *regcache;
ca2163eb
PA
4632 int syscall_number;
4633
00431a78 4634 context_switch (ecs);
ca2163eb 4635
00431a78 4636 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4637 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4638 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4639
a96d9b2e
SDJ
4640 if (catch_syscall_enabled () > 0
4641 && catching_syscall_number (syscall_number) > 0)
4642 {
1eb8556f 4643 infrun_debug_printf ("syscall number=%d", syscall_number);
a96d9b2e 4644
16c381f0 4645 ecs->event_thread->control.stop_bpstat
a01bda52 4646 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4647 ecs->event_thread->suspend.stop_pc,
4648 ecs->event_thread, &ecs->ws);
ab04a2af 4649
c65d6b55 4650 if (handle_stop_requested (ecs))
c4464ade 4651 return false;
c65d6b55 4652
ce12b012 4653 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4654 {
4655 /* Catchpoint hit. */
c4464ade 4656 return false;
ca2163eb 4657 }
a96d9b2e 4658 }
ca2163eb 4659
c65d6b55 4660 if (handle_stop_requested (ecs))
c4464ade 4661 return false;
c65d6b55 4662
ca2163eb 4663 /* If no catchpoint triggered for this, then keep going. */
ca2163eb 4664 keep_going (ecs);
c4464ade
SM
4665
4666 return true;
a96d9b2e
SDJ
4667}
4668
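/* Illustrative sketch, not part of infrun.c: the essence of the decision
   handle_syscall_event makes above.  The std::set stands in for GDB's
   syscall catchpoint list; in reality the check goes through
   bpstat_stop_status / bpstat_causes_stop.  */

#include <set>

static bool
syscall_event_causes_stop (const std::set<int> &caught_syscalls,
                           int syscall_number, bool stop_requested)
{
  /* An explicit stop request always wins, mirroring
     handle_stop_requested.  */
  if (stop_requested)
    return true;

  /* Otherwise stop only if a catchpoint matches this syscall number;
     any other syscall entry/return event is transparent and the
     inferior keeps going.  */
  return caught_syscalls.count (syscall_number) > 0;
}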
7e324e48
GB
4669/* Lazily fill in the execution_control_state's stop_func_* fields. */
4670
4671static void
4672fill_in_stop_func (struct gdbarch *gdbarch,
4673 struct execution_control_state *ecs)
4674{
4675 if (!ecs->stop_func_filled_in)
4676 {
98a617f8 4677 const block *block;
fe830662 4678 const general_symbol_info *gsi;
98a617f8 4679
7e324e48
GB
4680 /* Don't care about return value; stop_func_start and stop_func_name
4681 will both be 0 if it doesn't work. */
fe830662
TT
4682 find_pc_partial_function_sym (ecs->event_thread->suspend.stop_pc,
4683 &gsi,
4684 &ecs->stop_func_start,
4685 &ecs->stop_func_end,
4686 &block);
4687 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
98a617f8
KB
4688
4689 /* The call to find_pc_partial_function, above, will set
4690 stop_func_start and stop_func_end to the start and end
4691 of the range containing the stop pc. If this range
4692 contains the entry pc for the block (which is always the
4693 case for contiguous blocks), advance stop_func_start past
4694 the function's start offset and entrypoint. Note that
4695 stop_func_start is NOT advanced when in a range of a
4696 non-contiguous block that does not contain the entry pc. */
4697 if (block != nullptr
4698 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4699 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4700 {
4701 ecs->stop_func_start
4702 += gdbarch_deprecated_function_start_offset (gdbarch);
4703
4704 if (gdbarch_skip_entrypoint_p (gdbarch))
4705 ecs->stop_func_start
4706 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4707 }
591a12a1 4708
7e324e48
GB
4709 ecs->stop_func_filled_in = 1;
4710 }
4711}
4712
4f5d7f63 4713
00431a78 4714/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4715
4716static enum stop_kind
00431a78 4717get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4718{
5b6d1e4f 4719 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4720
4721 gdb_assert (inf != NULL);
4722 return inf->control.stop_soon;
4723}
4724
5b6d1e4f
PA
4725/* Poll for one event out of the current target. Store the resulting
4726 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4727
4728static ptid_t
5b6d1e4f 4729poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4730{
4731 ptid_t event_ptid;
372316f1
PA
4732
4733 overlay_cache_invalid = 1;
4734
4735 /* Flush target cache before starting to handle each event.
4736 Target was running and cache could be stale. This is just a
4737 heuristic. Running threads may modify target memory, but we
4738 don't get any event. */
4739 target_dcache_invalidate ();
4740
4741 if (deprecated_target_wait_hook)
5b6d1e4f 4742 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4743 else
5b6d1e4f 4744 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4745
4746 if (debug_infrun)
5b6d1e4f 4747 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4748
4749 return event_ptid;
4750}
4751
5b6d1e4f
PA
4752/* Wait for one event out of any target. */
4753
4754static wait_one_event
4755wait_one ()
4756{
4757 while (1)
4758 {
4759 for (inferior *inf : all_inferiors ())
4760 {
4761 process_stratum_target *target = inf->process_target ();
4762 if (target == NULL
4763 || !target->is_async_p ()
4764 || !target->threads_executing)
4765 continue;
4766
4767 switch_to_inferior_no_thread (inf);
4768
4769 wait_one_event event;
4770 event.target = target;
4771 event.ptid = poll_one_curr_target (&event.ws);
4772
4773 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4774 {
4775 /* If nothing is resumed, remove the target from the
4776 event loop. */
4777 target_async (0);
4778 }
4779 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4780 return event;
4781 }
4782
4783 /* Block waiting for some event. */
4784
4785 fd_set readfds;
4786 int nfds = 0;
4787
4788 FD_ZERO (&readfds);
4789
4790 for (inferior *inf : all_inferiors ())
4791 {
4792 process_stratum_target *target = inf->process_target ();
4793 if (target == NULL
4794 || !target->is_async_p ()
4795 || !target->threads_executing)
4796 continue;
4797
4798 int fd = target->async_wait_fd ();
4799 FD_SET (fd, &readfds);
4800 if (nfds <= fd)
4801 nfds = fd + 1;
4802 }
4803
4804 if (nfds == 0)
4805 {
4806 /* No waitable targets left. All must be stopped. */
4807 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4808 }
4809
4810 QUIT;
4811
4812 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4813 if (numfds < 0)
4814 {
4815 if (errno == EINTR)
4816 continue;
4817 else
4818 perror_with_name ("interruptible_select");
4819 }
4820 }
4821}
4822
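/* Illustrative sketch, not part of infrun.c: the poll-then-block pattern
   used by wait_one above, written against plain POSIX file descriptors
   instead of GDB process targets.  "event_source" is a hypothetical
   event producer that can be polled without blocking and exposes an fd
   that becomes readable when it has something to report.  */

#include <sys/select.h>
#include <vector>
#include <cerrno>

struct event_source
{
  int fd;                          /* Readable when an event is pending.  */
  bool (*poll) (event_source &);   /* Non-blocking; true if it produced an event.  */
};

static void
wait_for_one_event (std::vector<event_source> &sources)
{
  while (true)
    {
      /* First give every source a chance to hand us an event without
         blocking, like the TARGET_WNOHANG pass above.  */
      for (event_source &s : sources)
        if (s.poll (s))
          return;

      /* Nothing ready: block in select until some fd becomes readable,
         then go around and poll again.  */
      fd_set readfds;
      FD_ZERO (&readfds);
      int nfds = 0;
      for (const event_source &s : sources)
        {
          FD_SET (s.fd, &readfds);
          if (nfds <= s.fd)
            nfds = s.fd + 1;
        }

      if (nfds == 0)
        return;                     /* No source can produce events anymore.  */

      if (select (nfds, &readfds, nullptr, nullptr, nullptr) < 0
          && errno != EINTR)
        return;                     /* Real error; give up in this sketch.  */
    }
}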
372316f1
PA
4823/* Save the thread's event and stop reason to process it later. */
4824
4825static void
5b6d1e4f 4826save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4827{
1eb8556f
SM
4828 infrun_debug_printf ("saving status %s for %d.%ld.%ld",
4829 target_waitstatus_to_string (ws).c_str (),
4830 tp->ptid.pid (),
4831 tp->ptid.lwp (),
4832 tp->ptid.tid ());
372316f1
PA
4833
4834 /* Record for later. */
4835 tp->suspend.waitstatus = *ws;
4836 tp->suspend.waitstatus_pending_p = 1;
4837
372316f1
PA
4838 if (ws->kind == TARGET_WAITKIND_STOPPED
4839 && ws->value.sig == GDB_SIGNAL_TRAP)
4840 {
89ba430c
SM
4841 struct regcache *regcache = get_thread_regcache (tp);
4842 const address_space *aspace = regcache->aspace ();
372316f1
PA
4843 CORE_ADDR pc = regcache_read_pc (regcache);
4844
4845 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4846
18493a00
PA
4847 scoped_restore_current_thread restore_thread;
4848 switch_to_thread (tp);
4849
4850 if (target_stopped_by_watchpoint ())
372316f1
PA
4851 {
4852 tp->suspend.stop_reason
4853 = TARGET_STOPPED_BY_WATCHPOINT;
4854 }
4855 else if (target_supports_stopped_by_sw_breakpoint ()
18493a00 4856 && target_stopped_by_sw_breakpoint ())
372316f1
PA
4857 {
4858 tp->suspend.stop_reason
4859 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4860 }
4861 else if (target_supports_stopped_by_hw_breakpoint ()
18493a00 4862 && target_stopped_by_hw_breakpoint ())
372316f1
PA
4863 {
4864 tp->suspend.stop_reason
4865 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4866 }
4867 else if (!target_supports_stopped_by_hw_breakpoint ()
4868 && hardware_breakpoint_inserted_here_p (aspace,
4869 pc))
4870 {
4871 tp->suspend.stop_reason
4872 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4873 }
4874 else if (!target_supports_stopped_by_sw_breakpoint ()
4875 && software_breakpoint_inserted_here_p (aspace,
4876 pc))
4877 {
4878 tp->suspend.stop_reason
4879 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4880 }
4881 else if (!thread_has_single_step_breakpoints_set (tp)
4882 && currently_stepping (tp))
4883 {
4884 tp->suspend.stop_reason
4885 = TARGET_STOPPED_BY_SINGLE_STEP;
4886 }
4887 }
4888}
4889
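/* Illustrative sketch, not part of infrun.c: the classification order
   save_waitstatus above applies to a SIGTRAP when the target cannot
   report the stop reason itself.  Watchpoints win, then target-reported
   breakpoint kinds, then breakpoints GDB knows it inserted, then a
   plain completed single-step.  All names are hypothetical.  */

enum class toy_stop_reason
{
  none, watchpoint, sw_breakpoint, hw_breakpoint, single_step
};

struct toy_trap_facts
{
  bool stopped_by_watchpoint;
  bool target_reports_sw_bp, target_says_sw_bp;
  bool target_reports_hw_bp, target_says_hw_bp;
  bool hw_bp_inserted_at_pc, sw_bp_inserted_at_pc;
  bool has_single_step_bps, stepping;
};

static toy_stop_reason
classify_sigtrap (const toy_trap_facts &f)
{
  if (f.stopped_by_watchpoint)
    return toy_stop_reason::watchpoint;
  if (f.target_reports_sw_bp && f.target_says_sw_bp)
    return toy_stop_reason::sw_breakpoint;
  if (f.target_reports_hw_bp && f.target_says_hw_bp)
    return toy_stop_reason::hw_breakpoint;
  if (!f.target_reports_hw_bp && f.hw_bp_inserted_at_pc)
    return toy_stop_reason::hw_breakpoint;
  if (!f.target_reports_sw_bp && f.sw_bp_inserted_at_pc)
    return toy_stop_reason::sw_breakpoint;
  if (!f.has_single_step_bps && f.stepping)
    return toy_stop_reason::single_step;
  return toy_stop_reason::none;
}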
293b3ebc
TBA
4890/* Mark the non-executing threads accordingly. In all-stop, all
4891 threads of all processes are stopped when we get any event
4892 reported. In non-stop mode, only the event thread stops. */
4893
4894static void
4895mark_non_executing_threads (process_stratum_target *target,
4896 ptid_t event_ptid,
4897 struct target_waitstatus ws)
4898{
4899 ptid_t mark_ptid;
4900
4901 if (!target_is_non_stop_p ())
4902 mark_ptid = minus_one_ptid;
4903 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4904 || ws.kind == TARGET_WAITKIND_EXITED)
4905 {
4906 /* If we're handling a process exit in non-stop mode, even
4907 though threads haven't been deleted yet, one would think
4908 that there is nothing to do, as threads of the dead process
4909 will be soon deleted, and threads of any other process were
4910 left running. However, on some targets, threads survive a
4911 process exit event. E.g., for the "checkpoint" command,
4912 when the current checkpoint/fork exits, linux-fork.c
4913 automatically switches to another fork from within
4914 target_mourn_inferior, by associating the same
4915 inferior/thread to another fork. We haven't mourned yet at
4916 this point, but we must mark any threads left in the
4917 process as not-executing so that finish_thread_state marks
4918 them stopped (in the user's perspective) if/when we present
4919 the stop to the user. */
4920 mark_ptid = ptid_t (event_ptid.pid ());
4921 }
4922 else
4923 mark_ptid = event_ptid;
4924
4925 set_executing (target, mark_ptid, false);
4926
4927 /* Likewise the resumed flag. */
4928 set_resumed (target, mark_ptid, false);
4929}
4930
d758e62c
PA
4931/* Handle one event after stopping threads. If the eventing thread
4932 reports back any interesting event, we leave it pending. If the
4933 eventing thread was in the middle of a displaced step, we
8ff53139
PA
4934 cancel/finish it, and unless the thread's inferior is being
4935 detached, put the thread back in the step-over chain. Returns true
4936 if there are no resumed threads left in the target (thus there's no
4937 point in waiting further), false otherwise. */
d758e62c
PA
4938
4939static bool
4940handle_one (const wait_one_event &event)
4941{
4942 infrun_debug_printf
4943 ("%s %s", target_waitstatus_to_string (&event.ws).c_str (),
4944 target_pid_to_str (event.ptid).c_str ());
4945
4946 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4947 {
4948 /* All resumed threads exited. */
4949 return true;
4950 }
4951 else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4952 || event.ws.kind == TARGET_WAITKIND_EXITED
4953 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
4954 {
4955 /* One thread/process exited/signalled. */
4956
4957 thread_info *t = nullptr;
4958
4959 /* The target may have reported just a pid. If so, try
4960 the first non-exited thread. */
4961 if (event.ptid.is_pid ())
4962 {
4963 int pid = event.ptid.pid ();
4964 inferior *inf = find_inferior_pid (event.target, pid);
4965 for (thread_info *tp : inf->non_exited_threads ())
4966 {
4967 t = tp;
4968 break;
4969 }
4970
4971 /* If there is no available thread, the event would
4972 have to be appended to a per-inferior event list,
4973 which does not exist (and if it did, we'd have
4974 to adjust the run control commands to be able to
4975 resume such an inferior). We assert here instead
4976 of going into an infinite loop. */
4977 gdb_assert (t != nullptr);
4978
4979 infrun_debug_printf
4980 ("using %s", target_pid_to_str (t->ptid).c_str ());
4981 }
4982 else
4983 {
4984 t = find_thread_ptid (event.target, event.ptid);
4985 /* Check if this is the first time we see this thread.
4986 Don't bother adding if it individually exited. */
4987 if (t == nullptr
4988 && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
4989 t = add_thread (event.target, event.ptid);
4990 }
4991
4992 if (t != nullptr)
4993 {
4994 /* Set the threads as non-executing to avoid
4995 another stop attempt on them. */
4996 switch_to_thread_no_regs (t);
4997 mark_non_executing_threads (event.target, event.ptid,
4998 event.ws);
4999 save_waitstatus (t, &event.ws);
5000 t->stop_requested = false;
5001 }
5002 }
5003 else
5004 {
5005 thread_info *t = find_thread_ptid (event.target, event.ptid);
5006 if (t == NULL)
5007 t = add_thread (event.target, event.ptid);
5008
5009 t->stop_requested = 0;
5010 t->executing = 0;
5011 t->resumed = false;
5012 t->control.may_range_step = 0;
5013
5014 /* This may be the first time we see the inferior report
5015 a stop. */
5016 inferior *inf = find_inferior_ptid (event.target, event.ptid);
5017 if (inf->needs_setup)
5018 {
5019 switch_to_thread_no_regs (t);
5020 setup_inferior (0);
5021 }
5022
5023 if (event.ws.kind == TARGET_WAITKIND_STOPPED
5024 && event.ws.value.sig == GDB_SIGNAL_0)
5025 {
5026 /* We caught the event that we intended to catch, so
5027 there's no event pending. */
5028 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
5029 t->suspend.waitstatus_pending_p = 0;
5030
5031 if (displaced_step_finish (t, GDB_SIGNAL_0)
5032 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5033 {
5034 /* Add it back to the step-over queue. */
5035 infrun_debug_printf
5036 ("displaced-step of %s canceled",
5037 target_pid_to_str (t->ptid).c_str ());
5038
5039 t->control.trap_expected = 0;
8ff53139
PA
5040 if (!t->inf->detaching)
5041 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
5042 }
5043 }
5044 else
5045 {
5046 enum gdb_signal sig;
5047 struct regcache *regcache;
5048
5049 infrun_debug_printf
5050 ("target_wait %s, saving status for %d.%ld.%ld",
5051 target_waitstatus_to_string (&event.ws).c_str (),
5052 t->ptid.pid (), t->ptid.lwp (), t->ptid.tid ());
5053
5054 /* Record for later. */
5055 save_waitstatus (t, &event.ws);
5056
5057 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
5058 ? event.ws.value.sig : GDB_SIGNAL_0);
5059
5060 if (displaced_step_finish (t, sig)
5061 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5062 {
5063 /* Add it back to the step-over queue. */
5064 t->control.trap_expected = 0;
8ff53139
PA
5065 if (!t->inf->detaching)
5066 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
5067 }
5068
5069 regcache = get_thread_regcache (t);
5070 t->suspend.stop_pc = regcache_read_pc (regcache);
5071
5072 infrun_debug_printf ("saved stop_pc=%s for %s "
5073 "(currently_stepping=%d)",
5074 paddress (target_gdbarch (),
5075 t->suspend.stop_pc),
5076 target_pid_to_str (t->ptid).c_str (),
5077 currently_stepping (t));
5078 }
5079 }
5080
5081 return false;
5082}
5083
6efcd9a8 5084/* See infrun.h. */
372316f1 5085
6efcd9a8 5086void
4ffff7d3 5087stop_all_threads (const char *reason, inferior *inf)
372316f1
PA
5088{
5089 /* We may need multiple passes to discover all threads. */
5090 int pass;
5091 int iterations = 0;
372316f1 5092
53cccef1 5093 gdb_assert (exists_non_stop_target ());
372316f1 5094
4ffff7d3
SM
5095 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5096 inf != nullptr ? inf->num : -1);
372316f1 5097
00431a78 5098 scoped_restore_current_thread restore_thread;
372316f1 5099
4ffff7d3 5100 /* Enable thread events on relevant targets. */
6ad82919
TBA
5101 for (auto *target : all_non_exited_process_targets ())
5102 {
4ffff7d3
SM
5103 if (inf != nullptr && inf->process_target () != target)
5104 continue;
5105
6ad82919
TBA
5106 switch_to_target_no_thread (target);
5107 target_thread_events (true);
5108 }
5109
5110 SCOPE_EXIT
5111 {
4ffff7d3 5112 /* Disable thread events on relevant targets. */
6ad82919
TBA
5113 for (auto *target : all_non_exited_process_targets ())
5114 {
4ffff7d3
SM
5115 if (inf != nullptr && inf->process_target () != target)
5116 continue;
5117
6ad82919
TBA
5118 switch_to_target_no_thread (target);
5119 target_thread_events (false);
5120 }
5121
17417fb0 5122 /* Use debug_prefixed_printf directly to get a meaningful function
dda83cd7 5123 name. */
6ad82919 5124 if (debug_infrun)
17417fb0 5125 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
6ad82919 5126 };
65706a29 5127
372316f1
PA
5128 /* Request threads to stop, and then wait for the stops. Because
5129 threads we already know about can spawn more threads while we're
5130 trying to stop them, and we only learn about new threads when we
5131 update the thread list, do this in a loop, and keep iterating
5132 until two passes find no threads that need to be stopped. */
5133 for (pass = 0; pass < 2; pass++, iterations++)
5134 {
1eb8556f 5135 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
372316f1
PA
5136 while (1)
5137 {
29d6859f 5138 int waits_needed = 0;
372316f1 5139
a05575d3
TBA
5140 for (auto *target : all_non_exited_process_targets ())
5141 {
4ffff7d3
SM
5142 if (inf != nullptr && inf->process_target () != target)
5143 continue;
5144
a05575d3
TBA
5145 switch_to_target_no_thread (target);
5146 update_thread_list ();
5147 }
372316f1
PA
5148
5149 /* Go through all threads looking for threads that we need
5150 to tell the target to stop. */
08036331 5151 for (thread_info *t : all_non_exited_threads ())
372316f1 5152 {
4ffff7d3
SM
5153 if (inf != nullptr && t->inf != inf)
5154 continue;
5155
53cccef1
TBA
5156 /* For a single-target setting with an all-stop target,
5157 we would not even arrive here. For a multi-target
5158 setting, until GDB is able to handle a mixture of
5159 all-stop and non-stop targets, simply skip all-stop
5160 targets' threads. This should be fine due to the
5161 protection of 'check_multi_target_resumption'. */
5162
5163 switch_to_thread_no_regs (t);
5164 if (!target_is_non_stop_p ())
5165 continue;
5166
372316f1
PA
5167 if (t->executing)
5168 {
5169 /* If already stopping, don't request a stop again.
5170 We just haven't seen the notification yet. */
5171 if (!t->stop_requested)
5172 {
1eb8556f
SM
5173 infrun_debug_printf (" %s executing, need stop",
5174 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
5175 target_stop (t->ptid);
5176 t->stop_requested = 1;
5177 }
5178 else
5179 {
1eb8556f
SM
5180 infrun_debug_printf (" %s executing, already stopping",
5181 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
5182 }
5183
5184 if (t->stop_requested)
29d6859f 5185 waits_needed++;
372316f1
PA
5186 }
5187 else
5188 {
1eb8556f
SM
5189 infrun_debug_printf (" %s not executing",
5190 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
5191
5192 /* The thread may be not executing, but still be
5193 resumed with a pending status to process. */
719546c4 5194 t->resumed = false;
372316f1
PA
5195 }
5196 }
5197
29d6859f 5198 if (waits_needed == 0)
372316f1
PA
5199 break;
5200
5201 /* If we find new threads on the second iteration, restart
5202 over. We want to see two iterations in a row with all
5203 threads stopped. */
5204 if (pass > 0)
5205 pass = -1;
5206
29d6859f 5207 for (int i = 0; i < waits_needed; i++)
c29705b7 5208 {
29d6859f 5209 wait_one_event event = wait_one ();
d758e62c
PA
5210 if (handle_one (event))
5211 break;
372316f1
PA
5212 }
5213 }
5214 }
372316f1
PA
5215}
5216
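/* Illustrative sketch, not part of infrun.c: the "two quiet passes" loop
   of stop_all_threads above, over a hypothetical thread table.  New
   threads may appear while we are stopping the ones we already know
   about, so we keep refreshing and requesting stops until two passes in
   a row find nothing left to stop.  The callables stand in for
   update_thread_list, target_stop and wait_one/handle_one.  */

#include <vector>

struct toy_thread
{
  bool executing;
  bool stop_requested;
};

template<typename Refresh, typename RequestStop, typename WaitOneStop>
void
stop_all (std::vector<toy_thread> &table, Refresh refresh,
          RequestStop request_stop, WaitOneStop wait_one_stop)
{
  for (int pass = 0; pass < 2; pass++)
    {
      while (true)
        {
          refresh (table);          /* May grow the table.  */

          int waits_needed = 0;
          for (toy_thread &t : table)
            if (t.executing)
              {
                if (!t.stop_requested)
                  {
                    request_stop (t);
                    t.stop_requested = true;
                  }
                waits_needed++;
              }

          if (waits_needed == 0)
            break;                  /* This pass found nothing to stop.  */

          if (pass > 0)
            pass = -1;              /* Work found on the second pass: start over.  */

          for (int i = 0; i < waits_needed; i++)
            wait_one_stop (table);  /* Consume one stop notification.  */
        }
    }
}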
f4836ba9
PA
5217/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
5218
c4464ade 5219static bool
f4836ba9
PA
5220handle_no_resumed (struct execution_control_state *ecs)
5221{
3b12939d 5222 if (target_can_async_p ())
f4836ba9 5223 {
c4464ade 5224 bool any_sync = false;
f4836ba9 5225
2dab0c7b 5226 for (ui *ui : all_uis ())
3b12939d
PA
5227 {
5228 if (ui->prompt_state == PROMPT_BLOCKED)
5229 {
c4464ade 5230 any_sync = true;
3b12939d
PA
5231 break;
5232 }
5233 }
5234 if (!any_sync)
5235 {
5236 /* There were no unwaited-for children left in the target, but
5237 we're not synchronously waiting for events either. Just
5238 ignore. */
5239
1eb8556f 5240 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
3b12939d 5241 prepare_to_wait (ecs);
c4464ade 5242 return true;
3b12939d 5243 }
f4836ba9
PA
5244 }
5245
5246 /* Otherwise, if we were running a synchronous execution command, we
5247 may need to cancel it and give the user back the terminal.
5248
5249 In non-stop mode, the target can't tell whether we've already
5250 consumed previous stop events, so it can end up sending us a
5251 no-resumed event like so:
5252
5253 #0 - thread 1 is left stopped
5254
5255 #1 - thread 2 is resumed and hits breakpoint
dda83cd7 5256 -> TARGET_WAITKIND_STOPPED
f4836ba9
PA
5257
5258 #2 - thread 3 is resumed and exits
dda83cd7 5259 this is the last resumed thread, so
f4836ba9
PA
5260 -> TARGET_WAITKIND_NO_RESUMED
5261
5262 #3 - gdb processes stop for thread 2 and decides to re-resume
dda83cd7 5263 it.
f4836ba9
PA
5264
5265 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
dda83cd7 5266 thread 2 is now resumed, so the event should be ignored.
f4836ba9
PA
5267
5268 IOW, if the stop for thread 2 doesn't end a foreground command,
5269 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5270 event. But it could be that the event meant that thread 2 itself
5271 (or whatever other thread was the last resumed thread) exited.
5272
5273 To address this we refresh the thread list and check whether we
5274 have resumed threads _now_. In the example above, this removes
5275 thread 3 from the thread list. If thread 2 was re-resumed, we
5276 ignore this event. If we find no thread resumed, then we cancel
7d3badc6
PA
5277 the synchronous command and show "no unwaited-for " to the
5278 user. */
f4836ba9 5279
d6cc5d98 5280 inferior *curr_inf = current_inferior ();
7d3badc6 5281
d6cc5d98
PA
5282 scoped_restore_current_thread restore_thread;
5283
5284 for (auto *target : all_non_exited_process_targets ())
5285 {
5286 switch_to_target_no_thread (target);
5287 update_thread_list ();
5288 }
5289
5290 /* If:
5291
5292 - the current target has no thread executing, and
5293 - the current inferior is native, and
5294 - the current inferior is the one which has the terminal, and
5295 - we did nothing,
5296
5297 then a Ctrl-C from this point on would remain stuck in the
5298 kernel, until a thread resumes and dequeues it. That would
5299 result in the GDB CLI not reacting to Ctrl-C and being unable to
5300 interrupt the program. To address this, if the current inferior
5301 no longer has any thread executing, we give the terminal to some
5302 other inferior that has at least one thread executing. */
5303 bool swap_terminal = true;
5304
5305 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5306 whether to report it to the user. */
5307 bool ignore_event = false;
7d3badc6
PA
5308
5309 for (thread_info *thread : all_non_exited_threads ())
f4836ba9 5310 {
d6cc5d98
PA
5311 if (swap_terminal && thread->executing)
5312 {
5313 if (thread->inf != curr_inf)
5314 {
5315 target_terminal::ours ();
5316
5317 switch_to_thread (thread);
5318 target_terminal::inferior ();
5319 }
5320 swap_terminal = false;
5321 }
5322
5323 if (!ignore_event
5324 && (thread->executing
5325 || thread->suspend.waitstatus_pending_p))
f4836ba9 5326 {
7d3badc6
PA
5327 /* Either there were no unwaited-for children left in the
5328 target at some point, but there are now, or some target
5329 other than the eventing one has unwaited-for children
5330 left. Just ignore. */
1eb8556f
SM
5331 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5332 "(ignoring: found resumed)");
d6cc5d98
PA
5333
5334 ignore_event = true;
f4836ba9 5335 }
d6cc5d98
PA
5336
5337 if (ignore_event && !swap_terminal)
5338 break;
5339 }
5340
5341 if (ignore_event)
5342 {
5343 switch_to_inferior_no_thread (curr_inf);
5344 prepare_to_wait (ecs);
c4464ade 5345 return true;
f4836ba9
PA
5346 }
5347
5348 /* Go ahead and report the event. */
c4464ade 5349 return false;
f4836ba9
PA
5350}
5351
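/* Illustrative sketch, not part of infrun.c: the re-check done by
   handle_no_resumed above.  A TARGET_WAITKIND_NO_RESUMED event is
   ignored as stale if, after refreshing the thread list, some thread
   turns out to be executing again or still carries an unprocessed
   event.  The struct is a hypothetical stand-in for GDB's per-thread
   state.  */

#include <vector>

struct toy_thread_state
{
  bool executing;
  bool waitstatus_pending;
};

static bool
should_ignore_no_resumed (const std::vector<toy_thread_state> &threads)
{
  for (const toy_thread_state &t : threads)
    if (t.executing || t.waitstatus_pending)
      return true;      /* Something is resumed after all; stale event.  */
  return false;         /* Really nothing left; report it to the user.  */
}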
05ba8510
PA
5352/* Given an execution control state that has been freshly filled in by
5353 an event from the inferior, figure out what it means and take
5354 appropriate action.
5355
5356 The alternatives are:
5357
22bcd14b 5358 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5359 debugger.
5360
5361 2) keep_going and return; to wait for the next event (set
5362 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5363 once). */
c906108c 5364
ec9499be 5365static void
595915c1 5366handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5367{
595915c1
TT
5368 /* Make sure that all temporary struct value objects that were
5369 created during the handling of the event get deleted at the
5370 end. */
5371 scoped_value_mark free_values;
5372
1eb8556f 5373 infrun_debug_printf ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
c29705b7 5374
28736962
PA
5375 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5376 {
5377 /* We had an event in the inferior, but we are not interested in
5378 handling it at this level. The lower layers have already
5379 done what needs to be done, if anything.
5380
5381 One of the possible circumstances for this is when the
5382 inferior produces output for the console. The inferior has
5383 not stopped, and we are ignoring the event. Another possible
5384 circumstance is any event which the lower level knows will be
5385 reported multiple times without an intervening resume. */
28736962
PA
5386 prepare_to_wait (ecs);
5387 return;
5388 }
5389
65706a29
PA
5390 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5391 {
65706a29
PA
5392 prepare_to_wait (ecs);
5393 return;
5394 }
5395
0e5bf2a8 5396 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5397 && handle_no_resumed (ecs))
5398 return;
0e5bf2a8 5399
5b6d1e4f
PA
5400 /* Cache the last target/ptid/waitstatus. */
5401 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5402
ca005067 5403 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5404 stop_stack_dummy = STOP_NONE;
ca005067 5405
0e5bf2a8
PA
5406 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5407 {
5408 /* No unwaited-for children left. IOW, all resumed children
5409 have exited. */
c4464ade 5410 stop_print_frame = false;
22bcd14b 5411 stop_waiting (ecs);
0e5bf2a8
PA
5412 return;
5413 }
5414
8c90c137 5415 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5416 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5417 {
5b6d1e4f 5418 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5419 /* If it's a new thread, add it to the thread database. */
5420 if (ecs->event_thread == NULL)
5b6d1e4f 5421 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5422
5423 /* Disable range stepping. If the next step request could use a
5424 range, this will be end up re-enabled then. */
5425 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5426 }
88ed393a
JK
5427
5428 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5429 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5430
5431 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5432 reinit_frame_cache ();
5433
28736962
PA
5434 breakpoint_retire_moribund ();
5435
2b009048
DJ
5436 /* First, distinguish signals caused by the debugger from signals
5437 that have to do with the program's own actions. Note that
5438 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5439 on the operating system version. Here we detect when a SIGILL or
5440 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5441 something similar for SIGSEGV, since a SIGSEGV will be generated
5442 when we're trying to execute a breakpoint instruction on a
5443 non-executable stack. This happens for call dummy breakpoints
5444 for architectures like SPARC that place call dummies on the
5445 stack. */
2b009048 5446 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
5447 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5448 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5449 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5450 {
00431a78 5451 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5452
a01bda52 5453 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5454 regcache_read_pc (regcache)))
5455 {
1eb8556f 5456 infrun_debug_printf ("Treating signal as SIGTRAP");
a493e3e2 5457 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5458 }
2b009048
DJ
5459 }
5460
293b3ebc 5461 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 5462
488f131b
JB
5463 switch (ecs->ws.kind)
5464 {
5465 case TARGET_WAITKIND_LOADED:
72d383bb
SM
5466 {
5467 context_switch (ecs);
5468 /* Ignore gracefully during startup of the inferior, as it might
5469 be the shell which has just loaded some objects, otherwise
5470 add the symbols for the newly loaded objects. Also ignore at
5471 the beginning of an attach or remote session; we will query
5472 the full list of libraries once the connection is
5473 established. */
5474
5475 stop_kind stop_soon = get_inferior_stop_soon (ecs);
5476 if (stop_soon == NO_STOP_QUIETLY)
5477 {
5478 struct regcache *regcache;
edcc5120 5479
72d383bb 5480 regcache = get_thread_regcache (ecs->event_thread);
edcc5120 5481
72d383bb 5482 handle_solib_event ();
ab04a2af 5483
72d383bb
SM
5484 ecs->event_thread->control.stop_bpstat
5485 = bpstat_stop_status (regcache->aspace (),
5486 ecs->event_thread->suspend.stop_pc,
5487 ecs->event_thread, &ecs->ws);
c65d6b55 5488
72d383bb 5489 if (handle_stop_requested (ecs))
94c57d6a 5490 return;
488f131b 5491
72d383bb
SM
5492 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5493 {
5494 /* A catchpoint triggered. */
5495 process_event_stop_test (ecs);
5496 return;
5497 }
55409f9d 5498
72d383bb
SM
5499 /* If requested, stop when the dynamic linker notifies
5500 gdb of events. This allows the user to get control
5501 and place breakpoints in initializer routines for
5502 dynamically loaded objects (among other things). */
5503 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5504 if (stop_on_solib_events)
5505 {
5506 /* Make sure we print "Stopped due to solib-event" in
5507 normal_stop. */
5508 stop_print_frame = true;
b0f4b84b 5509
72d383bb
SM
5510 stop_waiting (ecs);
5511 return;
5512 }
5513 }
b0f4b84b 5514
72d383bb
SM
5515 /* If we are skipping through a shell, or through shared library
5516 loading that we aren't interested in, resume the program. If
5517 we're running the program normally, also resume. */
5518 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5519 {
5520 /* Loading of shared libraries might have changed breakpoint
5521 addresses. Make sure new breakpoints are inserted. */
5522 if (stop_soon == NO_STOP_QUIETLY)
5523 insert_breakpoints ();
5524 resume (GDB_SIGNAL_0);
5525 prepare_to_wait (ecs);
5526 return;
5527 }
5c09a2c5 5528
72d383bb
SM
5529 /* But stop if we're attaching or setting up a remote
5530 connection. */
5531 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5532 || stop_soon == STOP_QUIETLY_REMOTE)
5533 {
5534 infrun_debug_printf ("quietly stopped");
5535 stop_waiting (ecs);
5536 return;
5537 }
5538
5539 internal_error (__FILE__, __LINE__,
5540 _("unhandled stop_soon: %d"), (int) stop_soon);
5541 }
c5aa993b 5542
488f131b 5543 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5544 if (handle_stop_requested (ecs))
5545 return;
00431a78 5546 context_switch (ecs);
64ce06e4 5547 resume (GDB_SIGNAL_0);
488f131b
JB
5548 prepare_to_wait (ecs);
5549 return;
c5aa993b 5550
65706a29 5551 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5552 if (handle_stop_requested (ecs))
5553 return;
00431a78 5554 context_switch (ecs);
65706a29
PA
5555 if (!switch_back_to_stepped_thread (ecs))
5556 keep_going (ecs);
5557 return;
5558
488f131b 5559 case TARGET_WAITKIND_EXITED:
940c3c06 5560 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
5561 {
5562 /* Depending on the system, ecs->ptid may point to a thread or
5563 to a process. On some targets, target_mourn_inferior may
5564 need to have access to the just-exited thread. That is the
5565 case of GNU/Linux's "checkpoint" support, for example.
5566 Call the switch_to_xxx routine as appropriate. */
5567 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5568 if (thr != nullptr)
5569 switch_to_thread (thr);
5570 else
5571 {
5572 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5573 switch_to_inferior_no_thread (inf);
5574 }
5575 }
6c95b8df 5576 handle_vfork_child_exec_or_exit (0);
223ffa71 5577 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5578
0c557179
SDJ
5579 /* Clear any previous state of convenience variables. */
5580 clear_exit_convenience_vars ();
5581
940c3c06
PA
5582 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5583 {
5584 /* Record the exit code in the convenience variable $_exitcode, so
5585 that the user can inspect this again later. */
5586 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5587 (LONGEST) ecs->ws.value.integer);
5588
5589 /* Also record this in the inferior itself. */
5590 current_inferior ()->has_exit_code = 1;
5591 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5592
98eb56a4
PA
5593 /* Support the --return-child-result option. */
5594 return_child_result_value = ecs->ws.value.integer;
5595
76727919 5596 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5597 }
5598 else
0c557179 5599 {
00431a78 5600 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5601
5602 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5603 {
5604 /* Set the value of the internal variable $_exitsignal,
5605 which holds the signal uncaught by the inferior. */
5606 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5607 gdbarch_gdb_signal_to_target (gdbarch,
5608 ecs->ws.value.sig));
5609 }
5610 else
5611 {
5612 /* We don't have access to the target's method used for
5613 converting between signal numbers (GDB's internal
5614 representation <-> target's representation).
5615 Therefore, we cannot do a good job at displaying this
5616 information to the user. It's better to just warn
5617 her about it (if infrun debugging is enabled), and
5618 give up. */
1eb8556f
SM
5619 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5620 "signal number.");
0c557179
SDJ
5621 }
5622
76727919 5623 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5624 }
8cf64490 5625
488f131b 5626 gdb_flush (gdb_stdout);
bc1e6c81 5627 target_mourn_inferior (inferior_ptid);
c4464ade 5628 stop_print_frame = false;
22bcd14b 5629 stop_waiting (ecs);
488f131b 5630 return;
c5aa993b 5631
488f131b 5632 case TARGET_WAITKIND_FORKED:
deb3b17b 5633 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5634 /* Check whether the inferior is displaced stepping. */
5635 {
00431a78 5636 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5637 struct gdbarch *gdbarch = regcache->arch ();
c0aba012 5638 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639 5639
aeeb758d
JB
5640 /* If this is a fork (child gets its own address space copy)
5641 and some displaced step buffers were in use at the time of
5642 the fork, restore the displaced step buffer bytes in the
5643 child process.
5644
5645 Architectures which support displaced stepping and fork
5646 events must supply an implementation of
5647 gdbarch_displaced_step_restore_all_in_ptid. This is not
5648 enforced during gdbarch validation to support architectures
5649 which support displaced stepping but not forks. */
5650 if (ecs->ws.kind == TARGET_WAITKIND_FORKED
5651 && gdbarch_supports_displaced_stepping (gdbarch))
187b041e
SM
5652 gdbarch_displaced_step_restore_all_in_ptid
5653 (gdbarch, parent_inf, ecs->ws.value.related_pid);
c0aba012
SM
5654
5655 /* If displaced stepping is supported, and thread ecs->ptid is
5656 displaced stepping. */
00431a78 5657 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639 5658 {
e2d96639
YQ
5659 struct regcache *child_regcache;
5660 CORE_ADDR parent_pc;
5661
5662 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5663 indicating that the displaced stepping of syscall instruction
5664 has been done. Perform cleanup for parent process here. Note
5665 that this operation also cleans up the child process for vfork,
5666 because their pages are shared. */
7def77a1 5667 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5668 /* Start a new step-over in another thread if there's one
5669 that needs it. */
5670 start_step_over ();
e2d96639 5671
e2d96639
YQ
5672 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5673 the child's PC is also within the scratchpad. Set the child's PC
5674 to the parent's PC value, which has already been fixed up.
5675 FIXME: we use the parent's aspace here, although we're touching
5676 the child, because the child hasn't been added to the inferior
5677 list yet at this point. */
5678
5679 child_regcache
5b6d1e4f
PA
5680 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5681 ecs->ws.value.related_pid,
e2d96639
YQ
5682 gdbarch,
5683 parent_inf->aspace);
5684 /* Read PC value of parent process. */
5685 parent_pc = regcache_read_pc (regcache);
5686
136821d9
SM
5687 displaced_debug_printf ("write child pc from %s to %s",
5688 paddress (gdbarch,
5689 regcache_read_pc (child_regcache)),
5690 paddress (gdbarch, parent_pc));
e2d96639
YQ
5691
5692 regcache_write_pc (child_regcache, parent_pc);
5693 }
5694 }
5695
00431a78 5696 context_switch (ecs);
5a2901d9 5697
b242c3c2
PA
5698 /* Immediately detach breakpoints from the child before there's
5699 any chance of letting the user delete breakpoints from the
5700 breakpoint lists. If we don't do this early, it's easy to
5701 leave left over traps in the child, vis: "break foo; catch
5702 fork; c; <fork>; del; c; <child calls foo>". We only follow
5703 the fork on the last `continue', and by that time the
5704 breakpoint at "foo" is long gone from the breakpoint table.
5705 If we vforked, then we don't need to unpatch here, since both
5706 parent and child are sharing the same memory pages; we'll
5707 need to unpatch at follow/detach time instead to be certain
5708 that new breakpoints added between catchpoint hit time and
5709 vfork follow are detached. */
5710 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5711 {
b242c3c2
PA
5712 /* This won't actually modify the breakpoint list, but will
5713 physically remove the breakpoints from the child. */
d80ee84f 5714 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5715 }
5716
34b7e8a6 5717 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5718
e58b0e63
PA
5719 /* In case the event is caught by a catchpoint, remember that
5720 the event is to be followed at the next resume of the thread,
5721 and not immediately. */
5722 ecs->event_thread->pending_follow = ecs->ws;
5723
f2ffa92b
PA
5724 ecs->event_thread->suspend.stop_pc
5725 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5726
16c381f0 5727 ecs->event_thread->control.stop_bpstat
a01bda52 5728 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5729 ecs->event_thread->suspend.stop_pc,
5730 ecs->event_thread, &ecs->ws);
675bf4cb 5731
c65d6b55
PA
5732 if (handle_stop_requested (ecs))
5733 return;
5734
ce12b012
PA
5735 /* If no catchpoint triggered for this, then keep going. Note
5736 that we're interested in knowing the bpstat actually causes a
5737 stop, not just if it may explain the signal. Software
5738 watchpoints, for example, always appear in the bpstat. */
5739 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5740 {
5ab2fbf1 5741 bool follow_child
3e43a32a 5742 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5743
a493e3e2 5744 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5745
5b6d1e4f
PA
5746 process_stratum_target *targ
5747 = ecs->event_thread->inf->process_target ();
5748
5ab2fbf1 5749 bool should_resume = follow_fork ();
e58b0e63 5750
5b6d1e4f
PA
5751 /* Note that one of these may be an invalid pointer,
5752 depending on detach_fork. */
00431a78 5753 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5754 thread_info *child
5755 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5756
a2077e25
PA
5757 /* At this point, the parent is marked running, and the
5758 child is marked stopped. */
5759
5760 /* If not resuming the parent, mark it stopped. */
5761 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5762 parent->set_running (false);
a2077e25
PA
5763
5764 /* If resuming the child, mark it running. */
5765 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5766 child->set_running (true);
a2077e25 5767
6c95b8df 5768 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5769 if (!detach_fork && (non_stop
5770 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5771 {
5772 if (follow_child)
5773 switch_to_thread (parent);
5774 else
5775 switch_to_thread (child);
5776
5777 ecs->event_thread = inferior_thread ();
5778 ecs->ptid = inferior_ptid;
5779 keep_going (ecs);
5780 }
5781
5782 if (follow_child)
5783 switch_to_thread (child);
5784 else
5785 switch_to_thread (parent);
5786
e58b0e63
PA
5787 ecs->event_thread = inferior_thread ();
5788 ecs->ptid = inferior_ptid;
5789
5790 if (should_resume)
5791 keep_going (ecs);
5792 else
22bcd14b 5793 stop_waiting (ecs);
04e68871
DJ
5794 return;
5795 }
94c57d6a
PA
5796 process_event_stop_test (ecs);
5797 return;
488f131b 5798
6c95b8df
PA
5799 case TARGET_WAITKIND_VFORK_DONE:
5800 /* Done with the shared memory region. Re-insert breakpoints in
5801 the parent, and keep going. */
5802
00431a78 5803 context_switch (ecs);
6c95b8df 5804
81d92403
SM
5805 handle_vfork_done (ecs->event_thread);
5806 gdb_assert (inferior_thread () == ecs->event_thread);
c65d6b55
PA
5807
5808 if (handle_stop_requested (ecs))
5809 return;
5810
6c95b8df
PA
5811 /* This also takes care of reinserting breakpoints in the
5812 previously locked inferior. */
5813 keep_going (ecs);
5814 return;
5815
488f131b 5816 case TARGET_WAITKIND_EXECD:
488f131b 5817
cbd2b4e3
PA
5818 /* Note we can't read registers yet (the stop_pc), because we
5819 don't yet know the inferior's post-exec architecture.
5820 'stop_pc' is explicitly read below instead. */
00431a78 5821 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5822
6c95b8df
PA
5823 /* Do whatever is necessary to the parent branch of the vfork. */
5824 handle_vfork_child_exec_or_exit (1);
5825
795e548f 5826 /* This causes the eventpoints and symbol table to be reset.
dda83cd7
SM
5827 Must do this now, before trying to determine whether to
5828 stop. */
71b43ef8 5829 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5830
17d8546e
DB
5831 /* In follow_exec we may have deleted the original thread and
5832 created a new one. Make sure that the event thread is the
5833 execd thread for that case (this is a nop otherwise). */
5834 ecs->event_thread = inferior_thread ();
5835
f2ffa92b
PA
5836 ecs->event_thread->suspend.stop_pc
5837 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5838
16c381f0 5839 ecs->event_thread->control.stop_bpstat
a01bda52 5840 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5841 ecs->event_thread->suspend.stop_pc,
5842 ecs->event_thread, &ecs->ws);
795e548f 5843
71b43ef8
PA
5844 /* Note that this may be referenced from inside
5845 bpstat_stop_status above, through inferior_has_execd. */
5846 xfree (ecs->ws.value.execd_pathname);
5847 ecs->ws.value.execd_pathname = NULL;
5848
c65d6b55
PA
5849 if (handle_stop_requested (ecs))
5850 return;
5851
04e68871 5852 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5853 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5854 {
a493e3e2 5855 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5856 keep_going (ecs);
5857 return;
5858 }
94c57d6a
PA
5859 process_event_stop_test (ecs);
5860 return;
488f131b 5861
b4dc5ffa 5862 /* Be careful not to try to gather much state about a thread
dda83cd7 5863 that's in a syscall. It's frequently a losing proposition. */
488f131b 5864 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5865 /* Getting the current syscall number. */
94c57d6a
PA
5866 if (handle_syscall_event (ecs) == 0)
5867 process_event_stop_test (ecs);
5868 return;
c906108c 5869
488f131b 5870 /* Before examining the threads further, step this thread to
dda83cd7
SM
5871 get it entirely out of the syscall. (We get notice of the
5872 event when the thread is just on the verge of exiting a
5873 syscall. Stepping one instruction seems to get it back
5874 into user code.) */
488f131b 5875 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5876 if (handle_syscall_event (ecs) == 0)
5877 process_event_stop_test (ecs);
5878 return;
c906108c 5879
488f131b 5880 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5881 handle_signal_stop (ecs);
5882 return;
c906108c 5883
b2175913
MS
5884 case TARGET_WAITKIND_NO_HISTORY:
5885 /* Reverse execution: target ran out of history info. */
eab402df 5886
d1988021 5887 /* Switch to the stopped thread. */
00431a78 5888 context_switch (ecs);
1eb8556f 5889 infrun_debug_printf ("stopped");
d1988021 5890
34b7e8a6 5891 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5892 ecs->event_thread->suspend.stop_pc
5893 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5894
5895 if (handle_stop_requested (ecs))
5896 return;
5897
76727919 5898 gdb::observers::no_history.notify ();
22bcd14b 5899 stop_waiting (ecs);
b2175913 5900 return;
488f131b 5901 }
4f5d7f63
PA
5902}
5903
372316f1 5904/* Restart threads back to what they were trying to do back when we
4ffff7d3
SM
5905 paused them (because of an in-line step-over or vfork, for example).
5906 The EVENT_THREAD thread is ignored (not restarted).
5907
5908 If INF is non-nullptr, only resume threads from INF. */
4d9d9d04
PA
5909
5910static void
4ffff7d3 5911restart_threads (struct thread_info *event_thread, inferior *inf)
372316f1 5912{
4ffff7d3
SM
5913 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
5914 event_thread->ptid.to_string ().c_str (),
5915 inf != nullptr ? inf->num : -1);
5916
372316f1
PA
5917 /* In case the instruction just stepped spawned a new thread. */
5918 update_thread_list ();
5919
08036331 5920 for (thread_info *tp : all_non_exited_threads ())
372316f1 5921 {
4ffff7d3
SM
5922 if (inf != nullptr && tp->inf != inf)
5923 continue;
5924
ac7d717c
PA
5925 if (tp->inf->detaching)
5926 {
5927 infrun_debug_printf ("restart threads: [%s] inferior detaching",
5928 target_pid_to_str (tp->ptid).c_str ());
5929 continue;
5930 }
5931
f3f8ece4
PA
5932 switch_to_thread_no_regs (tp);
5933
372316f1
PA
5934 if (tp == event_thread)
5935 {
1eb8556f
SM
5936 infrun_debug_printf ("restart threads: [%s] is event thread",
5937 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5938 continue;
5939 }
5940
5941 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5942 {
1eb8556f
SM
5943 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5944 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5945 continue;
5946 }
5947
5948 if (tp->resumed)
5949 {
1eb8556f
SM
5950 infrun_debug_printf ("restart threads: [%s] resumed",
5951 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5952 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5953 continue;
5954 }
5955
5956 if (thread_is_in_step_over_chain (tp))
5957 {
1eb8556f
SM
5958 infrun_debug_printf ("restart threads: [%s] needs step-over",
5959 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5960 gdb_assert (!tp->resumed);
5961 continue;
5962 }
5963
5964
5965 if (tp->suspend.waitstatus_pending_p)
5966 {
1eb8556f
SM
5967 infrun_debug_printf ("restart threads: [%s] has pending status",
5968 target_pid_to_str (tp->ptid).c_str ());
719546c4 5969 tp->resumed = true;
372316f1
PA
5970 continue;
5971 }
5972
c65d6b55
PA
5973 gdb_assert (!tp->stop_requested);
5974
372316f1
PA
5975 /* If some thread needs to start a step-over at this point, it
5976 should still be in the step-over queue, and thus skipped
5977 above. */
5978 if (thread_still_needs_step_over (tp))
5979 {
5980 internal_error (__FILE__, __LINE__,
5981 "thread [%s] needs a step-over, but not in "
5982 "step-over queue\n",
a068643d 5983 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5984 }
5985
5986 if (currently_stepping (tp))
5987 {
1eb8556f
SM
5988 infrun_debug_printf ("restart threads: [%s] was stepping",
5989 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5990 keep_going_stepped_thread (tp);
5991 }
5992 else
5993 {
5994 struct execution_control_state ecss;
5995 struct execution_control_state *ecs = &ecss;
5996
1eb8556f
SM
5997 infrun_debug_printf ("restart threads: [%s] continuing",
5998 target_pid_to_str (tp->ptid).c_str ());
372316f1 5999 reset_ecs (ecs, tp);
00431a78 6000 switch_to_thread (tp);
372316f1
PA
6001 keep_going_pass_signal (ecs);
6002 }
6003 }
6004}
6005
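/* Illustrative sketch, not part of infrun.c: the per-thread filter
   restart_threads above applies before resuming anything.  The fields
   are hypothetical stand-ins for the checks made in the loop; setting
   "resumed" stands in for keep_going.  */

#include <vector>

struct toy_restart_thread
{
  bool is_event_thread;
  bool inferior_detaching;
  bool meant_to_be_running;    /* THREAD_RUNNING or in an infcall.  */
  bool resumed;
  bool needs_step_over;
  bool waitstatus_pending;
};

static void
restart_all_but_event_thread (std::vector<toy_restart_thread> &threads)
{
  for (toy_restart_thread &t : threads)
    {
      if (t.is_event_thread || t.inferior_detaching
          || !t.meant_to_be_running || t.resumed || t.needs_step_over)
        continue;                /* Leave this thread exactly as it is.  */

      if (t.waitstatus_pending)
        {
          /* Don't resume it for real; mark it so the stored event is
             picked up later.  */
          t.resumed = true;
          continue;
        }

      t.resumed = true;          /* Stand-in for keep_going / stepping.  */
    }
}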
6006/* Callback for iterate_over_threads. Find a resumed thread that has
6007 a pending waitstatus. */
6008
6009static int
6010resumed_thread_with_pending_status (struct thread_info *tp,
6011 void *arg)
6012{
6013 return (tp->resumed
6014 && tp->suspend.waitstatus_pending_p);
6015}
6016
6017/* Called when we get an event that may finish an in-line or
6018 out-of-line (displaced stepping) step-over started previously.
6019 Return true if the event is processed and we should go back to the
6020 event loop; false if the caller should continue processing the
6021 event. */
6022
6023static int
4d9d9d04
PA
6024finish_step_over (struct execution_control_state *ecs)
6025{
7def77a1
SM
6026 displaced_step_finish (ecs->event_thread,
6027 ecs->event_thread->suspend.stop_signal);
4d9d9d04 6028
c4464ade 6029 bool had_step_over_info = step_over_info_valid_p ();
372316f1
PA
6030
6031 if (had_step_over_info)
4d9d9d04
PA
6032 {
6033 /* If we're stepping over a breakpoint with all threads locked,
6034 then only the thread that was stepped should be reporting
6035 back an event. */
6036 gdb_assert (ecs->event_thread->control.trap_expected);
6037
c65d6b55 6038 clear_step_over_info ();
4d9d9d04
PA
6039 }
6040
fbea99ea 6041 if (!target_is_non_stop_p ())
372316f1 6042 return 0;
4d9d9d04
PA
6043
6044 /* Start a new step-over in another thread if there's one that
6045 needs it. */
6046 start_step_over ();
372316f1
PA
6047
6048 /* If we were stepping over a breakpoint before, and haven't started
6049 a new in-line step-over sequence, then restart all other threads
6050 (except the event thread). We can't do this in all-stop, as then
6051 e.g., we wouldn't be able to issue any other remote packet until
6052 these other threads stop. */
6053 if (had_step_over_info && !step_over_info_valid_p ())
6054 {
6055 struct thread_info *pending;
6056
6057 /* If we only have threads with pending statuses, the restart
6058 below won't restart any thread and so nothing re-inserts the
6059 breakpoint we just stepped over. But we need it inserted
6060 when we later process the pending events, otherwise if
6061 another thread has a pending event for this breakpoint too,
6062 we'd discard its event (because the breakpoint that
6063 originally caused the event was no longer inserted). */
00431a78 6064 context_switch (ecs);
372316f1
PA
6065 insert_breakpoints ();
6066
6067 restart_threads (ecs->event_thread);
6068
6069 /* If we have events pending, go through handle_inferior_event
6070 again, picking up a pending event at random. This avoids
6071 thread starvation. */
6072
6073 /* But not if we just stepped over a watchpoint in order to let
6074 the instruction execute so we can evaluate its expression.
6075 The set of watchpoints that triggered is recorded in the
6076 breakpoint objects themselves (see bp->watchpoint_triggered).
6077 If we processed another event first, that other event could
6078 clobber this info. */
6079 if (ecs->event_thread->stepping_over_watchpoint)
6080 return 0;
6081
6082 pending = iterate_over_threads (resumed_thread_with_pending_status,
6083 NULL);
6084 if (pending != NULL)
6085 {
6086 struct thread_info *tp = ecs->event_thread;
6087 struct regcache *regcache;
6088
1eb8556f
SM
6089 infrun_debug_printf ("found resumed threads with "
6090 "pending events, saving status");
372316f1
PA
6091
6092 gdb_assert (pending != tp);
6093
6094 /* Record the event thread's event for later. */
6095 save_waitstatus (tp, &ecs->ws);
6096 /* This was cleared early, by handle_inferior_event. Set it
6097 so this pending event is considered by
6098 do_target_wait. */
719546c4 6099 tp->resumed = true;
372316f1
PA
6100
6101 gdb_assert (!tp->executing);
6102
00431a78 6103 regcache = get_thread_regcache (tp);
372316f1
PA
6104 tp->suspend.stop_pc = regcache_read_pc (regcache);
6105
1eb8556f
SM
6106 infrun_debug_printf ("saved stop_pc=%s for %s "
6107 "(currently_stepping=%d)",
6108 paddress (target_gdbarch (),
dda83cd7 6109 tp->suspend.stop_pc),
1eb8556f
SM
6110 target_pid_to_str (tp->ptid).c_str (),
6111 currently_stepping (tp));
372316f1
PA
6112
6113 /* This in-line step-over finished; clear this so we won't
6114 start a new one. This is what handle_signal_stop would
6115 do, if we returned false. */
6116 tp->stepping_over_breakpoint = 0;
6117
6118 /* Wake up the event loop again. */
6119 mark_async_event_handler (infrun_async_inferior_event_token);
6120
6121 prepare_to_wait (ecs);
6122 return 1;
6123 }
6124 }
6125
6126 return 0;
4d9d9d04
PA
6127}
6128
4f5d7f63
PA
6129/* Come here when the program has stopped with a signal. */
6130
6131static void
6132handle_signal_stop (struct execution_control_state *ecs)
6133{
6134 struct frame_info *frame;
6135 struct gdbarch *gdbarch;
6136 int stopped_by_watchpoint;
6137 enum stop_kind stop_soon;
6138 int random_signal;
c906108c 6139
f0407826
DE
6140 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
6141
c65d6b55
PA
6142 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
6143
f0407826
DE
6144 /* Do we need to clean up the state of a thread that has
6145 completed a displaced single-step? (Doing so usually affects
6146 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
6147 if (finish_step_over (ecs))
6148 return;
f0407826
DE
6149
6150 /* If we either finished a single-step or hit a breakpoint, but
6151 the user wanted this thread to be stopped, pretend we got a
6152 SIG0 (generic unsignaled stop). */
6153 if (ecs->event_thread->stop_requested
6154 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
6155 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 6156
f2ffa92b
PA
6157 ecs->event_thread->suspend.stop_pc
6158 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 6159
2ab76a18
PA
6160 context_switch (ecs);
6161
6162 if (deprecated_context_hook)
6163 deprecated_context_hook (ecs->event_thread->global_num);
6164
527159b7 6165 if (debug_infrun)
237fc4c9 6166 {
00431a78 6167 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 6168 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 6169
1eb8556f
SM
6170 infrun_debug_printf ("stop_pc=%s",
6171 paddress (reg_gdbarch,
6172 ecs->event_thread->suspend.stop_pc));
d92524f1 6173 if (target_stopped_by_watchpoint ())
237fc4c9 6174 {
dda83cd7 6175 CORE_ADDR addr;
abbb1732 6176
1eb8556f 6177 infrun_debug_printf ("stopped by watchpoint");
237fc4c9 6178
328d42d8
SM
6179 if (target_stopped_data_address (current_inferior ()->top_target (),
6180 &addr))
1eb8556f 6181 infrun_debug_printf ("stopped data address=%s",
dda83cd7
SM
6182 paddress (reg_gdbarch, addr));
6183 else
1eb8556f 6184 infrun_debug_printf ("(no data address available)");
237fc4c9
PA
6185 }
6186 }
527159b7 6187
36fa8042
PA
6188 /* This originates from start_remote(), start_inferior() and
6189 shared library hook functions. */
00431a78 6190 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
6191 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6192 {
1eb8556f 6193 infrun_debug_printf ("quietly stopped");
c4464ade 6194 stop_print_frame = true;
22bcd14b 6195 stop_waiting (ecs);
36fa8042
PA
6196 return;
6197 }
6198
36fa8042
PA
6199 /* This originates from attach_command(). We need to overwrite
6200 the stop_signal here, because some kernels don't ignore a
6201 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6202 See more comments in inferior.h. On the other hand, if we
6203 get a non-SIGSTOP, report it to the user - assume the backend
6204 will handle the SIGSTOP if it should show up later.
6205
6206 Also consider that the attach is complete when we see a
6207 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6208 target extended-remote report it instead of a SIGSTOP
6209 (e.g. gdbserver). We already rely on SIGTRAP being our
6210 signal, so this is no exception.
6211
6212 Also consider that the attach is complete when we see a
6213 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6214 the target to stop all threads of the inferior, in case the
6215 low level attach operation doesn't stop them implicitly. If
6216 they weren't stopped implicitly, then the stub will report a
6217 GDB_SIGNAL_0, meaning: stopped for no particular reason
6218 other than GDB's request. */
6219 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6220 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
6221 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6222 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
6223 {
c4464ade 6224 stop_print_frame = true;
22bcd14b 6225 stop_waiting (ecs);
36fa8042
PA
6226 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6227 return;
6228 }
6229
568d6575
UW
6230 /* At this point, get hold of the now-current thread's frame. */
6231 frame = get_current_frame ();
6232 gdbarch = get_frame_arch (frame);
6233
2adfaa28 6234 /* Pull the single step breakpoints out of the target. */
af48d08f 6235 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 6236 {
af48d08f 6237 struct regcache *regcache;
af48d08f 6238 CORE_ADDR pc;
2adfaa28 6239
00431a78 6240 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
6241 const address_space *aspace = regcache->aspace ();
6242
af48d08f 6243 pc = regcache_read_pc (regcache);
34b7e8a6 6244
af48d08f
PA
6245 /* However, before doing so, if this single-step breakpoint was
6246 actually for another thread, set this thread up for moving
6247 past it. */
6248 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6249 aspace, pc))
6250 {
6251 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 6252 {
1eb8556f
SM
6253 infrun_debug_printf ("[%s] hit another thread's single-step "
6254 "breakpoint",
6255 target_pid_to_str (ecs->ptid).c_str ());
af48d08f
PA
6256 ecs->hit_singlestep_breakpoint = 1;
6257 }
6258 }
6259 else
6260 {
1eb8556f
SM
6261 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6262 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 6263 }
488f131b 6264 }
af48d08f 6265 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 6266
963f9c80
PA
6267 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6268 && ecs->event_thread->control.trap_expected
6269 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
6270 stopped_by_watchpoint = 0;
6271 else
6272 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
6273
6274 /* If necessary, step over this watchpoint. We'll be back to display
6275 it in a moment. */
6276 if (stopped_by_watchpoint
9aed480c 6277 && (target_have_steppable_watchpoint ()
568d6575 6278 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 6279 {
488f131b 6280 /* At this point, we are stopped at an instruction which has
dda83cd7
SM
6281 attempted to write to a piece of memory under control of
6282 a watchpoint. The instruction hasn't actually executed
6283 yet. If we were to evaluate the watchpoint expression
6284 now, we would get the old value, and therefore no change
6285 would seem to have occurred.
6286
6287 In order to make watchpoints work `right', we really need
6288 to complete the memory write, and then evaluate the
6289 watchpoint expression. We do this by single-stepping the
d983da9c
DJ
6290 target.
6291
7f89fd65 6292 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
6293 it. For example, the PA can (with some kernel cooperation)
6294 single step over a watchpoint without disabling the watchpoint.
6295
6296 It is far more common to need to disable a watchpoint to step
6297 the inferior over it. If we have non-steppable watchpoints,
6298 we must disable the current watchpoint; it's simplest to
963f9c80
PA
6299 disable all watchpoints.
6300
6301 Any breakpoint at PC must also be stepped over -- if there's
6302 one, it will have already triggered before the watchpoint
6303 triggered, and we either already reported it to the user, or
6304 it didn't cause a stop and we called keep_going. In either
6305 case, if there was a breakpoint at PC, we must be trying to
6306 step past it. */
6307 ecs->event_thread->stepping_over_watchpoint = 1;
6308 keep_going (ecs);
488f131b
JB
6309 return;
6310 }
6311
4e1c45ea 6312 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 6313 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
6314 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6315 ecs->event_thread->control.stop_step = 0;
c4464ade 6316 stop_print_frame = true;
488f131b 6317 stopped_by_random_signal = 0;
ddfe970e 6318 bpstat stop_chain = NULL;
488f131b 6319
edb3359d
DJ
6320 /* Hide inlined functions starting here, unless we just performed stepi or
6321 nexti. After stepi and nexti, always show the innermost frame (not any
6322 inline function call sites). */
16c381f0 6323 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 6324 {
00431a78
PA
6325 const address_space *aspace
6326 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
6327
6328 /* skip_inline_frames is expensive, so we avoid it if we can
6329 determine that the address is one where functions cannot have
6330 been inlined. This improves performance with inferiors that
6331 load a lot of shared libraries, because the solib event
6332 breakpoint is defined as the address of a function (i.e. not
6333 inline). Note that we have to check the previous PC as well
6334 as the current one to catch cases when we have just
6335 single-stepped off a breakpoint prior to reinstating it.
6336 Note that we're assuming that the code we single-step to is
6337 not inline, but that's not definitive: there's nothing
6338 preventing the event breakpoint function from containing
6339 inlined code, and the single-step ending up there. If the
6340 user had set a breakpoint on that inlined code, the missing
6341 skip_inline_frames call would break things. Fortunately
6342 that's an extremely unlikely scenario. */
f2ffa92b
PA
6343 if (!pc_at_non_inline_function (aspace,
6344 ecs->event_thread->suspend.stop_pc,
6345 &ecs->ws)
a210c238
MR
6346 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6347 && ecs->event_thread->control.trap_expected
6348 && pc_at_non_inline_function (aspace,
6349 ecs->event_thread->prev_pc,
09ac7c10 6350 &ecs->ws)))
1c5a993e 6351 {
f2ffa92b
PA
6352 stop_chain = build_bpstat_chain (aspace,
6353 ecs->event_thread->suspend.stop_pc,
6354 &ecs->ws);
00431a78 6355 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
6356
6357 /* Re-fetch current thread's frame in case that invalidated
6358 the frame cache. */
6359 frame = get_current_frame ();
6360 gdbarch = get_frame_arch (frame);
6361 }
0574c78f 6362 }
edb3359d 6363
a493e3e2 6364 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 6365 && ecs->event_thread->control.trap_expected
568d6575 6366 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6367 && currently_stepping (ecs->event_thread))
3352ef37 6368 {
b50d7442 6369 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6370 also on an instruction that needs to be stepped multiple
1777feb0 6371 times before it's been fully executed. E.g., architectures
3352ef37
AC
6372 with a delay slot. It needs to be stepped twice, once for
6373 the instruction and once for the delay slot. */
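      /* (MIPS is the classic example: its gdbarch implements
	 single_step_through_delay so that when the first step stops at a
	 branch's delay slot, GDB knows to step once more before the
	 branch has really completed.  Illustrative example only.)  */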
6374 int step_through_delay
568d6575 6375 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6376
1eb8556f
SM
6377 if (step_through_delay)
6378 infrun_debug_printf ("step through delay");
6379
16c381f0
JK
6380 if (ecs->event_thread->control.step_range_end == 0
6381 && step_through_delay)
3352ef37
AC
6382 {
6383 /* The user issued a continue when stopped at a breakpoint.
6384 Set up for another trap and get out of here. */
dda83cd7
SM
6385 ecs->event_thread->stepping_over_breakpoint = 1;
6386 keep_going (ecs);
6387 return;
3352ef37
AC
6388 }
6389 else if (step_through_delay)
6390 {
6391 /* The user issued a step when stopped at a breakpoint.
6392 Maybe we should stop, maybe we should not - the delay
6393 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6394 case, don't decide that here, just set
6395 ecs->stepping_over_breakpoint, making sure we
6396 single-step again before breakpoints are re-inserted. */
4e1c45ea 6397 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6398 }
6399 }
6400
ab04a2af
TT
6401 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6402 handles this event. */
6403 ecs->event_thread->control.stop_bpstat
a01bda52 6404 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
6405 ecs->event_thread->suspend.stop_pc,
6406 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6407
ab04a2af
TT
6408 /* The following is needed in case a breakpoint condition called a
6409 function. */
c4464ade 6410 stop_print_frame = true;
73dd234f 6411
ab04a2af
TT
6412 /* This is where we handle "moribund" watchpoints. Unlike
6413 software breakpoints traps, hardware watchpoint traps are
6414 always distinguishable from random traps. If no high-level
6415 watchpoint is associated with the reported stop data address
6416 anymore, then the bpstat does not explain the signal ---
6417 simply make sure to ignore it if `stopped_by_watchpoint' is
6418 set. */
6419
1eb8556f 6420 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6421 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6422 GDB_SIGNAL_TRAP)
ab04a2af 6423 && stopped_by_watchpoint)
1eb8556f
SM
6424 {
6425 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6426 "ignoring");
6427 }
73dd234f 6428
bac7d97b 6429 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6430 at one stage in the past included checks for an inferior
6431 function call's call dummy's return breakpoint. The original
6432 comment, that went with the test, read:
03cebad2 6433
ab04a2af
TT
6434 ``End of a stack dummy. Some systems (e.g. Sony news) give
6435 another signal besides SIGTRAP, so check here as well as
6436 above.''
73dd234f 6437
ab04a2af
TT
6438 If someone ever tries to get call dummys on a
6439 non-executable stack to work (where the target would stop
6440 with something like a SIGSEGV), then those tests might need
6441 to be re-instated. Given, however, that the tests were only
6442 enabled when momentary breakpoints were not being used, I
6443 suspect that it won't be the case.
488f131b 6444
ab04a2af
TT
6445 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6446 be necessary for call dummies on a non-executable stack on
6447 SPARC. */
488f131b 6448
bac7d97b 6449 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6450 random_signal
6451 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6452 ecs->event_thread->suspend.stop_signal);
bac7d97b 6453
1cf4d951
PA
6454 /* Maybe this was a trap for a software breakpoint that has since
6455 been removed. */
6456 if (random_signal && target_stopped_by_sw_breakpoint ())
6457 {
5133a315
LM
6458 if (gdbarch_program_breakpoint_here_p (gdbarch,
6459 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6460 {
6461 struct regcache *regcache;
6462 int decr_pc;
6463
6464 /* Re-adjust PC to what the program would see if GDB was not
6465 debugging it. */
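	  /* (For example, x86's breakpoint instruction is the one-byte
	     int3, 0xCC, and the trap leaves the PC just past it, so
	     decr_pc is 1 there; targets whose traps report the breakpoint
	     address itself use 0, making this adjustment a no-op.)  */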
00431a78 6466 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6467 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6468 if (decr_pc != 0)
6469 {
07036511
TT
6470 gdb::optional<scoped_restore_tmpl<int>>
6471 restore_operation_disable;
1cf4d951
PA
6472
6473 if (record_full_is_used ())
07036511
TT
6474 restore_operation_disable.emplace
6475 (record_full_gdb_operation_disable_set ());
1cf4d951 6476
f2ffa92b
PA
6477 regcache_write_pc (regcache,
6478 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6479 }
6480 }
6481 else
6482 {
6483 /* A delayed software breakpoint event. Ignore the trap. */
1eb8556f 6484 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
6485 random_signal = 0;
6486 }
6487 }
6488
6489 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6490 has since been removed. */
6491 if (random_signal && target_stopped_by_hw_breakpoint ())
6492 {
6493 /* A delayed hardware breakpoint event. Ignore the trap. */
1eb8556f
SM
6494 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6495 "trap, ignoring");
1cf4d951
PA
6496 random_signal = 0;
6497 }
6498
bac7d97b
PA
6499 /* If not, perhaps stepping/nexting can. */
6500 if (random_signal)
6501 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6502 && currently_stepping (ecs->event_thread));
ab04a2af 6503
2adfaa28
PA
6504 /* Perhaps the thread hit a single-step breakpoint of _another_
6505 thread. Single-step breakpoints are transparent to the
6506 breakpoints module. */
6507 if (random_signal)
6508 random_signal = !ecs->hit_singlestep_breakpoint;
6509
bac7d97b
PA
6510 /* No? Perhaps we got a moribund watchpoint. */
6511 if (random_signal)
6512 random_signal = !stopped_by_watchpoint;
ab04a2af 6513
c65d6b55
PA
6514 /* Always stop if the user explicitly requested this thread to
6515 remain stopped. */
6516 if (ecs->event_thread->stop_requested)
6517 {
6518 random_signal = 1;
1eb8556f 6519 infrun_debug_printf ("user-requested stop");
c65d6b55
PA
6520 }
6521
488f131b
JB
6522 /* For the program's own signals, act according to
6523 the signal handling tables. */
6524
ce12b012 6525 if (random_signal)
488f131b
JB
6526 {
6527 /* Signal not for debugging purposes. */
c9737c08 6528 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6529
1eb8556f
SM
6530 infrun_debug_printf ("random signal (%s)",
6531 gdb_signal_to_symbol_string (stop_signal));
527159b7 6532
488f131b
JB
6533 stopped_by_random_signal = 1;
6534
252fbfc8
PA
6535 /* Always stop on signals if we're either just gaining control
6536 of the program, or the user explicitly requested this thread
6537 to remain stopped. */
d6b48e9c 6538 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6539 || ecs->event_thread->stop_requested
8ff53139 6540 || signal_stop_state (ecs->event_thread->suspend.stop_signal))
488f131b 6541 {
22bcd14b 6542 stop_waiting (ecs);
488f131b
JB
6543 return;
6544 }
b57bacec
PA
6545
6546 /* Notify observers the signal has "handle print" set. Note we
6547 returned early above if stopping; normal_stop handles the
6548 printing in that case. */
6549 if (signal_print[ecs->event_thread->suspend.stop_signal])
6550 {
6551 /* The signal table tells us to print about this signal. */
223ffa71 6552 target_terminal::ours_for_output ();
76727919 6553 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6554 target_terminal::inferior ();
b57bacec 6555 }
488f131b
JB
6556
6557 /* Clear the signal if it should not be passed. */
16c381f0 6558 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6559 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6560
f2ffa92b 6561 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6562 && ecs->event_thread->control.trap_expected
8358c15c 6563 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6564 {
6565 /* We were just starting a new sequence, attempting to
6566 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6567 Instead this signal arrives. This signal will take us out
68f53502
AC
6568 of the stepping range so GDB needs to remember to, when
6569 the signal handler returns, resume stepping off that
6570 breakpoint. */
6571 /* To simplify things, "continue" is forced to use the same
6572 code paths as single-step - set a breakpoint at the
6573 signal return address and then, once hit, step off that
6574 breakpoint. */
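	  /* Concretely: if, say, a SIGALRM handler is dispatched just as
	     we single-step the instruction underneath a breakpoint, the
	     code below plants a high-priority step-resume breakpoint at
	     the point the handler will return to, so the interrupted
	     step-over can be finished once that breakpoint is hit.  */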
1eb8556f 6575 infrun_debug_printf ("signal arrived while stepping over breakpoint");
d3169d93 6576
2c03e5be 6577 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6578 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6579 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6580 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6581
6582 /* If we were nexting/stepping some other thread, switch to
6583 it, so that we don't continue it, losing control. */
6584 if (!switch_back_to_stepped_thread (ecs))
6585 keep_going (ecs);
9d799f85 6586 return;
68f53502 6587 }
9d799f85 6588
e5f8a7cc 6589 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6590 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6591 ecs->event_thread)
e5f8a7cc 6592 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6593 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6594 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6595 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6596 {
6597 /* The inferior is about to take a signal that will take it
6598 out of the single step range. Set a breakpoint at the
6599 current PC (which is presumably where the signal handler
6600 will eventually return) and then allow the inferior to
6601 run free.
6602
6603 Note that this is only needed for a signal delivered
6604 while in the single-step range. Nested signals aren't a
6605 problem as they eventually all return. */
1eb8556f 6606 infrun_debug_printf ("signal may take us out of single-step range");
237fc4c9 6607
372316f1 6608 clear_step_over_info ();
2c03e5be 6609 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6610 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6611 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6612 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6613 keep_going (ecs);
6614 return;
d303a6c7 6615 }
9d799f85 6616
85102364 6617 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6618 when either there's a nested signal, or when there's a
6619 pending signal enabled just as the signal handler returns
6620 (leaving the inferior at the step-resume-breakpoint without
6621 actually executing it). Either way continue until the
6622 breakpoint is really hit. */
c447ac0b
PA
6623
6624 if (!switch_back_to_stepped_thread (ecs))
6625 {
1eb8556f 6626 infrun_debug_printf ("random signal, keep going");
c447ac0b
PA
6627
6628 keep_going (ecs);
6629 }
6630 return;
488f131b 6631 }
94c57d6a
PA
6632
6633 process_event_stop_test (ecs);
6634}
6635
6636/* Come here when we've got some debug event / signal we can explain
6637 (IOW, not a random signal), and test whether it should cause a
6638 stop, or whether we should resume the inferior (transparently).
6639 E.g., could be a breakpoint whose condition evaluates false; we
6640 could be still stepping within the line; etc. */
6641
6642static void
6643process_event_stop_test (struct execution_control_state *ecs)
6644{
6645 struct symtab_and_line stop_pc_sal;
6646 struct frame_info *frame;
6647 struct gdbarch *gdbarch;
cdaa5b73
PA
6648 CORE_ADDR jmp_buf_pc;
6649 struct bpstat_what what;
94c57d6a 6650
cdaa5b73 6651 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6652
cdaa5b73
PA
6653 frame = get_current_frame ();
6654 gdbarch = get_frame_arch (frame);
fcf3daef 6655
cdaa5b73 6656 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6657
cdaa5b73
PA
6658 if (what.call_dummy)
6659 {
6660 stop_stack_dummy = what.call_dummy;
6661 }
186c406b 6662
243a9253
PA
6663 /* A few breakpoint types have callbacks associated (e.g.,
6664 bp_jit_event). Run them now. */
6665 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6666
cdaa5b73
PA
6667 /* If we hit an internal event that triggers symbol changes, the
6668 current frame will be invalidated within bpstat_what (e.g., if we
6669 hit an internal solib event). Re-fetch it. */
6670 frame = get_current_frame ();
6671 gdbarch = get_frame_arch (frame);
e2e4d78b 6672
cdaa5b73
PA
6673 switch (what.main_action)
6674 {
6675 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6676 /* If we hit the breakpoint at longjmp while stepping, we
6677 install a momentary breakpoint at the target of the
6678 jmp_buf. */
186c406b 6679
1eb8556f 6680 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 6681
cdaa5b73 6682 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6683
cdaa5b73
PA
6684 if (what.is_longjmp)
6685 {
6686 struct value *arg_value;
6687
6688 /* If we set the longjmp breakpoint via a SystemTap probe,
6689 then use it to extract the arguments. The destination PC
6690 is the third argument to the probe. */
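	  /* (glibc, for instance, exports such a probe under the name
	     "longjmp"; the index 2 passed below is zero-based, hence
	     "third argument" above.)  */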
6691 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6692 if (arg_value)
8fa0c4f8
AA
6693 {
6694 jmp_buf_pc = value_as_address (arg_value);
6695 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6696 }
cdaa5b73
PA
6697 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6698 || !gdbarch_get_longjmp_target (gdbarch,
6699 frame, &jmp_buf_pc))
e2e4d78b 6700 {
1eb8556f
SM
6701 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6702 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
6703 keep_going (ecs);
6704 return;
e2e4d78b 6705 }
e2e4d78b 6706
cdaa5b73
PA
6707 /* Insert a breakpoint at resume address. */
6708 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6709 }
6710 else
6711 check_exception_resume (ecs, frame);
6712 keep_going (ecs);
6713 return;
e81a37f7 6714
cdaa5b73
PA
6715 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6716 {
6717 struct frame_info *init_frame;
e81a37f7 6718
cdaa5b73 6719 /* There are several cases to consider.
c906108c 6720
cdaa5b73
PA
6721 1. The initiating frame no longer exists. In this case we
6722 must stop, because the exception or longjmp has gone too
6723 far.
2c03e5be 6724
cdaa5b73
PA
6725 2. The initiating frame exists, and is the same as the
6726 current frame. We stop, because the exception or longjmp
6727 has been caught.
2c03e5be 6728
cdaa5b73
PA
6729 3. The initiating frame exists and is different from the
6730 current frame. This means the exception or longjmp has
6731 been caught beneath the initiating frame, so keep going.
c906108c 6732
cdaa5b73
PA
6733 4. longjmp breakpoint has been placed just to protect
6734 against stale dummy frames and user is not interested in
6735 stopping around longjmps. */
c5aa993b 6736
1eb8556f 6737 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 6738
cdaa5b73
PA
6739 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6740 != NULL);
6741 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6742
cdaa5b73
PA
6743 if (what.is_longjmp)
6744 {
b67a2c6f 6745 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6746
cdaa5b73 6747 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6748 {
cdaa5b73
PA
6749 /* Case 4. */
6750 keep_going (ecs);
6751 return;
e5ef252a 6752 }
cdaa5b73 6753 }
c5aa993b 6754
cdaa5b73 6755 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6756
cdaa5b73
PA
6757 if (init_frame)
6758 {
6759 struct frame_id current_id
6760 = get_frame_id (get_current_frame ());
6761 if (frame_id_eq (current_id,
6762 ecs->event_thread->initiating_frame))
6763 {
6764 /* Case 2. Fall through. */
6765 }
6766 else
6767 {
6768 /* Case 3. */
6769 keep_going (ecs);
6770 return;
6771 }
68f53502 6772 }
488f131b 6773
cdaa5b73
PA
6774 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6775 exists. */
6776 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6777
bdc36728 6778 end_stepping_range (ecs);
cdaa5b73
PA
6779 }
6780 return;
e5ef252a 6781
cdaa5b73 6782 case BPSTAT_WHAT_SINGLE:
1eb8556f 6783 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
6784 ecs->event_thread->stepping_over_breakpoint = 1;
6785 /* Still need to check other stuff, at least the case where we
6786 are stepping and step out of the right range. */
6787 break;
e5ef252a 6788
cdaa5b73 6789 case BPSTAT_WHAT_STEP_RESUME:
1eb8556f 6790 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
e5ef252a 6791
cdaa5b73
PA
6792 delete_step_resume_breakpoint (ecs->event_thread);
6793 if (ecs->event_thread->control.proceed_to_finish
6794 && execution_direction == EXEC_REVERSE)
6795 {
6796 struct thread_info *tp = ecs->event_thread;
6797
6798 /* We are finishing a function in reverse, and just hit the
6799 step-resume breakpoint at the start address of the
6800 function, and we're almost there -- just need to back up
6801 by one more single-step, which should take us back to the
6802 function call. */
6803 tp->control.step_range_start = tp->control.step_range_end = 1;
6804 keep_going (ecs);
e5ef252a 6805 return;
cdaa5b73
PA
6806 }
6807 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6808 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6809 && execution_direction == EXEC_REVERSE)
6810 {
6811 /* We are stepping over a function call in reverse, and just
6812 hit the step-resume breakpoint at the start address of
6813 the function. Go back to single-stepping, which should
6814 take us back to the function call. */
6815 ecs->event_thread->stepping_over_breakpoint = 1;
6816 keep_going (ecs);
6817 return;
6818 }
6819 break;
e5ef252a 6820
cdaa5b73 6821 case BPSTAT_WHAT_STOP_NOISY:
1eb8556f 6822 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
c4464ade 6823 stop_print_frame = true;
e5ef252a 6824
33bf4c5c 6825 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6826 whether a/the breakpoint is there when the thread is next
6827 resumed. */
6828 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6829
22bcd14b 6830 stop_waiting (ecs);
cdaa5b73 6831 return;
e5ef252a 6832
cdaa5b73 6833 case BPSTAT_WHAT_STOP_SILENT:
1eb8556f 6834 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
c4464ade 6835 stop_print_frame = false;
e5ef252a 6836
33bf4c5c 6837 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6838 whether a/the breakpoint is there when the thread is next
6839 resumed. */
6840 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6841 stop_waiting (ecs);
cdaa5b73
PA
6842 return;
6843
6844 case BPSTAT_WHAT_HP_STEP_RESUME:
1eb8556f 6845 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
6846
6847 delete_step_resume_breakpoint (ecs->event_thread);
6848 if (ecs->event_thread->step_after_step_resume_breakpoint)
6849 {
6850 /* Back when the step-resume breakpoint was inserted, we
6851 were trying to single-step off a breakpoint. Go back to
6852 doing that. */
6853 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6854 ecs->event_thread->stepping_over_breakpoint = 1;
6855 keep_going (ecs);
6856 return;
e5ef252a 6857 }
cdaa5b73
PA
6858 break;
6859
6860 case BPSTAT_WHAT_KEEP_CHECKING:
6861 break;
e5ef252a 6862 }
c906108c 6863
af48d08f
PA
6864 /* If we stepped a permanent breakpoint and we had a high priority
6865 step-resume breakpoint for the address we stepped, but we didn't
6866 hit it, then we must have stepped into the signal handler. The
6867 step-resume was only necessary to catch the case of _not_
6868 stepping into the handler, so delete it, and fall through to
6869 checking whether the step finished. */
6870 if (ecs->event_thread->stepped_breakpoint)
6871 {
6872 struct breakpoint *sr_bp
6873 = ecs->event_thread->control.step_resume_breakpoint;
6874
8d707a12
PA
6875 if (sr_bp != NULL
6876 && sr_bp->loc->permanent
af48d08f
PA
6877 && sr_bp->type == bp_hp_step_resume
6878 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6879 {
1eb8556f 6880 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
6881 delete_step_resume_breakpoint (ecs->event_thread);
6882 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6883 }
6884 }
6885
cdaa5b73
PA
6886 /* We come here if we hit a breakpoint but should not stop for it.
6887 Possibly we also were stepping and should stop for that. So fall
6888 through and test for stepping. But, if not stepping, do not
6889 stop. */
c906108c 6890
a7212384
UW
6891 /* In all-stop mode, if we're currently stepping but have stopped in
6892 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6893 if (switch_back_to_stepped_thread (ecs))
6894 return;
776f04fa 6895
8358c15c 6896 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6897 {
1eb8556f 6898 infrun_debug_printf ("step-resume breakpoint is inserted");
527159b7 6899
488f131b 6900 /* Having a step-resume breakpoint overrides anything
dda83cd7
SM
6901 else having to do with stepping commands until
6902 that breakpoint is reached. */
488f131b
JB
6903 keep_going (ecs);
6904 return;
6905 }
c5aa993b 6906
16c381f0 6907 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6908 {
1eb8556f 6909 infrun_debug_printf ("no stepping, continue");
488f131b 6910 /* Likewise if we aren't even stepping. */
488f131b
JB
6911 keep_going (ecs);
6912 return;
6913 }
c5aa993b 6914
4b7703ad
JB
6915 /* Re-fetch current thread's frame in case the code above caused
6916 the frame cache to be re-initialized, making our FRAME variable
6917 a dangling pointer. */
6918 frame = get_current_frame ();
628fe4e4 6919 gdbarch = get_frame_arch (frame);
7e324e48 6920 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6921
488f131b 6922 /* If stepping through a line, keep going if still within it.
c906108c 6923
488f131b
JB
6924 Note that step_range_end is the address of the first instruction
6925 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6926 within it!
6927
6928 Note also that during reverse execution, we may be stepping
6929 through a function epilogue and therefore must detect when
6930 the current-frame changes in the middle of a line. */
6931
f2ffa92b
PA
6932 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6933 ecs->event_thread)
31410e84 6934 && (execution_direction != EXEC_REVERSE
388a8562 6935 || frame_id_eq (get_frame_id (frame),
16c381f0 6936 ecs->event_thread->control.step_frame_id)))
488f131b 6937 {
1eb8556f
SM
6938 infrun_debug_printf
6939 ("stepping inside range [%s-%s]",
6940 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6941 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6942
c1e36e3e
PA
6943 /* Tentatively re-enable range stepping; `resume' disables it if
6944 necessary (e.g., if we're stepping over a breakpoint or we
6945 have software watchpoints). */
6946 ecs->event_thread->control.may_range_step = 1;
6947
b2175913
MS
6948 /* When stepping backward, stop at beginning of line range
6949 (unless it's the function entry point, in which case
6950 keep going back to the call point). */
f2ffa92b 6951 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6952 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6953 && stop_pc != ecs->stop_func_start
6954 && execution_direction == EXEC_REVERSE)
bdc36728 6955 end_stepping_range (ecs);
b2175913
MS
6956 else
6957 keep_going (ecs);
6958
488f131b
JB
6959 return;
6960 }
c5aa993b 6961
488f131b 6962 /* We stepped out of the stepping range. */
c906108c 6963
488f131b 6964 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6965 loader dynamic symbol resolution code...
6966
6967 EXEC_FORWARD: we keep on single stepping until we exit the run
6968 time loader code and reach the callee's address.
6969
6970 EXEC_REVERSE: we've already executed the callee (backward), and
6971 the runtime loader code is handled just like any other
6972 undebuggable function call. Now we need only keep stepping
6973 backward through the trampoline code, and that's handled further
6974 down, so there is nothing for us to do here. */
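   /* (On GNU/Linux this typically means the first call through a lazily
      bound PLT entry, which lands in the dynamic linker's resolver such
      as _dl_runtime_resolve; gdbarch_skip_solib_resolver then tells us
      where execution resumes once resolution is done, and we plant a
      step-resume breakpoint there -- see below.)  */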
6975
6976 if (execution_direction != EXEC_REVERSE
16c381f0 6977 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6978 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6979 {
4c8c40e6 6980 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6981 gdbarch_skip_solib_resolver (gdbarch,
6982 ecs->event_thread->suspend.stop_pc);
c906108c 6983
1eb8556f 6984 infrun_debug_printf ("stepped into dynsym resolve code");
527159b7 6985
488f131b
JB
6986 if (pc_after_resolver)
6987 {
6988 /* Set up a step-resume breakpoint at the address
6989 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6990 symtab_and_line sr_sal;
488f131b 6991 sr_sal.pc = pc_after_resolver;
6c95b8df 6992 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6993
a6d9a66e
UW
6994 insert_step_resume_breakpoint_at_sal (gdbarch,
6995 sr_sal, null_frame_id);
c5aa993b 6996 }
c906108c 6997
488f131b
JB
6998 keep_going (ecs);
6999 return;
7000 }
c906108c 7001
1d509aa6
MM
7002 /* Step through an indirect branch thunk. */
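 /* (These are, e.g., the x86 retpoline helpers such as
    __x86_indirect_thunk_rax that compilers emit as a Spectre mitigation;
    stopping inside them is rarely useful at source level.)  */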
7003 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
7004 && gdbarch_in_indirect_branch_thunk (gdbarch,
7005 ecs->event_thread->suspend.stop_pc))
1d509aa6 7006 {
1eb8556f 7007 infrun_debug_printf ("stepped into indirect branch thunk");
1d509aa6
MM
7008 keep_going (ecs);
7009 return;
7010 }
7011
16c381f0
JK
7012 if (ecs->event_thread->control.step_range_end != 1
7013 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7014 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 7015 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 7016 {
1eb8556f 7017 infrun_debug_printf ("stepped into signal trampoline");
42edda50 7018 /* The inferior, while doing a "step" or "next", has ended up in
dda83cd7
SM
7019 a signal trampoline (either by a signal being delivered or by
7020 the signal handler returning). Just single-step until the
7021 inferior leaves the trampoline (either by calling the handler
7022 or returning). */
488f131b
JB
7023 keep_going (ecs);
7024 return;
7025 }
c906108c 7026
14132e89
MR
7027 /* If we're in the return path from a shared library trampoline,
7028 we want to proceed through the trampoline when stepping. */
7029 /* macro/2012-04-25: This needs to come before the subroutine
7030 call check below as on some targets return trampolines look
7031 like subroutine calls (MIPS16 return thunks). */
7032 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
7033 ecs->event_thread->suspend.stop_pc,
7034 ecs->stop_func_name)
14132e89
MR
7035 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7036 {
7037 /* Determine where this trampoline returns. */
f2ffa92b
PA
7038 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7039 CORE_ADDR real_stop_pc
7040 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 7041
1eb8556f 7042 infrun_debug_printf ("stepped into solib return tramp");
14132e89
MR
7043
7044 /* Only proceed through if we know where it's going. */
7045 if (real_stop_pc)
7046 {
7047 /* And put the step-breakpoint there and go until there. */
51abb421 7048 symtab_and_line sr_sal;
14132e89
MR
7049 sr_sal.pc = real_stop_pc;
7050 sr_sal.section = find_pc_overlay (sr_sal.pc);
7051 sr_sal.pspace = get_frame_program_space (frame);
7052
7053 /* Do not specify what the fp should be when we stop since
7054 on some machines the prologue is where the new fp value
7055 is established. */
7056 insert_step_resume_breakpoint_at_sal (gdbarch,
7057 sr_sal, null_frame_id);
7058
7059 /* Restart without fiddling with the step ranges or
7060 other state. */
7061 keep_going (ecs);
7062 return;
7063 }
7064 }
7065
c17eaafe
DJ
7066 /* Check for subroutine calls. The check for the current frame
7067 equalling the step ID is not necessary - the check of the
7068 previous frame's ID is sufficient - but it is a common case and
7069 cheaper than checking the previous frame's ID.
14e60db5
DJ
7070
7071 NOTE: frame_id_eq will never report two invalid frame IDs as
7072 being equal, so to get into this block, both the current and
7073 previous frame must have valid frame IDs. */
005ca36a
JB
7074 /* The outer_frame_id check is a heuristic to detect stepping
7075 through startup code. If we step over an instruction which
7076 sets the stack pointer from an invalid value to a valid value,
7077 we may detect that as a subroutine call from the mythical
7078 "outermost" function. This could be fixed by marking
7079 outermost frames as !stack_p,code_p,special_p. Then the
7080 initial outermost frame, before sp was valid, would
ce6cca6d 7081 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 7082 for more. */
edb3359d 7083 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 7084 ecs->event_thread->control.step_stack_frame_id)
005ca36a 7085 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
7086 ecs->event_thread->control.step_stack_frame_id)
7087 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 7088 outer_frame_id)
885eeb5b 7089 || (ecs->event_thread->control.step_start_function
f2ffa92b 7090 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 7091 {
f2ffa92b 7092 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 7093 CORE_ADDR real_stop_pc;
8fb3e588 7094
1eb8556f 7095 infrun_debug_printf ("stepped into subroutine");
527159b7 7096
b7a084be 7097 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
7098 {
7099 /* I presume that step_over_calls is only 0 when we're
7100 supposed to be stepping at the assembly language level
7101 ("stepi"). Just stop. */
388a8562 7102 /* And this works the same backward as frontward. MVS */
bdc36728 7103 end_stepping_range (ecs);
95918acb
AC
7104 return;
7105 }
8fb3e588 7106
388a8562
MS
7107 /* Reverse stepping through solib trampolines. */
7108
7109 if (execution_direction == EXEC_REVERSE
16c381f0 7110 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
7111 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7112 || (ecs->stop_func_start == 0
7113 && in_solib_dynsym_resolve_code (stop_pc))))
7114 {
7115 /* Any solib trampoline code can be handled in reverse
7116 by simply continuing to single-step. We have already
7117 executed the solib function (backwards), and a few
7118 steps will take us back through the trampoline to the
7119 caller. */
7120 keep_going (ecs);
7121 return;
7122 }
7123
16c381f0 7124 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 7125 {
b2175913
MS
7126 /* We're doing a "next".
7127
7128 Normal (forward) execution: set a breakpoint at the
7129 callee's return address (the address at which the caller
7130 will resume).
7131
7132 Reverse (backward) execution. set the step-resume
7133 breakpoint at the start of the function that we just
7134 stepped into (backwards), and continue to there. When we
6130d0b7 7135 get there, we'll need to single-step back to the caller. */
b2175913
MS
7136
7137 if (execution_direction == EXEC_REVERSE)
7138 {
acf9414f
JK
7139 /* If we're already at the start of the function, we've either
7140 just stepped backward into a single instruction function,
7141 or stepped back out of a signal handler to the first instruction
7142 of the function. Just keep going, which will single-step back
7143 to the caller. */
58c48e72 7144 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 7145 {
acf9414f 7146 /* Normal function call return (static or dynamic). */
51abb421 7147 symtab_and_line sr_sal;
acf9414f
JK
7148 sr_sal.pc = ecs->stop_func_start;
7149 sr_sal.pspace = get_frame_program_space (frame);
7150 insert_step_resume_breakpoint_at_sal (gdbarch,
7151 sr_sal, null_frame_id);
7152 }
b2175913
MS
7153 }
7154 else
568d6575 7155 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7156
8567c30f
AC
7157 keep_going (ecs);
7158 return;
7159 }
a53c66de 7160
95918acb 7161 /* If we are in a function call trampoline (a stub between the
dda83cd7
SM
7162 calling routine and the real function), locate the real
7163 function. That's what tells us (a) whether we want to step
7164 into it at all, and (b) what prologue we want to run to the
7165 end of, if we do step into it. */
568d6575 7166 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 7167 if (real_stop_pc == 0)
568d6575 7168 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
7169 if (real_stop_pc != 0)
7170 ecs->stop_func_start = real_stop_pc;
8fb3e588 7171
db5f024e 7172 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 7173 {
51abb421 7174 symtab_and_line sr_sal;
1b2bfbb9 7175 sr_sal.pc = ecs->stop_func_start;
6c95b8df 7176 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 7177
a6d9a66e
UW
7178 insert_step_resume_breakpoint_at_sal (gdbarch,
7179 sr_sal, null_frame_id);
8fb3e588
AC
7180 keep_going (ecs);
7181 return;
1b2bfbb9
RC
7182 }
7183
95918acb 7184 /* If we have line number information for the function we are
1bfeeb0f
JL
7185 thinking of stepping into and the function isn't on the skip
7186 list, step into it.
95918acb 7187
dda83cd7
SM
7188 If there are several symtabs at that PC (e.g. with include
7189 files), just want to know whether *any* of them have line
7190 numbers. find_pc_line handles this. */
95918acb
AC
7191 {
7192 struct symtab_and_line tmp_sal;
8fb3e588 7193
95918acb 7194 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 7195 if (tmp_sal.line != 0
85817405 7196 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
7197 tmp_sal)
7198 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 7199 {
b2175913 7200 if (execution_direction == EXEC_REVERSE)
568d6575 7201 handle_step_into_function_backward (gdbarch, ecs);
b2175913 7202 else
568d6575 7203 handle_step_into_function (gdbarch, ecs);
95918acb
AC
7204 return;
7205 }
7206 }
7207
7208 /* If we have no line number and the step-stop-if-no-debug is
dda83cd7
SM
7209 set, we stop the step so that the user has a chance to switch
7210 in assembly mode. */
16c381f0 7211 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 7212 && step_stop_if_no_debug)
95918acb 7213 {
bdc36728 7214 end_stepping_range (ecs);
95918acb
AC
7215 return;
7216 }
7217
b2175913
MS
7218 if (execution_direction == EXEC_REVERSE)
7219 {
acf9414f
JK
7220 /* If we're already at the start of the function, we've either just
7221 stepped backward into a single instruction function without line
7222 number info, or stepped back out of a signal handler to the first
7223 instruction of the function without line number info. Just keep
7224 going, which will single-step back to the caller. */
7225 if (ecs->stop_func_start != stop_pc)
7226 {
7227 /* Set a breakpoint at callee's start address.
7228 From there we can step once and be back in the caller. */
51abb421 7229 symtab_and_line sr_sal;
acf9414f
JK
7230 sr_sal.pc = ecs->stop_func_start;
7231 sr_sal.pspace = get_frame_program_space (frame);
7232 insert_step_resume_breakpoint_at_sal (gdbarch,
7233 sr_sal, null_frame_id);
7234 }
b2175913
MS
7235 }
7236 else
7237 /* Set a breakpoint at callee's return address (the address
7238 at which the caller will resume). */
568d6575 7239 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7240
95918acb 7241 keep_going (ecs);
488f131b 7242 return;
488f131b 7243 }
c906108c 7244
fdd654f3
MS
7245 /* Reverse stepping through solib trampolines. */
7246
7247 if (execution_direction == EXEC_REVERSE
16c381f0 7248 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 7249 {
f2ffa92b
PA
7250 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7251
fdd654f3
MS
7252 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7253 || (ecs->stop_func_start == 0
7254 && in_solib_dynsym_resolve_code (stop_pc)))
7255 {
7256 /* Any solib trampoline code can be handled in reverse
7257 by simply continuing to single-step. We have already
7258 executed the solib function (backwards), and a few
7259 steps will take us back through the trampoline to the
7260 caller. */
7261 keep_going (ecs);
7262 return;
7263 }
7264 else if (in_solib_dynsym_resolve_code (stop_pc))
7265 {
7266 /* Stepped backward into the solib dynsym resolver.
7267 Set a breakpoint at its start and continue, then
7268 one more step will take us out. */
51abb421 7269 symtab_and_line sr_sal;
fdd654f3 7270 sr_sal.pc = ecs->stop_func_start;
9d1807c3 7271 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
7272 insert_step_resume_breakpoint_at_sal (gdbarch,
7273 sr_sal, null_frame_id);
7274 keep_going (ecs);
7275 return;
7276 }
7277 }
7278
8c95582d
AB
7279 /* This always returns the sal for the inner-most frame when we are in a
7280 stack of inlined frames, even if GDB actually believes that it is in a
7281 more outer frame. This is checked for below by calls to
7282 inline_skipped_frames. */
f2ffa92b 7283 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 7284
1b2bfbb9
RC
7285 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7286 the trampoline processing logic, however, there are some trampolines
7287 that have no names, so we should do trampoline handling first. */
16c381f0 7288 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 7289 && ecs->stop_func_name == NULL
2afb61aa 7290 && stop_pc_sal.line == 0)
1b2bfbb9 7291 {
1eb8556f 7292 infrun_debug_printf ("stepped into undebuggable function");
527159b7 7293
1b2bfbb9 7294 /* The inferior just stepped into, or returned to, an
dda83cd7
SM
7295 undebuggable function (where there is no debugging information
7296 and no line number corresponding to the address where the
7297 inferior stopped). Since we want to skip this kind of code,
7298 we keep going until the inferior returns from this
7299 function - unless the user has asked us not to (via
7300 set step-mode) or we no longer know how to get back
7301 to the call site. */
14e60db5 7302 if (step_stop_if_no_debug
c7ce8faa 7303 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
7304 {
7305 /* If we have no line number and the step-stop-if-no-debug
7306 is set, we stop the step so that the user has a chance to
7307 switch in assembly mode. */
bdc36728 7308 end_stepping_range (ecs);
1b2bfbb9
RC
7309 return;
7310 }
7311 else
7312 {
7313 /* Set a breakpoint at callee's return address (the address
7314 at which the caller will resume). */
568d6575 7315 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
7316 keep_going (ecs);
7317 return;
7318 }
7319 }
7320
16c381f0 7321 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
7322 {
7323 /* It is stepi or nexti. We always want to stop stepping after
dda83cd7 7324 one instruction. */
1eb8556f 7325 infrun_debug_printf ("stepi/nexti");
bdc36728 7326 end_stepping_range (ecs);
1b2bfbb9
RC
7327 return;
7328 }
7329
2afb61aa 7330 if (stop_pc_sal.line == 0)
488f131b
JB
7331 {
7332 /* We have no line number information. That means to stop
dda83cd7
SM
7333 stepping (does this always happen right after one instruction,
7334 when we do "s" in a function with no line numbers,
7335 or can this happen as a result of a return or longjmp?). */
1eb8556f 7336 infrun_debug_printf ("line number info");
bdc36728 7337 end_stepping_range (ecs);
488f131b
JB
7338 return;
7339 }
c906108c 7340
edb3359d
DJ
7341 /* Look for "calls" to inlined functions, part one. If the inline
7342 frame machinery detected some skipped call sites, we have entered
7343 a new inline function. */
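 /* Illustrative sketch: with

	static inline int sq (int x) { return x * x; }
	...
	y = sq (v);   // stepping here

    "step" should descend into sq when its call site is on the line we
    were already stepping through, while "next" should only stop if the
    call site is on a different line -- which is what the checks below
    implement.  */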
7344
7345 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7346 ecs->event_thread->control.step_frame_id)
00431a78 7347 && inline_skipped_frames (ecs->event_thread))
edb3359d 7348 {
1eb8556f 7349 infrun_debug_printf ("stepped into inlined function");
edb3359d 7350
51abb421 7351 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 7352
16c381f0 7353 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
7354 {
7355 /* For "step", we're going to stop. But if the call site
7356 for this inlined function is on the same source line as
7357 we were previously stepping, go down into the function
7358 first. Otherwise stop at the call site. */
7359
7360 if (call_sal.line == ecs->event_thread->current_line
7361 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7362 {
7363 step_into_inline_frame (ecs->event_thread);
7364 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7365 {
7366 keep_going (ecs);
7367 return;
7368 }
7369 }
edb3359d 7370
bdc36728 7371 end_stepping_range (ecs);
edb3359d
DJ
7372 return;
7373 }
7374 else
7375 {
7376 /* For "next", we should stop at the call site if it is on a
7377 different source line. Otherwise continue through the
7378 inlined function. */
7379 if (call_sal.line == ecs->event_thread->current_line
7380 && call_sal.symtab == ecs->event_thread->current_symtab)
7381 keep_going (ecs);
7382 else
bdc36728 7383 end_stepping_range (ecs);
edb3359d
DJ
7384 return;
7385 }
7386 }
7387
7388 /* Look for "calls" to inlined functions, part two. If we are still
7389 in the same real function we were stepping through, but we have
7390 to go further up to find the exact frame ID, we are stepping
7391 through a more inlined call beyond its call site. */
7392
7393 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7394 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7395 ecs->event_thread->control.step_frame_id)
edb3359d 7396 && stepped_in_from (get_current_frame (),
16c381f0 7397 ecs->event_thread->control.step_frame_id))
edb3359d 7398 {
1eb8556f 7399 infrun_debug_printf ("stepping through inlined function");
edb3359d 7400
4a4c04f1
BE
7401 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7402 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7403 keep_going (ecs);
7404 else
bdc36728 7405 end_stepping_range (ecs);
edb3359d
DJ
7406 return;
7407 }
7408
8c95582d 7409 bool refresh_step_info = true;
f2ffa92b 7410 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea 7411 && (ecs->event_thread->current_line != stop_pc_sal.line
24b21115 7412 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7413 {
ebde6f2d
TV
7414 /* We are at a different line. */
7415
8c95582d
AB
7416 if (stop_pc_sal.is_stmt)
7417 {
ebde6f2d
TV
7418 /* We are at the start of a statement.
7419
7420 So stop. Note that we don't stop if we step into the middle of a
7421 statement. That is said to make things like for (;;) statements
7422 work better. */
1eb8556f 7423 infrun_debug_printf ("stepped to a different line");
8c95582d
AB
7424 end_stepping_range (ecs);
7425 return;
7426 }
7427 else if (frame_id_eq (get_frame_id (get_current_frame ()),
ebde6f2d 7428 ecs->event_thread->control.step_frame_id))
8c95582d 7429 {
ebde6f2d
TV
7430 /* We are not at the start of a statement, and we have not changed
7431 frame.
7432
7433 We ignore this line table entry, and continue stepping forward,
8c95582d
AB
7434 looking for a better place to stop. */
7435 refresh_step_info = false;
1eb8556f
SM
7436 infrun_debug_printf ("stepped to a different line, but "
7437 "it's not the start of a statement");
8c95582d 7438 }
ebde6f2d
TV
7439 else
7440 {
7441 /* We are not at the start of a statement, and we have changed frame.
7442
7443 We ignore this line table entry, and continue stepping forward,
7444 looking for a better place to stop. Keep refresh_step_info at
7445 true to note that the frame has changed, but ignore the line
7446 number to make sure we don't ignore a subsequent entry with the
7447 same line number. */
7448 stop_pc_sal.line = 0;
7449 infrun_debug_printf ("stepped to a different frame, but "
7450 "it's not the start of a statement");
7451 }
488f131b 7452 }
c906108c 7453
488f131b 7454 /* We aren't done stepping.
c906108c 7455
488f131b
JB
7456 Optimize by setting the stepping range to the line.
7457 (We might not be in the original line, but if we entered a
7458 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7459 things like for(;;) statements work better.)
7460
7461 If we entered a SAL that indicates a non-statement line table entry,
7462 then we update the stepping range, but we don't update the step info,
7463 which includes things like the line number we are stepping away from.
7464 This means we will stop when we find a line table entry that is marked
7465 as is-statement, even if it matches the non-statement one we just
7466 stepped into. */
c906108c 7467
16c381f0
JK
7468 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7469 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7470 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7471 if (refresh_step_info)
7472 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7473
1eb8556f 7474 infrun_debug_printf ("keep going");
488f131b 7475 keep_going (ecs);
104c1213
JM
7476}
7477
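/* Editor's note -- an illustrative sketch, not GDB code: assume a
   debuggee where an inlined call shares its source line with the
   statement being stepped:

       static inline int twice (int x) { return 2 * x; }
       int y = twice (v);   // call site on the line being stepped

   Per the logic above, "step" sees the call site on the same line it
   was already stepping, so it descends into the inlined frame and then
   stops (unless the inlined function is marked for skip), while "next"
   keeps going through the inlined body.  Had the call site been on a
   different source line, both commands would stop at the call site.  */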
408f6686
PA
7478static bool restart_stepped_thread (process_stratum_target *resume_target,
7479 ptid_t resume_ptid);
7480
c447ac0b
PA
7481/* In all-stop mode, if we're currently stepping but have stopped in
7482 some other thread, we may need to switch back to the stepped
7483 thread. Returns true if we set the inferior running, false if we left
7484 it stopped (and the event needs further processing). */
7485
c4464ade 7486static bool
c447ac0b
PA
7487switch_back_to_stepped_thread (struct execution_control_state *ecs)
7488{
fbea99ea 7489 if (!target_is_non_stop_p ())
c447ac0b 7490 {
99619bea
PA
7491 /* If any thread is blocked on some internal breakpoint, and we
7492 simply need to step over that breakpoint to get it going
7493 again, do that first. */
7494
7495 /* However, if we see an event for the stepping thread, then we
7496 know all other threads have been moved past their breakpoints
7497 already. Let the caller check whether the step is finished,
7498 etc., before deciding to move it past a breakpoint. */
7499 if (ecs->event_thread->control.step_range_end != 0)
c4464ade 7500 return false;
99619bea
PA
7501
7502 /* Check if the current thread is blocked on an incomplete
7503 step-over, interrupted by a random signal. */
7504 if (ecs->event_thread->control.trap_expected
7505 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7506 {
1eb8556f
SM
7507 infrun_debug_printf
7508 ("need to finish step-over of [%s]",
7509 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea 7510 keep_going (ecs);
c4464ade 7511 return true;
99619bea 7512 }
2adfaa28 7513
99619bea
PA
7514 /* Check if the current thread is blocked by a single-step
7515 breakpoint of another thread. */
7516 if (ecs->hit_singlestep_breakpoint)
7517 {
1eb8556f
SM
7518 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7519 target_pid_to_str (ecs->ptid).c_str ());
99619bea 7520 keep_going (ecs);
c4464ade 7521 return true;
99619bea
PA
7522 }
7523
4d9d9d04
PA
7524 /* If this thread needs yet another step-over (e.g., stepping
7525 through a delay slot), do it first before moving on to
7526 another thread. */
7527 if (thread_still_needs_step_over (ecs->event_thread))
7528 {
1eb8556f
SM
7529 infrun_debug_printf
7530 ("thread [%s] still needs step-over",
7531 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04 7532 keep_going (ecs);
c4464ade 7533 return true;
4d9d9d04 7534 }
70509625 7535
483805cf
PA
7536 /* If scheduler locking applies even if not stepping, there's no
7537 need to walk over threads. Above we've checked whether the
7538 current thread is stepping. If some other thread not the
7539 event thread is stepping, then it must be that scheduler
7540 locking is not in effect. */
856e7dd6 7541 if (schedlock_applies (ecs->event_thread))
c4464ade 7542 return false;
483805cf 7543
4d9d9d04
PA
7544 /* Otherwise, we no longer expect a trap in the current thread.
7545 Clear the trap_expected flag before switching back -- this is
7546 what keep_going does as well, if we call it. */
7547 ecs->event_thread->control.trap_expected = 0;
7548
7549 /* Likewise, clear the signal if it should not be passed. */
7550 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7551 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7552
408f6686 7553 if (restart_stepped_thread (ecs->target, ecs->ptid))
4d9d9d04
PA
7554 {
7555 prepare_to_wait (ecs);
c4464ade 7556 return true;
4d9d9d04
PA
7557 }
7558
408f6686
PA
7559 switch_to_thread (ecs->event_thread);
7560 }
4d9d9d04 7561
408f6686
PA
7562 return false;
7563}
f3f8ece4 7564
408f6686
PA
7565/* Look for the thread that was stepping, and resume it.
7566 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7567 is resuming. Return true if a thread was started, false
7568 otherwise. */
483805cf 7569
408f6686
PA
7570static bool
7571restart_stepped_thread (process_stratum_target *resume_target,
7572 ptid_t resume_ptid)
7573{
7574 /* Do all pending step-overs before actually proceeding with
7575 step/next/etc. */
7576 if (start_step_over ())
7577 return true;
483805cf 7578
408f6686
PA
7579 for (thread_info *tp : all_threads_safe ())
7580 {
7581 if (tp->state == THREAD_EXITED)
7582 continue;
7583
7584 if (tp->suspend.waitstatus_pending_p)
7585 continue;
483805cf 7586
408f6686
PA
7587 /* Ignore threads of processes the caller is not
7588 resuming. */
7589 if (!sched_multi
7590 && (tp->inf->process_target () != resume_target
7591 || tp->inf->pid != resume_ptid.pid ()))
7592 continue;
483805cf 7593
408f6686
PA
7594 if (tp->control.trap_expected)
7595 {
7596 infrun_debug_printf ("switching back to stepped thread (step-over)");
483805cf 7597
408f6686
PA
7598 if (keep_going_stepped_thread (tp))
7599 return true;
99619bea 7600 }
408f6686
PA
7601 }
7602
7603 for (thread_info *tp : all_threads_safe ())
7604 {
7605 if (tp->state == THREAD_EXITED)
7606 continue;
7607
7608 if (tp->suspend.waitstatus_pending_p)
7609 continue;
99619bea 7610
408f6686
PA
7611 /* Ignore threads of processes the caller is not
7612 resuming. */
7613 if (!sched_multi
7614 && (tp->inf->process_target () != resume_target
7615 || tp->inf->pid != resume_ptid.pid ()))
7616 continue;
7617
7618 /* Did we find the stepping thread? */
7619 if (tp->control.step_range_end)
99619bea 7620 {
408f6686 7621 infrun_debug_printf ("switching back to stepped thread (stepping)");
c447ac0b 7622
408f6686
PA
7623 if (keep_going_stepped_thread (tp))
7624 return true;
2ac7589c
PA
7625 }
7626 }
2adfaa28 7627
c4464ade 7628 return false;
2ac7589c 7629}
2adfaa28 7630
408f6686
PA
7631/* See infrun.h. */
7632
7633void
7634restart_after_all_stop_detach (process_stratum_target *proc_target)
7635{
7636 /* Note we don't check target_is_non_stop_p() here, because the
7637 current inferior may no longer have a process_stratum target
7638 pushed, as we just detached. */
7639
7640 /* See if we have a THREAD_RUNNING thread that needs to be
7641 re-resumed. If we have any thread that is already executing,
7642 then we don't need to resume the target -- it has already been
7643 resumed. With the remote target (in all-stop), it's even
7644 impossible to issue another resumption if the target is already
7645 resumed, until the target reports a stop. */
7646 for (thread_info *thr : all_threads (proc_target))
7647 {
7648 if (thr->state != THREAD_RUNNING)
7649 continue;
7650
7651 /* If we have any thread that is already executing, then we
7652 don't need to resume the target -- it has already been
7653 resumed. */
7654 if (thr->executing)
7655 return;
7656
7657 /* If we have a pending event to process, skip resuming the
7658 target and go straight to processing it. */
7659 if (thr->resumed && thr->suspend.waitstatus_pending_p)
7660 return;
7661 }
7662
7663 /* Alright, we need to re-resume the target. If a thread was
7664 stepping, we need to restart it stepping. */
7665 if (restart_stepped_thread (proc_target, minus_one_ptid))
7666 return;
7667
7668 /* Otherwise, find the first THREAD_RUNNING thread and resume
7669 it. */
7670 for (thread_info *thr : all_threads (proc_target))
7671 {
7672 if (thr->state != THREAD_RUNNING)
7673 continue;
7674
7675 execution_control_state ecs;
7676 reset_ecs (&ecs, thr);
7677 switch_to_thread (thr);
7678 keep_going (&ecs);
7679 return;
7680 }
7681}
7682
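/* Editor's sketch of the intended use of the function above; the caller
   shown is an assumption, not code quoted from GDB:

       target_detach (inf, from_tty);
       if (connection_still_has_live_inferiors)      <- assumed check
         restart_after_all_stop_detach (proc_target);

   i.e. after an all-stop detach that had to stop every thread on
   PROC_TARGET, threads of the remaining inferiors -- stopped only so
   the detach could be carried out -- are set running (or stepping)
   again.  */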
2ac7589c
PA
7683/* Set a previously stepped thread back to stepping. Returns true on
7684 success, false if the resume is not possible (e.g., the thread
7685 vanished). */
7686
c4464ade 7687static bool
2ac7589c
PA
7688keep_going_stepped_thread (struct thread_info *tp)
7689{
7690 struct frame_info *frame;
2ac7589c
PA
7691 struct execution_control_state ecss;
7692 struct execution_control_state *ecs = &ecss;
2adfaa28 7693
2ac7589c
PA
7694 /* If the stepping thread exited, then don't try to switch back and
7695 resume it, which could fail in several different ways depending
7696 on the target. Instead, just keep going.
2adfaa28 7697
2ac7589c
PA
7698 We can find a stepping dead thread in the thread list in two
7699 cases:
2adfaa28 7700
2ac7589c
PA
7701 - The target supports thread exit events, and when the target
7702 tries to delete the thread from the thread list, inferior_ptid
7703 pointed at the exiting thread. In such a case, calling
7704 delete_thread does not really remove the thread from the list;
7705 instead, the thread is left listed, with 'exited' state.
64ce06e4 7706
2ac7589c
PA
7707 - The target's debug interface does not support thread exit
7708 events, and so we have no idea whatsoever if the previously
7709 stepping thread is still alive. For that reason, we need to
7710 synchronously query the target now. */
2adfaa28 7711
00431a78 7712 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 7713 {
1eb8556f
SM
7714 infrun_debug_printf ("not resuming previously stepped thread, it has "
7715 "vanished");
2ac7589c 7716
00431a78 7717 delete_thread (tp);
c4464ade 7718 return false;
c447ac0b 7719 }
2ac7589c 7720
1eb8556f 7721 infrun_debug_printf ("resuming previously stepped thread");
2ac7589c
PA
7722
7723 reset_ecs (ecs, tp);
00431a78 7724 switch_to_thread (tp);
2ac7589c 7725
f2ffa92b 7726 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7727 frame = get_current_frame ();
2ac7589c
PA
7728
7729 /* If the PC of the thread we were trying to single-step has
7730 changed, then that thread has trapped or been signaled, but the
7731 event has not been reported to GDB yet. Re-poll the target
7732 looking for this particular thread's event (i.e. temporarily
7733 enable schedlock) by:
7734
7735 - setting a break at the current PC
7736 - resuming that particular thread, only (by setting trap
7737 expected)
7738
7739 This prevents us from continuously moving the single-step breakpoint
7740 forward, one instruction at a time, overstepping. */
7741
f2ffa92b 7742 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7743 {
7744 ptid_t resume_ptid;
7745
1eb8556f
SM
7746 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7747 paddress (target_gdbarch (), tp->prev_pc),
7748 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7749
7750 /* Clear the info of the previous step-over, as it's no longer
7751 valid (if the thread was trying to step over a breakpoint, it
7752 has already succeeded). It's what keep_going would do too,
7753 if we called it. Do this before trying to insert the sss
7754 breakpoint, otherwise if we were previously trying to step
7755 over this exact address in another thread, the breakpoint is
7756 skipped. */
7757 clear_step_over_info ();
7758 tp->control.trap_expected = 0;
7759
7760 insert_single_step_breakpoint (get_frame_arch (frame),
7761 get_frame_address_space (frame),
f2ffa92b 7762 tp->suspend.stop_pc);
2ac7589c 7763
719546c4 7764 tp->resumed = true;
fbea99ea 7765 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
c4464ade 7766 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2ac7589c
PA
7767 }
7768 else
7769 {
1eb8556f 7770 infrun_debug_printf ("expected thread still hasn't advanced");
2ac7589c
PA
7771
7772 keep_going_pass_signal (ecs);
7773 }
c4464ade
SM
7774
7775 return true;
c447ac0b
PA
7776}
7777
8b061563
PA
7778/* Is thread TP in the middle of (software or hardware)
7779 single-stepping? (Note the result of this function must never be
7780 passed directly as target_resume's STEP parameter.) */
104c1213 7781
c4464ade 7782static bool
b3444185 7783currently_stepping (struct thread_info *tp)
a7212384 7784{
8358c15c
JK
7785 return ((tp->control.step_range_end
7786 && tp->control.step_resume_breakpoint == NULL)
7787 || tp->control.trap_expected
af48d08f 7788 || tp->stepped_breakpoint
8358c15c 7789 || bpstat_should_step ());
a7212384
UW
7790}
7791
b2175913
MS
7792/* Inferior has stepped into a subroutine call with source code that
7793 we should not step over. Step to the first line of code in
7794 it. */
c2c6d25f
JM
7795
7796static void
568d6575
UW
7797handle_step_into_function (struct gdbarch *gdbarch,
7798 struct execution_control_state *ecs)
c2c6d25f 7799{
7e324e48
GB
7800 fill_in_stop_func (gdbarch, ecs);
7801
f2ffa92b
PA
7802 compunit_symtab *cust
7803 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7804 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7805 ecs->stop_func_start
7806 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7807
51abb421 7808 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7809 /* Use the step_resume_break to step until the end of the prologue,
7810 even if that involves jumps (as it seems to on the vax under
7811 4.2). */
7812 /* If the prologue ends in the middle of a source line, continue to
7813 the end of that source line (if it is still within the function).
7814 Otherwise, just go to end of prologue. */
2afb61aa
PA
7815 if (stop_func_sal.end
7816 && stop_func_sal.pc != ecs->stop_func_start
7817 && stop_func_sal.end < ecs->stop_func_end)
7818 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7819
2dbd5e30
KB
7820 /* Architectures which require breakpoint adjustment might not be able
7821 to place a breakpoint at the computed address. If so, the test
7822 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7823 ecs->stop_func_start to an address at which a breakpoint may be
7824 legitimately placed.
8fb3e588 7825
2dbd5e30
KB
7826 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7827 made, GDB will enter an infinite loop when stepping through
7828 optimized code consisting of VLIW instructions which contain
7829 subinstructions corresponding to different source lines. On
7830 FR-V, it's not permitted to place a breakpoint on any but the
7831 first subinstruction of a VLIW instruction. When a breakpoint is
7832 set, GDB will adjust the breakpoint address to the beginning of
7833 the VLIW instruction. Thus, we need to make the corresponding
7834 adjustment here when computing the stop address. */
8fb3e588 7835
568d6575 7836 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7837 {
7838 ecs->stop_func_start
568d6575 7839 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7840 ecs->stop_func_start);
2dbd5e30
KB
7841 }
7842
f2ffa92b 7843 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7844 {
7845 /* We are already there: stop now. */
bdc36728 7846 end_stepping_range (ecs);
c2c6d25f
JM
7847 return;
7848 }
7849 else
7850 {
7851 /* Put the step-breakpoint there and go until there. */
51abb421 7852 symtab_and_line sr_sal;
c2c6d25f
JM
7853 sr_sal.pc = ecs->stop_func_start;
7854 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7855 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7856
c2c6d25f 7857 /* Do not specify what the fp should be when we stop since on
dda83cd7
SM
7858 some machines the prologue is where the new fp value is
7859 established. */
a6d9a66e 7860 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7861
7862 /* And make sure stepping stops right away then. */
16c381f0 7863 ecs->event_thread->control.step_range_end
dda83cd7 7864 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7865 }
7866 keep_going (ecs);
7867}
d4f3574e 7868
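/* Editor's note -- an illustrative session, not captured output: when
   "step" enters a function that has debug info, the code above plants a
   step-resume breakpoint just past the prologue, so the user lands on
   the first real source line of the callee, roughly:

       (gdb) step
       foo (x=3) at foo.c:12
       12        int sum = x + 1;

   rather than stopping somewhere inside the prologue instructions.  */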
b2175913
MS
7869/* Inferior has stepped backward into a subroutine call with source
7870 code that we should not step over. Step to the beginning of the
7871 last line of code in it. */
7872
7873static void
568d6575
UW
7874handle_step_into_function_backward (struct gdbarch *gdbarch,
7875 struct execution_control_state *ecs)
b2175913 7876{
43f3e411 7877 struct compunit_symtab *cust;
167e4384 7878 struct symtab_and_line stop_func_sal;
b2175913 7879
7e324e48
GB
7880 fill_in_stop_func (gdbarch, ecs);
7881
f2ffa92b 7882 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7883 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7884 ecs->stop_func_start
7885 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7886
f2ffa92b 7887 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7888
7889 /* OK, we're just going to keep stepping here. */
f2ffa92b 7890 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7891 {
7892 /* We're there already. Just stop stepping now. */
bdc36728 7893 end_stepping_range (ecs);
b2175913
MS
7894 }
7895 else
7896 {
7897 /* Else just reset the step range and keep going.
7898 No step-resume breakpoint, they don't work for
7899 epilogues, which can have multiple entry paths. */
16c381f0
JK
7900 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7901 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7902 keep_going (ecs);
7903 }
7904 return;
7905}
7906
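/* Editor's note on the function above (illustration, not GDB output):
   during reverse execution, stepping backwards into a callee is meant
   to stop at the start of the callee's last source line -- the line it
   would be leaving in reverse order -- rather than at the function's
   entry, which is what forward "step" aims for.  A rough session:

       (gdb) reverse-step
       bar (n=7) at bar.c:42
       42        return n * n;

   The file and line shown are of course made up.  */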
d3169d93 7907/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7908 This is used both for functions and for skipping over code. */
7909
7910static void
2c03e5be
PA
7911insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7912 struct symtab_and_line sr_sal,
7913 struct frame_id sr_id,
7914 enum bptype sr_type)
44cbf7b5 7915{
611c83ae
PA
7916 /* There should never be more than one step-resume or longjmp-resume
7917 breakpoint per thread, so we should never be setting a new
44cbf7b5 7918 step_resume_breakpoint when one is already active. */
8358c15c 7919 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7920 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 7921
1eb8556f
SM
7922 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7923 paddress (gdbarch, sr_sal.pc));
d3169d93 7924
8358c15c 7925 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7926 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7927}
7928
9da8c2a0 7929void
2c03e5be
PA
7930insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7931 struct symtab_and_line sr_sal,
7932 struct frame_id sr_id)
7933{
7934 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7935 sr_sal, sr_id,
7936 bp_step_resume);
44cbf7b5 7937}
7ce450bd 7938
2c03e5be
PA
7939/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7940 This is used to skip a potential signal handler.
7ce450bd 7941
14e60db5
DJ
7942 This is called with the interrupted function's frame. The signal
7943 handler, when it returns, will resume the interrupted function at
7944 RETURN_FRAME.pc. */
d303a6c7
AC
7945
7946static void
2c03e5be 7947insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7948{
f4c1edd8 7949 gdb_assert (return_frame != NULL);
d303a6c7 7950
51abb421
PA
7951 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7952
7953 symtab_and_line sr_sal;
568d6575 7954 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7955 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7956 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7957
2c03e5be
PA
7958 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7959 get_stack_frame_id (return_frame),
7960 bp_hp_step_resume);
d303a6c7
AC
7961}
7962
2c03e5be
PA
7963/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7964 is used to skip a function after stepping into it (for "next" or if
7965 the called function has no debugging information).
14e60db5
DJ
7966
7967 The current function has almost always been reached by single
7968 stepping a call or return instruction. NEXT_FRAME belongs to the
7969 current function, and the breakpoint will be set at the caller's
7970 resume address.
7971
7972 This is a separate function rather than reusing
2c03e5be 7973 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7974 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7975 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7976
7977static void
7978insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7979{
14e60db5
DJ
7980 /* We shouldn't have gotten here if we don't know where the call site
7981 is. */
c7ce8faa 7982 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7983
51abb421 7984 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7985
51abb421 7986 symtab_and_line sr_sal;
c7ce8faa
DJ
7987 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7988 frame_unwind_caller_pc (next_frame));
14e60db5 7989 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7990 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7991
a6d9a66e 7992 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7993 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7994}
7995
611c83ae
PA
7996/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7997 new breakpoint at the target of a jmp_buf. The handling of
7998 longjmp-resume uses the same mechanisms used for handling
7999 "step-resume" breakpoints. */
8000
8001static void
a6d9a66e 8002insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 8003{
e81a37f7
TT
8004 /* There should never be more than one longjmp-resume breakpoint per
8005 thread, so we should never be setting a new
611c83ae 8006 longjmp_resume_breakpoint when one is already active. */
e81a37f7 8007 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae 8008
1eb8556f
SM
8009 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8010 paddress (gdbarch, pc));
611c83ae 8011
e81a37f7 8012 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 8013 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
8014}
8015
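/* Editor's note -- simplified description plus a made-up debuggee
   fragment, not code from this file: when a stepped thread hits GDB's
   breakpoint on longjmp, the architecture hook that reads the jmp_buf
   (gdbarch_get_longjmp_target) yields the destination PC, and the
   function above plants a momentary longjmp-resume breakpoint there, so
   something like

       if (setjmp (env) == 0)
         longjmp (env, 1);      <- "next" over this keeps control
       do_cleanup ();           <- and stops again around here

   can be stepped across without GDB losing the thread.  */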
186c406b
TT
8016/* Insert an exception resume breakpoint. TP is the thread throwing
8017 the exception. The block B is the block of the unwinder debug hook
8018 function. FRAME is the frame corresponding to the call to this
8019 function. SYM is the symbol of the function argument holding the
8020 target PC of the exception. */
8021
8022static void
8023insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 8024 const struct block *b,
186c406b
TT
8025 struct frame_info *frame,
8026 struct symbol *sym)
8027{
a70b8144 8028 try
186c406b 8029 {
63e43d3a 8030 struct block_symbol vsym;
186c406b
TT
8031 struct value *value;
8032 CORE_ADDR handler;
8033 struct breakpoint *bp;
8034
987012b8 8035 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 8036 b, VAR_DOMAIN);
63e43d3a 8037 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
8038 /* If the value was optimized out, revert to the old behavior. */
8039 if (! value_optimized_out (value))
8040 {
8041 handler = value_as_address (value);
8042
1eb8556f
SM
8043 infrun_debug_printf ("exception resume at %lx",
8044 (unsigned long) handler);
186c406b
TT
8045
8046 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
8047 handler,
8048 bp_exception_resume).release ();
c70a6932
JK
8049
8050 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8051 frame = NULL;
8052
5d5658a1 8053 bp->thread = tp->global_num;
186c406b
TT
8054 inferior_thread ()->control.exception_resume_breakpoint = bp;
8055 }
8056 }
230d2906 8057 catch (const gdb_exception_error &e)
492d29ea
PA
8058 {
8059 /* We want to ignore errors here. */
8060 }
186c406b
TT
8061}
8062
28106bc2
SDJ
8063/* A helper for check_exception_resume that sets an
8064 exception-breakpoint based on a SystemTap probe. */
8065
8066static void
8067insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 8068 const struct bound_probe *probe,
28106bc2
SDJ
8069 struct frame_info *frame)
8070{
8071 struct value *arg_value;
8072 CORE_ADDR handler;
8073 struct breakpoint *bp;
8074
8075 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8076 if (!arg_value)
8077 return;
8078
8079 handler = value_as_address (arg_value);
8080
1eb8556f
SM
8081 infrun_debug_printf ("exception resume at %s",
8082 paddress (probe->objfile->arch (), handler));
28106bc2
SDJ
8083
8084 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 8085 handler, bp_exception_resume).release ();
5d5658a1 8086 bp->thread = tp->global_num;
28106bc2
SDJ
8087 inferior_thread ()->control.exception_resume_breakpoint = bp;
8088}
8089
186c406b
TT
8090/* This is called when an exception has been intercepted. Check to
8091 see whether the exception's destination is of interest, and if so,
8092 set an exception resume breakpoint there. */
8093
8094static void
8095check_exception_resume (struct execution_control_state *ecs,
28106bc2 8096 struct frame_info *frame)
186c406b 8097{
729662a5 8098 struct bound_probe probe;
28106bc2
SDJ
8099 struct symbol *func;
8100
8101 /* First see if this exception unwinding breakpoint was set via a
8102 SystemTap probe point. If so, the probe has two arguments: the
8103 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8104 set a breakpoint there. */
6bac7473 8105 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 8106 if (probe.prob)
28106bc2 8107 {
729662a5 8108 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
8109 return;
8110 }
8111
8112 func = get_frame_function (frame);
8113 if (!func)
8114 return;
186c406b 8115
a70b8144 8116 try
186c406b 8117 {
3977b71f 8118 const struct block *b;
8157b174 8119 struct block_iterator iter;
186c406b
TT
8120 struct symbol *sym;
8121 int argno = 0;
8122
8123 /* The exception breakpoint is a thread-specific breakpoint on
8124 the unwinder's debug hook, declared as:
8125
8126 void _Unwind_DebugHook (void *cfa, void *handler);
8127
8128 The CFA argument indicates the frame to which control is
8129 about to be transferred. HANDLER is the destination PC.
8130
8131 We ignore the CFA and set a temporary breakpoint at HANDLER.
8132 This is not extremely efficient but it avoids issues in gdb
8133 with computing the DWARF CFA, and it also works even in weird
8134 cases such as throwing an exception from inside a signal
8135 handler. */
8136
8137 b = SYMBOL_BLOCK_VALUE (func);
8138 ALL_BLOCK_SYMBOLS (b, iter, sym)
8139 {
8140 if (!SYMBOL_IS_ARGUMENT (sym))
8141 continue;
8142
8143 if (argno == 0)
8144 ++argno;
8145 else
8146 {
8147 insert_exception_resume_breakpoint (ecs->event_thread,
8148 b, frame, sym);
8149 break;
8150 }
8151 }
8152 }
230d2906 8153 catch (const gdb_exception_error &e)
492d29ea
PA
8154 {
8155 }
186c406b
TT
8156}
8157
104c1213 8158static void
22bcd14b 8159stop_waiting (struct execution_control_state *ecs)
104c1213 8160{
1eb8556f 8161 infrun_debug_printf ("stop_waiting");
527159b7 8162
cd0fc7c3
SS
8163 /* Let callers know we don't want to wait for the inferior anymore. */
8164 ecs->wait_some_more = 0;
fbea99ea 8165
53cccef1 8166 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 8167 threads now that we're presenting the stop to the user. */
53cccef1 8168 if (!non_stop && exists_non_stop_target ())
3cebef98 8169 stop_all_threads ("presenting stop to user in all-stop");
cd0fc7c3
SS
8170}
8171
4d9d9d04
PA
8172/* Like keep_going, but passes the signal to the inferior, even if the
8173 signal is set to nopass. */
d4f3574e
SS
8174
8175static void
4d9d9d04 8176keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 8177{
d7e15655 8178 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 8179 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 8180
d4f3574e 8181 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 8182 ecs->event_thread->prev_pc
fc75c28b 8183 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 8184
4d9d9d04 8185 if (ecs->event_thread->control.trap_expected)
d4f3574e 8186 {
4d9d9d04
PA
8187 struct thread_info *tp = ecs->event_thread;
8188
1eb8556f
SM
8189 infrun_debug_printf ("%s has trap_expected set, "
8190 "resuming to collect trap",
8191 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 8192
a9ba6bae
PA
8193 /* We haven't yet gotten our trap, and either: intercepted a
8194 non-signal event (e.g., a fork); or took a signal which we
8195 are supposed to pass through to the inferior. Simply
8196 continue. */
64ce06e4 8197 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 8198 }
372316f1
PA
8199 else if (step_over_info_valid_p ())
8200 {
8201 /* Another thread is stepping over a breakpoint in-line. If
8202 this thread needs a step-over too, queue the request. In
8203 either case, this resume must be deferred for later. */
8204 struct thread_info *tp = ecs->event_thread;
8205
8206 if (ecs->hit_singlestep_breakpoint
8207 || thread_still_needs_step_over (tp))
8208 {
1eb8556f
SM
8209 infrun_debug_printf ("step-over already in progress: "
8210 "step-over for %s deferred",
8211 target_pid_to_str (tp->ptid).c_str ());
28d5518b 8212 global_thread_step_over_chain_enqueue (tp);
372316f1
PA
8213 }
8214 else
8215 {
1eb8556f
SM
8216 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8217 target_pid_to_str (tp->ptid).c_str ());
372316f1 8218 }
372316f1 8219 }
d4f3574e
SS
8220 else
8221 {
31e77af2 8222 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
8223 int remove_bp;
8224 int remove_wps;
8d297bbf 8225 step_over_what step_what;
31e77af2 8226
d4f3574e 8227 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
8228 anyway (if we got a signal, the user asked it be passed to
8229 the child)
8230 -- or --
8231 We got our expected trap, but decided we should resume from
8232 it.
d4f3574e 8233
a9ba6bae 8234 We're going to run this baby now!
d4f3574e 8235
c36b740a
VP
8236 Note that insert_breakpoints won't try to re-insert
8237 already inserted breakpoints. Therefore, we don't
8238 care if breakpoints were already inserted, or not. */
a9ba6bae 8239
31e77af2
PA
8240 /* If we need to step over a breakpoint, and we're not using
8241 displaced stepping to do so, insert all breakpoints
8242 (watchpoints, etc.) but the one we're stepping over, step one
8243 instruction, and then re-insert the breakpoint when that step
8244 is finished. */
963f9c80 8245
6c4cfb24
PA
8246 step_what = thread_still_needs_step_over (ecs->event_thread);
8247
963f9c80 8248 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
8249 || (step_what & STEP_OVER_BREAKPOINT));
8250 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 8251
cb71640d
PA
8252 /* We can't use displaced stepping if we need to step past a
8253 watchpoint. The instruction copied to the scratch pad would
8254 still trigger the watchpoint. */
8255 if (remove_bp
3fc8eb30 8256 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 8257 {
a01bda52 8258 set_step_over_info (regcache->aspace (),
21edc42f
YQ
8259 regcache_read_pc (regcache), remove_wps,
8260 ecs->event_thread->global_num);
45e8c884 8261 }
963f9c80 8262 else if (remove_wps)
21edc42f 8263 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
8264
8265 /* If we now need to do an in-line step-over, we need to stop
8266 all other threads. Note this must be done before
8267 insert_breakpoints below, because that removes the breakpoint
8268 we're about to step over, otherwise other threads could miss
8269 it. */
fbea99ea 8270 if (step_over_info_valid_p () && target_is_non_stop_p ())
3cebef98 8271 stop_all_threads ("starting in-line step-over");
abbb1732 8272
31e77af2 8273 /* Stop stepping if inserting breakpoints fails. */
a70b8144 8274 try
31e77af2
PA
8275 {
8276 insert_breakpoints ();
8277 }
230d2906 8278 catch (const gdb_exception_error &e)
31e77af2
PA
8279 {
8280 exception_print (gdb_stderr, e);
22bcd14b 8281 stop_waiting (ecs);
bdf2a94a 8282 clear_step_over_info ();
31e77af2 8283 return;
d4f3574e
SS
8284 }
8285
963f9c80 8286 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 8287
64ce06e4 8288 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
8289 }
8290
488f131b 8291 prepare_to_wait (ecs);
d4f3574e
SS
8292}
8293
4d9d9d04
PA
8294/* Called when we should continue running the inferior, because the
8295 current event doesn't cause a user visible stop. This does the
8296 resuming part; waiting for the next event is done elsewhere. */
8297
8298static void
8299keep_going (struct execution_control_state *ecs)
8300{
8301 if (ecs->event_thread->control.trap_expected
8302 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
8303 ecs->event_thread->control.trap_expected = 0;
8304
8305 if (!signal_program[ecs->event_thread->suspend.stop_signal])
8306 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
8307 keep_going_pass_signal (ecs);
8308}
8309
104c1213
JM
8310/* This function normally comes after a resume, before
8311 handle_inferior_event exits. It takes care of any last bits of
8312 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 8313
104c1213
JM
8314static void
8315prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 8316{
1eb8556f 8317 infrun_debug_printf ("prepare_to_wait");
104c1213 8318
104c1213 8319 ecs->wait_some_more = 1;
0b333c5e 8320
42bd97a6
PA
8321 /* If the target can't async, emulate it by marking the infrun event
8322 handler such that as soon as we get back to the event-loop, we
8323 immediately end up in fetch_inferior_event again calling
8324 target_wait. */
8325 if (!target_can_async_p ())
0b333c5e 8326 mark_infrun_async_event_handler ();
c906108c 8327}
11cf8741 8328
fd664c91 8329/* We are done with the step range of a step/next/si/ni command.
b57bacec 8330 Called once for each n of a "step n" operation. */
fd664c91
PA
8331
8332static void
bdc36728 8333end_stepping_range (struct execution_control_state *ecs)
fd664c91 8334{
bdc36728 8335 ecs->event_thread->control.stop_step = 1;
bdc36728 8336 stop_waiting (ecs);
fd664c91
PA
8337}
8338
33d62d64
JK
8339/* Several print_*_reason functions to print why the inferior has stopped.
8340 We always print something when the inferior exits, or receives a signal.
8341 The rest of the cases are dealt with later on in normal_stop and
8342 print_it_typical. Ideally there should be a call to one of these
8343 print_*_reason functions from handle_inferior_event each time
22bcd14b 8344 stop_waiting is called.
33d62d64 8345
fd664c91
PA
8346 Note that we don't call these directly, instead we delegate that to
8347 the interpreters, through observers. Interpreters then call these
8348 with whatever uiout is right. */
33d62d64 8349
fd664c91
PA
8350void
8351print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 8352{
fd664c91 8353 /* For CLI-like interpreters, print nothing. */
33d62d64 8354
112e8700 8355 if (uiout->is_mi_like_p ())
fd664c91 8356 {
112e8700 8357 uiout->field_string ("reason",
fd664c91
PA
8358 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8359 }
8360}
33d62d64 8361
fd664c91
PA
8362void
8363print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 8364{
33d62d64 8365 annotate_signalled ();
112e8700
SM
8366 if (uiout->is_mi_like_p ())
8367 uiout->field_string
8368 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
8369 uiout->text ("\nProgram terminated with signal ");
33d62d64 8370 annotate_signal_name ();
112e8700 8371 uiout->field_string ("signal-name",
2ea28649 8372 gdb_signal_to_name (siggnal));
33d62d64 8373 annotate_signal_name_end ();
112e8700 8374 uiout->text (", ");
33d62d64 8375 annotate_signal_string ();
112e8700 8376 uiout->field_string ("signal-meaning",
2ea28649 8377 gdb_signal_to_string (siggnal));
33d62d64 8378 annotate_signal_string_end ();
112e8700
SM
8379 uiout->text (".\n");
8380 uiout->text ("The program no longer exists.\n");
33d62d64
JK
8381}
8382
fd664c91
PA
8383void
8384print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 8385{
fda326dd 8386 struct inferior *inf = current_inferior ();
a068643d 8387 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 8388
33d62d64
JK
8389 annotate_exited (exitstatus);
8390 if (exitstatus)
8391 {
112e8700
SM
8392 if (uiout->is_mi_like_p ())
8393 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
8394 std::string exit_code_str
8395 = string_printf ("0%o", (unsigned int) exitstatus);
8396 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
8397 plongest (inf->num), pidstr.c_str (),
8398 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
8399 }
8400 else
11cf8741 8401 {
112e8700
SM
8402 if (uiout->is_mi_like_p ())
8403 uiout->field_string
8404 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
8405 uiout->message ("[Inferior %s (%s) exited normally]\n",
8406 plongest (inf->num), pidstr.c_str ());
33d62d64 8407 }
33d62d64
JK
8408}
8409
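/* Editor's note: with the code above, a non-zero exit status is
   formatted with "0%o", i.e. in octal, so a program calling exit (3)
   is reported along the lines of

       [Inferior 1 (process 4242) exited with code 03]

   while a zero status takes the "exited normally" branch.  The inferior
   number and pid shown are made up.  */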
fd664c91
PA
8410void
8411print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 8412{
f303dbd6
PA
8413 struct thread_info *thr = inferior_thread ();
8414
33d62d64
JK
8415 annotate_signal ();
8416
112e8700 8417 if (uiout->is_mi_like_p ())
f303dbd6
PA
8418 ;
8419 else if (show_thread_that_caused_stop ())
33d62d64 8420 {
f303dbd6 8421 const char *name;
33d62d64 8422
112e8700 8423 uiout->text ("\nThread ");
33eca680 8424 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
8425
8426 name = thr->name != NULL ? thr->name : target_thread_name (thr);
8427 if (name != NULL)
8428 {
112e8700 8429 uiout->text (" \"");
33eca680 8430 uiout->field_string ("name", name);
112e8700 8431 uiout->text ("\"");
f303dbd6 8432 }
33d62d64 8433 }
f303dbd6 8434 else
112e8700 8435 uiout->text ("\nProgram");
f303dbd6 8436
112e8700
SM
8437 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8438 uiout->text (" stopped");
33d62d64
JK
8439 else
8440 {
112e8700 8441 uiout->text (" received signal ");
8b93c638 8442 annotate_signal_name ();
112e8700
SM
8443 if (uiout->is_mi_like_p ())
8444 uiout->field_string
8445 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8446 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8447 annotate_signal_name_end ();
112e8700 8448 uiout->text (", ");
8b93c638 8449 annotate_signal_string ();
112e8700 8450 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21 8451
272bb05c
JB
8452 struct regcache *regcache = get_current_regcache ();
8453 struct gdbarch *gdbarch = regcache->arch ();
8454 if (gdbarch_report_signal_info_p (gdbarch))
8455 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
8456
8b93c638 8457 annotate_signal_string_end ();
33d62d64 8458 }
112e8700 8459 uiout->text (".\n");
33d62d64 8460}
252fbfc8 8461
fd664c91
PA
8462void
8463print_no_history_reason (struct ui_out *uiout)
33d62d64 8464{
112e8700 8465 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8466}
43ff13b4 8467
0c7e1a46
PA
8468/* Print current location without a level number, if we have changed
8469 functions or hit a breakpoint. Print source line if we have one.
8470 bpstat_print contains the logic deciding in detail what to print,
8471 based on the event(s) that just occurred. */
8472
243a9253
PA
8473static void
8474print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8475{
8476 int bpstat_ret;
f486487f 8477 enum print_what source_flag;
0c7e1a46
PA
8478 int do_frame_printing = 1;
8479 struct thread_info *tp = inferior_thread ();
8480
8481 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8482 switch (bpstat_ret)
8483 {
8484 case PRINT_UNKNOWN:
8485 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8486 should) carry around the function and does (or should) use
8487 that when doing a frame comparison. */
8488 if (tp->control.stop_step
8489 && frame_id_eq (tp->control.step_frame_id,
8490 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8491 && (tp->control.step_start_function
8492 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8493 {
8494 /* Finished step, just print source line. */
8495 source_flag = SRC_LINE;
8496 }
8497 else
8498 {
8499 /* Print location and source line. */
8500 source_flag = SRC_AND_LOC;
8501 }
8502 break;
8503 case PRINT_SRC_AND_LOC:
8504 /* Print location and source line. */
8505 source_flag = SRC_AND_LOC;
8506 break;
8507 case PRINT_SRC_ONLY:
8508 source_flag = SRC_LINE;
8509 break;
8510 case PRINT_NOTHING:
8511 /* Something bogus. */
8512 source_flag = SRC_LINE;
8513 do_frame_printing = 0;
8514 break;
8515 default:
8516 internal_error (__FILE__, __LINE__, _("Unknown value."));
8517 }
8518
8519 /* The behavior of this routine with respect to the source
8520 flag is:
8521 SRC_LINE: Print only source line
8522 LOCATION: Print only location
8523 SRC_AND_LOC: Print location and source line. */
8524 if (do_frame_printing)
8525 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8526}
8527
243a9253
PA
8528/* See infrun.h. */
8529
8530void
4c7d57e7 8531print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8532{
243a9253 8533 struct target_waitstatus last;
243a9253
PA
8534 struct thread_info *tp;
8535
5b6d1e4f 8536 get_last_target_status (nullptr, nullptr, &last);
243a9253 8537
67ad9399
TT
8538 {
8539 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8540
67ad9399 8541 print_stop_location (&last);
243a9253 8542
67ad9399 8543 /* Display the auto-display expressions. */
4c7d57e7
TT
8544 if (displays)
8545 do_displays ();
67ad9399 8546 }
243a9253
PA
8547
8548 tp = inferior_thread ();
8549 if (tp->thread_fsm != NULL
46e3ed7f 8550 && tp->thread_fsm->finished_p ())
243a9253
PA
8551 {
8552 struct return_value_info *rv;
8553
46e3ed7f 8554 rv = tp->thread_fsm->return_value ();
243a9253
PA
8555 if (rv != NULL)
8556 print_return_value (uiout, rv);
8557 }
0c7e1a46
PA
8558}
8559
388a7084
PA
8560/* See infrun.h. */
8561
8562void
8563maybe_remove_breakpoints (void)
8564{
55f6301a 8565 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
388a7084
PA
8566 {
8567 if (remove_breakpoints ())
8568 {
223ffa71 8569 target_terminal::ours_for_output ();
388a7084
PA
8570 printf_filtered (_("Cannot remove breakpoints because "
8571 "program is no longer writable.\nFurther "
8572 "execution is probably impossible.\n"));
8573 }
8574 }
8575}
8576
4c2f2a79
PA
8577/* The execution context that just caused a normal stop. */
8578
8579struct stop_context
8580{
2d844eaf 8581 stop_context ();
2d844eaf
TT
8582
8583 DISABLE_COPY_AND_ASSIGN (stop_context);
8584
8585 bool changed () const;
8586
4c2f2a79
PA
8587 /* The stop ID. */
8588 ULONGEST stop_id;
c906108c 8589
4c2f2a79 8590 /* The event PTID. */
c906108c 8591
4c2f2a79
PA
8592 ptid_t ptid;
8593
8594 /* If stopped for a thread event, this is the thread that caused the
8595 stop. */
d634cd0b 8596 thread_info_ref thread;
4c2f2a79
PA
8597
8598 /* The inferior that caused the stop. */
8599 int inf_num;
8600};
8601
2d844eaf 8602/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8603 takes a strong reference to the thread. */
8604
2d844eaf 8605stop_context::stop_context ()
4c2f2a79 8606{
2d844eaf
TT
8607 stop_id = get_stop_id ();
8608 ptid = inferior_ptid;
8609 inf_num = current_inferior ()->num;
4c2f2a79 8610
d7e15655 8611 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8612 {
8613 /* Take a strong reference so that the thread can't be deleted
8614 yet. */
d634cd0b 8615 thread = thread_info_ref::new_reference (inferior_thread ());
4c2f2a79 8616 }
4c2f2a79
PA
8617}
8618
8619/* Return true if the current context no longer matches the saved stop
8620 context. */
8621
2d844eaf
TT
8622bool
8623stop_context::changed () const
8624{
8625 if (ptid != inferior_ptid)
8626 return true;
8627 if (inf_num != current_inferior ()->num)
8628 return true;
8629 if (thread != NULL && thread->state != THREAD_STOPPED)
8630 return true;
8631 if (get_stop_id () != stop_id)
8632 return true;
8633 return false;
4c2f2a79
PA
8634}
8635
8636/* See infrun.h. */
8637
8638int
96baa820 8639normal_stop (void)
c906108c 8640{
73b65bb0 8641 struct target_waitstatus last;
73b65bb0 8642
5b6d1e4f 8643 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8644
4c2f2a79
PA
8645 new_stop_id ();
8646
29f49a6a
PA
8647 /* If an exception is thrown from this point on, make sure to
8648 propagate GDB's knowledge of the executing state to the
8649 frontend/user running state. A QUIT is an easy exception to see
8650 here, so do this before any filtered output. */
731f534f 8651
5b6d1e4f 8652 ptid_t finish_ptid = null_ptid;
731f534f 8653
c35b1492 8654 if (!non_stop)
5b6d1e4f 8655 finish_ptid = minus_one_ptid;
e1316e60
PA
8656 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8657 || last.kind == TARGET_WAITKIND_EXITED)
8658 {
8659 /* On some targets, we may still have live threads in the
8660 inferior when we get a process exit event. E.g., for
8661 "checkpoint", when the current checkpoint/fork exits,
8662 linux-fork.c automatically switches to another fork from
8663 within target_mourn_inferior. */
731f534f 8664 if (inferior_ptid != null_ptid)
5b6d1e4f 8665 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8666 }
8667 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8668 finish_ptid = inferior_ptid;
8669
8670 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8671 if (finish_ptid != null_ptid)
8672 {
8673 maybe_finish_thread_state.emplace
8674 (user_visible_resume_target (finish_ptid), finish_ptid);
8675 }
29f49a6a 8676
b57bacec
PA
8677 /* As we're presenting a stop, and potentially removing breakpoints,
8678 update the thread list so we can tell whether there are threads
8679 running on the target. With target remote, for example, we can
8680 only learn about new threads when we explicitly update the thread
8681 list. Do this before notifying the interpreters about signal
8682 stops, end of stepping ranges, etc., so that the "new thread"
8683 output is emitted before e.g., "Program received signal FOO",
8684 instead of after. */
8685 update_thread_list ();
8686
8687 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8688 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8689
c906108c
SS
8690 /* As with the notification of thread events, we want to delay
8691 notifying the user that we've switched thread context until
8692 the inferior actually stops.
8693
73b65bb0
DJ
8694 There's no point in saying anything if the inferior has exited.
8695 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8696 "received a signal".
8697
8698 Also skip saying anything in non-stop mode. In that mode, as we
8699 don't want GDB to switch threads behind the user's back, to avoid
8700 races where the user is typing a command to apply to thread x,
8701 but GDB switches to thread y before the user finishes entering
8702 the command, fetch_inferior_event installs a cleanup to restore
8703 the current thread back to the thread the user had selected right
8704 after this event is handled, so we're not really switching, only
8705 informing of a stop. */
4f8d22e3 8706 if (!non_stop
731f534f 8707 && previous_inferior_ptid != inferior_ptid
55f6301a 8708 && target_has_execution ()
73b65bb0 8709 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8710 && last.kind != TARGET_WAITKIND_EXITED
8711 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8712 {
0e454242 8713 SWITCH_THRU_ALL_UIS ()
3b12939d 8714 {
223ffa71 8715 target_terminal::ours_for_output ();
3b12939d 8716 printf_filtered (_("[Switching to %s]\n"),
a068643d 8717 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8718 annotate_thread_changed ();
8719 }
39f77062 8720 previous_inferior_ptid = inferior_ptid;
c906108c 8721 }
c906108c 8722
0e5bf2a8
PA
8723 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8724 {
0e454242 8725 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8726 if (current_ui->prompt_state == PROMPT_BLOCKED)
8727 {
223ffa71 8728 target_terminal::ours_for_output ();
3b12939d
PA
8729 printf_filtered (_("No unwaited-for children left.\n"));
8730 }
0e5bf2a8
PA
8731 }
8732
b57bacec 8733 /* Note: this depends on the update_thread_list call above. */
388a7084 8734 maybe_remove_breakpoints ();
c906108c 8735
c906108c
SS
8736 /* If an auto-display called a function and that got a signal,
8737 delete that auto-display to avoid an infinite recursion. */
8738
8739 if (stopped_by_random_signal)
8740 disable_current_display ();
8741
0e454242 8742 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8743 {
8744 async_enable_stdin ();
8745 }
c906108c 8746
388a7084 8747 /* Let the user/frontend see the threads as stopped. */
731f534f 8748 maybe_finish_thread_state.reset ();
388a7084
PA
8749
8750 /* Select innermost stack frame - i.e., current frame is frame 0,
8751 and current location is based on that. Handle the case where the
8752 dummy call is returning after being stopped. E.g. the dummy call
8753 previously hit a breakpoint. (If the dummy call returns
8754 normally, we won't reach here.) Do this before the stop hook is
8755 run, so that it doesn't get to see the temporary dummy frame,
8756 which is not where we'll present the stop. */
8757 if (has_stack_frames ())
8758 {
8759 if (stop_stack_dummy == STOP_STACK_DUMMY)
8760 {
8761 /* Pop the empty frame that contains the stack dummy. This
8762 also restores inferior state prior to the call (struct
8763 infcall_suspend_state). */
8764 struct frame_info *frame = get_current_frame ();
8765
8766 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8767 frame_pop (frame);
8768 /* frame_pop calls reinit_frame_cache as the last thing it
8769 does which means there's now no selected frame. */
8770 }
8771
8772 select_frame (get_current_frame ());
8773
8774 /* Set the current source location. */
8775 set_current_sal_from_frame (get_current_frame ());
8776 }
dd7e2d2b
PA
8777
8778 /* Look up the hook_stop and run it (CLI internally handles problem
8779 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8780 if (stop_command != NULL)
8781 {
2d844eaf 8782 stop_context saved_context;
4c2f2a79 8783
a70b8144 8784 try
bf469271
PA
8785 {
8786 execute_cmd_pre_hook (stop_command);
8787 }
230d2906 8788 catch (const gdb_exception &ex)
bf469271
PA
8789 {
8790 exception_fprintf (gdb_stderr, ex,
8791 "Error while running hook_stop:\n");
8792 }
4c2f2a79
PA
8793
8794 /* If the stop hook resumes the target, then there's no point in
8795 trying to notify about the previous stop; its context is
8796 gone. Likewise if the command switches thread or inferior --
8797 the observers would print a stop for the wrong
8798 thread/inferior. */
2d844eaf
TT
8799 if (saved_context.changed ())
8800 return 1;
4c2f2a79 8801 }
dd7e2d2b 8802
388a7084
PA
8803 /* Notify observers about the stop. This is where the interpreters
8804 print the stop event. */
d7e15655 8805 if (inferior_ptid != null_ptid)
76727919 8806 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
24a7f1b5 8807 stop_print_frame);
388a7084 8808 else
76727919 8809 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8810
243a9253
PA
8811 annotate_stopped ();
8812
55f6301a 8813 if (target_has_execution ())
48844aa6
PA
8814 {
8815 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8816 && last.kind != TARGET_WAITKIND_EXITED
8817 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8818 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8819 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8820 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8821 }
6c95b8df
PA
8822
8823 /* Try to get rid of automatically added inferiors that are no
8824 longer needed. Keeping those around slows down things linearly.
8825 Note that this never removes the current inferior. */
8826 prune_inferiors ();
4c2f2a79
PA
8827
8828 return 0;
c906108c 8829}
c906108c 8830\f
c5aa993b 8831int
96baa820 8832signal_stop_state (int signo)
c906108c 8833{
d6b48e9c 8834 return signal_stop[signo];
c906108c
SS
8835}
8836
c5aa993b 8837int
96baa820 8838signal_print_state (int signo)
c906108c
SS
8839{
8840 return signal_print[signo];
8841}
8842
c5aa993b 8843int
96baa820 8844signal_pass_state (int signo)
c906108c
SS
8845{
8846 return signal_program[signo];
8847}
8848
2455069d
UW
8849static void
8850signal_cache_update (int signo)
8851{
8852 if (signo == -1)
8853 {
a493e3e2 8854 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8855 signal_cache_update (signo);
8856
8857 return;
8858 }
8859
8860 signal_pass[signo] = (signal_stop[signo] == 0
8861 && signal_print[signo] == 0
ab04a2af
TT
8862 && signal_program[signo] == 1
8863 && signal_catch[signo] == 0);
2455069d
UW
8864}
8865
488f131b 8866int
7bda5e4a 8867signal_stop_update (int signo, int state)
d4f3574e
SS
8868{
8869 int ret = signal_stop[signo];
abbb1732 8870
d4f3574e 8871 signal_stop[signo] = state;
2455069d 8872 signal_cache_update (signo);
d4f3574e
SS
8873 return ret;
8874}
8875
488f131b 8876int
7bda5e4a 8877signal_print_update (int signo, int state)
d4f3574e
SS
8878{
8879 int ret = signal_print[signo];
abbb1732 8880
d4f3574e 8881 signal_print[signo] = state;
2455069d 8882 signal_cache_update (signo);
d4f3574e
SS
8883 return ret;
8884}
8885
488f131b 8886int
7bda5e4a 8887signal_pass_update (int signo, int state)
d4f3574e
SS
8888{
8889 int ret = signal_program[signo];
abbb1732 8890
d4f3574e 8891 signal_program[signo] = state;
2455069d 8892 signal_cache_update (signo);
d4f3574e
SS
8893 return ret;
8894}
8895
ab04a2af
TT
8896/* Update the global 'signal_catch' from INFO and notify the
8897 target. */
8898
8899void
8900signal_catch_update (const unsigned int *info)
8901{
8902 int i;
8903
8904 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8905 signal_catch[i] = info[i] > 0;
8906 signal_cache_update (-1);
adc6a863 8907 target_pass_signals (signal_pass);
ab04a2af
TT
8908}
8909
c906108c 8910static void
96baa820 8911sig_print_header (void)
c906108c 8912{
3e43a32a
MS
8913 printf_filtered (_("Signal Stop\tPrint\tPass "
8914 "to program\tDescription\n"));
c906108c
SS
8915}
8916
8917static void
2ea28649 8918sig_print_info (enum gdb_signal oursig)
c906108c 8919{
2ea28649 8920 const char *name = gdb_signal_to_name (oursig);
c906108c 8921 int name_padding = 13 - strlen (name);
96baa820 8922
c906108c
SS
8923 if (name_padding <= 0)
8924 name_padding = 0;
8925
8926 printf_filtered ("%s", name);
488f131b 8927 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8928 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8929 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8930 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8931 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8932}
8933
8934/* Specify how various signals in the inferior should be handled. */
8935
8936static void
0b39b52e 8937handle_command (const char *args, int from_tty)
c906108c 8938{
c906108c 8939 int digits, wordlen;
b926417a 8940 int sigfirst, siglast;
2ea28649 8941 enum gdb_signal oursig;
c906108c 8942 int allsigs;
c906108c
SS
8943
8944 if (args == NULL)
8945 {
e2e0b3e5 8946 error_no_arg (_("signal to handle"));
c906108c
SS
8947 }
8948
1777feb0 8949 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8950
adc6a863
PA
8951 const size_t nsigs = GDB_SIGNAL_LAST;
8952 unsigned char sigs[nsigs] {};
c906108c 8953
1777feb0 8954 /* Break the command line up into args. */
c906108c 8955
773a1edc 8956 gdb_argv built_argv (args);
c906108c
SS
8957
8958 /* Walk through the args, looking for signal oursigs, signal names, and
8959 actions. Signal numbers and signal names may be interspersed with
8960 actions, with the actions being performed for all signals cumulatively
1777feb0 8961 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8962
773a1edc 8963 for (char *arg : built_argv)
c906108c 8964 {
773a1edc
TT
8965 wordlen = strlen (arg);
8966 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8967 {;
8968 }
8969 allsigs = 0;
8970 sigfirst = siglast = -1;
8971
773a1edc 8972 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8973 {
8974 /* Apply action to all signals except those used by the
1777feb0 8975 debugger. Silently skip those. */
c906108c
SS
8976 allsigs = 1;
8977 sigfirst = 0;
8978 siglast = nsigs - 1;
8979 }
773a1edc 8980 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8981 {
8982 SET_SIGS (nsigs, sigs, signal_stop);
8983 SET_SIGS (nsigs, sigs, signal_print);
8984 }
773a1edc 8985 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8986 {
8987 UNSET_SIGS (nsigs, sigs, signal_program);
8988 }
773a1edc 8989 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8990 {
8991 SET_SIGS (nsigs, sigs, signal_print);
8992 }
773a1edc 8993 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8994 {
8995 SET_SIGS (nsigs, sigs, signal_program);
8996 }
773a1edc 8997 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8998 {
8999 UNSET_SIGS (nsigs, sigs, signal_stop);
9000 }
773a1edc 9001 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
9002 {
9003 SET_SIGS (nsigs, sigs, signal_program);
9004 }
773a1edc 9005 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
9006 {
9007 UNSET_SIGS (nsigs, sigs, signal_print);
9008 UNSET_SIGS (nsigs, sigs, signal_stop);
9009 }
773a1edc 9010 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
9011 {
9012 UNSET_SIGS (nsigs, sigs, signal_program);
9013 }
9014 else if (digits > 0)
9015 {
9016 /* It is numeric. The numeric signal refers to our own
9017 internal signal numbering from target.h, not to host/target
9018 signal number. This is a feature; users really should be
9019 using symbolic names anyway, and the common ones like
9020 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9021
9022 sigfirst = siglast = (int)
773a1edc
TT
9023 gdb_signal_from_command (atoi (arg));
9024 if (arg[digits] == '-')
c906108c
SS
9025 {
9026 siglast = (int)
773a1edc 9027 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
9028 }
9029 if (sigfirst > siglast)
9030 {
1777feb0 9031 /* The range was given in descending order; swap the bounds. */
b926417a 9032 std::swap (sigfirst, siglast);
c906108c
SS
9033 }
9034 }
9035 else
9036 {
773a1edc 9037 oursig = gdb_signal_from_name (arg);
a493e3e2 9038 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
9039 {
9040 sigfirst = siglast = (int) oursig;
9041 }
9042 else
9043 {
9044 /* Not a number and not a recognized flag word => complain. */
773a1edc 9045 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
9046 }
9047 }
9048
9049 /* If any signal numbers or symbol names were found, set flags for
dda83cd7 9050 which signals to apply actions to. */
c906108c 9051
b926417a 9052 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 9053 {
2ea28649 9054 switch ((enum gdb_signal) signum)
c906108c 9055 {
a493e3e2
PA
9056 case GDB_SIGNAL_TRAP:
9057 case GDB_SIGNAL_INT:
c906108c
SS
9058 if (!allsigs && !sigs[signum])
9059 {
9e2f0ad4 9060 if (query (_("%s is used by the debugger.\n\
3e43a32a 9061Are you sure you want to change it? "),
2ea28649 9062 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
9063 {
9064 sigs[signum] = 1;
9065 }
9066 else
c119e040 9067 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
9068 }
9069 break;
a493e3e2
PA
9070 case GDB_SIGNAL_0:
9071 case GDB_SIGNAL_DEFAULT:
9072 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
9073 /* Make sure that "all" doesn't print these. */
9074 break;
9075 default:
9076 sigs[signum] = 1;
9077 break;
9078 }
9079 }
c906108c
SS
9080 }
9081
b926417a 9082 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
9083 if (sigs[signum])
9084 {
2455069d 9085 signal_cache_update (-1);
adc6a863
PA
9086 target_pass_signals (signal_pass);
9087 target_program_signals (signal_program);
c906108c 9088
3a031f65
PA
9089 if (from_tty)
9090 {
9091 /* Show the results. */
9092 sig_print_header ();
9093 for (; signum < nsigs; signum++)
9094 if (sigs[signum])
aead7601 9095 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
9096 }
9097
9098 break;
9099 }
c906108c
SS
9100}
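/* [Editorial note -- illustrative examples, not part of the original
   source.  Invocations accepted by the parser above, using the documented
   "handle" syntax:

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle 1-5 print pass
     (gdb) handle all nostop

   Numeric arguments use GDB's internal signal ordering and are limited to
   1-15 (see gdb_signal_from_command below); ranges are written LOW-HIGH;
   "all" silently skips the signals used by the debugger (SIGTRAP, SIGINT),
   while naming those signals explicitly prompts for confirmation.] */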
9101
de0bea00
MF
9102/* Complete the "handle" command. */
9103
eb3ff9a5 9104static void
de0bea00 9105handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 9106 completion_tracker &tracker,
6f937416 9107 const char *text, const char *word)
de0bea00 9108{
de0bea00
MF
9109 static const char * const keywords[] =
9110 {
9111 "all",
9112 "stop",
9113 "ignore",
9114 "print",
9115 "pass",
9116 "nostop",
9117 "noignore",
9118 "noprint",
9119 "nopass",
9120 NULL,
9121 };
9122
eb3ff9a5
PA
9123 signal_completer (ignore, tracker, text, word);
9124 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
9125}
9126
2ea28649
PA
9127enum gdb_signal
9128gdb_signal_from_command (int num)
ed01b82c
PA
9129{
9130 if (num >= 1 && num <= 15)
2ea28649 9131 return (enum gdb_signal) num;
ed01b82c
PA
9132 error (_("Only signals 1-15 are valid as numeric signals.\n\
9133Use \"info signals\" for a list of symbolic signals."));
9134}
9135
c906108c
SS
9136/* Print current contents of the tables set by the handle command.
9137 It is possible we should just be printing signals actually used
9138 by the current target (but for things to work right when switching
9139 targets, all signals should be in the signal tables). */
9140
9141static void
1d12d88f 9142info_signals_command (const char *signum_exp, int from_tty)
c906108c 9143{
2ea28649 9144 enum gdb_signal oursig;
abbb1732 9145
c906108c
SS
9146 sig_print_header ();
9147
9148 if (signum_exp)
9149 {
9150 /* First see if this is a symbol name. */
2ea28649 9151 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 9152 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
9153 {
9154 /* No, try numeric. */
9155 oursig =
2ea28649 9156 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
9157 }
9158 sig_print_info (oursig);
9159 return;
9160 }
9161
9162 printf_filtered ("\n");
9163 /* The casts are needed to step through the gdb_signal enum values. */
a493e3e2
PA
9164 for (oursig = GDB_SIGNAL_FIRST;
9165 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 9166 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
9167 {
9168 QUIT;
9169
a493e3e2
PA
9170 if (oursig != GDB_SIGNAL_UNKNOWN
9171 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
9172 sig_print_info (oursig);
9173 }
9174
3e43a32a
MS
9175 printf_filtered (_("\nUse the \"handle\" command "
9176 "to change these tables.\n"));
c906108c 9177}
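/* [Editorial note -- illustrative example, not part of the original source.
   The table printed by "info signals" (alias "info handle") follows the
   header emitted by sig_print_header; for a single-signal query the output
   is roughly:

     (gdb) info signals SIGALRM
     Signal        Stop      Print   Pass to program Description
     SIGALRM       No        No      Yes             Alarm clock

   The Yes/No columns come straight from the signal_stop, signal_print and
   signal_program tables; the exact column spacing here is approximate.] */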
4aa995e1
PA
9178
9179/* The $_siginfo convenience variable is a bit special. We don't know
9180 for sure the type of the value until we actually have a chance to
7a9dd1b2 9181 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
9182 also dependent on which thread you have selected. This is handled by:
9183
9184 1. making $_siginfo be an internalvar that creates a new value on
9185 access.
9186
9187 2. making the value of $_siginfo be an lval_computed value. */
9188
9189/* This function implements the lval_computed support for reading a
9190 $_siginfo value. */
9191
9192static void
9193siginfo_value_read (struct value *v)
9194{
9195 LONGEST transferred;
9196
a911d87a
PA
9197 /* If we can access registers, we can also access $_siginfo, and
9198 vice versa. */
9199 validate_registers_access ();
c709acd1 9200
4aa995e1 9201 transferred =
328d42d8
SM
9202 target_read (current_inferior ()->top_target (),
9203 TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
9204 NULL,
9205 value_contents_all_raw (v),
9206 value_offset (v),
9207 TYPE_LENGTH (value_type (v)));
9208
9209 if (transferred != TYPE_LENGTH (value_type (v)))
9210 error (_("Unable to read siginfo"));
9211}
9212
9213/* This function implements the lval_computed support for writing a
9214 $_siginfo value. */
9215
9216static void
9217siginfo_value_write (struct value *v, struct value *fromval)
9218{
9219 LONGEST transferred;
9220
a911d87a
PA
9221 /* If we can access registers, we can also access $_siginfo, and
9222 vice versa. */
9223 validate_registers_access ();
c709acd1 9224
328d42d8 9225 transferred = target_write (current_inferior ()->top_target (),
4aa995e1
PA
9226 TARGET_OBJECT_SIGNAL_INFO,
9227 NULL,
9228 value_contents_all_raw (fromval),
9229 value_offset (v),
9230 TYPE_LENGTH (value_type (fromval)));
9231
9232 if (transferred != TYPE_LENGTH (value_type (fromval)))
9233 error (_("Unable to write siginfo"));
9234}
9235
c8f2448a 9236static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
9237 {
9238 siginfo_value_read,
9239 siginfo_value_write
9240 };
9241
9242/* Return a new value with the correct type for the siginfo object of
78267919
UW
9243 the current thread using architecture GDBARCH. Return a void value
9244 if there's no object available. */
4aa995e1 9245
2c0b251b 9246static struct value *
22d2b532
SDJ
9247siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9248 void *ignore)
4aa995e1 9249{
841de120 9250 if (target_has_stack ()
d7e15655 9251 && inferior_ptid != null_ptid
78267919 9252 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 9253 {
78267919 9254 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 9255
78267919 9256 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
9257 }
9258
78267919 9259 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
9260}
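/* [Editorial note -- illustrative example, not part of the original source.
   Because $_siginfo is an lval_computed value backed by the read/write
   functions above, it can be inspected and, on targets that define a
   siginfo type, modified from the CLI:

     (gdb) print $_siginfo.si_signo
     (gdb) set variable $_siginfo.si_errno = 0

   Both directions go through TARGET_OBJECT_SIGNAL_INFO for the currently
   selected thread; the field names shown assume a Linux-style siginfo
   layout.] */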
9261
c906108c 9262\f
16c381f0
JK
9263/* infcall_suspend_state contains state about the program itself like its
9264 registers and any signal it received when it last stopped.
9265 This state must be restored regardless of how the inferior function call
9266 ends (either successfully, or after it hits a breakpoint or signal)
9267 if the program is to properly continue where it left off. */
9268
6bf78e29 9269class infcall_suspend_state
7a292a7a 9270{
6bf78e29
AB
9271public:
9272 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9273 once the inferior function call has finished. */
9274 infcall_suspend_state (struct gdbarch *gdbarch,
dda83cd7
SM
9275 const struct thread_info *tp,
9276 struct regcache *regcache)
6bf78e29
AB
9277 : m_thread_suspend (tp->suspend),
9278 m_registers (new readonly_detached_regcache (*regcache))
9279 {
9280 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
9281
9282 if (gdbarch_get_siginfo_type_p (gdbarch))
9283 {
dda83cd7
SM
9284 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9285 size_t len = TYPE_LENGTH (type);
6bf78e29 9286
dda83cd7 9287 siginfo_data.reset ((gdb_byte *) xmalloc (len));
6bf78e29 9288
328d42d8
SM
9289 if (target_read (current_inferior ()->top_target (),
9290 TARGET_OBJECT_SIGNAL_INFO, NULL,
dda83cd7
SM
9291 siginfo_data.get (), 0, len) != len)
9292 {
9293 /* Errors ignored. */
9294 siginfo_data.reset (nullptr);
9295 }
6bf78e29
AB
9296 }
9297
9298 if (siginfo_data)
9299 {
dda83cd7
SM
9300 m_siginfo_gdbarch = gdbarch;
9301 m_siginfo_data = std::move (siginfo_data);
6bf78e29
AB
9302 }
9303 }
9304
9305 /* Return a pointer to the stored register state. */
16c381f0 9306
6bf78e29
AB
9307 readonly_detached_regcache *registers () const
9308 {
9309 return m_registers.get ();
9310 }
9311
9312 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9313
9314 void restore (struct gdbarch *gdbarch,
dda83cd7
SM
9315 struct thread_info *tp,
9316 struct regcache *regcache) const
6bf78e29
AB
9317 {
9318 tp->suspend = m_thread_suspend;
9319
9320 if (m_siginfo_gdbarch == gdbarch)
9321 {
dda83cd7 9322 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6bf78e29 9323
dda83cd7 9324 /* Errors ignored. */
328d42d8
SM
9325 target_write (current_inferior ()->top_target (),
9326 TARGET_OBJECT_SIGNAL_INFO, NULL,
dda83cd7 9327 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
6bf78e29
AB
9328 }
9329
9330 /* The inferior can be gone if the user types "print exit(0)"
9331 (and perhaps other times). */
55f6301a 9332 if (target_has_execution ())
6bf78e29
AB
9333 /* NB: The register write goes through to the target. */
9334 regcache->restore (registers ());
9335 }
9336
9337private:
9338 /* How the current thread stopped before the inferior function call was
9339 executed. */
9340 struct thread_suspend_state m_thread_suspend;
9341
9342 /* The registers before the inferior function call was executed. */
9343 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 9344
35515841 9345 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 9346 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
9347
9348 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
9349 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
9350 content would be invalid. */
6bf78e29 9351 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
9352};
9353
cb524840
TT
9354infcall_suspend_state_up
9355save_infcall_suspend_state ()
b89667eb 9356{
b89667eb 9357 struct thread_info *tp = inferior_thread ();
1736ad11 9358 struct regcache *regcache = get_current_regcache ();
ac7936df 9359 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 9360
6bf78e29
AB
9361 infcall_suspend_state_up inf_state
9362 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 9363
6bf78e29
AB
9364 /* Having saved the current state, adjust the thread state, discarding
9365 any stop signal information. The stop signal is not useful when
9366 starting an inferior function call, and run_inferior_call will not use
9367 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 9368 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 9369
b89667eb
DE
9370 return inf_state;
9371}
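/* [Editorial note -- minimal usage sketch, not part of the original source.
   The entry points above and below pair up roughly as follows for a caller
   making an inferior function call:

     infcall_suspend_state_up saved = save_infcall_suspend_state ();
     ...set up and run the dummy-frame call...
     restore_infcall_suspend_state (saved.release ());   // put state back

   A caller that wants to keep the new program state instead calls
   discard_infcall_suspend_state (saved.release ()).  Both functions delete
   the object, hence the release () of the owning pointer; the real callers
   live in infcall.c.] */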
9372
9373/* Restore inferior session state to INF_STATE. */
9374
9375void
16c381f0 9376restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
9377{
9378 struct thread_info *tp = inferior_thread ();
1736ad11 9379 struct regcache *regcache = get_current_regcache ();
ac7936df 9380 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 9381
6bf78e29 9382 inf_state->restore (gdbarch, tp, regcache);
16c381f0 9383 discard_infcall_suspend_state (inf_state);
b89667eb
DE
9384}
9385
b89667eb 9386void
16c381f0 9387discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 9388{
dd848631 9389 delete inf_state;
b89667eb
DE
9390}
9391
daf6667d 9392readonly_detached_regcache *
16c381f0 9393get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 9394{
6bf78e29 9395 return inf_state->registers ();
b89667eb
DE
9396}
9397
16c381f0
JK
9398/* infcall_control_state contains state regarding gdb's control of the
9399 inferior itself like stepping control. It also contains session state like
9400 the user's currently selected frame. */
b89667eb 9401
16c381f0 9402struct infcall_control_state
b89667eb 9403{
16c381f0
JK
9404 struct thread_control_state thread_control;
9405 struct inferior_control_state inferior_control;
d82142e2
JK
9406
9407 /* Other fields: */
ee841dd8
TT
9408 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
9409 int stopped_by_random_signal = 0;
7a292a7a 9410
79952e69
PA
9411 /* ID and level of the selected frame when the inferior function
9412 call was made. */
ee841dd8 9413 struct frame_id selected_frame_id {};
79952e69 9414 int selected_frame_level = -1;
7a292a7a
SS
9415};
9416
c906108c 9417/* Save all of the information associated with the inferior<==>gdb
b89667eb 9418 connection. */
c906108c 9419
cb524840
TT
9420infcall_control_state_up
9421save_infcall_control_state ()
c906108c 9422{
cb524840 9423 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 9424 struct thread_info *tp = inferior_thread ();
d6b48e9c 9425 struct inferior *inf = current_inferior ();
7a292a7a 9426
16c381f0
JK
9427 inf_status->thread_control = tp->control;
9428 inf_status->inferior_control = inf->control;
d82142e2 9429
8358c15c 9430 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9431 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9432
16c381f0
JK
9433 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9434 chain. If caller's caller is walking the chain, they'll be happier if we
9435 hand them back the original chain when restore_infcall_control_state is
9436 called. */
9437 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
9438
9439 /* Other fields: */
9440 inf_status->stop_stack_dummy = stop_stack_dummy;
9441 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9442
79952e69
PA
9443 save_selected_frame (&inf_status->selected_frame_id,
9444 &inf_status->selected_frame_level);
b89667eb 9445
7a292a7a 9446 return inf_status;
c906108c
SS
9447}
9448
b89667eb
DE
9449/* Restore inferior session state to INF_STATUS. */
9450
c906108c 9451void
16c381f0 9452restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9453{
4e1c45ea 9454 struct thread_info *tp = inferior_thread ();
d6b48e9c 9455 struct inferior *inf = current_inferior ();
4e1c45ea 9456
8358c15c
JK
9457 if (tp->control.step_resume_breakpoint)
9458 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9459
5b79abe7
TT
9460 if (tp->control.exception_resume_breakpoint)
9461 tp->control.exception_resume_breakpoint->disposition
9462 = disp_del_at_next_stop;
9463
d82142e2 9464 /* Handle the bpstat_copy of the chain. */
16c381f0 9465 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9466
16c381f0
JK
9467 tp->control = inf_status->thread_control;
9468 inf->control = inf_status->inferior_control;
d82142e2
JK
9469
9470 /* Other fields: */
9471 stop_stack_dummy = inf_status->stop_stack_dummy;
9472 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9473
841de120 9474 if (target_has_stack ())
c906108c 9475 {
79952e69
PA
9476 restore_selected_frame (inf_status->selected_frame_id,
9477 inf_status->selected_frame_level);
c906108c 9478 }
c906108c 9479
ee841dd8 9480 delete inf_status;
7a292a7a 9481}
c906108c
SS
9482
9483void
16c381f0 9484discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9485{
8358c15c
JK
9486 if (inf_status->thread_control.step_resume_breakpoint)
9487 inf_status->thread_control.step_resume_breakpoint->disposition
9488 = disp_del_at_next_stop;
9489
5b79abe7
TT
9490 if (inf_status->thread_control.exception_resume_breakpoint)
9491 inf_status->thread_control.exception_resume_breakpoint->disposition
9492 = disp_del_at_next_stop;
9493
1777feb0 9494 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9495 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9496
ee841dd8 9497 delete inf_status;
7a292a7a 9498}
b89667eb 9499\f
7f89fd65 9500/* See infrun.h. */
0c557179
SDJ
9501
9502void
9503clear_exit_convenience_vars (void)
9504{
9505 clear_internalvar (lookup_internalvar ("_exitsignal"));
9506 clear_internalvar (lookup_internalvar ("_exitcode"));
9507}
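/* [Editorial note -- illustrative example, not part of the original source.
   $_exitcode and $_exitsignal are the convenience variables cleared above.
   After the inferior ends, only the one that applies is set:

     (gdb) print $_exitcode     <- set when the process exits normally
     (gdb) print $_exitsignal   <- set when the process is killed by a signal

   The other variable stays void; both are reset together here so a stale
   value from a previous run is never reported.] */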
c5aa993b 9508\f
488f131b 9509
b2175913
MS
9510/* User interface for reverse debugging:
9511 Set exec-direction / show exec-direction commands
9512 (returns error unless target implements to_set_exec_direction method). */
9513
170742de 9514enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
9515static const char exec_forward[] = "forward";
9516static const char exec_reverse[] = "reverse";
9517static const char *exec_direction = exec_forward;
40478521 9518static const char *const exec_direction_names[] = {
b2175913
MS
9519 exec_forward,
9520 exec_reverse,
9521 NULL
9522};
9523
9524static void
eb4c3f4a 9525set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
9526 struct cmd_list_element *cmd)
9527{
05374cfd 9528 if (target_can_execute_reverse ())
b2175913
MS
9529 {
9530 if (!strcmp (exec_direction, exec_forward))
9531 execution_direction = EXEC_FORWARD;
9532 else if (!strcmp (exec_direction, exec_reverse))
9533 execution_direction = EXEC_REVERSE;
9534 }
8bbed405
MS
9535 else
9536 {
9537 exec_direction = exec_forward;
9538 error (_("Target does not support this operation."));
9539 }
b2175913
MS
9540}
9541
9542static void
9543show_exec_direction_func (struct ui_file *out, int from_tty,
9544 struct cmd_list_element *cmd, const char *value)
9545{
9546 switch (execution_direction) {
9547 case EXEC_FORWARD:
9548 fprintf_filtered (out, _("Forward.\n"));
9549 break;
9550 case EXEC_REVERSE:
9551 fprintf_filtered (out, _("Reverse.\n"));
9552 break;
b2175913 9553 default:
d8b34453
PA
9554 internal_error (__FILE__, __LINE__,
9555 _("bogus execution_direction value: %d"),
9556 (int) execution_direction);
b2175913
MS
9557 }
9558}
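/* [Editorial note -- illustrative example, not part of the original source.
   The setter/show pair above is reached through the "set exec-direction"
   command registered in _initialize_infrun:

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.

   On targets without reverse-execution support the setter resets the
   string to "forward" and errors out with "Target does not support this
   operation."] */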
9559
d4db2f36
PA
9560static void
9561show_schedule_multiple (struct ui_file *file, int from_tty,
9562 struct cmd_list_element *c, const char *value)
9563{
3e43a32a
MS
9564 fprintf_filtered (file, _("Resuming the execution of threads "
9565 "of all processes is %s.\n"), value);
d4db2f36 9566}
ad52ddc6 9567
22d2b532
SDJ
9568/* Implementation of `siginfo' variable. */
9569
9570static const struct internalvar_funcs siginfo_funcs =
9571{
9572 siginfo_make_value,
9573 NULL,
9574 NULL
9575};
9576
372316f1
PA
9577/* Callback for infrun's target events source. This is marked when a
9578 thread has a pending status to process. */
9579
9580static void
9581infrun_async_inferior_event_handler (gdb_client_data data)
9582{
6b36ddeb 9583 clear_async_event_handler (infrun_async_inferior_event_token);
b1a35af2 9584 inferior_event_handler (INF_REG_EVENT);
372316f1
PA
9585}
9586
8087c3fa 9587#if GDB_SELF_TEST
b161a60d
SM
9588namespace selftests
9589{
9590
9591/* Verify that when two threads with the same ptid exist (from two different
9592 targets) and one of them changes ptid, we only update inferior_ptid if
9593 it is appropriate. */
9594
9595static void
9596infrun_thread_ptid_changed ()
9597{
9598 gdbarch *arch = current_inferior ()->gdbarch;
9599
9600 /* The thread which inferior_ptid represents changes ptid. */
9601 {
9602 scoped_restore_current_pspace_and_thread restore;
9603
9604 scoped_mock_context<test_target_ops> target1 (arch);
9605 scoped_mock_context<test_target_ops> target2 (arch);
9606 target2.mock_inferior.next = &target1.mock_inferior;
9607
9608 ptid_t old_ptid (111, 222);
9609 ptid_t new_ptid (111, 333);
9610
9611 target1.mock_inferior.pid = old_ptid.pid ();
9612 target1.mock_thread.ptid = old_ptid;
9613 target2.mock_inferior.pid = old_ptid.pid ();
9614 target2.mock_thread.ptid = old_ptid;
9615
9616 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9617 set_current_inferior (&target1.mock_inferior);
9618
9619 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9620
9621 gdb_assert (inferior_ptid == new_ptid);
9622 }
9623
9624 /* A thread with the same ptid as inferior_ptid, but from another target,
9625 changes ptid. */
9626 {
9627 scoped_restore_current_pspace_and_thread restore;
9628
9629 scoped_mock_context<test_target_ops> target1 (arch);
9630 scoped_mock_context<test_target_ops> target2 (arch);
9631 target2.mock_inferior.next = &target1.mock_inferior;
9632
9633 ptid_t old_ptid (111, 222);
9634 ptid_t new_ptid (111, 333);
9635
9636 target1.mock_inferior.pid = old_ptid.pid ();
9637 target1.mock_thread.ptid = old_ptid;
9638 target2.mock_inferior.pid = old_ptid.pid ();
9639 target2.mock_thread.ptid = old_ptid;
9640
9641 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9642 set_current_inferior (&target2.mock_inferior);
9643
9644 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9645
9646 gdb_assert (inferior_ptid == old_ptid);
9647 }
9648}
9649
9650} /* namespace selftests */
9651
8087c3fa
JB
9652#endif /* GDB_SELF_TEST */
9653
6c265988 9654void _initialize_infrun ();
c906108c 9655void
6c265988 9656_initialize_infrun ()
c906108c 9657{
de0bea00 9658 struct cmd_list_element *c;
c906108c 9659
372316f1
PA
9660 /* Register extra event sources in the event loop. */
9661 infrun_async_inferior_event_token
db20ebdf
SM
9662 = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
9663 "infrun");
372316f1 9664
e0f25bd9
SM
9665 cmd_list_element *info_signals_cmd
9666 = add_info ("signals", info_signals_command, _("\
1bedd215
AC
9667What debugger does when program gets various signals.\n\
9668Specify a signal as argument to print info on that signal only."));
e0f25bd9 9669 add_info_alias ("handle", info_signals_cmd, 0);
c906108c 9670
de0bea00 9671 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9672Specify how to handle signals.\n\
486c7739 9673Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9674Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9675If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
9676will be displayed instead.\n\
9677\n\
c906108c
SS
9678Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9679from 1-15 are allowed for compatibility with old versions of GDB.\n\
9680Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9681The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9682used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9683\n\
1bedd215 9684Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
9685\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9686Stop means reenter debugger if this signal happens (implies print).\n\
9687Print means print a message if this signal happens.\n\
9688Pass means let program see this signal; otherwise program doesn't know.\n\
9689Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
9690Pass and Stop may be combined.\n\
9691\n\
9692Multiple signals may be specified. Signal numbers and signal names\n\
9693may be interspersed with actions, with the actions being performed for\n\
9694all signals cumulatively specified."));
de0bea00 9695 set_cmd_completer (c, handle_completer);
486c7739 9696
c906108c 9697 if (!dbx_commands)
1a966eab
AC
9698 stop_command = add_cmd ("stop", class_obscure,
9699 not_just_help_class_command, _("\
9700There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9701This allows you to set a list of commands to be run each time execution\n\
1a966eab 9702of the program stops."), &cmdlist);
c906108c 9703
94ba44a6
SM
9704 add_setshow_boolean_cmd
9705 ("infrun", class_maintenance, &debug_infrun,
9706 _("Set inferior debugging."),
9707 _("Show inferior debugging."),
9708 _("When non-zero, inferior specific debugging is enabled."),
9709 NULL, show_debug_infrun, &setdebuglist, &showdebuglist);
527159b7 9710
ad52ddc6
PA
9711 add_setshow_boolean_cmd ("non-stop", no_class,
9712 &non_stop_1, _("\
9713Set whether gdb controls the inferior in non-stop mode."), _("\
9714Show whether gdb controls the inferior in non-stop mode."), _("\
9715When debugging a multi-threaded program and this setting is\n\
9716off (the default, also called all-stop mode), when one thread stops\n\
9717(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9718all other threads in the program while you interact with the thread of\n\
9719interest. When you continue or step a thread, you can allow the other\n\
9720threads to run, or have them remain stopped, but while you inspect any\n\
9721thread's state, all threads stop.\n\
9722\n\
9723In non-stop mode, when one thread stops, other threads can continue\n\
9724to run freely. You'll be able to step each thread independently,\n\
9725leave it stopped or free to run as needed."),
9726 set_non_stop,
9727 show_non_stop,
9728 &setlist,
9729 &showlist);
9730
adc6a863 9731 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
9732 {
9733 signal_stop[i] = 1;
9734 signal_print[i] = 1;
9735 signal_program[i] = 1;
ab04a2af 9736 signal_catch[i] = 0;
c906108c
SS
9737 }
9738
4d9d9d04
PA
9739 /* Signals caused by debugger's own actions should not be given to
9740 the program afterwards.
9741
9742 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9743 explicitly specifies that it should be delivered to the target
9744 program. Typically, that would occur when a user is debugging a
9745 target monitor on a simulator: the target monitor sets a
9746 breakpoint; the simulator encounters this breakpoint and halts
9747 the simulation handing control to GDB; GDB, noting that the stop
9748 address doesn't map to any known breakpoint, returns control back
9749 to the simulator; the simulator then delivers the hardware
9750 equivalent of a GDB_SIGNAL_TRAP to the program being
9751 debugged. */
a493e3e2
PA
9752 signal_program[GDB_SIGNAL_TRAP] = 0;
9753 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9754
9755 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9756 signal_stop[GDB_SIGNAL_ALRM] = 0;
9757 signal_print[GDB_SIGNAL_ALRM] = 0;
9758 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9759 signal_print[GDB_SIGNAL_VTALRM] = 0;
9760 signal_stop[GDB_SIGNAL_PROF] = 0;
9761 signal_print[GDB_SIGNAL_PROF] = 0;
9762 signal_stop[GDB_SIGNAL_CHLD] = 0;
9763 signal_print[GDB_SIGNAL_CHLD] = 0;
9764 signal_stop[GDB_SIGNAL_IO] = 0;
9765 signal_print[GDB_SIGNAL_IO] = 0;
9766 signal_stop[GDB_SIGNAL_POLL] = 0;
9767 signal_print[GDB_SIGNAL_POLL] = 0;
9768 signal_stop[GDB_SIGNAL_URG] = 0;
9769 signal_print[GDB_SIGNAL_URG] = 0;
9770 signal_stop[GDB_SIGNAL_WINCH] = 0;
9771 signal_print[GDB_SIGNAL_WINCH] = 0;
9772 signal_stop[GDB_SIGNAL_PRIO] = 0;
9773 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9774
cd0fc7c3
SS
9775 /* These signals are used internally by user-level thread
9776 implementations. (See signal(5) on Solaris.) Like the above
9777 signals, a healthy program receives and handles them as part of
9778 its normal operation. */
a493e3e2
PA
9779 signal_stop[GDB_SIGNAL_LWP] = 0;
9780 signal_print[GDB_SIGNAL_LWP] = 0;
9781 signal_stop[GDB_SIGNAL_WAITING] = 0;
9782 signal_print[GDB_SIGNAL_WAITING] = 0;
9783 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9784 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9785 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9786 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9787
2455069d
UW
9788 /* Update cached state. */
9789 signal_cache_update (-1);
9790
85c07804
AC
9791 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9792 &stop_on_solib_events, _("\
9793Set stopping for shared library events."), _("\
9794Show stopping for shared library events."), _("\
c906108c
SS
9795If nonzero, gdb will give control to the user when the dynamic linker\n\
9796notifies gdb of shared library events. The most common event of interest\n\
85c07804 9797to the user would be loading/unloading of a new library."),
f9e14852 9798 set_stop_on_solib_events,
920d2a44 9799 show_stop_on_solib_events,
85c07804 9800 &setlist, &showlist);
c906108c 9801
7ab04401
AC
9802 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9803 follow_fork_mode_kind_names,
9804 &follow_fork_mode_string, _("\
9805Set debugger response to a program call of fork or vfork."), _("\
9806Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9807A fork or vfork creates a new process. follow-fork-mode can be:\n\
9808 parent - the original process is debugged after a fork\n\
9809 child - the new process is debugged after a fork\n\
ea1dd7bc 9810The unfollowed process will continue to run.\n\
7ab04401
AC
9811By default, the debugger will follow the parent process."),
9812 NULL,
920d2a44 9813 show_follow_fork_mode_string,
7ab04401
AC
9814 &setlist, &showlist);
9815
6c95b8df
PA
9816 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9817 follow_exec_mode_names,
9818 &follow_exec_mode_string, _("\
9819Set debugger response to a program call of exec."), _("\
9820Show debugger response to a program call of exec."), _("\
9821An exec call replaces the program image of a process.\n\
9822\n\
9823follow-exec-mode can be:\n\
9824\n\
cce7e648 9825 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9826to this new inferior. The program the process was running before\n\
9827the exec call can be restarted afterwards by restarting the original\n\
9828inferior.\n\
9829\n\
9830 same - the debugger keeps the process bound to the same inferior.\n\
9831The new executable image replaces the previous executable loaded in\n\
9832the inferior. Restarting the inferior after the exec call restarts\n\
9833the executable the process was running after the exec call.\n\
9834\n\
9835By default, the debugger will use the same inferior."),
9836 NULL,
9837 show_follow_exec_mode_string,
9838 &setlist, &showlist);
9839
7ab04401
AC
9840 add_setshow_enum_cmd ("scheduler-locking", class_run,
9841 scheduler_enums, &scheduler_mode, _("\
9842Set mode for locking scheduler during execution."), _("\
9843Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9844off == no locking (threads may preempt at any time)\n\
9845on == full locking (no thread except the current thread may run)\n\
dda83cd7 9846 This applies to both normal execution and replay mode.\n\
f2665db5 9847step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
dda83cd7
SM
9848 In this mode, other threads may run during other commands.\n\
9849 This applies to both normal execution and replay mode.\n\
f2665db5 9850replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9851 set_schedlock_func, /* traps on target vector */
920d2a44 9852 show_scheduler_mode,
7ab04401 9853 &setlist, &showlist);
5fbbeb29 9854
d4db2f36
PA
9855 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9856Set mode for resuming threads of all processes."), _("\
9857Show mode for resuming threads of all processes."), _("\
9858When on, execution commands (such as 'continue' or 'next') resume all\n\
9859threads of all processes. When off (which is the default), execution\n\
9860commands only resume the threads of the current process. The set of\n\
9861threads that are resumed is further refined by the scheduler-locking\n\
9862mode (see help set scheduler-locking)."),
9863 NULL,
9864 show_schedule_multiple,
9865 &setlist, &showlist);
9866
5bf193a2
AC
9867 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9868Set mode of the step operation."), _("\
9869Show mode of the step operation."), _("\
9870When set, doing a step over a function without debug line information\n\
9871will stop at the first instruction of that function. Otherwise, the\n\
9872function is skipped and the step command stops at a different source line."),
9873 NULL,
920d2a44 9874 show_step_stop_if_no_debug,
5bf193a2 9875 &setlist, &showlist);
ca6724c1 9876
72d0e2c5
YQ
9877 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9878 &can_use_displaced_stepping, _("\
237fc4c9
PA
9879Set debugger's willingness to use displaced stepping."), _("\
9880Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9881If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9882supported by the target architecture. If off, gdb will not use displaced\n\
9883stepping to step over breakpoints, even if such is supported by the target\n\
9884architecture. If auto (which is the default), gdb will use displaced stepping\n\
9885if the target architecture supports it and non-stop mode is active, but will not\n\
9886use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9887 NULL,
9888 show_can_use_displaced_stepping,
9889 &setlist, &showlist);
237fc4c9 9890
b2175913
MS
9891 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9892 &exec_direction, _("Set direction of execution.\n\
9893Options are 'forward' or 'reverse'."),
9894 _("Show direction of execution (forward/reverse)."),
9895 _("Tells gdb whether to execute forward or backward."),
9896 set_exec_direction_func, show_exec_direction_func,
9897 &setlist, &showlist);
9898
6c95b8df
PA
9899 /* Set/show detach-on-fork: user-settable mode. */
9900
9901 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9902Set whether gdb will detach the child of a fork."), _("\
9903Show whether gdb will detach the child of a fork."), _("\
9904Tells gdb whether to detach the child of a fork."),
9905 NULL, NULL, &setlist, &showlist);
9906
03583c20
UW
9907 /* Set/show disable address space randomization mode. */
9908
9909 add_setshow_boolean_cmd ("disable-randomization", class_support,
9910 &disable_randomization, _("\
9911Set disabling of debuggee's virtual address space randomization."), _("\
9912Show disabling of debuggee's virtual address space randomization."), _("\
9913When this mode is on (which is the default), randomization of the virtual\n\
9914address space is disabled. Standalone programs run with the randomization\n\
9915enabled by default on some platforms."),
9916 &set_disable_randomization,
9917 &show_disable_randomization,
9918 &setlist, &showlist);
9919
ca6724c1 9920 /* ptid initializations */
ca6724c1
KB
9921 inferior_ptid = null_ptid;
9922 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9923
c90e7d63
SM
9924 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
9925 "infrun");
9926 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
9927 "infrun");
9928 gdb::observers::thread_exit.attach (infrun_thread_thread_exit, "infrun");
9929 gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
9930 gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");
4aa995e1
PA
9931
9932 /* Explicitly create without lookup, since that tries to create a
9933 value with a void typed value, and when we get here, gdbarch
9934 isn't initialized yet. At this point, we're quite sure there
9935 isn't another convenience variable of the same name. */
22d2b532 9936 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9937
9938 add_setshow_boolean_cmd ("observer", no_class,
9939 &observer_mode_1, _("\
9940Set whether gdb controls the inferior in observer mode."), _("\
9941Show whether gdb controls the inferior in observer mode."), _("\
9942In observer mode, GDB can get data from the inferior, but not\n\
9943affect its execution. Registers and memory may not be changed,\n\
9944breakpoints may not be set, and the program cannot be interrupted\n\
9945or signalled."),
9946 set_observer_mode,
9947 show_observer_mode,
9948 &setlist,
9949 &showlist);
b161a60d
SM
9950
9951#if GDB_SELF_TEST
9952 selftests::register_test ("infrun_thread_ptid_changed",
9953 selftests::infrun_thread_ptid_changed);
9954#endif
c906108c 9955}