gdb: resume ongoing step after handling fork or vfork
[deliverable/binutils-gdb.git] / gdb / infrun.c
/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

static void restart_threads (struct thread_info *event_thread,
                             inferior *inf = nullptr);

static bool start_step_over (void);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}

/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

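/* Illustrative sketch, not part of the original source: the macros above
   are typically applied to the per-signal tables by command handlers
   elsewhere in this file, along the lines of

     SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_stop);
     SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_print);
     UNSET_SIGS (GDB_SIGNAL_LAST, sigs, signal_program);

   i.e., every signal flagged in SIGS is marked as one that stops the
   inferior, is announced to the user, and is not passed to the program.
   The exact call sites and argument names shown here are assumptions.  */
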
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}
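
/* For reference, an illustrative note that is not part of the original
   source: the mode above is driven by the user-level setting

     (gdb) set follow-fork-mode child|parent

   and combines with "set detach-on-fork" (the DETACH_FORK flag above) to
   decide which fork branch follow_fork_inferior below keeps debugging.  */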
\f

/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          child_inf->push_target (parent_inf->process_target ());
          thread_info *child_thr
            = add_thread_silent (child_inf->process_target (), child_ptid);

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;

              /* Now that the inferiors and program spaces are all
                 wired up, we can switch to the child thread (which
                 switches inferior and program space too).  */
              switch_to_thread (child_thr);
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* solib_create_inferior_hook relies on the current
                 thread.  */
              switch_to_thread (child_thr);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              scoped_restore restore_in_initial_library_scan
                = make_scoped_restore (&child_inf->in_initial_library_scan,
                                       true);
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->thread_waiting_for_vfork_done
            = detach_fork ? inferior_thread () : nullptr;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        child_inf->push_target (target);
      }

      thread_info *child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          scoped_restore restore_in_initial_library_scan
            = make_scoped_restore (&child_inf->in_initial_library_scan, true);
          solib_create_inferior_hook (0);
        }

      switch_to_thread (child_thr);
    }

  target_follow_fork (follow_child, detach_fork);

  return false;
}

e58b0e63
PA
683/* Tell the target to follow the fork we're stopped at. Returns true
684 if the inferior should be resumed; false, if the target for some
685 reason decided it's best not to resume. */
686
5ab2fbf1
SM
687static bool
688follow_fork ()
c906108c 689{
5ab2fbf1
SM
690 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
691 bool should_resume = true;
e58b0e63
PA
692 struct thread_info *tp;
693
694 /* Copy user stepping state to the new inferior thread. FIXME: the
695 followed fork child thread should have a copy of most of the
4e3990f4
DE
696 parent thread structure's run control related fields, not just these.
697 Initialized to avoid "may be used uninitialized" warnings from gcc. */
698 struct breakpoint *step_resume_breakpoint = NULL;
186c406b 699 struct breakpoint *exception_resume_breakpoint = NULL;
4e3990f4
DE
700 CORE_ADDR step_range_start = 0;
701 CORE_ADDR step_range_end = 0;
bf4cb9be
TV
702 int current_line = 0;
703 symtab *current_symtab = NULL;
4e3990f4 704 struct frame_id step_frame_id = { 0 };
8980e177 705 struct thread_fsm *thread_fsm = NULL;
e58b0e63
PA
706
707 if (!non_stop)
708 {
5b6d1e4f 709 process_stratum_target *wait_target;
e58b0e63
PA
710 ptid_t wait_ptid;
711 struct target_waitstatus wait_status;
712
713 /* Get the last target status returned by target_wait(). */
5b6d1e4f 714 get_last_target_status (&wait_target, &wait_ptid, &wait_status);
e58b0e63
PA
715
716 /* If not stopped at a fork event, then there's nothing else to
717 do. */
718 if (wait_status.kind != TARGET_WAITKIND_FORKED
719 && wait_status.kind != TARGET_WAITKIND_VFORKED)
720 return 1;
721
722 /* Check if we switched over from WAIT_PTID, since the event was
723 reported. */
00431a78 724 if (wait_ptid != minus_one_ptid
5b6d1e4f
PA
725 && (current_inferior ()->process_target () != wait_target
726 || inferior_ptid != wait_ptid))
e58b0e63
PA
727 {
728 /* We did. Switch back to WAIT_PTID thread, to tell the
729 target to follow it (in either direction). We'll
730 afterwards refuse to resume, and inform the user what
731 happened. */
5b6d1e4f 732 thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
00431a78 733 switch_to_thread (wait_thread);
5ab2fbf1 734 should_resume = false;
e58b0e63
PA
735 }
736 }
737
738 tp = inferior_thread ();
739
740 /* If there were any forks/vforks that were caught and are now to be
741 followed, then do so now. */
742 switch (tp->pending_follow.kind)
743 {
744 case TARGET_WAITKIND_FORKED:
745 case TARGET_WAITKIND_VFORKED:
746 {
747 ptid_t parent, child;
748
749 /* If the user did a next/step, etc, over a fork call,
750 preserve the stepping state in the fork child. */
751 if (follow_child && should_resume)
752 {
8358c15c
JK
753 step_resume_breakpoint = clone_momentary_breakpoint
754 (tp->control.step_resume_breakpoint);
16c381f0
JK
755 step_range_start = tp->control.step_range_start;
756 step_range_end = tp->control.step_range_end;
bf4cb9be
TV
757 current_line = tp->current_line;
758 current_symtab = tp->current_symtab;
16c381f0 759 step_frame_id = tp->control.step_frame_id;
186c406b
TT
760 exception_resume_breakpoint
761 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
8980e177 762 thread_fsm = tp->thread_fsm;
e58b0e63
PA
763
764 /* For now, delete the parent's sr breakpoint, otherwise,
765 parent/child sr breakpoints are considered duplicates,
766 and the child version will not be installed. Remove
767 this when the breakpoints module becomes aware of
768 inferiors and address spaces. */
769 delete_step_resume_breakpoint (tp);
16c381f0
JK
770 tp->control.step_range_start = 0;
771 tp->control.step_range_end = 0;
772 tp->control.step_frame_id = null_frame_id;
186c406b 773 delete_exception_resume_breakpoint (tp);
8980e177 774 tp->thread_fsm = NULL;
e58b0e63
PA
775 }
776
777 parent = inferior_ptid;
778 child = tp->pending_follow.value.related_pid;
779
81d92403
SM
780 /* If handling a vfork, stop all the inferior's threads, they will be
781 restarted when the vfork shared region is complete. */
782 if (tp->pending_follow.kind == TARGET_WAITKIND_VFORKED
783 && target_is_non_stop_p ())
784 stop_all_threads ("handling vfork", tp->inf);
785
5b6d1e4f 786 process_stratum_target *parent_targ = tp->inf->process_target ();
d83ad864
DB
787 /* Set up inferior(s) as specified by the caller, and tell the
788 target to do whatever is necessary to follow either parent
789 or child. */
790 if (follow_fork_inferior (follow_child, detach_fork))
e58b0e63
PA
791 {
792 /* Target refused to follow, or there's some other reason
793 we shouldn't resume. */
794 should_resume = 0;
795 }
796 else
797 {
798 /* This pending follow fork event is now handled, one way
799 or another. The previous selected thread may be gone
800 from the lists by now, but if it is still around, need
801 to clear the pending follow request. */
5b6d1e4f 802 tp = find_thread_ptid (parent_targ, parent);
e58b0e63
PA
803 if (tp)
804 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
805
806 /* This makes sure we don't try to apply the "Switched
807 over from WAIT_PID" logic above. */
808 nullify_last_target_wait_ptid ();
809
1777feb0 810 /* If we followed the child, switch to it... */
e58b0e63
PA
811 if (follow_child)
812 {
5b6d1e4f 813 thread_info *child_thr = find_thread_ptid (parent_targ, child);
00431a78 814 switch_to_thread (child_thr);
e58b0e63
PA
815
816 /* ... and preserve the stepping state, in case the
817 user was stepping over the fork call. */
818 if (should_resume)
819 {
820 tp = inferior_thread ();
8358c15c
JK
821 tp->control.step_resume_breakpoint
822 = step_resume_breakpoint;
16c381f0
JK
823 tp->control.step_range_start = step_range_start;
824 tp->control.step_range_end = step_range_end;
bf4cb9be
TV
825 tp->current_line = current_line;
826 tp->current_symtab = current_symtab;
16c381f0 827 tp->control.step_frame_id = step_frame_id;
186c406b
TT
828 tp->control.exception_resume_breakpoint
829 = exception_resume_breakpoint;
8980e177 830 tp->thread_fsm = thread_fsm;
e58b0e63
PA
831 }
832 else
833 {
834 /* If we get here, it was because we're trying to
835 resume from a fork catchpoint, but, the user
836 has switched threads away from the thread that
837 forked. In that case, the resume command
838 issued is most likely not applicable to the
839 child, so just warn, and refuse to resume. */
3e43a32a 840 warning (_("Not resuming: switched threads "
fd7dcb94 841 "before following fork child."));
e58b0e63
PA
842 }
843
844 /* Reset breakpoints in the child as appropriate. */
845 follow_inferior_reset_breakpoints ();
846 }
e58b0e63
PA
847 }
848 }
849 break;
850 case TARGET_WAITKIND_SPURIOUS:
851 /* Nothing to follow. */
852 break;
853 default:
854 internal_error (__FILE__, __LINE__,
855 "Unexpected pending_follow.kind %d\n",
856 tp->pending_follow.kind);
857 break;
858 }
c906108c 859
e58b0e63 860 return should_resume;
c906108c
SS
861}
862
d83ad864 863static void
6604731b 864follow_inferior_reset_breakpoints (void)
c906108c 865{
4e1c45ea
PA
866 struct thread_info *tp = inferior_thread ();
867
6604731b
DJ
868 /* Was there a step_resume breakpoint? (There was if the user
869 did a "next" at the fork() call.) If so, explicitly reset its
a1aa2221
LM
870 thread number. Cloned step_resume breakpoints are disabled on
871 creation, so enable it here now that it is associated with the
872 correct thread.
6604731b
DJ
873
874 step_resumes are a form of bp that are made to be per-thread.
875 Since we created the step_resume bp when the parent process
876 was being debugged, and now are switching to the child process,
877 from the breakpoint package's viewpoint, that's a switch of
878 "threads". We must update the bp's notion of which thread
879 it is for, or it'll be ignored when it triggers. */
880
8358c15c 881 if (tp->control.step_resume_breakpoint)
a1aa2221
LM
882 {
883 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
884 tp->control.step_resume_breakpoint->loc->enabled = 1;
885 }
6604731b 886
a1aa2221 887 /* Treat exception_resume breakpoints like step_resume breakpoints. */
186c406b 888 if (tp->control.exception_resume_breakpoint)
a1aa2221
LM
889 {
890 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
891 tp->control.exception_resume_breakpoint->loc->enabled = 1;
892 }
186c406b 893
6604731b
DJ
894 /* Reinsert all breakpoints in the child. The user may have set
895 breakpoints after catching the fork, in which case those
896 were never set in the child, but only in the parent. This makes
897 sure the inserted breakpoints match the breakpoint list. */
898
899 breakpoint_re_set ();
900 insert_breakpoints ();
c906108c 901}
c906108c 902
6c95b8df
PA
903/* The child has exited or execed: resume threads of the parent the
904 user wanted to be executing. */
905
906static int
907proceed_after_vfork_done (struct thread_info *thread,
908 void *arg)
909{
910 int pid = * (int *) arg;
911
00431a78
PA
912 if (thread->ptid.pid () == pid
913 && thread->state == THREAD_RUNNING
914 && !thread->executing
6c95b8df 915 && !thread->stop_requested
a493e3e2 916 && thread->suspend.stop_signal == GDB_SIGNAL_0)
6c95b8df 917 {
1eb8556f
SM
918 infrun_debug_printf ("resuming vfork parent thread %s",
919 target_pid_to_str (thread->ptid).c_str ());
6c95b8df 920
00431a78 921 switch_to_thread (thread);
70509625 922 clear_proceed_status (0);
64ce06e4 923 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
6c95b8df
PA
924 }
925
926 return 0;
927}
928
929/* Called whenever we notice an exec or exit event, to handle
930 detaching or resuming a vfork parent. */
931
932static void
933handle_vfork_child_exec_or_exit (int exec)
934{
935 struct inferior *inf = current_inferior ();
936
937 if (inf->vfork_parent)
938 {
939 int resume_parent = -1;
940
941 /* This exec or exit marks the end of the shared memory region
b73715df
TV
942 between the parent and the child. Break the bonds. */
943 inferior *vfork_parent = inf->vfork_parent;
944 inf->vfork_parent->vfork_child = NULL;
945 inf->vfork_parent = NULL;
6c95b8df 946
b73715df
TV
947 /* If the user wanted to detach from the parent, now is the
948 time. */
949 if (vfork_parent->pending_detach)
6c95b8df 950 {
6c95b8df
PA
951 struct program_space *pspace;
952 struct address_space *aspace;
953
1777feb0 954 /* follow-fork child, detach-on-fork on. */
6c95b8df 955
b73715df 956 vfork_parent->pending_detach = 0;
68c9da30 957
18493a00 958 scoped_restore_current_pspace_and_thread restore_thread;
6c95b8df
PA
959
960 /* We're letting loose of the parent. */
18493a00 961 thread_info *tp = any_live_thread_of_inferior (vfork_parent);
00431a78 962 switch_to_thread (tp);
6c95b8df
PA
963
964 /* We're about to detach from the parent, which implicitly
965 removes breakpoints from its address space. There's a
966 catch here: we want to reuse the spaces for the child,
967 but, parent/child are still sharing the pspace at this
968 point, although the exec in reality makes the kernel give
969 the child a fresh set of new pages. The problem here is
970 that the breakpoints module being unaware of this, would
971 likely chose the child process to write to the parent
972 address space. Swapping the child temporarily away from
973 the spaces has the desired effect. Yes, this is "sort
974 of" a hack. */
975
976 pspace = inf->pspace;
977 aspace = inf->aspace;
978 inf->aspace = NULL;
979 inf->pspace = NULL;
980
f67c0c91 981 if (print_inferior_events)
6c95b8df 982 {
a068643d 983 std::string pidstr
b73715df 984 = target_pid_to_str (ptid_t (vfork_parent->pid));
f67c0c91 985
223ffa71 986 target_terminal::ours_for_output ();
6c95b8df
PA
987
988 if (exec)
6f259a23
DB
989 {
990 fprintf_filtered (gdb_stdlog,
f67c0c91 991 _("[Detaching vfork parent %s "
a068643d 992 "after child exec]\n"), pidstr.c_str ());
6f259a23 993 }
6c95b8df 994 else
6f259a23
DB
995 {
996 fprintf_filtered (gdb_stdlog,
f67c0c91 997 _("[Detaching vfork parent %s "
a068643d 998 "after child exit]\n"), pidstr.c_str ());
6f259a23 999 }
6c95b8df
PA
1000 }
1001
b73715df 1002 target_detach (vfork_parent, 0);
6c95b8df
PA
1003
1004 /* Put it back. */
1005 inf->pspace = pspace;
1006 inf->aspace = aspace;
6c95b8df
PA
1007 }
1008 else if (exec)
1009 {
1010 /* We're staying attached to the parent, so, really give the
1011 child a new address space. */
564b1e3f 1012 inf->pspace = new program_space (maybe_new_address_space ());
6c95b8df
PA
1013 inf->aspace = inf->pspace->aspace;
1014 inf->removable = 1;
1015 set_current_program_space (inf->pspace);
1016
b73715df 1017 resume_parent = vfork_parent->pid;
6c95b8df
PA
1018 }
1019 else
1020 {
6c95b8df
PA
1021 /* If this is a vfork child exiting, then the pspace and
1022 aspaces were shared with the parent. Since we're
1023 reporting the process exit, we'll be mourning all that is
1024 found in the address space, and switching to null_ptid,
1025 preparing to start a new inferior. But, since we don't
1026 want to clobber the parent's address/program spaces, we
1027 go ahead and create a new one for this exiting
1028 inferior. */
1029
18493a00 1030 /* Switch to no-thread while running clone_program_space, so
5ed8105e
PA
1031 that clone_program_space doesn't want to read the
1032 selected frame of a dead process. */
18493a00
PA
1033 scoped_restore_current_thread restore_thread;
1034 switch_to_no_thread ();
6c95b8df 1035
53af73bf
PA
1036 inf->pspace = new program_space (maybe_new_address_space ());
1037 inf->aspace = inf->pspace->aspace;
1038 set_current_program_space (inf->pspace);
6c95b8df 1039 inf->removable = 1;
7dcd53a0 1040 inf->symfile_flags = SYMFILE_NO_READ;
53af73bf 1041 clone_program_space (inf->pspace, vfork_parent->pspace);
6c95b8df 1042
b73715df 1043 resume_parent = vfork_parent->pid;
6c95b8df
PA
1044 }
1045
6c95b8df
PA
1046 gdb_assert (current_program_space == inf->pspace);
1047
1048 if (non_stop && resume_parent != -1)
1049 {
1050 /* If the user wanted the parent to be running, let it go
1051 free now. */
5ed8105e 1052 scoped_restore_current_thread restore_thread;
6c95b8df 1053
1eb8556f
SM
1054 infrun_debug_printf ("resuming vfork parent process %d",
1055 resume_parent);
6c95b8df
PA
1056
1057 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
6c95b8df
PA
1058 }
1059 }
1060}
1061
81d92403
SM
1062/* Handle TARGET_WAITKIND_VFORK_DONE. */
1063
1064static void
1065handle_vfork_done (thread_info *event_thread)
1066{
1067 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1068 set, that is if we are waiting for a vfork child not under our control
1069 (because we detached it) to exec or exit.
1070
1071 If an inferior has vforked and we are debugging the child, we don't use
1072 the vfork-done event to get notified about the end of the shared address
1073 space window). We rely instead on the child's exec or exit event, and the
1074 inferior::vfork_{parent,child} fields are used instead. See
1075 handle_vfork_child_exec_or_exit for that. */
1076 if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
1077 {
1078 infrun_debug_printf ("not waiting for a vfork-done event");
1079 return;
1080 }
1081
1082 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1083
1084 /* We stopped all threads (other than the vforking thread) of the inferior in
1085 follow_fork and kept them stopped until now. It should therefore not be
1086 possible for another thread to have reported a vfork during that window.
1087 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1088 vfork-done we are handling right now. */
1089 gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);
1090
1091 event_thread->inf->thread_waiting_for_vfork_done = nullptr;
1092 event_thread->inf->pspace->breakpoints_not_allowed = 0;
1093
1094 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1095 resume them now. On all-stop targets, everything that needs to be resumed
1096 will be when we resume the event thread. */
1097 if (target_is_non_stop_p ())
1098 {
1099 /* restart_threads and start_step_over may change the current thread, make
1100 sure we leave the event thread as the current thread. */
1101 scoped_restore_current_thread restore_thread;
1102
1103 insert_breakpoints ();
1104 restart_threads (event_thread, event_thread->inf);
1105 start_step_over ();
1106 }
1107}
1108
eb6c553b 1109/* Enum strings for "set|show follow-exec-mode". */
6c95b8df
PA
1110
1111static const char follow_exec_mode_new[] = "new";
1112static const char follow_exec_mode_same[] = "same";
40478521 1113static const char *const follow_exec_mode_names[] =
6c95b8df
PA
1114{
1115 follow_exec_mode_new,
1116 follow_exec_mode_same,
1117 NULL,
1118};
1119
1120static const char *follow_exec_mode_string = follow_exec_mode_same;
1121static void
1122show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1123 struct cmd_list_element *c, const char *value)
1124{
1125 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1126}
1127
ecf45d2c 1128/* EXEC_FILE_TARGET is assumed to be non-NULL. */
1adeb98a 1129
c906108c 1130static void
4ca51187 1131follow_exec (ptid_t ptid, const char *exec_file_target)
c906108c 1132{
e99b03dc 1133 int pid = ptid.pid ();
94585166 1134 ptid_t process_ptid;
7a292a7a 1135
65d2b333
PW
1136 /* Switch terminal for any messages produced e.g. by
1137 breakpoint_re_set. */
1138 target_terminal::ours_for_output ();
1139
c906108c
SS
1140 /* This is an exec event that we actually wish to pay attention to.
1141 Refresh our symbol table to the newly exec'd program, remove any
1142 momentary bp's, etc.
1143
1144 If there are breakpoints, they aren't really inserted now,
1145 since the exec() transformed our inferior into a fresh set
1146 of instructions.
1147
1148 We want to preserve symbolic breakpoints on the list, since
1149 we have hopes that they can be reset after the new a.out's
1150 symbol table is read.
1151
1152 However, any "raw" breakpoints must be removed from the list
1153 (e.g., the solib bp's), since their address is probably invalid
1154 now.
1155
1156 And, we DON'T want to call delete_breakpoints() here, since
1157 that may write the bp's "shadow contents" (the instruction
85102364 1158 value that was overwritten with a TRAP instruction). Since
1777feb0 1159 we now have a new a.out, those shadow contents aren't valid. */
6c95b8df
PA
1160
1161 mark_breakpoints_out ();
1162
95e50b27
PA
1163 /* The target reports the exec event to the main thread, even if
1164 some other thread does the exec, and even if the main thread was
1165 stopped or already gone. We may still have non-leader threads of
1166 the process on our list. E.g., on targets that don't have thread
1167 exit events (like remote); or on native Linux in non-stop mode if
1168 there were only two threads in the inferior and the non-leader
1169 one is the one that execs (and nothing forces an update of the
1170 thread list up to here). When debugging remotely, it's best to
1171 avoid extra traffic, when possible, so avoid syncing the thread
1172 list with the target, and instead go ahead and delete all threads
1173 of the process but one that reported the event. Note this must
1174 be done before calling update_breakpoints_after_exec, as
1175 otherwise clearing the threads' resources would reference stale
1176 thread breakpoints -- it may have been one of these threads that
1177 stepped across the exec. We could just clear their stepping
1178 states, but as long as we're iterating, might as well delete
1179 them. Deleting them now rather than at the next user-visible
1180 stop provides a nicer sequence of events for user and MI
1181 notifications. */
08036331 1182 for (thread_info *th : all_threads_safe ())
d7e15655 1183 if (th->ptid.pid () == pid && th->ptid != ptid)
00431a78 1184 delete_thread (th);
95e50b27
PA
1185
1186 /* We also need to clear any left over stale state for the
1187 leader/event thread. E.g., if there was any step-resume
1188 breakpoint or similar, it's gone now. We cannot truly
1189 step-to-next statement through an exec(). */
08036331 1190 thread_info *th = inferior_thread ();
8358c15c 1191 th->control.step_resume_breakpoint = NULL;
186c406b 1192 th->control.exception_resume_breakpoint = NULL;
34b7e8a6 1193 th->control.single_step_breakpoints = NULL;
16c381f0
JK
1194 th->control.step_range_start = 0;
1195 th->control.step_range_end = 0;
c906108c 1196
95e50b27
PA
1197 /* The user may have had the main thread held stopped in the
1198 previous image (e.g., schedlock on, or non-stop). Release
1199 it now. */
a75724bc
PA
1200 th->stop_requested = 0;
1201
95e50b27
PA
1202 update_breakpoints_after_exec ();
1203
1777feb0 1204 /* What is this a.out's name? */
f2907e49 1205 process_ptid = ptid_t (pid);
6c95b8df 1206 printf_unfiltered (_("%s is executing new program: %s\n"),
a068643d 1207 target_pid_to_str (process_ptid).c_str (),
ecf45d2c 1208 exec_file_target);
c906108c
SS
1209
1210 /* We've followed the inferior through an exec. Therefore, the
1777feb0 1211 inferior has essentially been killed & reborn. */
7a292a7a 1212
6ca15a4b 1213 breakpoint_init_inferior (inf_execd);
e85a822c 1214
797bc1cb
TT
1215 gdb::unique_xmalloc_ptr<char> exec_file_host
1216 = exec_file_find (exec_file_target, NULL);
ff862be4 1217
ecf45d2c
SL
1218 /* If we were unable to map the executable target pathname onto a host
1219 pathname, tell the user that. Otherwise GDB's subsequent behavior
1220 is confusing. Maybe it would even be better to stop at this point
1221 so that the user can specify a file manually before continuing. */
1222 if (exec_file_host == NULL)
1223 warning (_("Could not load symbols for executable %s.\n"
1224 "Do you need \"set sysroot\"?"),
1225 exec_file_target);
c906108c 1226
cce9b6bf
PA
1227 /* Reset the shared library package. This ensures that we get a
1228 shlib event when the child reaches "_start", at which point the
1229 dld will have had a chance to initialize the child. */
1230 /* Also, loading a symbol file below may trigger symbol lookups, and
1231 we don't want those to be satisfied by the libraries of the
1232 previous incarnation of this process. */
1233 no_shared_libraries (NULL, 0);
1234
294c36eb
SM
1235 struct inferior *inf = current_inferior ();
1236
6c95b8df
PA
1237 if (follow_exec_mode_string == follow_exec_mode_new)
1238 {
6c95b8df
PA
1239 /* The user wants to keep the old inferior and program spaces
1240 around. Create a new fresh one, and switch to it. */
1241
35ed81d4
SM
1242 /* Do exit processing for the original inferior before setting the new
1243 inferior's pid. Having two inferiors with the same pid would confuse
1244 find_inferior_p(t)id. Transfer the terminal state and info from the
1245 old to the new inferior. */
294c36eb
SM
1246 inferior *new_inferior = add_inferior_with_spaces ();
1247
1248 swap_terminal_info (new_inferior, inf);
1249 exit_inferior_silent (inf);
1250
1251 new_inferior->pid = pid;
1252 target_follow_exec (new_inferior, ptid, exec_file_target);
1253
1254 /* We continue with the new inferior. */
1255 inf = new_inferior;
6c95b8df 1256 }
9107fc8d
PA
1257 else
1258 {
1259 /* The old description may no longer be fit for the new image.
1260 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1261 old description; we'll read a new one below. No need to do
1262 this on "follow-exec-mode new", as the old inferior stays
1263 around (its description is later cleared/refetched on
1264 restart). */
1265 target_clear_description ();
294c36eb 1266 target_follow_exec (inf, ptid, exec_file_target);
9107fc8d 1267 }
6c95b8df 1268
294c36eb 1269 gdb_assert (current_inferior () == inf);
6c95b8df
PA
1270 gdb_assert (current_program_space == inf->pspace);
1271
ecf45d2c
SL
1272 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1273 because the proper displacement for a PIE (Position Independent
1274 Executable) main symbol file will only be computed by
1275 solib_create_inferior_hook below. breakpoint_re_set would fail
1276 to insert the breakpoints with the zero displacement. */
797bc1cb 1277 try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);
c906108c 1278
9107fc8d
PA
1279 /* If the target can specify a description, read it. Must do this
1280 after flipping to the new executable (because the target supplied
1281 description must be compatible with the executable's
1282 architecture, and the old executable may e.g., be 32-bit, while
1283 the new one 64-bit), and before anything involving memory or
1284 registers. */
1285 target_find_description ();
1286
42a4fec5 1287 gdb::observers::inferior_execd.notify (inf);
4efc6507 1288
c1e56572
JK
1289 breakpoint_re_set ();
1290
c906108c
SS
1291 /* Reinsert all breakpoints. (Those which were symbolic have
1292 been reset to the proper address in the new a.out, thanks
1777feb0 1293 to symbol_file_command...). */
c906108c
SS
1294 insert_breakpoints ();
1295
1296 /* The next resume of this inferior should bring it to the shlib
1297 startup breakpoints. (If the user had also set bp's on
1298 "main" from the old (parent) process, then they'll auto-
1777feb0 1299 matically get reset there in the new process.). */
c906108c
SS
1300}
1301
28d5518b 1302/* The chain of threads that need to do a step-over operation to get
c2829269
PA
1303 past e.g., a breakpoint. What technique is used to step over the
1304 breakpoint/watchpoint does not matter -- all threads end up in the
1305 same queue, to maintain rough temporal order of execution, in order
1306 to avoid starvation, otherwise, we could e.g., find ourselves
1307 constantly stepping the same couple threads past their breakpoints
1308 over and over, if the single-step finish fast enough. */
28d5518b 1309struct thread_info *global_thread_step_over_chain_head;
c2829269 1310
6c4cfb24
PA
1311/* Bit flags indicating what the thread needs to step over. */
1312
8d297bbf 1313enum step_over_what_flag
6c4cfb24
PA
1314 {
1315 /* Step over a breakpoint. */
1316 STEP_OVER_BREAKPOINT = 1,
1317
1318 /* Step past a non-continuable watchpoint, in order to let the
1319 instruction execute so we can evaluate the watchpoint
1320 expression. */
1321 STEP_OVER_WATCHPOINT = 2
1322 };
8d297bbf 1323DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
6c4cfb24 1324
963f9c80 1325/* Info about an instruction that is being stepped over. */
31e77af2
PA
1326
1327struct step_over_info
1328{
963f9c80
PA
1329 /* If we're stepping past a breakpoint, this is the address space
1330 and address of the instruction the breakpoint is set at. We'll
1331 skip inserting all breakpoints here. Valid iff ASPACE is
1332 non-NULL. */
ac7d717c
PA
1333 const address_space *aspace = nullptr;
1334 CORE_ADDR address = 0;
963f9c80
PA
1335
1336 /* The instruction being stepped over triggers a nonsteppable
1337 watchpoint. If true, we'll skip inserting watchpoints. */
ac7d717c 1338 int nonsteppable_watchpoint_p = 0;
21edc42f
YQ
1339
1340 /* The thread's global number. */
ac7d717c 1341 int thread = -1;
31e77af2
PA
1342};
1343
1344/* The step-over info of the location that is being stepped over.
1345
1346 Note that with async/breakpoint always-inserted mode, a user might
1347 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1348 being stepped over. As setting a new breakpoint inserts all
1349 breakpoints, we need to make sure the breakpoint being stepped over
1350 isn't inserted then. We do that by only clearing the step-over
1351 info when the step-over is actually finished (or aborted).
1352
1353 Presently GDB can only step over one breakpoint at any given time.
1354 Given threads that can't run code in the same address space as the
1355 breakpoint's can't really miss the breakpoint, GDB could be taught
1356 to step-over at most one breakpoint per address space (so this info
1357 could move to the address space object if/when GDB is extended).
1358 The set of breakpoints being stepped over will normally be much
1359 smaller than the set of all breakpoints, so a flag in the
1360 breakpoint location structure would be wasteful. A separate list
1361 also saves complexity and run-time, as otherwise we'd have to go
1362 through all breakpoint locations clearing their flag whenever we
1363 start a new sequence. Similar considerations weigh against storing
1364 this info in the thread object. Plus, not all step overs actually
1365 have breakpoint locations -- e.g., stepping past a single-step
1366 breakpoint, or stepping to complete a non-continuable
1367 watchpoint. */
1368static struct step_over_info step_over_info;
1369
1370/* Record the address of the breakpoint/instruction we're currently
ce0db137
DE
1371 stepping over.
1372 N.B. We record the aspace and address now, instead of say just the thread,
1373 because when we need the info later the thread may be running. */
31e77af2
PA
1374
1375static void
8b86c959 1376set_step_over_info (const address_space *aspace, CORE_ADDR address,
21edc42f
YQ
1377 int nonsteppable_watchpoint_p,
1378 int thread)
31e77af2
PA
1379{
1380 step_over_info.aspace = aspace;
1381 step_over_info.address = address;
963f9c80 1382 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
21edc42f 1383 step_over_info.thread = thread;
31e77af2
PA
1384}
1385
1386/* Called when we're not longer stepping over a breakpoint / an
1387 instruction, so all breakpoints are free to be (re)inserted. */
1388
1389static void
1390clear_step_over_info (void)
1391{
1eb8556f 1392 infrun_debug_printf ("clearing step over info");
31e77af2
PA
1393 step_over_info.aspace = NULL;
1394 step_over_info.address = 0;
963f9c80 1395 step_over_info.nonsteppable_watchpoint_p = 0;
21edc42f 1396 step_over_info.thread = -1;
31e77af2
PA
1397}
1398
7f89fd65 1399/* See infrun.h. */
31e77af2
PA
1400
1401int
1402stepping_past_instruction_at (struct address_space *aspace,
1403 CORE_ADDR address)
1404{
1405 return (step_over_info.aspace != NULL
1406 && breakpoint_address_match (aspace, address,
1407 step_over_info.aspace,
1408 step_over_info.address));
1409}
1410
963f9c80
PA
1411/* See infrun.h. */
1412
21edc42f
YQ
1413int
1414thread_is_stepping_over_breakpoint (int thread)
1415{
1416 return (step_over_info.thread != -1
1417 && thread == step_over_info.thread);
1418}
1419
1420/* See infrun.h. */
1421
963f9c80
PA
1422int
1423stepping_past_nonsteppable_watchpoint (void)
1424{
1425 return step_over_info.nonsteppable_watchpoint_p;
1426}
1427
6cc83d2a
PA
1428/* Returns true if step-over info is valid. */
1429
c4464ade 1430static bool
6cc83d2a
PA
1431step_over_info_valid_p (void)
1432{
963f9c80
PA
1433 return (step_over_info.aspace != NULL
1434 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1435}
1436
c906108c 1437\f
237fc4c9
PA
1438/* Displaced stepping. */
1439
1440/* In non-stop debugging mode, we must take special care to manage
1441 breakpoints properly; in particular, the traditional strategy for
1442 stepping a thread past a breakpoint it has hit is unsuitable.
1443 'Displaced stepping' is a tactic for stepping one thread past a
1444 breakpoint it has hit while ensuring that other threads running
1445 concurrently will hit the breakpoint as they should.
1446
1447 The traditional way to step a thread T off a breakpoint in a
1448 multi-threaded program in all-stop mode is as follows:
1449
1450 a0) Initially, all threads are stopped, and breakpoints are not
1451 inserted.
1452 a1) We single-step T, leaving breakpoints uninserted.
1453 a2) We insert breakpoints, and resume all threads.
1454
1455 In non-stop debugging, however, this strategy is unsuitable: we
1456 don't want to have to stop all threads in the system in order to
1457 continue or step T past a breakpoint. Instead, we use displaced
1458 stepping:
1459
1460 n0) Initially, T is stopped, other threads are running, and
1461 breakpoints are inserted.
1462 n1) We copy the instruction "under" the breakpoint to a separate
1463 location, outside the main code stream, making any adjustments
1464 to the instruction, register, and memory state as directed by
1465 T's architecture.
1466 n2) We single-step T over the instruction at its new location.
1467 n3) We adjust the resulting register and memory state as directed
1468 by T's architecture. This includes resetting T's PC to point
1469 back into the main instruction stream.
1470 n4) We resume T.
1471
1472 This approach depends on the following gdbarch methods:
1473
1474 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1475 indicate where to copy the instruction, and how much space must
1476 be reserved there. We use these in step n1.
1477
1478 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1479 address, and makes any necessary adjustments to the instruction,
1480 register contents, and memory. We use this in step n1.
1481
1482 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1483 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1484 same effect the instruction would have had if we had executed it
1485 at its original address. We use this in step n3.
1486
237fc4c9
PA
1487 The gdbarch_displaced_step_copy_insn and
1488 gdbarch_displaced_step_fixup functions must be written so that
1489 copying an instruction with gdbarch_displaced_step_copy_insn,
1490 single-stepping across the copied instruction, and then applying
1491 gdbarch_displaced_step_fixup should have the same effects on the
1492 thread's memory and registers as stepping the instruction in place
1493 would have. Exactly which responsibilities fall to the copy and
1494 which fall to the fixup is up to the author of those functions.
1495
1496 See the comments in gdbarch.sh for details.
1497
1498 Note that displaced stepping and software single-step cannot
1499 currently be used in combination, although with some care I think
1500 they could be made to. Software single-step works by placing
1501 breakpoints on all possible subsequent instructions; if the
1502 displaced instruction is a PC-relative jump, those breakpoints
1503 could fall in very strange places --- on pages that aren't
1504 executable, or at addresses that are not proper instruction
1505 boundaries. (We do generally let other threads run while we wait
1506 to hit the software single-step breakpoint, and they might
1507 encounter such a corrupted instruction.) One way to work around
1508 this would be to have gdbarch_displaced_step_copy_insn fully
1509 simulate the effect of PC-relative instructions (and return NULL)
1510 on architectures that use software single-stepping.
1511
1512 In non-stop mode, we can have independent and simultaneous step
1513 requests, so more than one thread may need to simultaneously step
1514 over a breakpoint. The current implementation assumes there is
1515 only one scratch space per process. In this case, we have to
1516 serialize access to the scratch space. If thread A wants to step
1517 over a breakpoint, but we are currently waiting for some other
1518 thread to complete a displaced step, we leave thread A stopped and
1519 place it in the displaced_step_request_queue. Whenever a displaced
1520 step finishes, we pick the next thread in the queue and start a new
1521 displaced step operation on it. See displaced_step_prepare and
7def77a1 1522 displaced_step_finish for details. */
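/* A minimal sketch of the n1-n4 cycle above, in terms of the static
   helpers defined later in this file; error handling, resource queuing
   and the event-loop wait are omitted:

     // n1: copy the insn out of line and point the PC at the copy.
     if (displaced_step_prepare (tp) == DISPLACED_STEP_PREPARE_STATUS_OK)
       {
	 // n2: single-step only the relocated copy.
	 do_target_resume (tp->ptid, true, GDB_SIGNAL_0);
	 // ... the event loop waits for the step to complete ...
	 // n3/n4: apply the architecture fixup, then resume normally.
	 displaced_step_finish (tp, GDB_SIGNAL_TRAP);
       }  */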
237fc4c9 1523
a46d1843 1524/* Return true if THREAD is doing a displaced step. */
c0987663 1525
c4464ade 1526static bool
00431a78 1527displaced_step_in_progress_thread (thread_info *thread)
c0987663 1528{
00431a78 1529 gdb_assert (thread != NULL);
c0987663 1530
187b041e 1531 return thread->displaced_step_state.in_progress ();
c0987663
YQ
1532}
1533
a46d1843 1534/* Return true if INF has a thread doing a displaced step. */
8f572e5c 1535
c4464ade 1536static bool
00431a78 1537displaced_step_in_progress (inferior *inf)
8f572e5c 1538{
187b041e 1539 return inf->displaced_step_state.in_progress_count > 0;
fc1cf338
PA
1540}
1541
187b041e 1542/* Return true if any thread is doing a displaced step. */
a42244db 1543
187b041e
SM
1544static bool
1545displaced_step_in_progress_any_thread ()
a42244db 1546{
187b041e
SM
1547 for (inferior *inf : all_non_exited_inferiors ())
1548 {
1549 if (displaced_step_in_progress (inf))
1550 return true;
1551 }
a42244db 1552
187b041e 1553 return false;
a42244db
YQ
1554}
1555
fc1cf338
PA
1556static void
1557infrun_inferior_exit (struct inferior *inf)
1558{
d20172fc 1559 inf->displaced_step_state.reset ();
060f2ef8 1560 inf->thread_waiting_for_vfork_done = nullptr;
fc1cf338 1561}
237fc4c9 1562
3b7a962d
SM
1563static void
1564infrun_inferior_execd (inferior *inf)
1565{
187b041e
SM
1566 /* If any threads were doing a displaced step in this inferior at the
1567 moment of the exec, they no longer exist. Even if the exec'ing thread
3b7a962d
SM
1568 was doing a displaced step, we don't want to do any fixup nor restore displaced
1569 stepping buffer bytes. */
1570 inf->displaced_step_state.reset ();
1571
187b041e
SM
1572 for (thread_info *thread : inf->threads ())
1573 thread->displaced_step_state.reset ();
1574
3b7a962d
SM
1575 /* Since an in-line step is done with everything else stopped, if there was
1576 one in progress at the time of the exec, it must have been the exec'ing
1577 thread. */
1578 clear_step_over_info ();
060f2ef8
SM
1579
1580 inf->thread_waiting_for_vfork_done = nullptr;
3b7a962d
SM
1581}
1582
fff08868
HZ
1583/* If ON, and the architecture supports it, GDB will use displaced
1584 stepping to step over breakpoints. If OFF, or if the architecture
1585 doesn't support it, GDB will instead use the traditional
1586 hold-and-step approach. If AUTO (which is the default), GDB will
1587 decide which technique to use to step over breakpoints depending on
9822cb57 1588 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1589
72d0e2c5 1590static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1591
237fc4c9
PA
1592static void
1593show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1594 struct cmd_list_element *c,
1595 const char *value)
1596{
72d0e2c5 1597 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1598 fprintf_filtered (file,
1599 _("Debugger's willingness to use displaced stepping "
1600 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1601 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1602 else
3e43a32a
MS
1603 fprintf_filtered (file,
1604 _("Debugger's willingness to use displaced stepping "
1605 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1606}
1607
9822cb57
SM
1608/* Return true if the gdbarch implements the required methods to use
1609 displaced stepping. */
1610
1611static bool
1612gdbarch_supports_displaced_stepping (gdbarch *arch)
1613{
187b041e
SM
1614 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1615 that if `prepare` is provided, so is `finish`. */
1616 return gdbarch_displaced_step_prepare_p (arch);
9822cb57
SM
1617}
1618
fff08868 1619/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1620 over breakpoints of thread TP. */
fff08868 1621
9822cb57
SM
1622static bool
1623use_displaced_stepping (thread_info *tp)
237fc4c9 1624{
9822cb57
SM
1625 /* If the user disabled it explicitly, don't use displaced stepping. */
1626 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1627 return false;
1628
1629 /* If "auto", only use displaced stepping if the target operates in a non-stop
1630 way. */
1631 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1632 && !target_is_non_stop_p ())
1633 return false;
1634
1635 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1636
1637 /* If the architecture doesn't implement displaced stepping, don't use
1638 it. */
1639 if (!gdbarch_supports_displaced_stepping (gdbarch))
1640 return false;
1641
1642 /* If recording, don't use displaced stepping. */
1643 if (find_record_target () != nullptr)
1644 return false;
1645
9822cb57
SM
1646 /* If displaced stepping failed before for this inferior, don't bother trying
1647 again. */
f5f01699 1648 if (tp->inf->displaced_step_state.failed_before)
9822cb57
SM
1649 return false;
1650
1651 return true;
237fc4c9
PA
1652}
1653
187b041e 1654/* Simple function wrapper around displaced_step_thread_state::reset. */
d8d83535 1655
237fc4c9 1656static void
187b041e 1657displaced_step_reset (displaced_step_thread_state *displaced)
237fc4c9 1658{
d8d83535 1659 displaced->reset ();
237fc4c9
PA
1660}
1661
d8d83535
SM
1662/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1663 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1664
1665using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9 1666
136821d9
SM
1667/* See infrun.h. */
1668
1669std::string
1670displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
237fc4c9 1671{
136821d9 1672 std::string ret;
237fc4c9 1673
136821d9
SM
1674 for (size_t i = 0; i < len; i++)
1675 {
1676 if (i == 0)
1677 ret += string_printf ("%02x", buf[i]);
1678 else
1679 ret += string_printf (" %02x", buf[i]);
1680 }
1681
1682 return ret;
237fc4c9
PA
1683}
1684
1685/* Prepare to single-step, using displaced stepping.
1686
1687 Note that we cannot use displaced stepping when we have a signal to
1688 deliver. If we have a signal to deliver and an instruction to step
1689 over, then after the step, there will be no indication from the
1690 target whether the thread entered a signal handler or ignored the
1691 signal and stepped over the instruction successfully --- both cases
1692 result in a simple SIGTRAP. In the first case we mustn't do a
1693 fixup, and in the second case we must --- but we can't tell which.
1694 Comments in the code for 'random signals' in handle_inferior_event
1695 explain how we handle this case instead.
1696
bab37966
SM
1697 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1698 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1699 if displaced stepping this thread got queued; or
1700 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1701 stepped. */
7f03bd92 1702
bab37966 1703static displaced_step_prepare_status
00431a78 1704displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1705{
00431a78 1706 regcache *regcache = get_thread_regcache (tp);
ac7936df 1707 struct gdbarch *gdbarch = regcache->arch ();
187b041e
SM
1708 displaced_step_thread_state &disp_step_thread_state
1709 = tp->displaced_step_state;
237fc4c9
PA
1710
1711 /* We should never reach this function if the architecture does not
1712 support displaced stepping. */
9822cb57 1713 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1714
c2829269
PA
1715 /* Nor if the thread isn't meant to step over a breakpoint. */
1716 gdb_assert (tp->control.trap_expected);
1717
c1e36e3e
PA
1718 /* Disable range stepping while executing in the scratch pad. We
1719 want a single-step even if executing the displaced instruction in
1720 the scratch buffer lands within the stepping range (e.g., a
1721 jump/branch). */
1722 tp->control.may_range_step = 0;
1723
187b041e
SM
1724 /* We are about to start a displaced step for this thread. If one is already
1725 in progress, something's wrong. */
1726 gdb_assert (!disp_step_thread_state.in_progress ());
237fc4c9 1727
187b041e 1728 if (tp->inf->displaced_step_state.unavailable)
237fc4c9 1729 {
187b041e
SM
1730 /* The gdbarch tells us it's not worth asking to try a prepare because
1731 it is likely that it will return unavailable, so don't bother asking. */
237fc4c9 1732
136821d9
SM
1733 displaced_debug_printf ("deferring step of %s",
1734 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1735
28d5518b 1736 global_thread_step_over_chain_enqueue (tp);
bab37966 1737 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
237fc4c9 1738 }
237fc4c9 1739
187b041e
SM
1740 displaced_debug_printf ("displaced-stepping %s now",
1741 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1742
00431a78
PA
1743 scoped_restore_current_thread restore_thread;
1744
1745 switch_to_thread (tp);
ad53cd71 1746
187b041e
SM
1747 CORE_ADDR original_pc = regcache_read_pc (regcache);
1748 CORE_ADDR displaced_pc;
237fc4c9 1749
187b041e
SM
1750 displaced_step_prepare_status status
1751 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
237fc4c9 1752
187b041e 1753 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
d35ae833 1754 {
187b041e
SM
1755 displaced_debug_printf ("failed to prepare (%s)",
1756 target_pid_to_str (tp->ptid).c_str ());
d35ae833 1757
bab37966 1758 return DISPLACED_STEP_PREPARE_STATUS_CANT;
d35ae833 1759 }
187b041e 1760 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
7f03bd92 1761 {
187b041e
SM
1762 /* Not enough displaced stepping resources available, defer this
1763 request by placing it in the queue. */
1764
1765 displaced_debug_printf ("not enough resources available, "
1766 "deferring step of %s",
1767 target_pid_to_str (tp->ptid).c_str ());
1768
1769 global_thread_step_over_chain_enqueue (tp);
1770
1771 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
7f03bd92 1772 }
237fc4c9 1773
187b041e
SM
1774 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1775
9f5a595d
UW
1776 /* Save the information we need to fix things up if the step
1777 succeeds. */
187b041e 1778 disp_step_thread_state.set (gdbarch);
9f5a595d 1779
187b041e 1780 tp->inf->displaced_step_state.in_progress_count++;
ad53cd71 1781
187b041e
SM
1782 displaced_debug_printf ("prepared successfully thread=%s, "
1783 "original_pc=%s, displaced_pc=%s",
1784 target_pid_to_str (tp->ptid).c_str (),
1785 paddress (gdbarch, original_pc),
1786 paddress (gdbarch, displaced_pc));
237fc4c9 1787
bab37966 1788 return DISPLACED_STEP_PREPARE_STATUS_OK;
237fc4c9
PA
1789}
1790
3fc8eb30
PA
1791/* Wrapper for displaced_step_prepare_throw that disables further
1792 attempts at displaced stepping if we get a memory error. */
1793
bab37966 1794static displaced_step_prepare_status
00431a78 1795displaced_step_prepare (thread_info *thread)
3fc8eb30 1796{
bab37966
SM
1797 displaced_step_prepare_status status
1798 = DISPLACED_STEP_PREPARE_STATUS_CANT;
3fc8eb30 1799
a70b8144 1800 try
3fc8eb30 1801 {
bab37966 1802 status = displaced_step_prepare_throw (thread);
3fc8eb30 1803 }
230d2906 1804 catch (const gdb_exception_error &ex)
3fc8eb30 1805 {
16b41842
PA
1806 if (ex.error != MEMORY_ERROR
1807 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1808 throw;
3fc8eb30 1809
1eb8556f
SM
1810 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1811 ex.what ());
3fc8eb30
PA
1812
1813 /* Be verbose if "set displaced-stepping" is "on", silent if
1814 "auto". */
1815 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1816 {
fd7dcb94 1817 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1818 ex.what ());
3fc8eb30
PA
1819 }
1820
1821 /* Disable further displaced stepping attempts. */
f5f01699 1822 thread->inf->displaced_step_state.failed_before = 1;
3fc8eb30 1823 }
3fc8eb30 1824
bab37966 1825 return status;
3fc8eb30
PA
1826}
1827
bab37966
SM
1828/* If we displaced stepped an instruction successfully, adjust registers and
1829 memory to yield the same effect the instruction would have had if we had
1830 executed it at its original address, and return
1831 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1832 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
372316f1 1833
bab37966
SM
1834 If the thread wasn't displaced stepping, return
1835 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1836
1837static displaced_step_finish_status
7def77a1 1838displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1839{
187b041e 1840 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
fc1cf338 1841
187b041e
SM
1842 /* Was this thread performing a displaced step? */
1843 if (!displaced->in_progress ())
bab37966 1844 return DISPLACED_STEP_FINISH_STATUS_OK;
237fc4c9 1845
187b041e
SM
1846 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
1847 event_thread->inf->displaced_step_state.in_progress_count--;
1848
cb71640d
PA
1849 /* Fixup may need to read memory/registers. Switch to the thread
1850 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d 1851 the current thread, and displaced_step_restore performs ptid-dependent
328d42d8 1852 memory accesses using current_inferior(). */
00431a78 1853 switch_to_thread (event_thread);
cb71640d 1854
d43b7a2d
TBA
1855 displaced_step_reset_cleanup cleanup (displaced);
1856
187b041e
SM
1857 /* Do the fixup, and release the resources acquired to do the displaced
1858 step. */
1859 return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1860 event_thread, signal);
c2829269 1861}
1c5cfe86 1862
4d9d9d04
PA
1863/* Data to be passed around while handling an event. This data is
1864 discarded between events. */
1865struct execution_control_state
1866{
5b6d1e4f 1867 process_stratum_target *target;
4d9d9d04
PA
1868 ptid_t ptid;
1869 /* The thread that got the event, if this was a thread event; NULL
1870 otherwise. */
1871 struct thread_info *event_thread;
1872
1873 struct target_waitstatus ws;
1874 int stop_func_filled_in;
1875 CORE_ADDR stop_func_start;
1876 CORE_ADDR stop_func_end;
1877 const char *stop_func_name;
1878 int wait_some_more;
1879
1880 /* True if the event thread hit the single-step breakpoint of
1881 another thread. Thus the event doesn't cause a stop, the thread
1882 needs to be single-stepped past the single-step breakpoint before
1883 we can switch back to the original stepping thread. */
1884 int hit_singlestep_breakpoint;
1885};
1886
1887/* Clear ECS and set it to point at TP. */
c2829269
PA
1888
1889static void
4d9d9d04
PA
1890reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1891{
1892 memset (ecs, 0, sizeof (*ecs));
1893 ecs->event_thread = tp;
1894 ecs->ptid = tp->ptid;
1895}
1896
1897static void keep_going_pass_signal (struct execution_control_state *ecs);
1898static void prepare_to_wait (struct execution_control_state *ecs);
c4464ade 1899static bool keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1900static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1901
1902/* Are there any pending step-over requests? If so, run all we can
1903 now and return true. Otherwise, return false. */
1904
c4464ade 1905static bool
c2829269
PA
1906start_step_over (void)
1907{
3ec3145c
SM
1908 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1909
187b041e 1910 thread_info *next;
c2829269 1911
372316f1
PA
1912 /* Don't start a new step-over if we already have an in-line
1913 step-over operation ongoing. */
1914 if (step_over_info_valid_p ())
c4464ade 1915 return false;
372316f1 1916
187b041e
SM
1917 /* Steal the global thread step over chain. As we try to initiate displaced
1918 steps, threads will be enqueued in the global chain if no buffers are
1919 available. If we iterated on the global chain directly, we might iterate
1920 indefinitely. */
1921 thread_info *threads_to_step = global_thread_step_over_chain_head;
1922 global_thread_step_over_chain_head = NULL;
1923
1924 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
1925 thread_step_over_chain_length (threads_to_step));
1926
1927 bool started = false;
1928
1929 /* On scope exit (whatever the reason, return or exception), if there are
1930 threads left in the THREADS_TO_STEP chain, put these threads back in the
1931 global list. */
1932 SCOPE_EXIT
1933 {
1934 if (threads_to_step == nullptr)
1935 infrun_debug_printf ("step-over queue now empty");
1936 else
1937 {
1938 infrun_debug_printf ("putting back %d threads to step in global queue",
1939 thread_step_over_chain_length (threads_to_step));
1940
1941 global_thread_step_over_chain_enqueue_chain (threads_to_step);
1942 }
1943 };
1944
1945 for (thread_info *tp = threads_to_step; tp != NULL; tp = next)
237fc4c9 1946 {
4d9d9d04
PA
1947 struct execution_control_state ecss;
1948 struct execution_control_state *ecs = &ecss;
8d297bbf 1949 step_over_what step_what;
372316f1 1950 int must_be_in_line;
c2829269 1951
c65d6b55
PA
1952 gdb_assert (!tp->stop_requested);
1953
187b041e 1954 next = thread_step_over_chain_next (threads_to_step, tp);
237fc4c9 1955
187b041e
SM
1956 if (tp->inf->displaced_step_state.unavailable)
1957 {
1958 /* The arch told us to not even try preparing another displaced step
1959 for this inferior. Just leave the thread in THREADS_TO_STEP, it
1960 will get moved to the global chain on scope exit. */
1961 continue;
1962 }
1963
81d92403
SM
1964 if (tp->inf->thread_waiting_for_vfork_done)
1965 {
1966 /* When we stop all threads, handling a vfork, any thread in the step
1967 over chain remains there. A user could also try to continue a
1968 thread stopped at a breakpoint while another thread is waiting for
1969 a vfork-done event. In any case, we don't want to start a step
1970 over right now. */
1971 continue;
1972 }
1973
187b041e
SM
1974 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
1975 while we try to prepare the displaced step, we don't add it back to
1976 the global step over chain. This is to avoid a thread staying in the
1977 step over chain indefinitely if something goes wrong when resuming it.
1978 If the error is intermittent and it still needs a step over, it will
1979 get enqueued again when we try to resume it normally. */
1980 thread_step_over_chain_remove (&threads_to_step, tp);
c2829269 1981
372316f1
PA
1982 step_what = thread_still_needs_step_over (tp);
1983 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1984 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1985 && !use_displaced_stepping (tp)));
372316f1
PA
1986
1987 /* We currently stop all threads of all processes to step-over
1988 in-line. If we need to start a new in-line step-over, let
1989 any pending displaced steps finish first. */
187b041e
SM
1990 if (must_be_in_line && displaced_step_in_progress_any_thread ())
1991 {
1992 global_thread_step_over_chain_enqueue (tp);
1993 continue;
1994 }
c2829269 1995
372316f1
PA
1996 if (tp->control.trap_expected
1997 || tp->resumed
1998 || tp->executing)
ad53cd71 1999 {
4d9d9d04
PA
2000 internal_error (__FILE__, __LINE__,
2001 "[%s] has inconsistent state: "
372316f1 2002 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 2003 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 2004 tp->control.trap_expected,
372316f1 2005 tp->resumed,
4d9d9d04 2006 tp->executing);
ad53cd71 2007 }
1c5cfe86 2008
1eb8556f
SM
2009 infrun_debug_printf ("resuming [%s] for step-over",
2010 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
2011
2012 /* keep_going_pass_signal skips the step-over if the breakpoint
2013 is no longer inserted. In all-stop, we want to keep looking
2014 for a thread that needs a step-over instead of resuming TP,
2015 because we wouldn't be able to resume anything else until the
2016 target stops again. In non-stop, the resume always resumes
2017 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2018 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2019 continue;
8550d3b3 2020
00431a78 2021 switch_to_thread (tp);
4d9d9d04
PA
2022 reset_ecs (ecs, tp);
2023 keep_going_pass_signal (ecs);
1c5cfe86 2024
4d9d9d04
PA
2025 if (!ecs->wait_some_more)
2026 error (_("Command aborted."));
1c5cfe86 2027
187b041e
SM
2028 /* If the thread's step over could not be initiated because no buffers
2029 were available, it was re-added to the global step over chain. */
2030 if (tp->resumed)
2031 {
2032 infrun_debug_printf ("[%s] was resumed.",
2033 target_pid_to_str (tp->ptid).c_str ());
2034 gdb_assert (!thread_is_in_step_over_chain (tp));
2035 }
2036 else
2037 {
2038 infrun_debug_printf ("[%s] was NOT resumed.",
2039 target_pid_to_str (tp->ptid).c_str ());
2040 gdb_assert (thread_is_in_step_over_chain (tp));
2041 }
372316f1
PA
2042
2043 /* If we started a new in-line step-over, we're done. */
2044 if (step_over_info_valid_p ())
2045 {
2046 gdb_assert (tp->control.trap_expected);
187b041e
SM
2047 started = true;
2048 break;
372316f1
PA
2049 }
2050
fbea99ea 2051 if (!target_is_non_stop_p ())
4d9d9d04
PA
2052 {
2053 /* On all-stop, shouldn't have resumed unless we needed a
2054 step over. */
2055 gdb_assert (tp->control.trap_expected
2056 || tp->step_after_step_resume_breakpoint);
2057
2058 /* With remote targets (at least), in all-stop, we can't
2059 issue any further remote commands until the program stops
2060 again. */
187b041e
SM
2061 started = true;
2062 break;
1c5cfe86 2063 }
c2829269 2064
4d9d9d04
PA
2065 /* Either the thread no longer needed a step-over, or a new
2066 displaced stepping sequence started. Even in the latter
2067 case, continue looking. Maybe we can also start another
2068 displaced step on a thread of another process. */
237fc4c9 2069 }
4d9d9d04 2070
187b041e 2071 return started;
237fc4c9
PA
2072}
2073
5231c1fd
PA
2074/* Update global variables holding ptids to hold NEW_PTID if they were
2075 holding OLD_PTID. */
2076static void
b161a60d
SM
2077infrun_thread_ptid_changed (process_stratum_target *target,
2078 ptid_t old_ptid, ptid_t new_ptid)
5231c1fd 2079{
b161a60d
SM
2080 if (inferior_ptid == old_ptid
2081 && current_inferior ()->process_target () == target)
5231c1fd 2082 inferior_ptid = new_ptid;
5231c1fd
PA
2083}
2084
237fc4c9 2085\f
c906108c 2086
53904c9e
AC
2087static const char schedlock_off[] = "off";
2088static const char schedlock_on[] = "on";
2089static const char schedlock_step[] = "step";
f2665db5 2090static const char schedlock_replay[] = "replay";
40478521 2091static const char *const scheduler_enums[] = {
ef346e04
AC
2092 schedlock_off,
2093 schedlock_on,
2094 schedlock_step,
f2665db5 2095 schedlock_replay,
ef346e04
AC
2096 NULL
2097};
f2665db5 2098static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2099static void
2100show_scheduler_mode (struct ui_file *file, int from_tty,
2101 struct cmd_list_element *c, const char *value)
2102{
3e43a32a
MS
2103 fprintf_filtered (file,
2104 _("Mode for locking scheduler "
2105 "during execution is \"%s\".\n"),
920d2a44
AC
2106 value);
2107}
c906108c
SS
2108
2109static void
eb4c3f4a 2110set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2111{
8a3ecb79 2112 if (!target_can_lock_scheduler ())
eefe576e
AC
2113 {
2114 scheduler_mode = schedlock_off;
d777bf0d
SM
2115 error (_("Target '%s' cannot support this command."),
2116 target_shortname ());
eefe576e 2117 }
c906108c
SS
2118}
2119
d4db2f36
PA
2120/* True if execution commands resume all threads of all processes by
2121 default; otherwise, resume only threads of the current inferior
2122 process. */
491144b5 2123bool sched_multi = false;
d4db2f36 2124
2facfe5c 2125/* Try to setup for software single stepping over the specified location.
c4464ade 2126 Return true if target_resume() should use hardware single step.
2facfe5c
DD
2127
2128 GDBARCH the current gdbarch.
2129 PC the location to step over. */
2130
c4464ade 2131static bool
2facfe5c
DD
2132maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2133{
c4464ade 2134 bool hw_step = true;
2facfe5c 2135
f02253f1 2136 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2137 && gdbarch_software_single_step_p (gdbarch))
2138 hw_step = !insert_single_step_breakpoints (gdbarch);
2139
2facfe5c
DD
2140 return hw_step;
2141}
c906108c 2142
f3263aa4
PA
2143/* See infrun.h. */
2144
09cee04b
PA
2145ptid_t
2146user_visible_resume_ptid (int step)
2147{
f3263aa4 2148 ptid_t resume_ptid;
09cee04b 2149
09cee04b
PA
2150 if (non_stop)
2151 {
2152 /* With non-stop mode on, threads are always handled
2153 individually. */
2154 resume_ptid = inferior_ptid;
2155 }
2156 else if ((scheduler_mode == schedlock_on)
03d46957 2157 || (scheduler_mode == schedlock_step && step))
09cee04b 2158 {
f3263aa4
PA
2159 /* User-settable 'scheduler' mode requires solo thread
2160 resume. */
09cee04b
PA
2161 resume_ptid = inferior_ptid;
2162 }
f2665db5
MM
2163 else if ((scheduler_mode == schedlock_replay)
2164 && target_record_will_replay (minus_one_ptid, execution_direction))
2165 {
2166 /* User-settable 'scheduler' mode requires solo thread resume in replay
2167 mode. */
2168 resume_ptid = inferior_ptid;
2169 }
f3263aa4
PA
2170 else if (!sched_multi && target_supports_multi_process ())
2171 {
2172 /* Resume all threads of the current process (and none of other
2173 processes). */
e99b03dc 2174 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2175 }
2176 else
2177 {
2178 /* Resume all threads of all processes. */
2179 resume_ptid = RESUME_ALL;
2180 }
09cee04b
PA
2181
2182 return resume_ptid;
2183}
2184
5b6d1e4f
PA
2185/* See infrun.h. */
2186
2187process_stratum_target *
2188user_visible_resume_target (ptid_t resume_ptid)
2189{
2190 return (resume_ptid == minus_one_ptid && sched_multi
2191 ? NULL
2192 : current_inferior ()->process_target ());
2193}
2194
fbea99ea
PA
2195/* Return a ptid representing the set of threads that we will resume,
2196 in the perspective of the target, assuming run control handling
2197 does not require leaving some threads stopped (e.g., stepping past
2198 breakpoint). USER_STEP indicates whether we're about to start the
2199 target for a stepping command. */
2200
2201static ptid_t
2202internal_resume_ptid (int user_step)
2203{
2204 /* In non-stop, we always control threads individually. Note that
2205 the target may always work in non-stop mode even with "set
2206 non-stop off", in which case user_visible_resume_ptid could
2207 return a wildcard ptid. */
2208 if (target_is_non_stop_p ())
2209 return inferior_ptid;
81d92403
SM
2210
2211 /* The rest of the function assumes non-stop==off and
2212 target-non-stop==off.
2213
2214 If a thread is waiting for a vfork-done event, it means breakpoints are out
2215 for this inferior (well, program space in fact). We don't want to resume
2216 any thread other than the one waiting for vfork done, otherwise these other
2217 threads could miss breakpoints. So if a thread in the resumption set is
2218 waiting for a vfork-done event, resume only that thread.
2219
2220 The resumption set width depends on whether schedule-multiple is on or off.
2221
2222 Note that if the target_resume interface was more flexible, we could be
2223 smarter here when schedule-multiple is on. For example, imagine 3
2224 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2225 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2226 target(s) to resume:
2227
2228 - All threads of inferior 1
2229 - Thread 2.1
2230 - Thread 3.2
2231
2232 Since we don't have that flexibility (we can only pass one ptid), just
2233 resume the first thread waiting for a vfork-done event we find (e.g. thread
2234 2.1). */
2235 if (sched_multi)
2236 {
2237 for (inferior *inf : all_non_exited_inferiors ())
2238 if (inf->thread_waiting_for_vfork_done != nullptr)
2239 return inf->thread_waiting_for_vfork_done->ptid;
2240 }
2241 else if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2242 return current_inferior ()->thread_waiting_for_vfork_done->ptid;
2243
2244 return user_visible_resume_ptid (user_step);
fbea99ea
PA
2245}
2246
64ce06e4
PA
2247/* Wrapper for target_resume, that handles infrun-specific
2248 bookkeeping. */
2249
2250static void
c4464ade 2251do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
64ce06e4
PA
2252{
2253 struct thread_info *tp = inferior_thread ();
2254
c65d6b55
PA
2255 gdb_assert (!tp->stop_requested);
2256
64ce06e4 2257 /* Install inferior's terminal modes. */
223ffa71 2258 target_terminal::inferior ();
64ce06e4
PA
2259
2260 /* Avoid confusing the next resume, if the next stop/resume
2261 happens to apply to another thread. */
2262 tp->suspend.stop_signal = GDB_SIGNAL_0;
2263
8f572e5c
PA
2264 /* Advise target which signals may be handled silently.
2265
2266 If we have removed breakpoints because we are stepping over one
2267 in-line (in any thread), we need to receive all signals to avoid
2268 accidentally skipping a breakpoint during execution of a signal
2269 handler.
2270
2271 Likewise if we're displaced stepping, otherwise a trap for a
2272 breakpoint in a signal handler might be confused with the
7def77a1 2273 displaced step finishing. We don't make the displaced_step_finish
8f572e5c
PA
2274 step distinguish the cases instead, because:
2275
2276 - a backtrace while stopped in the signal handler would show the
2277 scratch pad as frame older than the signal handler, instead of
2278 the real mainline code.
2279
2280 - when the thread is later resumed, the signal handler would
2281 return to the scratch pad area, which would no longer be
2282 valid. */
2283 if (step_over_info_valid_p ()
00431a78 2284 || displaced_step_in_progress (tp->inf))
adc6a863 2285 target_pass_signals ({});
64ce06e4 2286 else
adc6a863 2287 target_pass_signals (signal_pass);
64ce06e4 2288
17543e57
SM
2289 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2290 resume_ptid.to_string ().c_str (),
2291 step, gdb_signal_to_symbol_string (sig));
2292
64ce06e4 2293 target_resume (resume_ptid, step, sig);
85ad3aaf 2294
5b6d1e4f
PA
2295 if (target_can_async_p ())
2296 target_async (1);
64ce06e4
PA
2297}
2298
d930703d 2299/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2300 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2301 call 'resume', which handles exceptions. */
c906108c 2302
71d378ae
PA
2303static void
2304resume_1 (enum gdb_signal sig)
c906108c 2305{
515630c5 2306 struct regcache *regcache = get_current_regcache ();
ac7936df 2307 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2308 struct thread_info *tp = inferior_thread ();
8b86c959 2309 const address_space *aspace = regcache->aspace ();
b0f16a3e 2310 ptid_t resume_ptid;
856e7dd6
PA
2311 /* This represents the user's step vs continue request. When
2312 deciding whether "set scheduler-locking step" applies, it's the
2313 user's intention that counts. */
2314 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2315 /* This represents what we'll actually request the target to do.
2316 This can decay from a step to a continue, if e.g., we need to
2317 implement single-stepping with breakpoints (software
2318 single-step). */
c4464ade 2319 bool step;
c7e8a53c 2320
c65d6b55 2321 gdb_assert (!tp->stop_requested);
c2829269
PA
2322 gdb_assert (!thread_is_in_step_over_chain (tp));
2323
372316f1
PA
2324 if (tp->suspend.waitstatus_pending_p)
2325 {
1eb8556f
SM
2326 infrun_debug_printf
2327 ("thread %s has pending wait "
2328 "status %s (currently_stepping=%d).",
2329 target_pid_to_str (tp->ptid).c_str (),
2330 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2331 currently_stepping (tp));
372316f1 2332
5b6d1e4f 2333 tp->inf->process_target ()->threads_executing = true;
719546c4 2334 tp->resumed = true;
372316f1
PA
2335
2336 /* FIXME: What should we do if we are supposed to resume this
2337 thread with a signal? Maybe we should maintain a queue of
2338 pending signals to deliver. */
2339 if (sig != GDB_SIGNAL_0)
2340 {
fd7dcb94 2341 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2342 gdb_signal_to_name (sig),
2343 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2344 }
2345
2346 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2347
2348 if (target_can_async_p ())
9516f85a
AB
2349 {
2350 target_async (1);
2351 /* Tell the event loop we have an event to process. */
2352 mark_async_event_handler (infrun_async_inferior_event_token);
2353 }
372316f1
PA
2354 return;
2355 }
2356
2357 tp->stepped_breakpoint = 0;
2358
6b403daa
PA
2359 /* Depends on stepped_breakpoint. */
2360 step = currently_stepping (tp);
2361
060f2ef8 2362 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
74609e71 2363 {
48f9886d
PA
2364 /* Don't try to single-step a vfork parent that is waiting for
2365 the child to get out of the shared memory region (by exec'ing
2366 or exiting). This is particularly important on software
2367 single-step archs, as the child process would trip on the
2368 software single step breakpoint inserted for the parent
2369 process. Since the parent will not actually execute any
2370 instruction until the child is out of the shared region (such
2371 are vfork's semantics), it is safe to simply continue it.
2372 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2373 the parent, and tell it to `keep_going', which automatically
2374 re-sets it stepping. */
1eb8556f 2375 infrun_debug_printf ("resume : clear step");
c4464ade 2376 step = false;
74609e71
YQ
2377 }
2378
7ca9b62a
TBA
2379 CORE_ADDR pc = regcache_read_pc (regcache);
2380
1eb8556f
SM
2381 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2382 "current thread [%s] at %s",
2383 step, gdb_signal_to_symbol_string (sig),
2384 tp->control.trap_expected,
2385 target_pid_to_str (inferior_ptid).c_str (),
2386 paddress (gdbarch, pc));
c906108c 2387
c2c6d25f
JM
2388 /* Normally, by the time we reach `resume', the breakpoints are either
2389 removed or inserted, as appropriate. The exception is if we're sitting
2390 at a permanent breakpoint; we need to step over it, but permanent
2391 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2392 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2393 {
af48d08f
PA
2394 if (sig != GDB_SIGNAL_0)
2395 {
2396 /* We have a signal to pass to the inferior. The resume
2397 may, or may not take us to the signal handler. If this
2398 is a step, we'll need to stop in the signal handler, if
2399 there's one, (if the target supports stepping into
2400 handlers), or in the next mainline instruction, if
2401 there's no handler. If this is a continue, we need to be
2402 sure to run the handler with all breakpoints inserted.
2403 In all cases, set a breakpoint at the current address
2404 (where the handler returns to), and once that breakpoint
2405 is hit, resume skipping the permanent breakpoint. If
2406 that breakpoint isn't hit, then we've stepped into the
2407 signal handler (or hit some other event). We'll delete
2408 the step-resume breakpoint then. */
2409
1eb8556f
SM
2410 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2411 "deliver signal first");
af48d08f
PA
2412
2413 clear_step_over_info ();
2414 tp->control.trap_expected = 0;
2415
2416 if (tp->control.step_resume_breakpoint == NULL)
2417 {
2418 /* Set a "high-priority" step-resume, as we don't want
2419 user breakpoints at PC to trigger (again) when this
2420 hits. */
2421 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2422 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2423
2424 tp->step_after_step_resume_breakpoint = step;
2425 }
2426
2427 insert_breakpoints ();
2428 }
2429 else
2430 {
2431 /* There's no signal to pass, we can go ahead and skip the
2432 permanent breakpoint manually. */
1eb8556f 2433 infrun_debug_printf ("skipping permanent breakpoint");
af48d08f
PA
2434 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2435 /* Update pc to reflect the new address from which we will
2436 execute instructions. */
2437 pc = regcache_read_pc (regcache);
2438
2439 if (step)
2440 {
2441 /* We've already advanced the PC, so the stepping part
2442 is done. Now we need to arrange for a trap to be
2443 reported to handle_inferior_event. Set a breakpoint
2444 at the current PC, and run to it. Don't update
2445 prev_pc, because if we end up in
44a1ee51
PA
2446 switch_back_to_stepped_thread, we want the "expected
2447 thread advanced also" branch to be taken. IOW, we
2448 don't want this thread to step further from PC
af48d08f 2449 (overstep). */
1ac806b8 2450 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2451 insert_single_step_breakpoint (gdbarch, aspace, pc);
2452 insert_breakpoints ();
2453
fbea99ea 2454 resume_ptid = internal_resume_ptid (user_step);
c4464ade 2455 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
719546c4 2456 tp->resumed = true;
af48d08f
PA
2457 return;
2458 }
2459 }
6d350bb5 2460 }
c2c6d25f 2461
c1e36e3e
PA
2462 /* If we have a breakpoint to step over, make sure to do a single
2463 step only. Same if we have software watchpoints. */
2464 if (tp->control.trap_expected || bpstat_should_step ())
2465 tp->control.may_range_step = 0;
2466
7da6a5b9
LM
2467 /* If displaced stepping is enabled, step over breakpoints by executing a
2468 copy of the instruction at a different address.
237fc4c9
PA
2469
2470 We can't use displaced stepping when we have a signal to deliver;
2471 the comments for displaced_step_prepare explain why. The
2472 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2473 signals' explain what we do instead.
2474
2475 We can't use displaced stepping when we are waiting for vfork_done
2476 event, displaced stepping breaks the vfork child similarly as single
2477 step software breakpoint. */
3fc8eb30
PA
2478 if (tp->control.trap_expected
2479 && use_displaced_stepping (tp)
cb71640d 2480 && !step_over_info_valid_p ()
a493e3e2 2481 && sig == GDB_SIGNAL_0
060f2ef8 2482 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
237fc4c9 2483 {
bab37966
SM
2484 displaced_step_prepare_status prepare_status
2485 = displaced_step_prepare (tp);
fc1cf338 2486
bab37966 2487 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
d56b7306 2488 {
1eb8556f 2489 infrun_debug_printf ("Got placed in step-over queue");
4d9d9d04
PA
2490
2491 tp->control.trap_expected = 0;
d56b7306
VP
2492 return;
2493 }
bab37966 2494 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
3fc8eb30
PA
2495 {
2496 /* Fallback to stepping over the breakpoint in-line. */
2497
2498 if (target_is_non_stop_p ())
3cebef98 2499 stop_all_threads ("displaced stepping falling back on inline stepping");
3fc8eb30 2500
a01bda52 2501 set_step_over_info (regcache->aspace (),
21edc42f 2502 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2503
2504 step = maybe_software_singlestep (gdbarch, pc);
2505
2506 insert_breakpoints ();
2507 }
bab37966 2508 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
3fc8eb30 2509 {
3fc8eb30
PA
2510 /* Update pc to reflect the new address from which we will
2511 execute instructions due to displaced stepping. */
00431a78 2512 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2513
40a53766 2514 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
3fc8eb30 2515 }
bab37966
SM
2516 else
2517 gdb_assert_not_reached (_("Invalid displaced_step_prepare_status "
2518 "value."));
237fc4c9
PA
2519 }
2520
2facfe5c 2521 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2522 else if (step)
2facfe5c 2523 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2524
30852783
UW
2525 /* Currently, our software single-step implementation leads to different
2526 results than hardware single-stepping in one situation: when stepping
2527 into delivering a signal which has an associated signal handler,
2528 hardware single-step will stop at the first instruction of the handler,
2529 while software single-step will simply skip execution of the handler.
2530
2531 For now, this difference in behavior is accepted since there is no
2532 easy way to actually implement single-stepping into a signal handler
2533 without kernel support.
2534
2535 However, there is one scenario where this difference leads to follow-on
2536 problems: if we're stepping off a breakpoint by removing all breakpoints
2537 and then single-stepping. In this case, the software single-step
2538 behavior means that even if there is a *breakpoint* in the signal
2539 handler, GDB still would not stop.
2540
2541 Fortunately, we can at least fix this particular issue. We detect
2542 here the case where we are about to deliver a signal while software
2543 single-stepping with breakpoints removed. In this situation, we
2544 revert the decisions to remove all breakpoints and insert single-
2545 step breakpoints, and instead we install a step-resume breakpoint
2546 at the current address, deliver the signal without stepping, and
2547 once we arrive back at the step-resume breakpoint, actually step
2548 over the breakpoint we originally wanted to step over. */
34b7e8a6 2549 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2550 && sig != GDB_SIGNAL_0
2551 && step_over_info_valid_p ())
30852783
UW
2552 {
2553 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2554 immediately after a handler returns, we might already have
30852783
UW
2555 a step-resume breakpoint set on the earlier handler. We cannot
2556 set another step-resume breakpoint; just continue on until the
2557 original breakpoint is hit. */
2558 if (tp->control.step_resume_breakpoint == NULL)
2559 {
2c03e5be 2560 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2561 tp->step_after_step_resume_breakpoint = 1;
2562 }
2563
34b7e8a6 2564 delete_single_step_breakpoints (tp);
30852783 2565
31e77af2 2566 clear_step_over_info ();
30852783 2567 tp->control.trap_expected = 0;
31e77af2
PA
2568
2569 insert_breakpoints ();
30852783
UW
2570 }
2571
b0f16a3e
SM
2572 /* If STEP is set, it's a request to use hardware stepping
2573 facilities. But in that case, we should never
2574 use singlestep breakpoint. */
34b7e8a6 2575 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2576
fbea99ea 2577 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2578 if (tp->control.trap_expected)
b0f16a3e
SM
2579 {
2580 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2581 hit, either by single-stepping the thread with the breakpoint
2582 removed, or by displaced stepping, with the breakpoint inserted.
2583 In the former case, we need to single-step only this thread,
2584 and keep others stopped, as they can miss this breakpoint if
2585 allowed to run. That's not really a problem for displaced
2586 stepping, but we still keep other threads stopped, in case
2587 another thread is also stopped for a breakpoint waiting for
2588 its turn in the displaced stepping queue. */
b0f16a3e
SM
2589 resume_ptid = inferior_ptid;
2590 }
fbea99ea
PA
2591 else
2592 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2593
7f5ef605
PA
2594 if (execution_direction != EXEC_REVERSE
2595 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2596 {
372316f1
PA
2597 /* There are two cases where we currently need to step a
2598 breakpoint instruction when we have a signal to deliver:
2599
2600 - See handle_signal_stop where we handle random signals that
2601 could take us out of the stepping range. Normally, in
2602 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2603 signal handler with a breakpoint at PC, but there are cases
2604 where we should _always_ single-step, even if we have a
2605 step-resume breakpoint, like when a software watchpoint is
2606 set. Assuming single-stepping and delivering a signal at the
2607 same time would take us to the signal handler, then we could
2608 have removed the breakpoint at PC to step over it. However,
2609 some hardware step targets (like e.g., Mac OS) can't step
2610 into signal handlers, and for those, we need to leave the
2611 breakpoint at PC inserted, as otherwise if the handler
2612 recurses and executes PC again, it'll miss the breakpoint.
2613 So we leave the breakpoint inserted anyway, but we need to
2614 record that we tried to step a breakpoint instruction, so
372316f1
PA
2615 that adjust_pc_after_break doesn't end up confused.
2616
dda83cd7 2617 - In non-stop if we insert a breakpoint (e.g., a step-resume)
372316f1
PA
2618 in one thread after another thread that was stepping had been
2619 momentarily paused for a step-over. When we re-resume the
2620 stepping thread, it may be resumed from that address with a
2621 breakpoint that hasn't trapped yet. Seen with
2622 gdb.threads/non-stop-fair-events.exp, on targets that don't
2623 do displaced stepping. */
2624
1eb8556f
SM
2625 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2626 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2627
2628 tp->stepped_breakpoint = 1;
2629
b0f16a3e
SM
2630 /* Most targets can step a breakpoint instruction, thus
2631 executing it normally. But if this one cannot, just
2632 continue and we will hit it anyway. */
7f5ef605 2633 if (gdbarch_cannot_step_breakpoint (gdbarch))
c4464ade 2634 step = false;
b0f16a3e 2635 }
ef5cf84e 2636
b0f16a3e 2637 if (debug_displaced
cb71640d 2638 && tp->control.trap_expected
3fc8eb30 2639 && use_displaced_stepping (tp)
cb71640d 2640 && !step_over_info_valid_p ())
b0f16a3e 2641 {
00431a78 2642 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2643 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2644 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2645 gdb_byte buf[4];
2646
b0f16a3e 2647 read_memory (actual_pc, buf, sizeof (buf));
136821d9
SM
2648 displaced_debug_printf ("run %s: %s",
2649 paddress (resume_gdbarch, actual_pc),
2650 displaced_step_dump_bytes
2651 (buf, sizeof (buf)).c_str ());
b0f16a3e 2652 }
237fc4c9 2653
b0f16a3e
SM
2654 if (tp->control.may_range_step)
2655 {
2656 /* If we're resuming a thread with the PC out of the step
2657 range, then we're doing some nested/finer run control
2658 operation, like stepping the thread out of the dynamic
2659 linker or the displaced stepping scratch pad. We
2660 shouldn't have allowed a range step then. */
2661 gdb_assert (pc_in_thread_step_range (pc, tp));
2662 }
c1e36e3e 2663
64ce06e4 2664 do_target_resume (resume_ptid, step, sig);
719546c4 2665 tp->resumed = true;
c906108c 2666}
71d378ae
PA
2667
2668/* Resume the inferior. SIG is the signal to give the inferior
2669 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2670 rolls back state on error. */
2671
aff4e175 2672static void
71d378ae
PA
2673resume (gdb_signal sig)
2674{
a70b8144 2675 try
71d378ae
PA
2676 {
2677 resume_1 (sig);
2678 }
230d2906 2679 catch (const gdb_exception &ex)
71d378ae
PA
2680 {
2681 /* If resuming is being aborted for any reason, delete any
2682 single-step breakpoint resume_1 may have created, to avoid
2683 confusing the following resumption, and to avoid leaving
2684 single-step breakpoints perturbing other threads, in case
2685 we're running in non-stop mode. */
2686 if (inferior_ptid != null_ptid)
2687 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2688 throw;
71d378ae 2689 }
71d378ae
PA
2690}
2691
c906108c 2692\f
237fc4c9 2693/* Proceeding. */
c906108c 2694
4c2f2a79
PA
2695/* See infrun.h. */
2696
2697/* Counter that tracks the number of user visible stops. This can be used
2698 to tell whether a command has proceeded the inferior past the
2699 current location. This allows e.g., inferior function calls in
2700 breakpoint commands to not interrupt the command list. When the
2701 call finishes successfully, the inferior is standing at the same
2702 breakpoint as if nothing happened (and so we don't call
2703 normal_stop). */
2704static ULONGEST current_stop_id;
2705
2706/* See infrun.h. */
2707
2708ULONGEST
2709get_stop_id (void)
2710{
2711 return current_stop_id;
2712}
2713
2714/* Called when we report a user visible stop. */
2715
2716static void
2717new_stop_id (void)
2718{
2719 current_stop_id++;
2720}
2721
c906108c
SS
2722/* Clear out all variables saying what to do when inferior is continued.
2723 First do this, then set the ones you want, then call `proceed'. */
2724
a7212384
UW
2725static void
2726clear_proceed_status_thread (struct thread_info *tp)
c906108c 2727{
1eb8556f 2728 infrun_debug_printf ("%s", target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2729
372316f1
PA
2730 /* If we're starting a new sequence, then the previous finished
2731 single-step is no longer relevant. */
2732 if (tp->suspend.waitstatus_pending_p)
2733 {
2734 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2735 {
1eb8556f
SM
2736 infrun_debug_printf ("pending event of %s was a finished step. "
2737 "Discarding.",
2738 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2739
2740 tp->suspend.waitstatus_pending_p = 0;
2741 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2742 }
1eb8556f 2743 else
372316f1 2744 {
1eb8556f
SM
2745 infrun_debug_printf
2746 ("thread %s has pending wait status %s (currently_stepping=%d).",
2747 target_pid_to_str (tp->ptid).c_str (),
2748 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2749 currently_stepping (tp));
372316f1
PA
2750 }
2751 }
2752
70509625
PA
2753 /* If this signal should not be seen by program, give it zero.
2754 Used for debugging signals. */
2755 if (!signal_pass_state (tp->suspend.stop_signal))
2756 tp->suspend.stop_signal = GDB_SIGNAL_0;
2757
46e3ed7f 2758 delete tp->thread_fsm;
243a9253
PA
2759 tp->thread_fsm = NULL;
2760
16c381f0
JK
2761 tp->control.trap_expected = 0;
2762 tp->control.step_range_start = 0;
2763 tp->control.step_range_end = 0;
c1e36e3e 2764 tp->control.may_range_step = 0;
16c381f0
JK
2765 tp->control.step_frame_id = null_frame_id;
2766 tp->control.step_stack_frame_id = null_frame_id;
2767 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2768 tp->control.step_start_function = NULL;
a7212384 2769 tp->stop_requested = 0;
4e1c45ea 2770
16c381f0 2771 tp->control.stop_step = 0;
32400beb 2772
16c381f0 2773 tp->control.proceed_to_finish = 0;
414c69f7 2774
856e7dd6 2775 tp->control.stepping_command = 0;
17b2616c 2776
a7212384 2777 /* Discard any remaining commands or status from previous stop. */
16c381f0 2778 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2779}
32400beb 2780
a7212384 2781void
70509625 2782clear_proceed_status (int step)
a7212384 2783{
f2665db5
MM
2784 /* With scheduler-locking replay, stop replaying other threads if we're
2785 not replaying the user-visible resume ptid.
2786
2787 This is a convenience feature to not require the user to explicitly
2788 stop replaying the other threads. We're assuming that the user's
2789 intent is to resume tracing the recorded process. */
2790 if (!non_stop && scheduler_mode == schedlock_replay
2791 && target_record_is_replaying (minus_one_ptid)
2792 && !target_record_will_replay (user_visible_resume_ptid (step),
2793 execution_direction))
2794 target_record_stop_replaying ();
2795
08036331 2796 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2797 {
08036331 2798 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2799 process_stratum_target *resume_target
2800 = user_visible_resume_target (resume_ptid);
70509625
PA
2801
2802 /* In all-stop mode, delete the per-thread status of all threads
2803 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2804 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2805 clear_proceed_status_thread (tp);
6c95b8df
PA
2806 }
2807
d7e15655 2808 if (inferior_ptid != null_ptid)
a7212384
UW
2809 {
2810 struct inferior *inferior;
2811
2812 if (non_stop)
2813 {
6c95b8df
PA
2814 /* If in non-stop mode, only delete the per-thread status of
2815 the current thread. */
a7212384
UW
2816 clear_proceed_status_thread (inferior_thread ());
2817 }
6c95b8df 2818
d6b48e9c 2819 inferior = current_inferior ();
16c381f0 2820 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2821 }
2822
76727919 2823 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2824}
2825
99619bea
PA
2826/* Returns true if TP is still stopped at a breakpoint that needs
2827 stepping-over in order to make progress. If the breakpoint is gone
2828 meanwhile, we can skip the whole step-over dance. */
ea67f13b 2829
c4464ade 2830static bool
6c4cfb24 2831thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2832{
2833 if (tp->stepping_over_breakpoint)
2834 {
00431a78 2835 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2836
a01bda52 2837 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2838 regcache_read_pc (regcache))
2839 == ordinary_breakpoint_here)
c4464ade 2840 return true;
99619bea
PA
2841
2842 tp->stepping_over_breakpoint = 0;
2843 }
2844
c4464ade 2845 return false;
99619bea
PA
2846}
2847
6c4cfb24
PA
2848/* Check whether thread TP still needs to start a step-over in order
2849 to make progress when resumed. Returns a bitwise or of enum
2850 step_over_what bits, indicating what needs to be stepped over. */
2851
8d297bbf 2852static step_over_what
6c4cfb24
PA
2853thread_still_needs_step_over (struct thread_info *tp)
2854{
8d297bbf 2855 step_over_what what = 0;
6c4cfb24
PA
2856
2857 if (thread_still_needs_step_over_bp (tp))
2858 what |= STEP_OVER_BREAKPOINT;
2859
2860 if (tp->stepping_over_watchpoint
9aed480c 2861 && !target_have_steppable_watchpoint ())
6c4cfb24
PA
2862 what |= STEP_OVER_WATCHPOINT;
2863
2864 return what;
2865}
2866
483805cf
PA
2867/* Returns true if scheduler locking applies. STEP indicates whether
2868 we're about to do a step/next-like command to a thread. */
2869
c4464ade 2870static bool
856e7dd6 2871schedlock_applies (struct thread_info *tp)
483805cf
PA
2872{
2873 return (scheduler_mode == schedlock_on
2874 || (scheduler_mode == schedlock_step
f2665db5
MM
2875 && tp->control.stepping_command)
2876 || (scheduler_mode == schedlock_replay
2877 && target_record_will_replay (minus_one_ptid,
2878 execution_direction)));
483805cf
PA
2879}
2880
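/* For example (a sketch of the intended behavior, not new logic):
   under "set scheduler-locking step", schedlock_applies returns true
   only for a thread whose control.stepping_command is set (one doing
   "step"/"next"), so only that thread is resumed; a plain "continue"
   still resumes all threads.  Under "set scheduler-locking on" it
   returns true for every resumption.  */
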
1192f124
SM
2881/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
2882 stacks that have threads executing and don't have threads with
2883 pending events. */
5b6d1e4f
PA
2884
2885static void
1192f124
SM
2886maybe_set_commit_resumed_all_targets ()
2887{
b4b1a226
SM
2888 scoped_restore_current_thread restore_thread;
2889
1192f124
SM
2890 for (inferior *inf : all_non_exited_inferiors ())
2891 {
2892 process_stratum_target *proc_target = inf->process_target ();
2893
2894 if (proc_target->commit_resumed_state)
2895 {
2896 /* We already set this in a previous iteration, via another
2897 inferior sharing the process_stratum target. */
2898 continue;
2899 }
2900
2901 /* If the target has no resumed threads, it would be useless to
2902 ask it to commit the resumed threads. */
2903 if (!proc_target->threads_executing)
2904 {
2905 infrun_debug_printf ("not requesting commit-resumed for target "
2906 "%s, no resumed threads",
2907 proc_target->shortname ());
2908 continue;
2909 }
2910
2911 /* As an optimization, if a thread from this target has some
2912 status to report, handle it before requiring the target to
2913 commit its resumed threads: handling the status might lead to
2914 resuming more threads. */
2915 bool has_thread_with_pending_status = false;
2916 for (thread_info *thread : all_non_exited_threads (proc_target))
2917 if (thread->resumed && thread->suspend.waitstatus_pending_p)
2918 {
2919 has_thread_with_pending_status = true;
2920 break;
2921 }
2922
2923 if (has_thread_with_pending_status)
2924 {
2925 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
2926 " thread has a pending waitstatus",
2927 proc_target->shortname ());
2928 continue;
2929 }
2930
b4b1a226
SM
2931 switch_to_inferior_no_thread (inf);
2932
2933 if (target_has_pending_events ())
2934 {
2935 infrun_debug_printf ("not requesting commit-resumed for target %s, "
2936 "target has pending events",
2937 proc_target->shortname ());
2938 continue;
2939 }
2940
1192f124
SM
2941 infrun_debug_printf ("enabling commit-resumed for target %s",
2942 proc_target->shortname ());
2943
2944 proc_target->commit_resumed_state = true;
2945 }
2946}
2947
2948/* See infrun.h. */
2949
2950void
2951maybe_call_commit_resumed_all_targets ()
5b6d1e4f
PA
2952{
2953 scoped_restore_current_thread restore_thread;
2954
1192f124
SM
2955 for (inferior *inf : all_non_exited_inferiors ())
2956 {
2957 process_stratum_target *proc_target = inf->process_target ();
2958
2959 if (!proc_target->commit_resumed_state)
2960 continue;
2961
2962 switch_to_inferior_no_thread (inf);
2963
2964 infrun_debug_printf ("calling commit_resumed for target %s",
2965 proc_target->shortname ());
2966
2967 target_commit_resumed ();
2968 }
2969}
2970
2971/* To track nesting of scoped_disable_commit_resumed objects, ensuring
2972 that only the outermost one attempts to re-enable
2973 commit-resumed. */
2974static bool enable_commit_resumed = true;
2975
2976/* See infrun.h. */
2977
2978scoped_disable_commit_resumed::scoped_disable_commit_resumed
2979 (const char *reason)
2980 : m_reason (reason),
2981 m_prev_enable_commit_resumed (enable_commit_resumed)
2982{
2983 infrun_debug_printf ("reason=%s", m_reason);
2984
2985 enable_commit_resumed = false;
5b6d1e4f
PA
2986
2987 for (inferior *inf : all_non_exited_inferiors ())
1192f124
SM
2988 {
2989 process_stratum_target *proc_target = inf->process_target ();
5b6d1e4f 2990
1192f124
SM
2991 if (m_prev_enable_commit_resumed)
2992 {
2993 /* This is the outermost instance: force all
2994 COMMIT_RESUMED_STATE to false. */
2995 proc_target->commit_resumed_state = false;
2996 }
2997 else
2998 {
2999 /* This is not the outermost instance, we expect
3000 COMMIT_RESUMED_STATE to have been cleared by the
3001 outermost instance. */
3002 gdb_assert (!proc_target->commit_resumed_state);
3003 }
3004 }
3005}
3006
3007/* See infrun.h. */
3008
3009void
3010scoped_disable_commit_resumed::reset ()
3011{
3012 if (m_reset)
3013 return;
3014 m_reset = true;
3015
3016 infrun_debug_printf ("reason=%s", m_reason);
3017
3018 gdb_assert (!enable_commit_resumed);
3019
3020 enable_commit_resumed = m_prev_enable_commit_resumed;
3021
3022 if (m_prev_enable_commit_resumed)
5b6d1e4f 3023 {
1192f124
SM
3024 /* This is the outermost instance, re-enable
3025 COMMIT_RESUMED_STATE on the targets where it's possible. */
3026 maybe_set_commit_resumed_all_targets ();
3027 }
3028 else
3029 {
3030 /* This is not the outermost instance, we expect
3031 COMMIT_RESUMED_STATE to still be false. */
3032 for (inferior *inf : all_non_exited_inferiors ())
3033 {
3034 process_stratum_target *proc_target = inf->process_target ();
3035 gdb_assert (!proc_target->commit_resumed_state);
3036 }
3037 }
3038}
3039
3040/* See infrun.h. */
3041
3042scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3043{
3044 reset ();
3045}
3046
3047/* See infrun.h. */
3048
3049void
3050scoped_disable_commit_resumed::reset_and_commit ()
3051{
3052 reset ();
3053 maybe_call_commit_resumed_all_targets ();
3054}
3055
3056/* See infrun.h. */
3057
3058scoped_enable_commit_resumed::scoped_enable_commit_resumed
3059 (const char *reason)
3060 : m_reason (reason),
3061 m_prev_enable_commit_resumed (enable_commit_resumed)
3062{
3063 infrun_debug_printf ("reason=%s", m_reason);
3064
3065 if (!enable_commit_resumed)
3066 {
3067 enable_commit_resumed = true;
3068
3069 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3070 possible. */
3071 maybe_set_commit_resumed_all_targets ();
3072
3073 maybe_call_commit_resumed_all_targets ();
3074 }
3075}
3076
3077/* See infrun.h. */
3078
3079scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3080{
3081 infrun_debug_printf ("reason=%s", m_reason);
3082
3083 gdb_assert (enable_commit_resumed);
3084
3085 enable_commit_resumed = m_prev_enable_commit_resumed;
3086
3087 if (!enable_commit_resumed)
3088 {
3089 /* Force all COMMIT_RESUMED_STATE back to false. */
3090 for (inferior *inf : all_non_exited_inferiors ())
3091 {
3092 process_stratum_target *proc_target = inf->process_target ();
3093 proc_target->commit_resumed_state = false;
3094 }
5b6d1e4f
PA
3095 }
3096}
3097
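/* A minimal usage sketch of the RAII pair above, mirroring how
   proceed and fetch_inferior_event use it; do_some_resuming is a
   hypothetical stand-in for code that resumes threads:

     {
       scoped_disable_commit_resumed disable_commit_resumed ("example");
       do_some_resuming ();  // hypothetical work that resumes threads
       disable_commit_resumed.reset_and_commit ();
     }

   If an exception escapes instead, the destructor calls reset (),
   which restores the previous commit-resumed state without
   committing.  */
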
2f4fcf00
PA
3098/* Check that all the targets we're about to resume are in non-stop
3099 mode. Ideally, we'd only care whether all targets support
3100 target-async, but we're not there yet. E.g., stop_all_threads
3101 doesn't know how to handle all-stop targets. Also, the remote
3102 protocol in all-stop mode is synchronous, irrespective of
3103 target-async, which means that things like a breakpoint re-set
3104 triggered by one target would try to read memory from all targets
3105 and fail. */
3106
3107static void
3108check_multi_target_resumption (process_stratum_target *resume_target)
3109{
3110 if (!non_stop && resume_target == nullptr)
3111 {
3112 scoped_restore_current_thread restore_thread;
3113
3114 /* This is used to track whether we're resuming more than one
3115 target. */
3116 process_stratum_target *first_connection = nullptr;
3117
3118 /* The first inferior we see with a target that does not work in
3119 always-non-stop mode. */
3120 inferior *first_not_non_stop = nullptr;
3121
f058c521 3122 for (inferior *inf : all_non_exited_inferiors ())
2f4fcf00
PA
3123 {
3124 switch_to_inferior_no_thread (inf);
3125
55f6301a 3126 if (!target_has_execution ())
2f4fcf00
PA
3127 continue;
3128
3129 process_stratum_target *proc_target
3130 = current_inferior ()->process_target ();
3131
3132 if (!target_is_non_stop_p ())
3133 first_not_non_stop = inf;
3134
3135 if (first_connection == nullptr)
3136 first_connection = proc_target;
3137 else if (first_connection != proc_target
3138 && first_not_non_stop != nullptr)
3139 {
3140 switch_to_inferior_no_thread (first_not_non_stop);
3141
3142 proc_target = current_inferior ()->process_target ();
3143
3144 error (_("Connection %d (%s) does not support "
3145 "multi-target resumption."),
3146 proc_target->connection_number,
3147 make_target_connection_string (proc_target).c_str ());
3148 }
3149 }
3150 }
3151}
3152
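/* For instance (a sketch of the failure mode guarded against above):
   with inferior 1 on a native connection and inferior 2 on a separate
   remote connection, neither connection in non-stop mode, an all-stop
   "continue" with a wildcard resume would raise the error above,
   since resuming across two such connections at once isn't supported
   yet.  */
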
c906108c
SS
3153/* Basic routine for continuing the program in various fashions.
3154
3155 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
3156 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3157 or GDB_SIGNAL_DEFAULT to act according to how it stopped.
c906108c
SS
3158
3159 You should call clear_proceed_status before calling proceed. */
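
/* For example, a "continue"-style caller would typically do (a
   sketch, assuming the inferior is already stopped):

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   which resumes at the current PC and acts according to how the
   inferior last stopped.  */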
3160
3161void
64ce06e4 3162proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 3163{
3ec3145c
SM
3164 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3165
e58b0e63
PA
3166 struct regcache *regcache;
3167 struct gdbarch *gdbarch;
e58b0e63 3168 CORE_ADDR pc;
4d9d9d04
PA
3169 struct execution_control_state ecss;
3170 struct execution_control_state *ecs = &ecss;
c4464ade 3171 bool started;
c906108c 3172
e58b0e63
PA
3173 /* If we're stopped at a fork/vfork, follow the branch set by the
3174 "set follow-fork-mode" command; otherwise, we'll just proceed
3175 resuming the current thread. */
3176 if (!follow_fork ())
3177 {
3178 /* The target for some reason decided not to resume. */
3179 normal_stop ();
f148b27e 3180 if (target_can_async_p ())
b1a35af2 3181 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
3182 return;
3183 }
3184
842951eb
PA
3185 /* We'll update this if & when we switch to a new thread. */
3186 previous_inferior_ptid = inferior_ptid;
3187
e58b0e63 3188 regcache = get_current_regcache ();
ac7936df 3189 gdbarch = regcache->arch ();
8b86c959
YQ
3190 const address_space *aspace = regcache->aspace ();
3191
fc75c28b
TBA
3192 pc = regcache_read_pc_protected (regcache);
3193
08036331 3194 thread_info *cur_thr = inferior_thread ();
e58b0e63 3195
99619bea 3196 /* Fill in with reasonable starting values. */
08036331 3197 init_thread_stepping_state (cur_thr);
99619bea 3198
08036331 3199 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 3200
5b6d1e4f
PA
3201 ptid_t resume_ptid
3202 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3203 process_stratum_target *resume_target
3204 = user_visible_resume_target (resume_ptid);
3205
2f4fcf00
PA
3206 check_multi_target_resumption (resume_target);
3207
2acceee2 3208 if (addr == (CORE_ADDR) -1)
c906108c 3209 {
08036331 3210 if (pc == cur_thr->suspend.stop_pc
af48d08f 3211 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 3212 && execution_direction != EXEC_REVERSE)
3352ef37
AC
3213 /* There is a breakpoint at the address we will resume at,
3214 step one instruction before inserting breakpoints so that
3215 we do not stop right away (and report a second hit at this
b2175913
MS
3216 breakpoint).
3217
3218 Note, we don't do this in reverse, because we won't
3219 actually be executing the breakpoint insn anyway.
3220 We'll be (un-)executing the previous instruction. */
08036331 3221 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
3222 else if (gdbarch_single_step_through_delay_p (gdbarch)
3223 && gdbarch_single_step_through_delay (gdbarch,
3224 get_current_frame ()))
3352ef37
AC
3225 /* We stepped onto an instruction that needs to be stepped
3226 again before re-inserting the breakpoint, do so. */
08036331 3227 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
3228 }
3229 else
3230 {
515630c5 3231 regcache_write_pc (regcache, addr);
c906108c
SS
3232 }
3233
70509625 3234 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 3235 cur_thr->suspend.stop_signal = siggnal;
70509625 3236
4d9d9d04
PA
3237 /* If an exception is thrown from this point on, make sure to
3238 propagate GDB's knowledge of the executing state to the
3239 frontend/user running state. */
5b6d1e4f 3240 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
3241
3242 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3243 threads (e.g., we might need to set threads stepping over
3244 breakpoints first), from the user/frontend's point of view, all
3245 threads in RESUME_PTID are now running. Unless we're calling an
3246 inferior function, as in that case we pretend the inferior
3247 doesn't run at all. */
08036331 3248 if (!cur_thr->control.in_infcall)
719546c4 3249 set_running (resume_target, resume_ptid, true);
17b2616c 3250
1eb8556f
SM
3251 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
3252 gdb_signal_to_symbol_string (siggnal));
527159b7 3253
4d9d9d04
PA
3254 annotate_starting ();
3255
3256 /* Make sure that output from GDB appears before output from the
3257 inferior. */
3258 gdb_flush (gdb_stdout);
3259
d930703d
PA
3260 /* Since we've marked the inferior running, give it the terminal. A
3261 QUIT/Ctrl-C from here on is forwarded to the target (which can
3262 still detect attempts to unblock a stuck connection with repeated
3263 Ctrl-C from within target_pass_ctrlc). */
3264 target_terminal::inferior ();
3265
4d9d9d04
PA
3266 /* In a multi-threaded task we may select another thread and
3267 then continue or step.
3268
3269 But if a thread that we're resuming had stopped at a breakpoint,
3270 it will immediately cause another breakpoint stop without any
3271 execution (i.e. it will report a breakpoint hit incorrectly). So
3272 we must step over it first.
3273
3274 Look for threads other than the current one (CUR_THR) that reported
 3275 a breakpoint hit and haven't been resumed since then. */
3276
3277 /* If scheduler locking applies, we can avoid iterating over all
3278 threads. */
08036331 3279 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3280 {
5b6d1e4f
PA
3281 for (thread_info *tp : all_non_exited_threads (resume_target,
3282 resume_ptid))
08036331 3283 {
f3f8ece4
PA
3284 switch_to_thread_no_regs (tp);
3285
4d9d9d04
PA
3286 /* Ignore the current thread here. It's handled
3287 afterwards. */
08036331 3288 if (tp == cur_thr)
4d9d9d04 3289 continue;
c906108c 3290
4d9d9d04
PA
3291 if (!thread_still_needs_step_over (tp))
3292 continue;
3293
3294 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3295
1eb8556f
SM
3296 infrun_debug_printf ("need to step-over [%s] first",
3297 target_pid_to_str (tp->ptid).c_str ());
99619bea 3298
28d5518b 3299 global_thread_step_over_chain_enqueue (tp);
2adfaa28 3300 }
f3f8ece4
PA
3301
3302 switch_to_thread (cur_thr);
30852783
UW
3303 }
3304
4d9d9d04
PA
3305 /* Enqueue the current thread last, so that we move all other
3306 threads over their breakpoints first. */
08036331 3307 if (cur_thr->stepping_over_breakpoint)
28d5518b 3308 global_thread_step_over_chain_enqueue (cur_thr);
30852783 3309
4d9d9d04
PA
3310 /* If the thread isn't started, we'll still need to set its prev_pc,
3311 so that switch_back_to_stepped_thread knows the thread hasn't
3312 advanced. Must do this before resuming any thread, as in
3313 all-stop/remote, once we resume we can't send any other packet
3314 until the target stops again. */
fc75c28b 3315 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3316
a9bc57b9 3317 {
1192f124 3318 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
85ad3aaf 3319
a9bc57b9 3320 started = start_step_over ();
c906108c 3321
a9bc57b9
TT
3322 if (step_over_info_valid_p ())
3323 {
3324 /* Either this thread started a new in-line step over, or some
3325 other thread was already doing one. In either case, don't
3326 resume anything else until the step-over is finished. */
3327 }
3328 else if (started && !target_is_non_stop_p ())
3329 {
3330 /* A new displaced stepping sequence was started. In all-stop,
3331 we can't talk to the target anymore until it next stops. */
3332 }
3333 else if (!non_stop && target_is_non_stop_p ())
3334 {
3ec3145c
SM
3335 INFRUN_SCOPED_DEBUG_START_END
3336 ("resuming threads, all-stop-on-top-of-non-stop");
3337
a9bc57b9
TT
3338 /* In all-stop, but the target is always in non-stop mode.
3339 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3340 for (thread_info *tp : all_non_exited_threads (resume_target,
3341 resume_ptid))
3342 {
3343 switch_to_thread_no_regs (tp);
3344
f9fac3c8
SM
3345 if (!tp->inf->has_execution ())
3346 {
1eb8556f
SM
3347 infrun_debug_printf ("[%s] target has no execution",
3348 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3349 continue;
3350 }
f3f8ece4 3351
f9fac3c8
SM
3352 if (tp->resumed)
3353 {
1eb8556f
SM
3354 infrun_debug_printf ("[%s] resumed",
3355 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3356 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3357 continue;
3358 }
fbea99ea 3359
f9fac3c8
SM
3360 if (thread_is_in_step_over_chain (tp))
3361 {
1eb8556f
SM
3362 infrun_debug_printf ("[%s] needs step-over",
3363 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3364 continue;
3365 }
fbea99ea 3366
81d92403
SM
3367 /* If a thread of that inferior is waiting for a vfork-done
3368 (for a detached vfork child to exec or exit), breakpoints are
3369 removed. We must not resume any thread of that inferior, other
3370 than the one waiting for the vfork-done. */
3371 if (tp->inf->thread_waiting_for_vfork_done != nullptr
3372 && tp != tp->inf->thread_waiting_for_vfork_done)
3373 {
3374 infrun_debug_printf ("[%s] another thread of this inferior is "
3375 "waiting for vfork-done",
3376 tp->ptid.to_string ().c_str ());
3377 continue;
3378 }
3379
1eb8556f 3380 infrun_debug_printf ("resuming %s",
dda83cd7 3381 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3382
f9fac3c8
SM
3383 reset_ecs (ecs, tp);
3384 switch_to_thread (tp);
3385 keep_going_pass_signal (ecs);
3386 if (!ecs->wait_some_more)
3387 error (_("Command aborted."));
3388 }
a9bc57b9 3389 }
81d92403
SM
3390 else if (!cur_thr->resumed
3391 && !thread_is_in_step_over_chain (cur_thr)
3392 /* In non-stop, forbid resuming a thread if some other thread of
3393 that inferior is waiting for a vfork-done event (this means
3394 breakpoints are out for this inferior). */
3395 && !(non_stop && cur_thr->inf->thread_waiting_for_vfork_done))
a9bc57b9
TT
3396 {
3397 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3398 reset_ecs (ecs, cur_thr);
3399 switch_to_thread (cur_thr);
a9bc57b9
TT
3400 keep_going_pass_signal (ecs);
3401 if (!ecs->wait_some_more)
3402 error (_("Command aborted."));
3403 }
c906108c 3404
1192f124
SM
3405 disable_commit_resumed.reset_and_commit ();
3406 }
85ad3aaf 3407
731f534f 3408 finish_state.release ();
c906108c 3409
873657b9
PA
3410 /* If we've switched threads above, switch back to the previously
3411 current thread. We don't want the user to see a different
3412 selected thread. */
3413 switch_to_thread (cur_thr);
3414
0b333c5e
PA
3415 /* Tell the event loop to wait for it to stop. If the target
3416 supports asynchronous execution, it'll do this from within
3417 target_resume. */
362646f5 3418 if (!target_can_async_p ())
0b333c5e 3419 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3420}
c906108c
SS
3421\f
3422
3423/* Start remote-debugging of a machine over a serial link. */
96baa820 3424
c906108c 3425void
8621d6a9 3426start_remote (int from_tty)
c906108c 3427{
5b6d1e4f
PA
3428 inferior *inf = current_inferior ();
3429 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3430
1777feb0 3431 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3432 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3433 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3434 nothing is returned (instead of just blocking). Because of this,
3435 targets expecting an immediate response need to, internally, set
3436 things up so that the target_wait() is forced to eventually
1777feb0 3437 timeout. */
6426a772
JM
3438 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3439 differentiate to its caller what the state of the target is after
3440 the initial open has been performed. Here we're assuming that
3441 the target has stopped. It should be possible to eventually have
3442 target_open() return to the caller an indication that the target
3443 is currently running and GDB state should be set to the same as
1777feb0 3444 for an async run. */
5b6d1e4f 3445 wait_for_inferior (inf);
8621d6a9
DJ
3446
3447 /* Now that the inferior has stopped, do any bookkeeping like
3448 loading shared libraries. We want to do this before normal_stop,
3449 so that the displayed frame is up to date. */
a7aba266 3450 post_create_inferior (from_tty);
8621d6a9 3451
6426a772 3452 normal_stop ();
c906108c
SS
3453}
3454
3455/* Initialize static vars when a new inferior begins. */
3456
3457void
96baa820 3458init_wait_for_inferior (void)
c906108c
SS
3459{
3460 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3461
c906108c
SS
3462 breakpoint_init_inferior (inf_starting);
3463
70509625 3464 clear_proceed_status (0);
9f976b41 3465
ab1ddbcf 3466 nullify_last_target_wait_ptid ();
237fc4c9 3467
842951eb 3468 previous_inferior_ptid = inferior_ptid;
c906108c 3469}
237fc4c9 3470
c906108c 3471\f
488f131b 3472
ec9499be 3473static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3474
568d6575
UW
3475static void handle_step_into_function (struct gdbarch *gdbarch,
3476 struct execution_control_state *ecs);
3477static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3478 struct execution_control_state *ecs);
4f5d7f63 3479static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3480static void check_exception_resume (struct execution_control_state *,
28106bc2 3481 struct frame_info *);
611c83ae 3482
bdc36728 3483static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3484static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3485static void keep_going (struct execution_control_state *ecs);
94c57d6a 3486static void process_event_stop_test (struct execution_control_state *ecs);
c4464ade 3487static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3488
252fbfc8
PA
3489/* This function is attached as a "thread_stop_requested" observer.
3490 Clean up local state that assumed the PTID was to be resumed, and
3491 report the stop to the frontend. */
3492
2c0b251b 3493static void
252fbfc8
PA
3494infrun_thread_stop_requested (ptid_t ptid)
3495{
5b6d1e4f
PA
3496 process_stratum_target *curr_target = current_inferior ()->process_target ();
3497
c65d6b55
PA
3498 /* PTID was requested to stop. If the thread was already stopped,
3499 but the user/frontend doesn't know about that yet (e.g., the
3500 thread had been temporarily paused for some step-over), set up
3501 for reporting the stop now. */
5b6d1e4f 3502 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3503 {
3504 if (tp->state != THREAD_RUNNING)
3505 continue;
3506 if (tp->executing)
3507 continue;
c65d6b55 3508
08036331
PA
3509 /* Remove matching threads from the step-over queue, so
3510 start_step_over doesn't try to resume them
3511 automatically. */
3512 if (thread_is_in_step_over_chain (tp))
28d5518b 3513 global_thread_step_over_chain_remove (tp);
c65d6b55 3514
08036331
PA
3515 /* If the thread is stopped, but the user/frontend doesn't
3516 know about that yet, queue a pending event, as if the
3517 thread had just stopped now. Unless the thread already had
3518 a pending event. */
3519 if (!tp->suspend.waitstatus_pending_p)
3520 {
3521 tp->suspend.waitstatus_pending_p = 1;
3522 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3523 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3524 }
c65d6b55 3525
08036331
PA
3526 /* Clear the inline-frame state, since we're re-processing the
3527 stop. */
5b6d1e4f 3528 clear_inline_frame_state (tp);
c65d6b55 3529
08036331
PA
3530 /* If this thread was paused because some other thread was
3531 doing an inline-step over, let that finish first. Once
3532 that happens, we'll restart all threads and consume pending
3533 stop events then. */
3534 if (step_over_info_valid_p ())
3535 continue;
3536
3537 /* Otherwise we can process the (new) pending event now. Set
3538 it so this pending event is considered by
3539 do_target_wait. */
719546c4 3540 tp->resumed = true;
08036331 3541 }
252fbfc8
PA
3542}
3543
a07daef3
PA
3544static void
3545infrun_thread_thread_exit (struct thread_info *tp, int silent)
3546{
5b6d1e4f
PA
3547 if (target_last_proc_target == tp->inf->process_target ()
3548 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3549 nullify_last_target_wait_ptid ();
3550}
3551
0cbcdb96
PA
3552/* Delete the step resume, single-step and longjmp/exception resume
3553 breakpoints of TP. */
4e1c45ea 3554
0cbcdb96
PA
3555static void
3556delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3557{
0cbcdb96
PA
3558 delete_step_resume_breakpoint (tp);
3559 delete_exception_resume_breakpoint (tp);
34b7e8a6 3560 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3561}
3562
0cbcdb96
PA
3563/* If the target still has execution, call FUNC for each thread that
3564 just stopped. In all-stop, that's all the non-exited threads; in
3565 non-stop, that's the current thread, only. */
3566
3567typedef void (*for_each_just_stopped_thread_callback_func)
3568 (struct thread_info *tp);
4e1c45ea
PA
3569
3570static void
0cbcdb96 3571for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3572{
55f6301a 3573 if (!target_has_execution () || inferior_ptid == null_ptid)
4e1c45ea
PA
3574 return;
3575
fbea99ea 3576 if (target_is_non_stop_p ())
4e1c45ea 3577 {
0cbcdb96
PA
3578 /* If in non-stop mode, only the current thread stopped. */
3579 func (inferior_thread ());
4e1c45ea
PA
3580 }
3581 else
0cbcdb96 3582 {
0cbcdb96 3583 /* In all-stop mode, all threads have stopped. */
08036331
PA
3584 for (thread_info *tp : all_non_exited_threads ())
3585 func (tp);
0cbcdb96
PA
3586 }
3587}
3588
3589/* Delete the step resume and longjmp/exception resume breakpoints of
3590 the threads that just stopped. */
3591
3592static void
3593delete_just_stopped_threads_infrun_breakpoints (void)
3594{
3595 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3596}
3597
3598/* Delete the single-step breakpoints of the threads that just
3599 stopped. */
7c16b83e 3600
34b7e8a6
PA
3601static void
3602delete_just_stopped_threads_single_step_breakpoints (void)
3603{
3604 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3605}
3606
221e1a37 3607/* See infrun.h. */
223698f8 3608
221e1a37 3609void
223698f8
DE
3610print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3611 const struct target_waitstatus *ws)
3612{
e71daf80
SM
3613 infrun_debug_printf ("target_wait (%d.%ld.%ld [%s], status) =",
3614 waiton_ptid.pid (),
3615 waiton_ptid.lwp (),
3616 waiton_ptid.tid (),
3617 target_pid_to_str (waiton_ptid).c_str ());
3618 infrun_debug_printf (" %d.%ld.%ld [%s],",
3619 result_ptid.pid (),
3620 result_ptid.lwp (),
3621 result_ptid.tid (),
3622 target_pid_to_str (result_ptid).c_str ());
3623 infrun_debug_printf (" %s", target_waitstatus_to_string (ws).c_str ());
223698f8
DE
3624}
3625
372316f1
PA
3626/* Select a thread at random, out of those which are resumed and have
3627 had events. */
3628
3629static struct thread_info *
5b6d1e4f 3630random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3631{
372316f1 3632 int num_events = 0;
08036331 3633
5b6d1e4f 3634 auto has_event = [&] (thread_info *tp)
08036331 3635 {
5b6d1e4f
PA
3636 return (tp->ptid.matches (waiton_ptid)
3637 && tp->resumed
08036331
PA
3638 && tp->suspend.waitstatus_pending_p);
3639 };
372316f1
PA
3640
3641 /* First see how many events we have. Count only resumed threads
3642 that have an event pending. */
5b6d1e4f 3643 for (thread_info *tp : inf->non_exited_threads ())
08036331 3644 if (has_event (tp))
372316f1
PA
3645 num_events++;
3646
3647 if (num_events == 0)
3648 return NULL;
3649
3650 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3651 int random_selector = (int) ((num_events * (double) rand ())
3652 / (RAND_MAX + 1.0));
372316f1 3653
1eb8556f
SM
3654 if (num_events > 1)
3655 infrun_debug_printf ("Found %d events, selecting #%d",
3656 num_events, random_selector);
372316f1
PA
3657
3658 /* Select the Nth thread that has had an event. */
5b6d1e4f 3659 for (thread_info *tp : inf->non_exited_threads ())
08036331 3660 if (has_event (tp))
372316f1 3661 if (random_selector-- == 0)
08036331 3662 return tp;
372316f1 3663
08036331 3664 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3665}
3666
3667/* Wrapper for target_wait that first checks whether threads have
3668 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3669 more events. INF is the inferior we're using to call target_wait
3670 on. */
372316f1
PA
3671
3672static ptid_t
5b6d1e4f 3673do_target_wait_1 (inferior *inf, ptid_t ptid,
b60cea74 3674 target_waitstatus *status, target_wait_flags options)
372316f1
PA
3675{
3676 ptid_t event_ptid;
3677 struct thread_info *tp;
3678
24ed6739
AB
3679 /* We know that we are looking for an event in the target of inferior
3680 INF, but we don't know which thread the event might come from. As
3681 such we want to make sure that INFERIOR_PTID is reset so that none of
3682 the wait code relies on it - doing so is always a mistake. */
3683 switch_to_inferior_no_thread (inf);
3684
372316f1
PA
3685 /* First check if there is a resumed thread with a wait status
3686 pending. */
d7e15655 3687 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3688 {
5b6d1e4f 3689 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3690 }
3691 else
3692 {
1eb8556f
SM
3693 infrun_debug_printf ("Waiting for specific thread %s.",
3694 target_pid_to_str (ptid).c_str ());
372316f1
PA
3695
3696 /* We have a specific thread to check. */
5b6d1e4f 3697 tp = find_thread_ptid (inf, ptid);
372316f1
PA
3698 gdb_assert (tp != NULL);
3699 if (!tp->suspend.waitstatus_pending_p)
3700 tp = NULL;
3701 }
3702
3703 if (tp != NULL
3704 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3705 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3706 {
00431a78 3707 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3708 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3709 CORE_ADDR pc;
3710 int discard = 0;
3711
3712 pc = regcache_read_pc (regcache);
3713
3714 if (pc != tp->suspend.stop_pc)
3715 {
1eb8556f
SM
3716 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3717 target_pid_to_str (tp->ptid).c_str (),
3718 paddress (gdbarch, tp->suspend.stop_pc),
3719 paddress (gdbarch, pc));
372316f1
PA
3720 discard = 1;
3721 }
a01bda52 3722 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1 3723 {
1eb8556f
SM
3724 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3725 target_pid_to_str (tp->ptid).c_str (),
3726 paddress (gdbarch, pc));
372316f1
PA
3727
3728 discard = 1;
3729 }
3730
3731 if (discard)
3732 {
1eb8556f
SM
3733 infrun_debug_printf ("pending event of %s cancelled.",
3734 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3735
3736 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3737 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3738 }
3739 }
3740
3741 if (tp != NULL)
3742 {
1eb8556f
SM
3743 infrun_debug_printf ("Using pending wait status %s for %s.",
3744 target_waitstatus_to_string
3745 (&tp->suspend.waitstatus).c_str (),
3746 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3747
3748 /* Now that we've selected our final event LWP, un-adjust its PC
3749 if it was a software breakpoint (and the target doesn't
3750 always adjust the PC itself). */
3751 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3752 && !target_supports_stopped_by_sw_breakpoint ())
3753 {
3754 struct regcache *regcache;
3755 struct gdbarch *gdbarch;
3756 int decr_pc;
3757
00431a78 3758 regcache = get_thread_regcache (tp);
ac7936df 3759 gdbarch = regcache->arch ();
372316f1
PA
3760
3761 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3762 if (decr_pc != 0)
3763 {
3764 CORE_ADDR pc;
3765
3766 pc = regcache_read_pc (regcache);
3767 regcache_write_pc (regcache, pc + decr_pc);
3768 }
3769 }
3770
3771 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3772 *status = tp->suspend.waitstatus;
3773 tp->suspend.waitstatus_pending_p = 0;
3774
3775 /* Wake up the event loop again, until all pending events are
3776 processed. */
3777 if (target_is_async_p ())
3778 mark_async_event_handler (infrun_async_inferior_event_token);
3779 return tp->ptid;
3780 }
3781
3782 /* But if we don't find one, we'll have to wait. */
3783
d3a07122
SM
3784 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3785 a blocking wait. */
3786 if (!target_can_async_p ())
3787 options &= ~TARGET_WNOHANG;
3788
372316f1
PA
3789 if (deprecated_target_wait_hook)
3790 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3791 else
3792 event_ptid = target_wait (ptid, status, options);
3793
3794 return event_ptid;
3795}
3796
5b6d1e4f
PA
3797/* Wrapper for target_wait that first checks whether threads have
3798 pending statuses to report before actually asking the target for
b3e3a4c1 3799 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
3800
3801static bool
ac0d67ed 3802do_target_wait (execution_control_state *ecs, target_wait_flags options)
5b6d1e4f
PA
3803{
3804 int num_inferiors = 0;
3805 int random_selector;
3806
b3e3a4c1
SM
3807 /* For fairness, we pick the first inferior/target to poll at random
3808 out of all inferiors that may report events, and then continue
3809 polling the rest of the inferior list starting from that one in a
3810 circular fashion until the whole list is polled once. */
5b6d1e4f 3811
ac0d67ed 3812 auto inferior_matches = [] (inferior *inf)
5b6d1e4f 3813 {
ac0d67ed 3814 return inf->process_target () != nullptr;
5b6d1e4f
PA
3815 };
3816
b3e3a4c1 3817 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
3818 for (inferior *inf : all_inferiors ())
3819 if (inferior_matches (inf))
3820 num_inferiors++;
3821
3822 if (num_inferiors == 0)
3823 {
3824 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3825 return false;
3826 }
3827
b3e3a4c1 3828 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
3829 random_selector = (int)
3830 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3831
1eb8556f
SM
3832 if (num_inferiors > 1)
3833 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3834 num_inferiors, random_selector);
5b6d1e4f 3835
b3e3a4c1 3836 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
3837
3838 inferior *selected = nullptr;
3839
3840 for (inferior *inf : all_inferiors ())
3841 if (inferior_matches (inf))
3842 if (random_selector-- == 0)
3843 {
3844 selected = inf;
3845 break;
3846 }
3847
b3e3a4c1 3848 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
3849 targets, starting from the selected one. */
3850
3851 auto do_wait = [&] (inferior *inf)
3852 {
ac0d67ed 3853 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
5b6d1e4f
PA
3854 ecs->target = inf->process_target ();
3855 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3856 };
3857
b3e3a4c1
SM
3858 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3859 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
3860 reported the stop to the user, polling for events. */
3861 scoped_restore_current_thread restore_thread;
3862
3863 int inf_num = selected->num;
3864 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3865 if (inferior_matches (inf))
3866 if (do_wait (inf))
3867 return true;
3868
3869 for (inferior *inf = inferior_list;
3870 inf != NULL && inf->num < inf_num;
3871 inf = inf->next)
3872 if (inferior_matches (inf))
3873 if (do_wait (inf))
3874 return true;
3875
3876 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3877 return false;
3878}
3879
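/* For example (a sketch of the resulting polling order): with
   matching inferiors numbered 1, 2, 3 and 4, if inferior 3 is picked
   as the random starting point, the loops above poll 3, 4 and then
   wrap around to poll 1, 2, so each matching target is polled exactly
   once per call.  */
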
8ff53139
PA
3880/* An event reported by wait_one. */
3881
3882struct wait_one_event
3883{
3884 /* The target the event came out of. */
3885 process_stratum_target *target;
3886
3887 /* The PTID the event was for. */
3888 ptid_t ptid;
3889
3890 /* The waitstatus. */
3891 target_waitstatus ws;
3892};
3893
3894static bool handle_one (const wait_one_event &event);
3895
24291992
PA
3896/* Prepare and stabilize the inferior for detaching it. E.g.,
3897 detaching while a thread is displaced stepping is a recipe for
3898 crashing it, as nothing would readjust the PC out of the scratch
3899 pad. */
3900
3901void
3902prepare_for_detach (void)
3903{
3904 struct inferior *inf = current_inferior ();
f2907e49 3905 ptid_t pid_ptid = ptid_t (inf->pid);
8ff53139 3906 scoped_restore_current_thread restore_thread;
24291992 3907
9bcb1f16 3908 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3909
8ff53139
PA
3910 /* Remove all threads of INF from the global step-over chain. We
3911 want to stop any ongoing step-over, not start any new one. */
3912 thread_info *next;
3913 for (thread_info *tp = global_thread_step_over_chain_head;
3914 tp != nullptr;
3915 tp = next)
24291992 3916 {
8ff53139
PA
3917 next = global_thread_step_over_chain_next (tp);
3918 if (tp->inf == inf)
3919 global_thread_step_over_chain_remove (tp);
3920 }
24291992 3921
ac7d717c
PA
3922 /* If we were already in the middle of an inline step-over, and the
3923 thread stepping belongs to the inferior we're detaching, we need
3924 to restart the threads of other inferiors. */
3925 if (step_over_info.thread != -1)
3926 {
3927 infrun_debug_printf ("inline step-over in-process while detaching");
3928
3929 thread_info *thr = find_thread_global_id (step_over_info.thread);
3930 if (thr->inf == inf)
3931 {
3932 /* Since we removed threads of INF from the step-over chain,
3933 we know this won't start a step-over for INF. */
3934 clear_step_over_info ();
3935
3936 if (target_is_non_stop_p ())
3937 {
3938 /* Start a new step-over in another thread if there's
3939 one that needs it. */
3940 start_step_over ();
3941
3942 /* Restart all other threads (except the
3943 previously-stepping thread, since that one is still
3944 running). */
3945 if (!step_over_info_valid_p ())
3946 restart_threads (thr);
3947 }
3948 }
3949 }
3950
8ff53139
PA
3951 if (displaced_step_in_progress (inf))
3952 {
3953 infrun_debug_printf ("displaced-stepping in-process while detaching");
24291992 3954
8ff53139 3955 /* Stop threads currently displaced stepping, aborting it. */
24291992 3956
8ff53139
PA
3957 for (thread_info *thr : inf->non_exited_threads ())
3958 {
3959 if (thr->displaced_step_state.in_progress ())
3960 {
3961 if (thr->executing)
3962 {
3963 if (!thr->stop_requested)
3964 {
3965 target_stop (thr->ptid);
3966 thr->stop_requested = true;
3967 }
3968 }
3969 else
3970 thr->resumed = false;
3971 }
3972 }
24291992 3973
8ff53139
PA
3974 while (displaced_step_in_progress (inf))
3975 {
3976 wait_one_event event;
24291992 3977
8ff53139
PA
3978 event.target = inf->process_target ();
3979 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
24291992 3980
8ff53139
PA
3981 if (debug_infrun)
3982 print_target_wait_results (pid_ptid, event.ptid, &event.ws);
24291992 3983
8ff53139
PA
3984 handle_one (event);
3985 }
24291992 3986
8ff53139
PA
3987 /* It's OK to leave some of the threads of INF stopped, since
3988 they'll be detached shortly. */
24291992 3989 }
24291992
PA
3990}
3991
cd0fc7c3 3992/* Wait for control to return from inferior to debugger.
ae123ec6 3993
cd0fc7c3
SS
3994 If inferior gets a signal, we may decide to start it up again
3995 instead of returning. That is why there is a loop in this function.
3996 When this function actually returns it means the inferior
3997 should be left stopped and GDB should read more commands. */
3998
5b6d1e4f
PA
3999static void
4000wait_for_inferior (inferior *inf)
cd0fc7c3 4001{
1eb8556f 4002 infrun_debug_printf ("wait_for_inferior ()");
527159b7 4003
4c41382a 4004 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 4005
e6f5c25b
PA
4006 /* If an error happens while handling the event, propagate GDB's
4007 knowledge of the executing state to the frontend/user running
4008 state. */
5b6d1e4f
PA
4009 scoped_finish_thread_state finish_state
4010 (inf->process_target (), minus_one_ptid);
e6f5c25b 4011
c906108c
SS
4012 while (1)
4013 {
ae25568b
PA
4014 struct execution_control_state ecss;
4015 struct execution_control_state *ecs = &ecss;
29f49a6a 4016
ae25568b
PA
4017 memset (ecs, 0, sizeof (*ecs));
4018
ec9499be 4019 overlay_cache_invalid = 1;
ec9499be 4020
f15cb84a
YQ
4021 /* Flush target cache before starting to handle each event.
4022 Target was running and cache could be stale. This is just a
4023 heuristic. Running threads may modify target memory, but we
4024 don't get any event. */
4025 target_dcache_invalidate ();
4026
5b6d1e4f
PA
4027 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
4028 ecs->target = inf->process_target ();
c906108c 4029
f00150c9 4030 if (debug_infrun)
5b6d1e4f 4031 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 4032
cd0fc7c3
SS
4033 /* Now figure out what to do with the result of the result. */
4034 handle_inferior_event (ecs);
c906108c 4035
cd0fc7c3
SS
4036 if (!ecs->wait_some_more)
4037 break;
4038 }
4e1c45ea 4039
e6f5c25b 4040 /* No error, don't finish the state yet. */
731f534f 4041 finish_state.release ();
cd0fc7c3 4042}
c906108c 4043
d3d4baed
PA
4044/* Cleanup that reinstalls the readline callback handler, if the
4045 target is running in the background. If while handling the target
4046 event something triggered a secondary prompt, like e.g., a
4047 pagination prompt, we'll have removed the callback handler (see
4048 gdb_readline_wrapper_line). Need to do this as we go back to the
4049 event loop, ready to process further input. Note this has no
4050 effect if the handler hasn't actually been removed, because calling
4051 rl_callback_handler_install resets the line buffer, thus losing
4052 input. */
4053
4054static void
d238133d 4055reinstall_readline_callback_handler_cleanup ()
d3d4baed 4056{
3b12939d
PA
4057 struct ui *ui = current_ui;
4058
4059 if (!ui->async)
6c400b59
PA
4060 {
4061 /* We're not going back to the top level event loop yet. Don't
4062 install the readline callback, as it'd prep the terminal,
4063 readline-style (raw, noecho) (e.g., --batch). We'll install
4064 it the next time the prompt is displayed, when we're ready
4065 for input. */
4066 return;
4067 }
4068
3b12939d 4069 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
4070 gdb_rl_callback_handler_reinstall ();
4071}
4072
243a9253
PA
4073/* Clean up the FSMs of threads that are now stopped. In non-stop,
4074 that's just the event thread. In all-stop, that's all threads. */
4075
4076static void
4077clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4078{
08036331
PA
4079 if (ecs->event_thread != NULL
4080 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 4081 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
4082
4083 if (!non_stop)
4084 {
08036331 4085 for (thread_info *thr : all_non_exited_threads ())
dda83cd7 4086 {
243a9253
PA
4087 if (thr->thread_fsm == NULL)
4088 continue;
4089 if (thr == ecs->event_thread)
4090 continue;
4091
00431a78 4092 switch_to_thread (thr);
46e3ed7f 4093 thr->thread_fsm->clean_up (thr);
243a9253
PA
4094 }
4095
4096 if (ecs->event_thread != NULL)
00431a78 4097 switch_to_thread (ecs->event_thread);
243a9253
PA
4098 }
4099}
4100
3b12939d
PA
4101/* Helper for all_uis_check_sync_execution_done that works on the
4102 current UI. */
4103
4104static void
4105check_curr_ui_sync_execution_done (void)
4106{
4107 struct ui *ui = current_ui;
4108
4109 if (ui->prompt_state == PROMPT_NEEDED
4110 && ui->async
4111 && !gdb_in_secondary_prompt_p (ui))
4112 {
223ffa71 4113 target_terminal::ours ();
76727919 4114 gdb::observers::sync_execution_done.notify ();
3eb7562a 4115 ui_register_input_event_handler (ui);
3b12939d
PA
4116 }
4117}
4118
4119/* See infrun.h. */
4120
4121void
4122all_uis_check_sync_execution_done (void)
4123{
0e454242 4124 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
4125 {
4126 check_curr_ui_sync_execution_done ();
4127 }
4128}
4129
a8836c93
PA
4130/* See infrun.h. */
4131
4132void
4133all_uis_on_sync_execution_starting (void)
4134{
0e454242 4135 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
4136 {
4137 if (current_ui->prompt_state == PROMPT_NEEDED)
4138 async_disable_stdin ();
4139 }
4140}
4141
1777feb0 4142/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 4143 event loop whenever a change of state is detected on the file
1777feb0
MS
4144 descriptor corresponding to the target. It can be called more than
4145 once to complete a single execution command. In such cases we need
4146 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
4147 that this function is called for a single execution command, then
4148 report to the user that the inferior has stopped, and do the
1777feb0 4149 necessary cleanups. */
43ff13b4
JM
4150
4151void
b1a35af2 4152fetch_inferior_event ()
43ff13b4 4153{
3ec3145c
SM
4154 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4155
0d1e5fa7 4156 struct execution_control_state ecss;
a474d7c2 4157 struct execution_control_state *ecs = &ecss;
0f641c01 4158 int cmd_done = 0;
43ff13b4 4159
0d1e5fa7
PA
4160 memset (ecs, 0, sizeof (*ecs));
4161
c61db772
PA
4162 /* Events are always processed with the main UI as current UI. This
4163 way, warnings, debug output, etc. are always consistently sent to
4164 the main console. */
4b6749b9 4165 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 4166
b78b3a29
TBA
4167 /* Temporarily disable pagination. Otherwise, the user would be
4168 given an option to press 'q' to quit, which would cause an early
4169 exit and could leave GDB in a half-baked state. */
4170 scoped_restore save_pagination
4171 = make_scoped_restore (&pagination_enabled, false);
4172
d3d4baed 4173 /* End up with readline processing input, if necessary. */
d238133d
TT
4174 {
4175 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4176
4177 /* We're handling a live event, so make sure we're doing live
4178 debugging. If we're looking at traceframes while the target is
4179 running, we're going to need to get back to that mode after
4180 handling the event. */
4181 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4182 if (non_stop)
4183 {
4184 maybe_restore_traceframe.emplace ();
4185 set_current_traceframe (-1);
4186 }
43ff13b4 4187
873657b9
PA
4188 /* The user/frontend should not notice a thread switch due to
4189 internal events. Make sure we revert to the user selected
4190 thread and frame after handling the event and running any
4191 breakpoint commands. */
4192 scoped_restore_current_thread restore_thread;
d238133d
TT
4193
4194 overlay_cache_invalid = 1;
4195 /* Flush target cache before starting to handle each event. Target
4196 was running and cache could be stale. This is just a heuristic.
4197 Running threads may modify target memory, but we don't get any
4198 event. */
4199 target_dcache_invalidate ();
4200
4201 scoped_restore save_exec_dir
4202 = make_scoped_restore (&execution_direction,
4203 target_execution_direction ());
4204
1192f124
SM
4205 /* Allow targets to pause their resumed threads while we handle
4206 the event. */
4207 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4208
ac0d67ed 4209 if (!do_target_wait (ecs, TARGET_WNOHANG))
1192f124
SM
4210 {
4211 infrun_debug_printf ("do_target_wait returned no event");
4212 disable_commit_resumed.reset_and_commit ();
4213 return;
4214 }
5b6d1e4f
PA
4215
4216 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
4217
4218 /* Switch to the target that generated the event, so we can do
7f08fd51
TBA
4219 target calls. */
4220 switch_to_target_no_thread (ecs->target);
d238133d
TT
4221
4222 if (debug_infrun)
5b6d1e4f 4223 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
4224
4225 /* If an error happens while handling the event, propagate GDB's
4226 knowledge of the executing state to the frontend/user running
4227 state. */
4228 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 4229 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 4230
979a0d13 4231 /* These are run before the scoped_restore_current_thread above, so
d238133d
TT
 4232 that they still apply to the thread which threw the exception. */
4233 auto defer_bpstat_clear
4234 = make_scope_exit (bpstat_clear_actions);
4235 auto defer_delete_threads
4236 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4237
4238 /* Now figure out what to do with the result of the wait. */
4239 handle_inferior_event (ecs);
4240
4241 if (!ecs->wait_some_more)
4242 {
5b6d1e4f 4243 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
758cb810 4244 bool should_stop = true;
d238133d 4245 struct thread_info *thr = ecs->event_thread;
d6b48e9c 4246
d238133d 4247 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 4248
d238133d
TT
4249 if (thr != NULL)
4250 {
4251 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 4252
d238133d 4253 if (thread_fsm != NULL)
46e3ed7f 4254 should_stop = thread_fsm->should_stop (thr);
d238133d 4255 }
243a9253 4256
d238133d
TT
4257 if (!should_stop)
4258 {
4259 keep_going (ecs);
4260 }
4261 else
4262 {
46e3ed7f 4263 bool should_notify_stop = true;
d238133d 4264 int proceeded = 0;
1840d81a 4265
d238133d 4266 clean_up_just_stopped_threads_fsms (ecs);
243a9253 4267
d238133d 4268 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 4269 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 4270
d238133d
TT
4271 if (should_notify_stop)
4272 {
4273 /* We may not find an inferior if this was a process exit. */
4274 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
4275 proceeded = normal_stop ();
4276 }
243a9253 4277
d238133d
TT
4278 if (!proceeded)
4279 {
b1a35af2 4280 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
4281 cmd_done = 1;
4282 }
873657b9
PA
4283
4284 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4285 previously selected thread is gone. We have two
4286 choices - switch to no thread selected, or restore the
4287 previously selected thread (now exited). We chose the
4288 later, just because that's what GDB used to do. After
4289 this, "info threads" says "The current thread <Thread
4290 ID 2> has terminated." instead of "No thread
4291 selected.". */
4292 if (!non_stop
4293 && cmd_done
4294 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
4295 restore_thread.dont_restore ();
d238133d
TT
4296 }
4297 }
4f8d22e3 4298
d238133d
TT
4299 defer_delete_threads.release ();
4300 defer_bpstat_clear.release ();
29f49a6a 4301
d238133d
TT
4302 /* No error, don't finish the thread states yet. */
4303 finish_state.release ();
731f534f 4304
1192f124
SM
4305 disable_commit_resumed.reset_and_commit ();
4306
d238133d
TT
4307 /* This scope is used to ensure that readline callbacks are
4308 reinstalled here. */
4309 }
4f8d22e3 4310
3b12939d
PA
4311 /* If a UI was in sync execution mode, and now isn't, restore its
4312 prompt (a synchronous execution command has finished, and we're
4313 ready for input). */
4314 all_uis_check_sync_execution_done ();
0f641c01
PA
4315
4316 if (cmd_done
0f641c01 4317 && exec_done_display_p
00431a78
PA
4318 && (inferior_ptid == null_ptid
4319 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 4320 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
4321}
4322
29734269
SM
4323/* See infrun.h. */
4324
edb3359d 4325void
29734269
SM
4326set_step_info (thread_info *tp, struct frame_info *frame,
4327 struct symtab_and_line sal)
edb3359d 4328{
29734269
SM
4329 /* This can be removed once this function no longer implicitly relies on the
4330 inferior_ptid value. */
4331 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4332
16c381f0
JK
4333 tp->control.step_frame_id = get_frame_id (frame);
4334 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4335
4336 tp->current_symtab = sal.symtab;
4337 tp->current_line = sal.line;
4338}
4339
0d1e5fa7
PA
4340/* Clear context switchable stepping state. */
4341
4342void
4e1c45ea 4343init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4344{
7f5ef605 4345 tss->stepped_breakpoint = 0;
0d1e5fa7 4346 tss->stepping_over_breakpoint = 0;
963f9c80 4347 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4348 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4349}
4350
ab1ddbcf 4351/* See infrun.h. */
c32c64b7 4352
6efcd9a8 4353void
5b6d1e4f
PA
4354set_last_target_status (process_stratum_target *target, ptid_t ptid,
4355 target_waitstatus status)
c32c64b7 4356{
5b6d1e4f 4357 target_last_proc_target = target;
c32c64b7
DE
4358 target_last_wait_ptid = ptid;
4359 target_last_waitstatus = status;
4360}
4361
ab1ddbcf 4362/* See infrun.h. */
e02bc4cc
DS
4363
4364void
5b6d1e4f
PA
4365get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4366 target_waitstatus *status)
e02bc4cc 4367{
5b6d1e4f
PA
4368 if (target != nullptr)
4369 *target = target_last_proc_target;
ab1ddbcf
PA
4370 if (ptid != nullptr)
4371 *ptid = target_last_wait_ptid;
4372 if (status != nullptr)
4373 *status = target_last_waitstatus;
e02bc4cc
DS
4374}
4375
ab1ddbcf
PA
4376/* See infrun.h. */
4377
ac264b3b
MS
4378void
4379nullify_last_target_wait_ptid (void)
4380{
5b6d1e4f 4381 target_last_proc_target = nullptr;
ac264b3b 4382 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4383 target_last_waitstatus = {};
ac264b3b
MS
4384}
4385
dcf4fbde 4386/* Switch thread contexts. */
dd80620e
MS
4387
4388static void
00431a78 4389context_switch (execution_control_state *ecs)
dd80620e 4390{
1eb8556f 4391 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4392 && (inferior_ptid == null_ptid
4393 || ecs->event_thread != inferior_thread ()))
fd48f117 4394 {
1eb8556f
SM
4395 infrun_debug_printf ("Switching context from %s to %s",
4396 target_pid_to_str (inferior_ptid).c_str (),
4397 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4398 }
4399
00431a78 4400 switch_to_thread (ecs->event_thread);
dd80620e
MS
4401}
4402
d8dd4d5f
PA
4403/* If the target can't tell whether we've hit breakpoints
4404 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4405 check whether that could have been caused by a breakpoint. If so,
4406 adjust the PC, per gdbarch_decr_pc_after_break. */
4407
4fa8626c 4408static void
d8dd4d5f
PA
4409adjust_pc_after_break (struct thread_info *thread,
4410 struct target_waitstatus *ws)
4fa8626c 4411{
24a73cce
UW
4412 struct regcache *regcache;
4413 struct gdbarch *gdbarch;
118e6252 4414 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4415
4fa8626c
DJ
4416 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4417 we aren't, just return.
9709f61c
DJ
4418
4419 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4420 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4421 implemented by software breakpoints should be handled through the normal
4422 breakpoint layer.
8fb3e588 4423
4fa8626c
DJ
4424 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4425 different signals (SIGILL or SIGEMT for instance), but it is less
4426 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4427 gdbarch_decr_pc_after_break. I don't know any specific target that
4428 generates these signals at breakpoints (the code has been in GDB since at
4429 least 1992) so I can not guess how to handle them here.
8fb3e588 4430
e6cf7916
UW
4431 In earlier versions of GDB, a target with
4432 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4433 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4434 target with both of these set in GDB history, and it seems unlikely to be
4435 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4436
d8dd4d5f 4437 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4438 return;
4439
d8dd4d5f 4440 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4441 return;
4442
4058b839
PA
4443 /* In reverse execution, when a breakpoint is hit, the instruction
4444 under it has already been de-executed. The reported PC always
4445 points at the breakpoint address, so adjusting it further would
4446 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4447 architecture:
4448
4449 B1 0x08000000 : INSN1
4450 B2 0x08000001 : INSN2
4451 0x08000002 : INSN3
4452 PC -> 0x08000003 : INSN4
4453
4454 Say you're stopped at 0x08000003 as above. Reverse continuing
4455 from that point should hit B2 as below. Reading the PC when the
4456 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4457 been de-executed already.
4458
4459 B1 0x08000000 : INSN1
4460 B2 PC -> 0x08000001 : INSN2
4461 0x08000002 : INSN3
4462 0x08000003 : INSN4
4463
4464 We can't apply the same logic as for forward execution, because
4465 we would wrongly adjust the PC to 0x08000000, since there's a
4466 breakpoint at PC - 1. We'd then report a hit on B1, although
4467 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4468 behaviour. */
4469 if (execution_direction == EXEC_REVERSE)
4470 return;
4471
1cf4d951
PA
4472 /* If the target can tell whether the thread hit a SW breakpoint,
4473 trust it. Targets that can tell also adjust the PC
4474 themselves. */
4475 if (target_supports_stopped_by_sw_breakpoint ())
4476 return;
4477
4478 /* Note that relying on whether a breakpoint is planted in memory to
4479 determine this can fail. E.g,. the breakpoint could have been
4480 removed since. Or the thread could have been told to step an
4481 instruction the size of a breakpoint instruction, and only
4482 _after_ was a breakpoint inserted at its address. */
4483
24a73cce
UW
4484 /* If this target does not decrement the PC after breakpoints, then
4485 we have nothing to do. */
00431a78 4486 regcache = get_thread_regcache (thread);
ac7936df 4487 gdbarch = regcache->arch ();
118e6252 4488
527a273a 4489 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4490 if (decr_pc == 0)
24a73cce
UW
4491 return;
4492
8b86c959 4493 const address_space *aspace = regcache->aspace ();
6c95b8df 4494
8aad930b
AC
4495 /* Find the location where (if we've hit a breakpoint) the
4496 breakpoint would be. */
118e6252 4497 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4498
1cf4d951
PA
4499 /* If the target can't tell whether a software breakpoint triggered,
4500 fallback to figuring it out based on breakpoints we think were
4501 inserted in the target, and on whether the thread was stepped or
4502 continued. */
4503
1c5cfe86
PA
4504 /* Check whether there actually is a software breakpoint inserted at
4505 that location.
4506
4507 If in non-stop mode, a race condition is possible where we've
4508 removed a breakpoint, but stop events for that breakpoint were
4509 already queued and arrive later. To suppress those spurious
4510 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4511 and retire them after a number of stop events are reported. Note
4512 this is an heuristic and can thus get confused. The real fix is
4513 to get the "stopped by SW BP and needs adjustment" info out of
4514 the target/kernel (and thus never reach here; see above). */
6c95b8df 4515 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4516 || (target_is_non_stop_p ()
4517 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4518 {
07036511 4519 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4520
8213266a 4521 if (record_full_is_used ())
07036511
TT
4522 restore_operation_disable.emplace
4523 (record_full_gdb_operation_disable_set ());
96429cc8 4524
1c0fdd0e
UW
4525 /* When using hardware single-step, a SIGTRAP is reported for both
4526 a completed single-step and a software breakpoint. Need to
4527 differentiate between the two, as the latter needs adjusting
4528 but the former does not.
4529
4530 The SIGTRAP can be due to a completed hardware single-step only if
4531 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4532 - this thread is currently being stepped
4533
4534 If any of these events did not occur, we must have stopped due
4535 to hitting a software breakpoint, and have to back up to the
4536 breakpoint address.
4537
4538 As a special case, we could have hardware single-stepped a
4539 software breakpoint. In this case (prev_pc == breakpoint_pc),
4540 we also need to back up to the breakpoint address. */
4541
d8dd4d5f
PA
4542 if (thread_has_single_step_breakpoints_set (thread)
4543 || !currently_stepping (thread)
4544 || (thread->stepped_breakpoint
4545 && thread->prev_pc == breakpoint_pc))
515630c5 4546 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4547 }
4fa8626c
DJ
4548}
4549
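/* Illustrative example, not in the original source: a worked case of
   the adjustment above.  On an architecture where
   gdbarch_decr_pc_after_break returns 1 (x86 is the classic case: the
   trap is reported with the PC one byte past the 0xcc instruction), a
   software breakpoint planted at 0x1000 makes the target report
   SIGTRAP with PC == 0x1001.  The code above then computes

     breakpoint_pc = 0x1001 - 1 = 0x1000

   and, since a breakpoint really is inserted at 0x1000 and the thread
   was not simply completing a hardware single-step, rewinds the PC
   with regcache_write_pc so the stop is reported at the breakpoint
   address.  On targets that report stop reasons themselves
   (target_supports_stopped_by_sw_breakpoint), none of this runs.  */
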
/* Return true if FRAME was stepped into from the frame identified by
   STEP_FRAME_ID, looking out through any intervening inline
   frames.  */

static bool
stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
{
  for (frame = get_prev_frame (frame);
       frame != NULL;
       frame = get_prev_frame (frame))
    {
      if (frame_id_eq (get_frame_id (frame), step_frame_id))
	return true;

      if (get_frame_type (frame) != INLINE_FRAME)
	break;
    }

  return false;
}

/* Look for an inline frame that is marked for skip.
   If PREV_FRAME is TRUE start at the previous frame,
   otherwise start at the current frame.  Stop at the
   first non-inline frame, or at the frame where the
   step started.  */

static bool
inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
{
  struct frame_info *frame = get_current_frame ();

  if (prev_frame)
    frame = get_prev_frame (frame);

  for (; frame != NULL; frame = get_prev_frame (frame))
    {
      const char *fn = NULL;
      symtab_and_line sal;
      struct symbol *sym;

      if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
	break;
      if (get_frame_type (frame) != INLINE_FRAME)
	break;

      sal = find_frame_sal (frame);
      sym = get_frame_function (frame);

      if (sym != NULL)
	fn = sym->print_name ();

      if (sal.line != 0
	  && function_name_is_marked_for_skip (fn, sal))
	return true;
    }

  return false;
}

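/* Illustrative example, not in the original source: assume bar() is
   inlined into foo() and the user has run "skip function bar".  When
   a step lands inside the inlined copy of bar(), the innermost frames
   look like

       #0  bar   <- INLINE_FRAME, "bar" is marked for skip
       #1  foo   <- first non-inline frame, the walk would stop here

   The loop inspects #0, finds it marked, and returns true, so the
   stepping logic keeps going rather than presenting a stop inside the
   inlined bar().  */
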
/* If the event thread has the stop requested flag set, pretend it
   stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
   target_stop).  */

static bool
handle_stop_requested (struct execution_control_state *ecs)
{
  if (ecs->event_thread->stop_requested)
    {
      ecs->ws.kind = TARGET_WAITKIND_STOPPED;
      ecs->ws.value.sig = GDB_SIGNAL_0;
      handle_signal_stop (ecs);
      return true;
    }
  return false;
}

/* Auxiliary function that handles syscall entry/return events.
   It returns true if the inferior should keep going (and GDB
   should ignore the event), or false if the event deserves to be
   processed.  */

static bool
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.value.syscall_number;
  ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);

  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      infrun_debug_printf ("syscall number=%d", syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (regcache->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      if (handle_stop_requested (ecs))
	return false;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return false;
	}
    }

  if (handle_stop_requested (ecs))
    return false;

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);

  return true;
}

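/* Illustrative example, not in the original source: this is the path
   taken for "catch syscall" catchpoints.  Assuming a GNU/Linux
   x86-64 target:

       (gdb) catch syscall write
       Catchpoint 1 (syscall 'write' [1])
       (gdb) continue

   each write() now reports TARGET_WAITKIND_SYSCALL_ENTRY (and later
   SYSCALL_RETURN); catching_syscall_number matches, the bpstat says
   the catchpoint causes a stop, and handle_syscall_event returns
   false so the event is presented to the user.  With no matching
   catchpoint it simply calls keep_going and returns true.  */
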
7e324e48
GB
4669/* Lazily fill in the execution_control_state's stop_func_* fields. */
4670
4671static void
4672fill_in_stop_func (struct gdbarch *gdbarch,
4673 struct execution_control_state *ecs)
4674{
4675 if (!ecs->stop_func_filled_in)
4676 {
98a617f8 4677 const block *block;
fe830662 4678 const general_symbol_info *gsi;
98a617f8 4679
7e324e48
GB
4680 /* Don't care about return value; stop_func_start and stop_func_name
4681 will both be 0 if it doesn't work. */
fe830662
TT
4682 find_pc_partial_function_sym (ecs->event_thread->suspend.stop_pc,
4683 &gsi,
4684 &ecs->stop_func_start,
4685 &ecs->stop_func_end,
4686 &block);
4687 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
98a617f8
KB
4688
4689 /* The call to find_pc_partial_function, above, will set
4690 stop_func_start and stop_func_end to the start and end
4691 of the range containing the stop pc. If this range
4692 contains the entry pc for the block (which is always the
4693 case for contiguous blocks), advance stop_func_start past
4694 the function's start offset and entrypoint. Note that
4695 stop_func_start is NOT advanced when in a range of a
4696 non-contiguous block that does not contain the entry pc. */
4697 if (block != nullptr
4698 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4699 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4700 {
4701 ecs->stop_func_start
4702 += gdbarch_deprecated_function_start_offset (gdbarch);
4703
4704 if (gdbarch_skip_entrypoint_p (gdbarch))
4705 ecs->stop_func_start
4706 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4707 }
591a12a1 4708
7e324e48
GB
4709 ecs->stop_func_filled_in = 1;
4710 }
4711}
4712
4f5d7f63 4713
00431a78 4714/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4715
4716static enum stop_kind
00431a78 4717get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4718{
5b6d1e4f 4719 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4720
4721 gdb_assert (inf != NULL);
4722 return inf->control.stop_soon;
4723}
4724
5b6d1e4f
PA
4725/* Poll for one event out of the current target. Store the resulting
4726 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4727
4728static ptid_t
5b6d1e4f 4729poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4730{
4731 ptid_t event_ptid;
372316f1
PA
4732
4733 overlay_cache_invalid = 1;
4734
4735 /* Flush target cache before starting to handle each event.
4736 Target was running and cache could be stale. This is just a
4737 heuristic. Running threads may modify target memory, but we
4738 don't get any event. */
4739 target_dcache_invalidate ();
4740
4741 if (deprecated_target_wait_hook)
5b6d1e4f 4742 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4743 else
5b6d1e4f 4744 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4745
4746 if (debug_infrun)
5b6d1e4f 4747 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4748
4749 return event_ptid;
4750}
4751
/* Wait for one event out of any target.  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (0);
	    }
	  else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
	}

      QUIT;

      int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
      if (numfds < 0)
	{
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}

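/* Illustrative example, not in the original source: suppose two async
   targets are live and report event file descriptors 5 and 9 from
   target->async_wait_fd ().  The blocking branch above builds

       FD_SET (5, &readfds);   nfds becomes 6
       FD_SET (9, &readfds);   nfds becomes 10

   and then calls interruptible_select (10, &readfds, 0, NULL, 0),
   i.e. it sleeps until either target has an event, retrying if the
   select is interrupted (EINTR) by e.g. a Ctrl-C.  */
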
372316f1
PA
4823/* Save the thread's event and stop reason to process it later. */
4824
4825static void
5b6d1e4f 4826save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4827{
1eb8556f
SM
4828 infrun_debug_printf ("saving status %s for %d.%ld.%ld",
4829 target_waitstatus_to_string (ws).c_str (),
4830 tp->ptid.pid (),
4831 tp->ptid.lwp (),
4832 tp->ptid.tid ());
372316f1
PA
4833
4834 /* Record for later. */
4835 tp->suspend.waitstatus = *ws;
4836 tp->suspend.waitstatus_pending_p = 1;
4837
372316f1
PA
4838 if (ws->kind == TARGET_WAITKIND_STOPPED
4839 && ws->value.sig == GDB_SIGNAL_TRAP)
4840 {
89ba430c
SM
4841 struct regcache *regcache = get_thread_regcache (tp);
4842 const address_space *aspace = regcache->aspace ();
372316f1
PA
4843 CORE_ADDR pc = regcache_read_pc (regcache);
4844
4845 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4846
18493a00
PA
4847 scoped_restore_current_thread restore_thread;
4848 switch_to_thread (tp);
4849
4850 if (target_stopped_by_watchpoint ())
372316f1
PA
4851 {
4852 tp->suspend.stop_reason
4853 = TARGET_STOPPED_BY_WATCHPOINT;
4854 }
4855 else if (target_supports_stopped_by_sw_breakpoint ()
18493a00 4856 && target_stopped_by_sw_breakpoint ())
372316f1
PA
4857 {
4858 tp->suspend.stop_reason
4859 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4860 }
4861 else if (target_supports_stopped_by_hw_breakpoint ()
18493a00 4862 && target_stopped_by_hw_breakpoint ())
372316f1
PA
4863 {
4864 tp->suspend.stop_reason
4865 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4866 }
4867 else if (!target_supports_stopped_by_hw_breakpoint ()
4868 && hardware_breakpoint_inserted_here_p (aspace,
4869 pc))
4870 {
4871 tp->suspend.stop_reason
4872 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4873 }
4874 else if (!target_supports_stopped_by_sw_breakpoint ()
4875 && software_breakpoint_inserted_here_p (aspace,
4876 pc))
4877 {
4878 tp->suspend.stop_reason
4879 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4880 }
4881 else if (!thread_has_single_step_breakpoints_set (tp)
4882 && currently_stepping (tp))
4883 {
4884 tp->suspend.stop_reason
4885 = TARGET_STOPPED_BY_SINGLE_STEP;
4886 }
4887 }
4888}
4889
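/* Illustrative example, not in the original source: on a target that
   cannot report stop reasons itself
   (target_supports_stopped_by_sw_breakpoint () is false), a SIGTRAP
   whose adjusted PC sits on one of our software breakpoints is
   recorded as TARGET_STOPPED_BY_SW_BREAKPOINT by the fallback checks
   above, while a SIGTRAP in a thread we were single-stepping, with no
   breakpoint at the PC, ends up as TARGET_STOPPED_BY_SINGLE_STEP.
   The saved reason and waitstatus are consumed later, when the
   pending event is finally processed.  */
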
/* Mark the non-executing threads accordingly.  In all-stop, all
   threads of all processes are stopped when we get any event
   reported.  In non-stop mode, only the event thread stops.  */

static void
mark_non_executing_threads (process_stratum_target *target,
			    ptid_t event_ptid,
			    struct target_waitstatus ws)
{
  ptid_t mark_ptid;

  if (!target_is_non_stop_p ())
    mark_ptid = minus_one_ptid;
  else if (ws.kind == TARGET_WAITKIND_SIGNALLED
	   || ws.kind == TARGET_WAITKIND_EXITED)
    {
      /* If we're handling a process exit in non-stop mode, even
	 though threads haven't been deleted yet, one would think
	 that there is nothing to do, as threads of the dead process
	 will soon be deleted, and threads of any other process were
	 left running.  However, on some targets, threads survive a
	 process exit event.  E.g., for the "checkpoint" command,
	 when the current checkpoint/fork exits, linux-fork.c
	 automatically switches to another fork from within
	 target_mourn_inferior, by associating the same
	 inferior/thread to another fork.  We haven't mourned yet at
	 this point, but we must mark any threads left in the
	 process as not-executing so that finish_thread_state marks
	 them stopped (in the user's perspective) if/when we present
	 the stop to the user.  */
      mark_ptid = ptid_t (event_ptid.pid ());
    }
  else
    mark_ptid = event_ptid;

  set_executing (target, mark_ptid, false);

  /* Likewise the resumed flag.  */
  set_resumed (target, mark_ptid, false);
}

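/* Illustrative example, not in the original source: for an event
   reported for ptid (pid 1234, lwp 1235),

     - all-stop target:             mark_ptid = minus_one_ptid
				    (every thread of every process)
     - non-stop, process exited:    mark_ptid = ptid_t (1234)
				    (remaining threads of pid 1234)
     - non-stop, any other event:   mark_ptid = the event ptid
				    (just the event thread)

   and that set of threads gets both its "executing" and "resumed"
   flags cleared.  */
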
d758e62c
PA
4931/* Handle one event after stopping threads. If the eventing thread
4932 reports back any interesting event, we leave it pending. If the
4933 eventing thread was in the middle of a displaced step, we
8ff53139
PA
4934 cancel/finish it, and unless the thread's inferior is being
4935 detached, put the thread back in the step-over chain. Returns true
4936 if there are no resumed threads left in the target (thus there's no
4937 point in waiting further), false otherwise. */
d758e62c
PA
4938
4939static bool
4940handle_one (const wait_one_event &event)
4941{
4942 infrun_debug_printf
4943 ("%s %s", target_waitstatus_to_string (&event.ws).c_str (),
4944 target_pid_to_str (event.ptid).c_str ());
4945
4946 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4947 {
4948 /* All resumed threads exited. */
4949 return true;
4950 }
4951 else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4952 || event.ws.kind == TARGET_WAITKIND_EXITED
4953 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
4954 {
4955 /* One thread/process exited/signalled. */
4956
4957 thread_info *t = nullptr;
4958
4959 /* The target may have reported just a pid. If so, try
4960 the first non-exited thread. */
4961 if (event.ptid.is_pid ())
4962 {
4963 int pid = event.ptid.pid ();
4964 inferior *inf = find_inferior_pid (event.target, pid);
4965 for (thread_info *tp : inf->non_exited_threads ())
4966 {
4967 t = tp;
4968 break;
4969 }
4970
4971 /* If there is no available thread, the event would
4972 have to be appended to a per-inferior event list,
4973 which does not exist (and if it did, we'd have
4974 to adjust run control command to be able to
4975 resume such an inferior). We assert here instead
4976 of going into an infinite loop. */
4977 gdb_assert (t != nullptr);
4978
4979 infrun_debug_printf
4980 ("using %s", target_pid_to_str (t->ptid).c_str ());
4981 }
4982 else
4983 {
4984 t = find_thread_ptid (event.target, event.ptid);
4985 /* Check if this is the first time we see this thread.
4986 Don't bother adding if it individually exited. */
4987 if (t == nullptr
4988 && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
4989 t = add_thread (event.target, event.ptid);
4990 }
4991
4992 if (t != nullptr)
4993 {
4994 /* Set the threads as non-executing to avoid
4995 another stop attempt on them. */
4996 switch_to_thread_no_regs (t);
4997 mark_non_executing_threads (event.target, event.ptid,
4998 event.ws);
4999 save_waitstatus (t, &event.ws);
5000 t->stop_requested = false;
5001 }
5002 }
5003 else
5004 {
5005 thread_info *t = find_thread_ptid (event.target, event.ptid);
5006 if (t == NULL)
5007 t = add_thread (event.target, event.ptid);
5008
5009 t->stop_requested = 0;
5010 t->executing = 0;
5011 t->resumed = false;
5012 t->control.may_range_step = 0;
5013
5014 /* This may be the first time we see the inferior report
5015 a stop. */
5016 inferior *inf = find_inferior_ptid (event.target, event.ptid);
5017 if (inf->needs_setup)
5018 {
5019 switch_to_thread_no_regs (t);
5020 setup_inferior (0);
5021 }
5022
5023 if (event.ws.kind == TARGET_WAITKIND_STOPPED
5024 && event.ws.value.sig == GDB_SIGNAL_0)
5025 {
5026 /* We caught the event that we intended to catch, so
5027 there's no event pending. */
5028 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
5029 t->suspend.waitstatus_pending_p = 0;
5030
5031 if (displaced_step_finish (t, GDB_SIGNAL_0)
5032 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5033 {
5034 /* Add it back to the step-over queue. */
5035 infrun_debug_printf
5036 ("displaced-step of %s canceled",
5037 target_pid_to_str (t->ptid).c_str ());
5038
5039 t->control.trap_expected = 0;
8ff53139
PA
5040 if (!t->inf->detaching)
5041 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
5042 }
5043 }
5044 else
5045 {
5046 enum gdb_signal sig;
5047 struct regcache *regcache;
5048
5049 infrun_debug_printf
5050 ("target_wait %s, saving status for %d.%ld.%ld",
5051 target_waitstatus_to_string (&event.ws).c_str (),
5052 t->ptid.pid (), t->ptid.lwp (), t->ptid.tid ());
5053
5054 /* Record for later. */
5055 save_waitstatus (t, &event.ws);
5056
5057 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
5058 ? event.ws.value.sig : GDB_SIGNAL_0);
5059
5060 if (displaced_step_finish (t, sig)
5061 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5062 {
5063 /* Add it back to the step-over queue. */
5064 t->control.trap_expected = 0;
8ff53139
PA
5065 if (!t->inf->detaching)
5066 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
5067 }
5068
5069 regcache = get_thread_regcache (t);
5070 t->suspend.stop_pc = regcache_read_pc (regcache);
5071
5072 infrun_debug_printf ("saved stop_pc=%s for %s "
5073 "(currently_stepping=%d)",
5074 paddress (target_gdbarch (),
5075 t->suspend.stop_pc),
5076 target_pid_to_str (t->ptid).c_str (),
5077 currently_stepping (t));
5078 }
5079 }
5080
5081 return false;
5082}
5083
6efcd9a8 5084/* See infrun.h. */
372316f1 5085
6efcd9a8 5086void
4ffff7d3 5087stop_all_threads (const char *reason, inferior *inf)
372316f1
PA
5088{
5089 /* We may need multiple passes to discover all threads. */
5090 int pass;
5091 int iterations = 0;
372316f1 5092
53cccef1 5093 gdb_assert (exists_non_stop_target ());
372316f1 5094
4ffff7d3
SM
5095 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5096 inf != nullptr ? inf->num : -1);
372316f1 5097
00431a78 5098 scoped_restore_current_thread restore_thread;
372316f1 5099
4ffff7d3 5100 /* Enable thread events on relevant targets. */
6ad82919
TBA
5101 for (auto *target : all_non_exited_process_targets ())
5102 {
4ffff7d3
SM
5103 if (inf != nullptr && inf->process_target () != target)
5104 continue;
5105
6ad82919
TBA
5106 switch_to_target_no_thread (target);
5107 target_thread_events (true);
5108 }
5109
5110 SCOPE_EXIT
5111 {
4ffff7d3 5112 /* Disable thread events on relevant targets. */
6ad82919
TBA
5113 for (auto *target : all_non_exited_process_targets ())
5114 {
4ffff7d3
SM
5115 if (inf != nullptr && inf->process_target () != target)
5116 continue;
5117
6ad82919
TBA
5118 switch_to_target_no_thread (target);
5119 target_thread_events (false);
5120 }
5121
17417fb0 5122 /* Use debug_prefixed_printf directly to get a meaningful function
dda83cd7 5123 name. */
6ad82919 5124 if (debug_infrun)
17417fb0 5125 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
6ad82919 5126 };
65706a29 5127
372316f1
PA
5128 /* Request threads to stop, and then wait for the stops. Because
5129 threads we already know about can spawn more threads while we're
5130 trying to stop them, and we only learn about new threads when we
5131 update the thread list, do this in a loop, and keep iterating
5132 until two passes find no threads that need to be stopped. */
5133 for (pass = 0; pass < 2; pass++, iterations++)
5134 {
1eb8556f 5135 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
372316f1
PA
5136 while (1)
5137 {
29d6859f 5138 int waits_needed = 0;
372316f1 5139
a05575d3
TBA
5140 for (auto *target : all_non_exited_process_targets ())
5141 {
4ffff7d3
SM
5142 if (inf != nullptr && inf->process_target () != target)
5143 continue;
5144
a05575d3
TBA
5145 switch_to_target_no_thread (target);
5146 update_thread_list ();
5147 }
372316f1
PA
5148
5149 /* Go through all threads looking for threads that we need
5150 to tell the target to stop. */
08036331 5151 for (thread_info *t : all_non_exited_threads ())
372316f1 5152 {
4ffff7d3
SM
5153 if (inf != nullptr && t->inf != inf)
5154 continue;
5155
53cccef1
TBA
5156 /* For a single-target setting with an all-stop target,
5157 we would not even arrive here. For a multi-target
5158 setting, until GDB is able to handle a mixture of
5159 all-stop and non-stop targets, simply skip all-stop
5160 targets' threads. This should be fine due to the
5161 protection of 'check_multi_target_resumption'. */
5162
5163 switch_to_thread_no_regs (t);
5164 if (!target_is_non_stop_p ())
5165 continue;
5166
372316f1
PA
5167 if (t->executing)
5168 {
5169 /* If already stopping, don't request a stop again.
5170 We just haven't seen the notification yet. */
5171 if (!t->stop_requested)
5172 {
1eb8556f
SM
5173 infrun_debug_printf (" %s executing, need stop",
5174 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
5175 target_stop (t->ptid);
5176 t->stop_requested = 1;
5177 }
5178 else
5179 {
1eb8556f
SM
5180 infrun_debug_printf (" %s executing, already stopping",
5181 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
5182 }
5183
5184 if (t->stop_requested)
29d6859f 5185 waits_needed++;
372316f1
PA
5186 }
5187 else
5188 {
1eb8556f
SM
5189 infrun_debug_printf (" %s not executing",
5190 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
5191
5192 /* The thread may be not executing, but still be
5193 resumed with a pending status to process. */
719546c4 5194 t->resumed = false;
372316f1
PA
5195 }
5196 }
5197
29d6859f 5198 if (waits_needed == 0)
372316f1
PA
5199 break;
5200
5201 /* If we find new threads on the second iteration, restart
5202 over. We want to see two iterations in a row with all
5203 threads stopped. */
5204 if (pass > 0)
5205 pass = -1;
5206
29d6859f 5207 for (int i = 0; i < waits_needed; i++)
c29705b7 5208 {
29d6859f 5209 wait_one_event event = wait_one ();
d758e62c
PA
5210 if (handle_one (event))
5211 break;
372316f1
PA
5212 }
5213 }
5214 }
372316f1
PA
5215}
5216
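/* Illustrative walk-through, not in the original source: suppose
   threads A and B are executing when stop_all_threads is called.

     pass 0: update_thread_list; A and B are executing, so each gets
	     target_stop, waits_needed = 2, and wait_one collects the
	     two stop events.  Meanwhile B had spawned thread C.
     pass 0, next inner iteration: C is found executing and is
	     stopped (waits_needed = 1); pass is only reset when new
	     work is found with pass > 0, so we stay in pass 0.
     pass 0, next inner iteration: nothing left to stop, the inner
	     loop breaks.
     pass 1: still nothing to stop, the inner loop breaks right away
	     and the outer loop ends.

   The loop only terminates after consecutive scans find nothing left
   to stop, which is what ensures that threads spawned while we were
   stopping their parents do not stay running.  */
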
f4836ba9
PA
5217/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
5218
c4464ade 5219static bool
f4836ba9
PA
5220handle_no_resumed (struct execution_control_state *ecs)
5221{
3b12939d 5222 if (target_can_async_p ())
f4836ba9 5223 {
c4464ade 5224 bool any_sync = false;
f4836ba9 5225
2dab0c7b 5226 for (ui *ui : all_uis ())
3b12939d
PA
5227 {
5228 if (ui->prompt_state == PROMPT_BLOCKED)
5229 {
c4464ade 5230 any_sync = true;
3b12939d
PA
5231 break;
5232 }
5233 }
5234 if (!any_sync)
5235 {
5236 /* There were no unwaited-for children left in the target, but,
5237 we're not synchronously waiting for events either. Just
5238 ignore. */
5239
1eb8556f 5240 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
3b12939d 5241 prepare_to_wait (ecs);
c4464ade 5242 return true;
3b12939d 5243 }
f4836ba9
PA
5244 }
5245
5246 /* Otherwise, if we were running a synchronous execution command, we
5247 may need to cancel it and give the user back the terminal.
5248
5249 In non-stop mode, the target can't tell whether we've already
5250 consumed previous stop events, so it can end up sending us a
5251 no-resumed event like so:
5252
5253 #0 - thread 1 is left stopped
5254
5255 #1 - thread 2 is resumed and hits breakpoint
dda83cd7 5256 -> TARGET_WAITKIND_STOPPED
f4836ba9
PA
5257
5258 #2 - thread 3 is resumed and exits
dda83cd7 5259 this is the last resumed thread, so
f4836ba9
PA
5260 -> TARGET_WAITKIND_NO_RESUMED
5261
5262 #3 - gdb processes stop for thread 2 and decides to re-resume
dda83cd7 5263 it.
f4836ba9
PA
5264
5265 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
dda83cd7 5266 thread 2 is now resumed, so the event should be ignored.
f4836ba9
PA
5267
5268 IOW, if the stop for thread 2 doesn't end a foreground command,
5269 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5270 event. But it could be that the event meant that thread 2 itself
5271 (or whatever other thread was the last resumed thread) exited.
5272
5273 To address this we refresh the thread list and check whether we
5274 have resumed threads _now_. In the example above, this removes
5275 thread 3 from the thread list. If thread 2 was re-resumed, we
5276 ignore this event. If we find no thread resumed, then we cancel
7d3badc6
PA
5277 the synchronous command and show "no unwaited-for " to the
5278 user. */
f4836ba9 5279
d6cc5d98 5280 inferior *curr_inf = current_inferior ();
7d3badc6 5281
d6cc5d98
PA
5282 scoped_restore_current_thread restore_thread;
5283
5284 for (auto *target : all_non_exited_process_targets ())
5285 {
5286 switch_to_target_no_thread (target);
5287 update_thread_list ();
5288 }
5289
5290 /* If:
5291
5292 - the current target has no thread executing, and
5293 - the current inferior is native, and
5294 - the current inferior is the one which has the terminal, and
5295 - we did nothing,
5296
5297 then a Ctrl-C from this point on would remain stuck in the
5298 kernel, until a thread resumes and dequeues it. That would
5299 result in the GDB CLI not reacting to Ctrl-C, not able to
5300 interrupt the program. To address this, if the current inferior
5301 no longer has any thread executing, we give the terminal to some
5302 other inferior that has at least one thread executing. */
5303 bool swap_terminal = true;
5304
5305 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5306 whether to report it to the user. */
5307 bool ignore_event = false;
7d3badc6
PA
5308
5309 for (thread_info *thread : all_non_exited_threads ())
f4836ba9 5310 {
d6cc5d98
PA
5311 if (swap_terminal && thread->executing)
5312 {
5313 if (thread->inf != curr_inf)
5314 {
5315 target_terminal::ours ();
5316
5317 switch_to_thread (thread);
5318 target_terminal::inferior ();
5319 }
5320 swap_terminal = false;
5321 }
5322
5323 if (!ignore_event
5324 && (thread->executing
5325 || thread->suspend.waitstatus_pending_p))
f4836ba9 5326 {
7d3badc6
PA
5327 /* Either there were no unwaited-for children left in the
5328 target at some point, but there are now, or some target
5329 other than the eventing one has unwaited-for children
5330 left. Just ignore. */
1eb8556f
SM
5331 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5332 "(ignoring: found resumed)");
d6cc5d98
PA
5333
5334 ignore_event = true;
f4836ba9 5335 }
d6cc5d98
PA
5336
5337 if (ignore_event && !swap_terminal)
5338 break;
5339 }
5340
5341 if (ignore_event)
5342 {
5343 switch_to_inferior_no_thread (curr_inf);
5344 prepare_to_wait (ecs);
c4464ade 5345 return true;
f4836ba9
PA
5346 }
5347
5348 /* Go ahead and report the event. */
c4464ade 5349 return false;
f4836ba9
PA
5350}
5351
05ba8510
PA
5352/* Given an execution control state that has been freshly filled in by
5353 an event from the inferior, figure out what it means and take
5354 appropriate action.
5355
5356 The alternatives are:
5357
22bcd14b 5358 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5359 debugger.
5360
5361 2) keep_going and return; to wait for the next event (set
5362 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5363 once). */
c906108c 5364
ec9499be 5365static void
595915c1 5366handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5367{
595915c1
TT
5368 /* Make sure that all temporary struct value objects that were
5369 created during the handling of the event get deleted at the
5370 end. */
5371 scoped_value_mark free_values;
5372
1eb8556f 5373 infrun_debug_printf ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
c29705b7 5374
28736962
PA
5375 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5376 {
5377 /* We had an event in the inferior, but we are not interested in
5378 handling it at this level. The lower layers have already
5379 done what needs to be done, if anything.
5380
5381 One of the possible circumstances for this is when the
5382 inferior produces output for the console. The inferior has
5383 not stopped, and we are ignoring the event. Another possible
5384 circumstance is any event which the lower level knows will be
5385 reported multiple times without an intervening resume. */
28736962
PA
5386 prepare_to_wait (ecs);
5387 return;
5388 }
5389
65706a29
PA
5390 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5391 {
65706a29
PA
5392 prepare_to_wait (ecs);
5393 return;
5394 }
5395
0e5bf2a8 5396 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5397 && handle_no_resumed (ecs))
5398 return;
0e5bf2a8 5399
5b6d1e4f
PA
5400 /* Cache the last target/ptid/waitstatus. */
5401 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5402
ca005067 5403 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5404 stop_stack_dummy = STOP_NONE;
ca005067 5405
0e5bf2a8
PA
5406 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5407 {
5408 /* No unwaited-for children left. IOW, all resumed children
5409 have exited. */
c4464ade 5410 stop_print_frame = false;
22bcd14b 5411 stop_waiting (ecs);
0e5bf2a8
PA
5412 return;
5413 }
5414
8c90c137 5415 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5416 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5417 {
5b6d1e4f 5418 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5419 /* If it's a new thread, add it to the thread database. */
5420 if (ecs->event_thread == NULL)
5b6d1e4f 5421 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5422
5423 /* Disable range stepping. If the next step request could use a
5424 range, this will be end up re-enabled then. */
5425 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5426 }
88ed393a
JK
5427
5428 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5429 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5430
5431 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5432 reinit_frame_cache ();
5433
28736962
PA
5434 breakpoint_retire_moribund ();
5435
2b009048
DJ
5436 /* First, distinguish signals caused by the debugger from signals
5437 that have to do with the program's own actions. Note that
5438 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5439 on the operating system version. Here we detect when a SIGILL or
5440 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5441 something similar for SIGSEGV, since a SIGSEGV will be generated
5442 when we're trying to execute a breakpoint instruction on a
5443 non-executable stack. This happens for call dummy breakpoints
5444 for architectures like SPARC that place call dummies on the
5445 stack. */
2b009048 5446 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
5447 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5448 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5449 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5450 {
00431a78 5451 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5452
a01bda52 5453 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5454 regcache_read_pc (regcache)))
5455 {
1eb8556f 5456 infrun_debug_printf ("Treating signal as SIGTRAP");
a493e3e2 5457 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5458 }
2b009048
DJ
5459 }
5460
293b3ebc 5461 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 5462
488f131b
JB
5463 switch (ecs->ws.kind)
5464 {
5465 case TARGET_WAITKIND_LOADED:
72d383bb
SM
5466 {
5467 context_switch (ecs);
5468 /* Ignore gracefully during startup of the inferior, as it might
5469 be the shell which has just loaded some objects, otherwise
5470 add the symbols for the newly loaded objects. Also ignore at
5471 the beginning of an attach or remote session; we will query
5472 the full list of libraries once the connection is
5473 established. */
5474
5475 stop_kind stop_soon = get_inferior_stop_soon (ecs);
5476 if (stop_soon == NO_STOP_QUIETLY)
5477 {
5478 struct regcache *regcache;
edcc5120 5479
72d383bb 5480 regcache = get_thread_regcache (ecs->event_thread);
edcc5120 5481
72d383bb 5482 handle_solib_event ();
ab04a2af 5483
72d383bb
SM
5484 ecs->event_thread->control.stop_bpstat
5485 = bpstat_stop_status (regcache->aspace (),
5486 ecs->event_thread->suspend.stop_pc,
5487 ecs->event_thread, &ecs->ws);
c65d6b55 5488
72d383bb 5489 if (handle_stop_requested (ecs))
94c57d6a 5490 return;
488f131b 5491
72d383bb
SM
5492 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5493 {
5494 /* A catchpoint triggered. */
5495 process_event_stop_test (ecs);
5496 return;
5497 }
55409f9d 5498
72d383bb
SM
5499 /* If requested, stop when the dynamic linker notifies
5500 gdb of events. This allows the user to get control
5501 and place breakpoints in initializer routines for
5502 dynamically loaded objects (among other things). */
5503 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5504 if (stop_on_solib_events)
5505 {
5506 /* Make sure we print "Stopped due to solib-event" in
5507 normal_stop. */
5508 stop_print_frame = true;
b0f4b84b 5509
72d383bb
SM
5510 stop_waiting (ecs);
5511 return;
5512 }
5513 }
b0f4b84b 5514
72d383bb
SM
5515 /* If we are skipping through a shell, or through shared library
5516 loading that we aren't interested in, resume the program. If
5517 we're running the program normally, also resume. */
5518 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5519 {
5520 /* Loading of shared libraries might have changed breakpoint
5521 addresses. Make sure new breakpoints are inserted. */
5522 if (stop_soon == NO_STOP_QUIETLY)
5523 insert_breakpoints ();
5524 resume (GDB_SIGNAL_0);
5525 prepare_to_wait (ecs);
5526 return;
5527 }
5c09a2c5 5528
72d383bb
SM
5529 /* But stop if we're attaching or setting up a remote
5530 connection. */
5531 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5532 || stop_soon == STOP_QUIETLY_REMOTE)
5533 {
5534 infrun_debug_printf ("quietly stopped");
5535 stop_waiting (ecs);
5536 return;
5537 }
5538
5539 internal_error (__FILE__, __LINE__,
5540 _("unhandled stop_soon: %d"), (int) stop_soon);
5541 }
c5aa993b 5542
488f131b 5543 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5544 if (handle_stop_requested (ecs))
5545 return;
00431a78 5546 context_switch (ecs);
64ce06e4 5547 resume (GDB_SIGNAL_0);
488f131b
JB
5548 prepare_to_wait (ecs);
5549 return;
c5aa993b 5550
65706a29 5551 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5552 if (handle_stop_requested (ecs))
5553 return;
00431a78 5554 context_switch (ecs);
65706a29
PA
5555 if (!switch_back_to_stepped_thread (ecs))
5556 keep_going (ecs);
5557 return;
5558
488f131b 5559 case TARGET_WAITKIND_EXITED:
940c3c06 5560 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
5561 {
5562 /* Depending on the system, ecs->ptid may point to a thread or
5563 to a process. On some targets, target_mourn_inferior may
5564 need to have access to the just-exited thread. That is the
5565 case of GNU/Linux's "checkpoint" support, for example.
5566 Call the switch_to_xxx routine as appropriate. */
5567 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5568 if (thr != nullptr)
5569 switch_to_thread (thr);
5570 else
5571 {
5572 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5573 switch_to_inferior_no_thread (inf);
5574 }
5575 }
6c95b8df 5576 handle_vfork_child_exec_or_exit (0);
223ffa71 5577 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5578
0c557179
SDJ
5579 /* Clearing any previous state of convenience variables. */
5580 clear_exit_convenience_vars ();
5581
940c3c06
PA
5582 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5583 {
5584 /* Record the exit code in the convenience variable $_exitcode, so
5585 that the user can inspect this again later. */
5586 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5587 (LONGEST) ecs->ws.value.integer);
5588
5589 /* Also record this in the inferior itself. */
5590 current_inferior ()->has_exit_code = 1;
5591 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5592
98eb56a4
PA
5593 /* Support the --return-child-result option. */
5594 return_child_result_value = ecs->ws.value.integer;
5595
76727919 5596 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5597 }
5598 else
0c557179 5599 {
00431a78 5600 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5601
5602 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5603 {
5604 /* Set the value of the internal variable $_exitsignal,
5605 which holds the signal uncaught by the inferior. */
5606 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5607 gdbarch_gdb_signal_to_target (gdbarch,
5608 ecs->ws.value.sig));
5609 }
5610 else
5611 {
5612 /* We don't have access to the target's method used for
5613 converting between signal numbers (GDB's internal
5614 representation <-> target's representation).
5615 Therefore, we cannot do a good job at displaying this
5616 information to the user. It's better to just warn
5617 her about it (if infrun debugging is enabled), and
5618 give up. */
1eb8556f
SM
5619 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5620 "signal number.");
0c557179
SDJ
5621 }
5622
76727919 5623 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5624 }
8cf64490 5625
488f131b 5626 gdb_flush (gdb_stdout);
bc1e6c81 5627 target_mourn_inferior (inferior_ptid);
c4464ade 5628 stop_print_frame = false;
22bcd14b 5629 stop_waiting (ecs);
488f131b 5630 return;
c5aa993b 5631
488f131b 5632 case TARGET_WAITKIND_FORKED:
deb3b17b 5633 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5634 /* Check whether the inferior is displaced stepping. */
5635 {
00431a78 5636 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5637 struct gdbarch *gdbarch = regcache->arch ();
c0aba012 5638 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639 5639
aeeb758d
JB
5640 /* If this is a fork (child gets its own address space copy)
5641 and some displaced step buffers were in use at the time of
5642 the fork, restore the displaced step buffer bytes in the
5643 child process.
5644
5645 Architectures which support displaced stepping and fork
5646 events must supply an implementation of
5647 gdbarch_displaced_step_restore_all_in_ptid. This is not
5648 enforced during gdbarch validation to support architectures
5649 which support displaced stepping but not forks. */
5650 if (ecs->ws.kind == TARGET_WAITKIND_FORKED
5651 && gdbarch_supports_displaced_stepping (gdbarch))
187b041e
SM
5652 gdbarch_displaced_step_restore_all_in_ptid
5653 (gdbarch, parent_inf, ecs->ws.value.related_pid);
c0aba012
SM
5654
5655 /* If displaced stepping is supported, and thread ecs->ptid is
5656 displaced stepping. */
00431a78 5657 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639 5658 {
e2d96639
YQ
5659 struct regcache *child_regcache;
5660 CORE_ADDR parent_pc;
5661
5662 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5663 indicating that the displaced stepping of syscall instruction
5664 has been done. Perform cleanup for parent process here. Note
5665 that this operation also cleans up the child process for vfork,
5666 because their pages are shared. */
7def77a1 5667 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5668 /* Start a new step-over in another thread if there's one
5669 that needs it. */
5670 start_step_over ();
e2d96639 5671
e2d96639
YQ
5672 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5673 the child's PC is also within the scratchpad. Set the child's PC
5674 to the parent's PC value, which has already been fixed up.
5675 FIXME: we use the parent's aspace here, although we're touching
5676 the child, because the child hasn't been added to the inferior
5677 list yet at this point. */
5678
5679 child_regcache
5b6d1e4f
PA
5680 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5681 ecs->ws.value.related_pid,
e2d96639
YQ
5682 gdbarch,
5683 parent_inf->aspace);
5684 /* Read PC value of parent process. */
5685 parent_pc = regcache_read_pc (regcache);
5686
136821d9
SM
5687 displaced_debug_printf ("write child pc from %s to %s",
5688 paddress (gdbarch,
5689 regcache_read_pc (child_regcache)),
5690 paddress (gdbarch, parent_pc));
e2d96639
YQ
5691
5692 regcache_write_pc (child_regcache, parent_pc);
5693 }
5694 }
5695
00431a78 5696 context_switch (ecs);
5a2901d9 5697
b242c3c2
PA
5698 /* Immediately detach breakpoints from the child before there's
5699 any chance of letting the user delete breakpoints from the
5700 breakpoint lists. If we don't do this early, it's easy to
5701 leave leftover traps in the child, viz.: "break foo; catch
5702 fork; c; <fork>; del; c; <child calls foo>". We only follow
5703 the fork on the last `continue', and by that time the
5704 breakpoint at "foo" is long gone from the breakpoint table.
5705 If we vforked, then we don't need to unpatch here, since both
5706 parent and child are sharing the same memory pages; we'll
5707 need to unpatch at follow/detach time instead to be certain
5708 that new breakpoints added between catchpoint hit time and
5709 vfork follow are detached. */
5710 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5711 {
b242c3c2
PA
5712 /* This won't actually modify the breakpoint list, but will
5713 physically remove the breakpoints from the child. */
d80ee84f 5714 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5715 }
5716
34b7e8a6 5717 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5718
e58b0e63
PA
5719 /* In case the event is caught by a catchpoint, remember that
5720 the event is to be followed at the next resume of the thread,
5721 and not immediately. */
5722 ecs->event_thread->pending_follow = ecs->ws;
5723
f2ffa92b
PA
5724 ecs->event_thread->suspend.stop_pc
5725 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5726
16c381f0 5727 ecs->event_thread->control.stop_bpstat
a01bda52 5728 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5729 ecs->event_thread->suspend.stop_pc,
5730 ecs->event_thread, &ecs->ws);
675bf4cb 5731
c65d6b55
PA
5732 if (handle_stop_requested (ecs))
5733 return;
5734
ce12b012
PA
5735 /* If no catchpoint triggered for this, then keep going. Note
5736 that we're interested in knowing the bpstat actually causes a
5737 stop, not just if it may explain the signal. Software
5738 watchpoints, for example, always appear in the bpstat. */
5739 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5740 {
5ab2fbf1 5741 bool follow_child
3e43a32a 5742 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5743
a493e3e2 5744 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5745
5b6d1e4f
PA
5746 process_stratum_target *targ
5747 = ecs->event_thread->inf->process_target ();
5748
5ab2fbf1 5749 bool should_resume = follow_fork ();
e58b0e63 5750
5b6d1e4f
PA
5751 /* Note that one of these may be an invalid pointer,
5752 depending on detach_fork. */
00431a78 5753 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5754 thread_info *child
5755 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5756
a2077e25
PA
5757 /* At this point, the parent is marked running, and the
5758 child is marked stopped. */
5759
5760 /* If not resuming the parent, mark it stopped. */
5761 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5762 parent->set_running (false);
a2077e25
PA
5763
5764 /* If resuming the child, mark it running. */
5765 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5766 child->set_running (true);
a2077e25 5767
6c95b8df 5768 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5769 if (!detach_fork && (non_stop
5770 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5771 {
5772 if (follow_child)
5773 switch_to_thread (parent);
5774 else
5775 switch_to_thread (child);
5776
5777 ecs->event_thread = inferior_thread ();
5778 ecs->ptid = inferior_ptid;
5779 keep_going (ecs);
5780 }
5781
5782 if (follow_child)
5783 switch_to_thread (child);
5784 else
5785 switch_to_thread (parent);
5786
e58b0e63
PA
5787 ecs->event_thread = inferior_thread ();
5788 ecs->ptid = inferior_ptid;
5789
5790 if (should_resume)
1595475b
SM
5791 {
5792 /* Never call switch_back_to_stepped_thread if we are waiting for
5793 vfork-done (waiting for an external vfork child to exec or
5794 exit). We will resume only the vforking thread for the purpose
5795 of collecting the vfork-done event, and we will restart any
5796 step once the critical shared address space window is done. */
5797 if (parent->inf->thread_waiting_for_vfork_done != nullptr
5798 || !switch_back_to_stepped_thread (ecs))
5799 keep_going (ecs);
5800 }
e58b0e63 5801 else
22bcd14b 5802 stop_waiting (ecs);
04e68871
DJ
5803 return;
5804 }
94c57d6a
PA
5805 process_event_stop_test (ecs);
5806 return;
488f131b 5807
6c95b8df
PA
5808 case TARGET_WAITKIND_VFORK_DONE:
5809 /* Done with the shared memory region. Re-insert breakpoints in
5810 the parent, and keep going. */
5811
00431a78 5812 context_switch (ecs);
6c95b8df 5813
81d92403
SM
5814 handle_vfork_done (ecs->event_thread);
5815 gdb_assert (inferior_thread () == ecs->event_thread);
c65d6b55
PA
5816
5817 if (handle_stop_requested (ecs))
5818 return;
5819
1595475b
SM
5820 if (!switch_back_to_stepped_thread (ecs))
5821 {
5822 gdb_assert (inferior_thread () == ecs->event_thread);
5823 /* This also takes care of reinserting breakpoints in the
5824 previously locked inferior. */
5825 keep_going (ecs);
5826 }
6c95b8df
PA
5827 return;
5828
488f131b 5829 case TARGET_WAITKIND_EXECD:
488f131b 5830
cbd2b4e3
PA
5831 /* Note we can't read registers yet (the stop_pc), because we
5832 don't yet know the inferior's post-exec architecture.
5833 'stop_pc' is explicitly read below instead. */
00431a78 5834 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5835
6c95b8df
PA
5836 /* Do whatever is necessary to the parent branch of the vfork. */
5837 handle_vfork_child_exec_or_exit (1);
5838
795e548f 5839 /* This causes the eventpoints and symbol table to be reset.
dda83cd7
SM
5840 Must do this now, before trying to determine whether to
5841 stop. */
71b43ef8 5842 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5843
17d8546e
DB
5844 /* In follow_exec we may have deleted the original thread and
5845 created a new one. Make sure that the event thread is the
5846 execd thread for that case (this is a nop otherwise). */
5847 ecs->event_thread = inferior_thread ();
5848
f2ffa92b
PA
5849 ecs->event_thread->suspend.stop_pc
5850 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5851
16c381f0 5852 ecs->event_thread->control.stop_bpstat
a01bda52 5853 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5854 ecs->event_thread->suspend.stop_pc,
5855 ecs->event_thread, &ecs->ws);
795e548f 5856
71b43ef8
PA
5857 /* Note that this may be referenced from inside
5858 bpstat_stop_status above, through inferior_has_execd. */
5859 xfree (ecs->ws.value.execd_pathname);
5860 ecs->ws.value.execd_pathname = NULL;
5861
c65d6b55
PA
5862 if (handle_stop_requested (ecs))
5863 return;
5864
04e68871 5865 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5866 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5867 {
a493e3e2 5868 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5869 keep_going (ecs);
5870 return;
5871 }
94c57d6a
PA
5872 process_event_stop_test (ecs);
5873 return;
488f131b 5874
b4dc5ffa 5875 /* Be careful not to try to gather much state about a thread
dda83cd7 5876 that's in a syscall. It's frequently a losing proposition. */
488f131b 5877 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5878 /* Getting the current syscall number. */
94c57d6a
PA
5879 if (handle_syscall_event (ecs) == 0)
5880 process_event_stop_test (ecs);
5881 return;
c906108c 5882
488f131b 5883 /* Before examining the threads further, step this thread to
dda83cd7
SM
5884 get it entirely out of the syscall. (We get notice of the
5885 event when the thread is just on the verge of exiting a
5886 syscall. Stepping one instruction seems to get it back
5887 into user code.) */
488f131b 5888 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5889 if (handle_syscall_event (ecs) == 0)
5890 process_event_stop_test (ecs);
5891 return;
c906108c 5892
488f131b 5893 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5894 handle_signal_stop (ecs);
5895 return;
c906108c 5896
b2175913
MS
5897 case TARGET_WAITKIND_NO_HISTORY:
5898 /* Reverse execution: target ran out of history info. */
eab402df 5899
d1988021 5900 /* Switch to the stopped thread. */
00431a78 5901 context_switch (ecs);
1eb8556f 5902 infrun_debug_printf ("stopped");
d1988021 5903
34b7e8a6 5904 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5905 ecs->event_thread->suspend.stop_pc
5906 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5907
5908 if (handle_stop_requested (ecs))
5909 return;
5910
76727919 5911 gdb::observers::no_history.notify ();
22bcd14b 5912 stop_waiting (ecs);
b2175913 5913 return;
488f131b 5914 }
4f5d7f63
PA
5915}
5916
372316f1 5917/* Restart threads back to what they were trying to do back when we
4ffff7d3
SM
5918 paused them (because of an in-line step-over or vfork, for example).
5919 The EVENT_THREAD thread is ignored (not restarted).
5920
5921 If INF is non-nullptr, only resume threads from INF. */
4d9d9d04
PA
5922
5923static void
4ffff7d3 5924restart_threads (struct thread_info *event_thread, inferior *inf)
372316f1 5925{
4ffff7d3
SM
5926 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
5927 event_thread->ptid.to_string ().c_str (),
5928 inf != nullptr ? inf->num : -1);
5929
372316f1
PA
5930 /* In case the instruction just stepped spawned a new thread. */
5931 update_thread_list ();
5932
08036331 5933 for (thread_info *tp : all_non_exited_threads ())
372316f1 5934 {
4ffff7d3
SM
5935 if (inf != nullptr && tp->inf != inf)
5936 continue;
5937
ac7d717c
PA
5938 if (tp->inf->detaching)
5939 {
5940 infrun_debug_printf ("restart threads: [%s] inferior detaching",
5941 target_pid_to_str (tp->ptid).c_str ());
5942 continue;
5943 }
5944
f3f8ece4
PA
5945 switch_to_thread_no_regs (tp);
5946
372316f1
PA
5947 if (tp == event_thread)
5948 {
1eb8556f
SM
5949 infrun_debug_printf ("restart threads: [%s] is event thread",
5950 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5951 continue;
5952 }
5953
5954 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5955 {
1eb8556f
SM
5956 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5957 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5958 continue;
5959 }
5960
5961 if (tp->resumed)
5962 {
1eb8556f
SM
5963 infrun_debug_printf ("restart threads: [%s] resumed",
5964 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5965 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5966 continue;
5967 }
5968
5969 if (thread_is_in_step_over_chain (tp))
5970 {
1eb8556f
SM
5971 infrun_debug_printf ("restart threads: [%s] needs step-over",
5972 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5973 gdb_assert (!tp->resumed);
5974 continue;
5975 }
5976
5977
5978 if (tp->suspend.waitstatus_pending_p)
5979 {
1eb8556f
SM
5980 infrun_debug_printf ("restart threads: [%s] has pending status",
5981 target_pid_to_str (tp->ptid).c_str ());
719546c4 5982 tp->resumed = true;
372316f1
PA
5983 continue;
5984 }
5985
c65d6b55
PA
5986 gdb_assert (!tp->stop_requested);
5987
372316f1
PA
5988 /* If some thread needs to start a step-over at this point, it
5989 should still be in the step-over queue, and thus skipped
5990 above. */
5991 if (thread_still_needs_step_over (tp))
5992 {
5993 internal_error (__FILE__, __LINE__,
5994 "thread [%s] needs a step-over, but not in "
5995 "step-over queue\n",
a068643d 5996 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5997 }
5998
5999 if (currently_stepping (tp))
6000 {
1eb8556f
SM
6001 infrun_debug_printf ("restart threads: [%s] was stepping",
6002 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
6003 keep_going_stepped_thread (tp);
6004 }
6005 else
6006 {
6007 struct execution_control_state ecss;
6008 struct execution_control_state *ecs = &ecss;
6009
1eb8556f
SM
6010 infrun_debug_printf ("restart threads: [%s] continuing",
6011 target_pid_to_str (tp->ptid).c_str ());
372316f1 6012 reset_ecs (ecs, tp);
00431a78 6013 switch_to_thread (tp);
372316f1
PA
6014 keep_going_pass_signal (ecs);
6015 }
6016 }
6017}
6018
6019/* Callback for iterate_over_threads. Find a resumed thread that has
6020 a pending waitstatus. */
6021
6022static int
6023resumed_thread_with_pending_status (struct thread_info *tp,
6024 void *arg)
6025{
6026 return (tp->resumed
6027 && tp->suspend.waitstatus_pending_p);
6028}
6029
6030/* Called when we get an event that may finish an in-line or
6031 out-of-line (displaced stepping) step-over started previously.
6032 Return true if the event is processed and we should go back to the
6033 event loop; false if the caller should continue processing the
6034 event. */
6035
6036static int
4d9d9d04
PA
6037finish_step_over (struct execution_control_state *ecs)
6038{
7def77a1
SM
6039 displaced_step_finish (ecs->event_thread,
6040 ecs->event_thread->suspend.stop_signal);
4d9d9d04 6041
c4464ade 6042 bool had_step_over_info = step_over_info_valid_p ();
372316f1
PA
6043
6044 if (had_step_over_info)
4d9d9d04
PA
6045 {
6046 /* If we're stepping over a breakpoint with all threads locked,
6047 then only the thread that was stepped should be reporting
6048 back an event. */
6049 gdb_assert (ecs->event_thread->control.trap_expected);
6050
c65d6b55 6051 clear_step_over_info ();
4d9d9d04
PA
6052 }
6053
fbea99ea 6054 if (!target_is_non_stop_p ())
372316f1 6055 return 0;
4d9d9d04
PA
6056
6057 /* Start a new step-over in another thread if there's one that
6058 needs it. */
6059 start_step_over ();
372316f1
PA
6060
6061 /* If we were stepping over a breakpoint before, and haven't started
6062 a new in-line step-over sequence, then restart all other threads
6063 (except the event thread). We can't do this in all-stop, as then
6064 e.g., we wouldn't be able to issue any other remote packet until
6065 these other threads stop. */
6066 if (had_step_over_info && !step_over_info_valid_p ())
6067 {
6068 struct thread_info *pending;
6069
6070 /* If we only have threads with pending statuses, the restart
6071 below won't restart any thread and so nothing re-inserts the
6072 breakpoint we just stepped over. But we need it inserted
6073 when we later process the pending events, otherwise if
6074 another thread has a pending event for this breakpoint too,
6075 we'd discard its event (because the breakpoint that
6076 originally caused the event was no longer inserted). */
00431a78 6077 context_switch (ecs);
372316f1
PA
6078 insert_breakpoints ();
6079
6080 restart_threads (ecs->event_thread);
6081
6082 /* If we have events pending, go through handle_inferior_event
6083 again, picking up a pending event at random. This avoids
6084 thread starvation. */
6085
6086 /* But not if we just stepped over a watchpoint in order to let
6087 the instruction execute so we can evaluate its expression.
6088 The set of watchpoints that triggered is recorded in the
6089 breakpoint objects themselves (see bp->watchpoint_triggered).
6090 If we processed another event first, that other event could
6091 clobber this info. */
6092 if (ecs->event_thread->stepping_over_watchpoint)
6093 return 0;
6094
6095 pending = iterate_over_threads (resumed_thread_with_pending_status,
6096 NULL);
6097 if (pending != NULL)
6098 {
6099 struct thread_info *tp = ecs->event_thread;
6100 struct regcache *regcache;
6101
1eb8556f
SM
6102 infrun_debug_printf ("found resumed threads with "
6103 "pending events, saving status");
372316f1
PA
6104
6105 gdb_assert (pending != tp);
6106
6107 /* Record the event thread's event for later. */
6108 save_waitstatus (tp, &ecs->ws);
6109 /* This was cleared early, by handle_inferior_event. Set it
6110 so this pending event is considered by
6111 do_target_wait. */
719546c4 6112 tp->resumed = true;
372316f1
PA
6113
6114 gdb_assert (!tp->executing);
6115
00431a78 6116 regcache = get_thread_regcache (tp);
372316f1
PA
6117 tp->suspend.stop_pc = regcache_read_pc (regcache);
6118
1eb8556f
SM
6119 infrun_debug_printf ("saved stop_pc=%s for %s "
6120 "(currently_stepping=%d)",
6121 paddress (target_gdbarch (),
dda83cd7 6122 tp->suspend.stop_pc),
1eb8556f
SM
6123 target_pid_to_str (tp->ptid).c_str (),
6124 currently_stepping (tp));
372316f1
PA
6125
6126 /* This in-line step-over finished; clear this so we won't
6127 start a new one. This is what handle_signal_stop would
6128 do, if we returned false. */
6129 tp->stepping_over_breakpoint = 0;
6130
6131 /* Wake up the event loop again. */
6132 mark_async_event_handler (infrun_async_inferior_event_token);
6133
6134 prepare_to_wait (ecs);
6135 return 1;
6136 }
6137 }
6138
6139 return 0;
4d9d9d04
PA
6140}
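/* Note on the return value above: the only nonzero return is the
   non-stop path where some other resumed thread already has a pending
   event.  In that case the just-received event is stashed with
   save_waitstatus, the event loop is re-armed, and the caller
   (handle_signal_stop) bails out early so that do_target_wait can pick
   among the pending events without starving any thread.  */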
6141
4f5d7f63
PA
6142/* Come here when the program has stopped with a signal. */
6143
6144static void
6145handle_signal_stop (struct execution_control_state *ecs)
6146{
6147 struct frame_info *frame;
6148 struct gdbarch *gdbarch;
6149 int stopped_by_watchpoint;
6150 enum stop_kind stop_soon;
6151 int random_signal;
c906108c 6152
f0407826
DE
6153 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
6154
c65d6b55
PA
6155 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
6156
f0407826
DE
6157 /* Do we need to clean up the state of a thread that has
6158 completed a displaced single-step? (Doing so usually affects
6159 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
6160 if (finish_step_over (ecs))
6161 return;
f0407826
DE
6162
6163 /* If we either finished a single-step or hit a breakpoint, but
6164 the user wanted this thread to be stopped, pretend we got a
6165 SIG0 (generic unsignaled stop). */
6166 if (ecs->event_thread->stop_requested
6167 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
6168 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 6169
f2ffa92b
PA
6170 ecs->event_thread->suspend.stop_pc
6171 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 6172
2ab76a18
PA
6173 context_switch (ecs);
6174
6175 if (deprecated_context_hook)
6176 deprecated_context_hook (ecs->event_thread->global_num);
6177
527159b7 6178 if (debug_infrun)
237fc4c9 6179 {
00431a78 6180 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 6181 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 6182
1eb8556f
SM
6183 infrun_debug_printf ("stop_pc=%s",
6184 paddress (reg_gdbarch,
6185 ecs->event_thread->suspend.stop_pc));
d92524f1 6186 if (target_stopped_by_watchpoint ())
237fc4c9 6187 {
dda83cd7 6188 CORE_ADDR addr;
abbb1732 6189
1eb8556f 6190 infrun_debug_printf ("stopped by watchpoint");
237fc4c9 6191
328d42d8
SM
6192 if (target_stopped_data_address (current_inferior ()->top_target (),
6193 &addr))
1eb8556f 6194 infrun_debug_printf ("stopped data address=%s",
dda83cd7
SM
6195 paddress (reg_gdbarch, addr));
6196 else
1eb8556f 6197 infrun_debug_printf ("(no data address available)");
237fc4c9
PA
6198 }
6199 }
527159b7 6200
36fa8042
PA
6201 /* This originates from start_remote(), start_inferior() and
6202 shared library hook functions. */
00431a78 6203 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
6204 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6205 {
1eb8556f 6206 infrun_debug_printf ("quietly stopped");
c4464ade 6207 stop_print_frame = true;
22bcd14b 6208 stop_waiting (ecs);
36fa8042
PA
6209 return;
6210 }
6211
36fa8042
PA
6212 /* This originates from attach_command(). We need to overwrite
6213 the stop_signal here, because some kernels don't ignore a
6214 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6215 See more comments in inferior.h. On the other hand, if we
6216 get a non-SIGSTOP, report it to the user - assume the backend
6217 will handle the SIGSTOP if it should show up later.
6218
6219 Also consider that the attach is complete when we see a
6220 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6221 target extended-remote report it instead of a SIGSTOP
6222 (e.g. gdbserver). We already rely on SIGTRAP being our
6223 signal, so this is no exception.
6224
6225 Also consider that the attach is complete when we see a
6226 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6227 the target to stop all threads of the inferior, in case the
6228 low level attach operation doesn't stop them implicitly. If
6229 they weren't stopped implicitly, then the stub will report a
6230 GDB_SIGNAL_0, meaning: stopped for no particular reason
6231 other than GDB's request. */
6232 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6233 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
6234 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6235 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
6236 {
c4464ade 6237 stop_print_frame = true;
22bcd14b 6238 stop_waiting (ecs);
36fa8042
PA
6239 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6240 return;
6241 }
6242
568d6575
UW
6243 /* At this point, get hold of the now-current thread's frame. */
6244 frame = get_current_frame ();
6245 gdbarch = get_frame_arch (frame);
6246
2adfaa28 6247 /* Pull the single step breakpoints out of the target. */
af48d08f 6248 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 6249 {
af48d08f 6250 struct regcache *regcache;
af48d08f 6251 CORE_ADDR pc;
2adfaa28 6252
00431a78 6253 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
6254 const address_space *aspace = regcache->aspace ();
6255
af48d08f 6256 pc = regcache_read_pc (regcache);
34b7e8a6 6257
af48d08f
PA
6258 /* However, before doing so, if this single-step breakpoint was
6259 actually for another thread, set this thread up for moving
6260 past it. */
6261 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6262 aspace, pc))
6263 {
6264 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 6265 {
1eb8556f
SM
6266 infrun_debug_printf ("[%s] hit another thread's single-step "
6267 "breakpoint",
6268 target_pid_to_str (ecs->ptid).c_str ());
af48d08f
PA
6269 ecs->hit_singlestep_breakpoint = 1;
6270 }
6271 }
6272 else
6273 {
1eb8556f
SM
6274 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6275 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 6276 }
488f131b 6277 }
af48d08f 6278 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 6279
963f9c80
PA
6280 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6281 && ecs->event_thread->control.trap_expected
6282 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
6283 stopped_by_watchpoint = 0;
6284 else
6285 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
6286
6287 /* If necessary, step over this watchpoint. We'll be back to display
6288 it in a moment. */
6289 if (stopped_by_watchpoint
9aed480c 6290 && (target_have_steppable_watchpoint ()
568d6575 6291 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 6292 {
488f131b 6293 /* At this point, we are stopped at an instruction which has
dda83cd7
SM
6294 attempted to write to a piece of memory under control of
6295 a watchpoint. The instruction hasn't actually executed
6296 yet. If we were to evaluate the watchpoint expression
6297 now, we would get the old value, and therefore no change
6298 would seem to have occurred.
6299
6300 In order to make watchpoints work `right', we really need
6301 to complete the memory write, and then evaluate the
6302 watchpoint expression. We do this by single-stepping the
d983da9c
DJ
6303 target.
6304
7f89fd65 6305 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
6306 it. For example, the PA can (with some kernel cooperation)
6307 single step over a watchpoint without disabling the watchpoint.
6308
6309 It is far more common to need to disable a watchpoint to step
6310 the inferior over it. If we have non-steppable watchpoints,
6311 we must disable the current watchpoint; it's simplest to
963f9c80
PA
6312 disable all watchpoints.
6313
6314 Any breakpoint at PC must also be stepped over -- if there's
6315 one, it will have already triggered before the watchpoint
6316 triggered, and we either already reported it to the user, or
6317 it didn't cause a stop and we called keep_going. In either
6318 case, if there was a breakpoint at PC, we must be trying to
6319 step past it. */
6320 ecs->event_thread->stepping_over_watchpoint = 1;
6321 keep_going (ecs);
488f131b
JB
6322 return;
6323 }
6324
4e1c45ea 6325 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 6326 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
6327 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6328 ecs->event_thread->control.stop_step = 0;
c4464ade 6329 stop_print_frame = true;
488f131b 6330 stopped_by_random_signal = 0;
ddfe970e 6331 bpstat stop_chain = NULL;
488f131b 6332
edb3359d
DJ
6333 /* Hide inlined functions starting here, unless we just performed stepi or
6334 nexti. After stepi and nexti, always show the innermost frame (not any
6335 inline function call sites). */
16c381f0 6336 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 6337 {
00431a78
PA
6338 const address_space *aspace
6339 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
6340
6341 /* skip_inline_frames is expensive, so we avoid it if we can
6342 determine that the address is one where functions cannot have
6343 been inlined. This improves performance with inferiors that
6344 load a lot of shared libraries, because the solib event
6345 breakpoint is defined as the address of a function (i.e. not
6346 inline). Note that we have to check the previous PC as well
6347 as the current one to catch cases when we have just
6348 single-stepped off a breakpoint prior to reinstating it.
6349 Note that we're assuming that the code we single-step to is
6350 not inline, but that's not definitive: there's nothing
6351 preventing the event breakpoint function from containing
6352 inlined code, and the single-step ending up there. If the
6353 user had set a breakpoint on that inlined code, the missing
6354 skip_inline_frames call would break things. Fortunately
6355 that's an extremely unlikely scenario. */
f2ffa92b
PA
6356 if (!pc_at_non_inline_function (aspace,
6357 ecs->event_thread->suspend.stop_pc,
6358 &ecs->ws)
a210c238
MR
6359 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6360 && ecs->event_thread->control.trap_expected
6361 && pc_at_non_inline_function (aspace,
6362 ecs->event_thread->prev_pc,
09ac7c10 6363 &ecs->ws)))
1c5a993e 6364 {
f2ffa92b
PA
6365 stop_chain = build_bpstat_chain (aspace,
6366 ecs->event_thread->suspend.stop_pc,
6367 &ecs->ws);
00431a78 6368 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
6369
6370 /* Re-fetch current thread's frame in case that invalidated
6371 the frame cache. */
6372 frame = get_current_frame ();
6373 gdbarch = get_frame_arch (frame);
6374 }
0574c78f 6375 }
edb3359d 6376
a493e3e2 6377 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 6378 && ecs->event_thread->control.trap_expected
568d6575 6379 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6380 && currently_stepping (ecs->event_thread))
3352ef37 6381 {
b50d7442 6382 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6383 also on an instruction that needs to be stepped multiple
1777feb0 6384 times before it has fully executed. E.g., architectures
3352ef37
AC
6385 with a delay slot. It needs to be stepped twice, once for
6386 the instruction and once for the delay slot. */
6387 int step_through_delay
568d6575 6388 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6389
1eb8556f
SM
6390 if (step_through_delay)
6391 infrun_debug_printf ("step through delay");
6392
16c381f0
JK
6393 if (ecs->event_thread->control.step_range_end == 0
6394 && step_through_delay)
3352ef37
AC
6395 {
6396 /* The user issued a continue when stopped at a breakpoint.
6397 Set up for another trap and get out of here. */
dda83cd7
SM
6398 ecs->event_thread->stepping_over_breakpoint = 1;
6399 keep_going (ecs);
6400 return;
3352ef37
AC
6401 }
6402 else if (step_through_delay)
6403 {
6404 /* The user issued a step when stopped at a breakpoint.
6405 Maybe we should stop, maybe we should not - the delay
6406 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6407 case, don't decide that here, just set
6408 ecs->stepping_over_breakpoint, making sure we
6409 single-step again before breakpoints are re-inserted. */
4e1c45ea 6410 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6411 }
6412 }
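/* Illustrative example (MIPS being the classic delay-slot case): a
   branch and the instruction in its delay slot execute as a pair, so
   stepping off a breakpoint that sits on the branch takes two
   single-steps -- one for the branch, one for the slot -- before
   breakpoints may safely be re-inserted.  That lingering extra step is
   what gdbarch_single_step_through_delay reports here.  */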
6413
ab04a2af
TT
6414 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6415 handles this event. */
6416 ecs->event_thread->control.stop_bpstat
a01bda52 6417 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
6418 ecs->event_thread->suspend.stop_pc,
6419 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6420
ab04a2af
TT
6421 /* Following in case break condition called a
6422 function. */
c4464ade 6423 stop_print_frame = true;
73dd234f 6424
ab04a2af
TT
6425 /* This is where we handle "moribund" watchpoints. Unlike
6426 software breakpoints traps, hardware watchpoint traps are
6427 always distinguishable from random traps. If no high-level
6428 watchpoint is associated with the reported stop data address
6429 anymore, then the bpstat does not explain the signal ---
6430 simply make sure to ignore it if `stopped_by_watchpoint' is
6431 set. */
6432
1eb8556f 6433 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6434 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6435 GDB_SIGNAL_TRAP)
ab04a2af 6436 && stopped_by_watchpoint)
1eb8556f
SM
6437 {
6438 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6439 "ignoring");
6440 }
73dd234f 6441
bac7d97b 6442 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6443 at one stage in the past included checks for an inferior
6444 function call's call dummy's return breakpoint. The original
6445 comment, that went with the test, read:
03cebad2 6446
ab04a2af
TT
6447 ``End of a stack dummy. Some systems (e.g. Sony news) give
6448 another signal besides SIGTRAP, so check here as well as
6449 above.''
73dd234f 6450
ab04a2af
TT
6451 If someone ever tries to get call dummies on a
6452 non-executable stack to work (where the target would stop
6453 with something like a SIGSEGV), then those tests might need
6454 to be re-instated. Given, however, that the tests were only
6455 enabled when momentary breakpoints were not being used, I
6456 suspect that it won't be the case.
488f131b 6457
ab04a2af
TT
6458 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6459 be necessary for call dummies on a non-executable stack on
6460 SPARC. */
488f131b 6461
bac7d97b 6462 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6463 random_signal
6464 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6465 ecs->event_thread->suspend.stop_signal);
bac7d97b 6466
1cf4d951
PA
6467 /* Maybe this was a trap for a software breakpoint that has since
6468 been removed. */
6469 if (random_signal && target_stopped_by_sw_breakpoint ())
6470 {
5133a315
LM
6471 if (gdbarch_program_breakpoint_here_p (gdbarch,
6472 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6473 {
6474 struct regcache *regcache;
6475 int decr_pc;
6476
6477 /* Re-adjust PC to what the program would see if GDB was not
6478 debugging it. */
00431a78 6479 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6480 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6481 if (decr_pc != 0)
6482 {
07036511
TT
6483 gdb::optional<scoped_restore_tmpl<int>>
6484 restore_operation_disable;
1cf4d951
PA
6485
6486 if (record_full_is_used ())
07036511
TT
6487 restore_operation_disable.emplace
6488 (record_full_gdb_operation_disable_set ());
1cf4d951 6489
f2ffa92b
PA
6490 regcache_write_pc (regcache,
6491 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6492 }
6493 }
6494 else
6495 {
6496 /* A delayed software breakpoint event. Ignore the trap. */
1eb8556f 6497 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
6498 random_signal = 0;
6499 }
6500 }
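/* Concrete example, assuming an x86 target: the int3 breakpoint
   instruction is one byte long and the trap reports a PC just past it,
   so gdbarch_decr_pc_after_break returns 1 there.  The rewind above
   lets the program observe the PC it would have seen with no debugger
   attached, e.g. if a signal handler inspects its own context.  */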
6501
6502 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6503 has since been removed. */
6504 if (random_signal && target_stopped_by_hw_breakpoint ())
6505 {
6506 /* A delayed hardware breakpoint event. Ignore the trap. */
1eb8556f
SM
6507 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6508 "trap, ignoring");
1cf4d951
PA
6509 random_signal = 0;
6510 }
6511
bac7d97b
PA
6512 /* If not, perhaps stepping/nexting can. */
6513 if (random_signal)
6514 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6515 && currently_stepping (ecs->event_thread));
ab04a2af 6516
2adfaa28
PA
6517 /* Perhaps the thread hit a single-step breakpoint of _another_
6518 thread. Single-step breakpoints are transparent to the
6519 breakpoints module. */
6520 if (random_signal)
6521 random_signal = !ecs->hit_singlestep_breakpoint;
6522
bac7d97b
PA
6523 /* No? Perhaps we got a moribund watchpoint. */
6524 if (random_signal)
6525 random_signal = !stopped_by_watchpoint;
ab04a2af 6526
c65d6b55
PA
6527 /* Always stop if the user explicitly requested this thread to
6528 remain stopped. */
6529 if (ecs->event_thread->stop_requested)
6530 {
6531 random_signal = 1;
1eb8556f 6532 infrun_debug_printf ("user-requested stop");
c65d6b55
PA
6533 }
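/* Taken together, the checks above leave RANDOM_SIGNAL set only when
   neither the breakpoints module, nor an in-progress step, nor another
   thread's single-step breakpoint, nor a moribund watchpoint explains
   the trap -- or when the user explicitly asked this thread to stop,
   in which case the stop is always reported.  */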
6534
488f131b
JB
6535 /* For the program's own signals, act according to
6536 the signal handling tables. */
6537
ce12b012 6538 if (random_signal)
488f131b
JB
6539 {
6540 /* Signal not for debugging purposes. */
c9737c08 6541 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6542
1eb8556f
SM
6543 infrun_debug_printf ("random signal (%s)",
6544 gdb_signal_to_symbol_string (stop_signal));
527159b7 6545
488f131b
JB
6546 stopped_by_random_signal = 1;
6547
252fbfc8
PA
6548 /* Always stop on signals if we're either just gaining control
6549 of the program, or the user explicitly requested this thread
6550 to remain stopped. */
d6b48e9c 6551 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6552 || ecs->event_thread->stop_requested
8ff53139 6553 || signal_stop_state (ecs->event_thread->suspend.stop_signal))
488f131b 6554 {
22bcd14b 6555 stop_waiting (ecs);
488f131b
JB
6556 return;
6557 }
b57bacec
PA
6558
6559 /* Notify observers the signal has "handle print" set. Note we
6560 returned early above if stopping; normal_stop handles the
6561 printing in that case. */
6562 if (signal_print[ecs->event_thread->suspend.stop_signal])
6563 {
6564 /* The signal table tells us to print about this signal. */
223ffa71 6565 target_terminal::ours_for_output ();
76727919 6566 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6567 target_terminal::inferior ();
b57bacec 6568 }
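/* User-visible effect, for example after "handle SIGUSR1 nostop print
   pass": delivering SIGUSR1 makes GDB emit a one-line notification
   through this observer and then resume, passing the signal on to the
   program instead of returning control to the user.  */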
488f131b
JB
6569
6570 /* Clear the signal if it should not be passed. */
16c381f0 6571 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6572 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6573
f2ffa92b 6574 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6575 && ecs->event_thread->control.trap_expected
8358c15c 6576 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6577 {
6578 /* We were just starting a new sequence, attempting to
6579 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6580 Instead this signal arrives. This signal will take us out
68f53502
AC
6581 of the stepping range so GDB needs to remember to, when
6582 the signal handler returns, resume stepping off that
6583 breakpoint. */
6584 /* To simplify things, "continue" is forced to use the same
6585 code paths as single-step - set a breakpoint at the
6586 signal return address and then, once hit, step off that
6587 breakpoint. */
1eb8556f 6588 infrun_debug_printf ("signal arrived while stepping over breakpoint");
d3169d93 6589
2c03e5be 6590 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6591 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6592 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6593 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6594
6595 /* If we were nexting/stepping some other thread, switch to
6596 it, so that we don't continue it, losing control. */
6597 if (!switch_back_to_stepped_thread (ecs))
6598 keep_going (ecs);
9d799f85 6599 return;
68f53502 6600 }
9d799f85 6601
e5f8a7cc 6602 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6603 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6604 ecs->event_thread)
e5f8a7cc 6605 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6606 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6607 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6608 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6609 {
6610 /* The inferior is about to take a signal that will take it
6611 out of the single step range. Set a breakpoint at the
6612 current PC (which is presumably where the signal handler
6613 will eventually return) and then allow the inferior to
6614 run free.
6615
6616 Note that this is only needed for a signal delivered
6617 while in the single-step range. Nested signals aren't a
6618 problem as they eventually all return. */
1eb8556f 6619 infrun_debug_printf ("signal may take us out of single-step range");
237fc4c9 6620
372316f1 6621 clear_step_over_info ();
2c03e5be 6622 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6623 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6624 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6625 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6626 keep_going (ecs);
6627 return;
d303a6c7 6628 }
9d799f85 6629
85102364 6630 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6631 when either there's a nested signal, or when there's a
6632 pending signal enabled just as the signal handler returns
6633 (leaving the inferior at the step-resume-breakpoint without
6634 actually executing it). Either way continue until the
6635 breakpoint is really hit. */
c447ac0b
PA
6636
6637 if (!switch_back_to_stepped_thread (ecs))
6638 {
1eb8556f 6639 infrun_debug_printf ("random signal, keep going");
c447ac0b
PA
6640
6641 keep_going (ecs);
6642 }
6643 return;
488f131b 6644 }
94c57d6a
PA
6645
6646 process_event_stop_test (ecs);
6647}
6648
6649/* Come here when we've got some debug event / signal we can explain
6650 (IOW, not a random signal), and test whether it should cause a
6651 stop, or whether we should resume the inferior (transparently).
6652 E.g., could be a breakpoint whose condition evaluates false; we
6653 could be still stepping within the line; etc. */
6654
6655static void
6656process_event_stop_test (struct execution_control_state *ecs)
6657{
6658 struct symtab_and_line stop_pc_sal;
6659 struct frame_info *frame;
6660 struct gdbarch *gdbarch;
cdaa5b73
PA
6661 CORE_ADDR jmp_buf_pc;
6662 struct bpstat_what what;
94c57d6a 6663
cdaa5b73 6664 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6665
cdaa5b73
PA
6666 frame = get_current_frame ();
6667 gdbarch = get_frame_arch (frame);
fcf3daef 6668
cdaa5b73 6669 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6670
cdaa5b73
PA
6671 if (what.call_dummy)
6672 {
6673 stop_stack_dummy = what.call_dummy;
6674 }
186c406b 6675
243a9253
PA
6676 /* A few breakpoint types have callbacks associated (e.g.,
6677 bp_jit_event). Run them now. */
6678 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6679
cdaa5b73
PA
6680 /* If we hit an internal event that triggers symbol changes, the
6681 current frame will be invalidated within bpstat_what (e.g., if we
6682 hit an internal solib event). Re-fetch it. */
6683 frame = get_current_frame ();
6684 gdbarch = get_frame_arch (frame);
e2e4d78b 6685
cdaa5b73
PA
6686 switch (what.main_action)
6687 {
6688 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6689 /* If we hit the breakpoint at longjmp while stepping, we
6690 install a momentary breakpoint at the target of the
6691 jmp_buf. */
186c406b 6692
1eb8556f 6693 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 6694
cdaa5b73 6695 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6696
cdaa5b73
PA
6697 if (what.is_longjmp)
6698 {
6699 struct value *arg_value;
6700
6701 /* If we set the longjmp breakpoint via a SystemTap probe,
6702 then use it to extract the arguments. The destination PC
6703 is the third argument to the probe (zero-based index 2 in the call below). */
6704 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6705 if (arg_value)
8fa0c4f8
AA
6706 {
6707 jmp_buf_pc = value_as_address (arg_value);
6708 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6709 }
cdaa5b73
PA
6710 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6711 || !gdbarch_get_longjmp_target (gdbarch,
6712 frame, &jmp_buf_pc))
e2e4d78b 6713 {
1eb8556f
SM
6714 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6715 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
6716 keep_going (ecs);
6717 return;
e2e4d78b 6718 }
e2e4d78b 6719
cdaa5b73
PA
6720 /* Insert a breakpoint at resume address. */
6721 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6722 }
6723 else
6724 check_exception_resume (ecs, frame);
6725 keep_going (ecs);
6726 return;
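/* Illustrative scenario, using a made-up jmp_buf ENV and helper
   do_work ():

       if (setjmp (env) == 0)
         do_work ();          <-- "next" over this call
       ...
       longjmp (env, 1);      <-- reached somewhere inside do_work

   Stepping over do_work () would lose control once longjmp unwinds the
   stack past the caller's frame.  The momentary breakpoint planted
   above at the jmp_buf's target PC regains control right where setjmp
   returns for the second time, letting the "next" finish normally.  */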
e81a37f7 6727
cdaa5b73
PA
6728 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6729 {
6730 struct frame_info *init_frame;
e81a37f7 6731
cdaa5b73 6732 /* There are several cases to consider.
c906108c 6733
cdaa5b73
PA
6734 1. The initiating frame no longer exists. In this case we
6735 must stop, because the exception or longjmp has gone too
6736 far.
2c03e5be 6737
cdaa5b73
PA
6738 2. The initiating frame exists, and is the same as the
6739 current frame. We stop, because the exception or longjmp
6740 has been caught.
2c03e5be 6741
cdaa5b73
PA
6742 3. The initiating frame exists and is different from the
6743 current frame. This means the exception or longjmp has
6744 been caught beneath the initiating frame, so keep going.
c906108c 6745
cdaa5b73
PA
6746 4. longjmp breakpoint has been placed just to protect
6747 against stale dummy frames and user is not interested in
6748 stopping around longjmps. */
c5aa993b 6749
1eb8556f 6750 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 6751
cdaa5b73
PA
6752 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6753 != NULL);
6754 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6755
cdaa5b73
PA
6756 if (what.is_longjmp)
6757 {
b67a2c6f 6758 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6759
cdaa5b73 6760 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6761 {
cdaa5b73
PA
6762 /* Case 4. */
6763 keep_going (ecs);
6764 return;
e5ef252a 6765 }
cdaa5b73 6766 }
c5aa993b 6767
cdaa5b73 6768 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6769
cdaa5b73
PA
6770 if (init_frame)
6771 {
6772 struct frame_id current_id
6773 = get_frame_id (get_current_frame ());
6774 if (frame_id_eq (current_id,
6775 ecs->event_thread->initiating_frame))
6776 {
6777 /* Case 2. Fall through. */
6778 }
6779 else
6780 {
6781 /* Case 3. */
6782 keep_going (ecs);
6783 return;
6784 }
68f53502 6785 }
488f131b 6786
cdaa5b73
PA
6787 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6788 exists. */
6789 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6790
bdc36728 6791 end_stepping_range (ecs);
cdaa5b73
PA
6792 }
6793 return;
e5ef252a 6794
cdaa5b73 6795 case BPSTAT_WHAT_SINGLE:
1eb8556f 6796 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
6797 ecs->event_thread->stepping_over_breakpoint = 1;
6798 /* Still need to check other stuff, at least the case where we
6799 are stepping and step out of the right range. */
6800 break;
e5ef252a 6801
cdaa5b73 6802 case BPSTAT_WHAT_STEP_RESUME:
1eb8556f 6803 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
e5ef252a 6804
cdaa5b73
PA
6805 delete_step_resume_breakpoint (ecs->event_thread);
6806 if (ecs->event_thread->control.proceed_to_finish
6807 && execution_direction == EXEC_REVERSE)
6808 {
6809 struct thread_info *tp = ecs->event_thread;
6810
6811 /* We are finishing a function in reverse, and just hit the
6812 step-resume breakpoint at the start address of the
6813 function, and we're almost there -- just need to back up
6814 by one more single-step, which should take us back to the
6815 function call. */
6816 tp->control.step_range_start = tp->control.step_range_end = 1;
6817 keep_going (ecs);
e5ef252a 6818 return;
cdaa5b73
PA
6819 }
6820 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6821 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6822 && execution_direction == EXEC_REVERSE)
6823 {
6824 /* We are stepping over a function call in reverse, and just
6825 hit the step-resume breakpoint at the start address of
6826 the function. Go back to single-stepping, which should
6827 take us back to the function call. */
6828 ecs->event_thread->stepping_over_breakpoint = 1;
6829 keep_going (ecs);
6830 return;
6831 }
6832 break;
e5ef252a 6833
cdaa5b73 6834 case BPSTAT_WHAT_STOP_NOISY:
1eb8556f 6835 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
c4464ade 6836 stop_print_frame = true;
e5ef252a 6837
33bf4c5c 6838 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6839 whether a/the breakpoint is there when the thread is next
6840 resumed. */
6841 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6842
22bcd14b 6843 stop_waiting (ecs);
cdaa5b73 6844 return;
e5ef252a 6845
cdaa5b73 6846 case BPSTAT_WHAT_STOP_SILENT:
1eb8556f 6847 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
c4464ade 6848 stop_print_frame = false;
e5ef252a 6849
33bf4c5c 6850 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6851 whether a/the breakpoint is there when the thread is next
6852 resumed. */
6853 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6854 stop_waiting (ecs);
cdaa5b73
PA
6855 return;
6856
6857 case BPSTAT_WHAT_HP_STEP_RESUME:
1eb8556f 6858 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
6859
6860 delete_step_resume_breakpoint (ecs->event_thread);
6861 if (ecs->event_thread->step_after_step_resume_breakpoint)
6862 {
6863 /* Back when the step-resume breakpoint was inserted, we
6864 were trying to single-step off a breakpoint. Go back to
6865 doing that. */
6866 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6867 ecs->event_thread->stepping_over_breakpoint = 1;
6868 keep_going (ecs);
6869 return;
e5ef252a 6870 }
cdaa5b73
PA
6871 break;
6872
6873 case BPSTAT_WHAT_KEEP_CHECKING:
6874 break;
e5ef252a 6875 }
c906108c 6876
af48d08f
PA
6877 /* If we stepped a permanent breakpoint and we had a high priority
6878 step-resume breakpoint for the address we stepped, but we didn't
6879 hit it, then we must have stepped into the signal handler. The
6880 step-resume was only necessary to catch the case of _not_
6881 stepping into the handler, so delete it, and fall through to
6882 checking whether the step finished. */
6883 if (ecs->event_thread->stepped_breakpoint)
6884 {
6885 struct breakpoint *sr_bp
6886 = ecs->event_thread->control.step_resume_breakpoint;
6887
8d707a12
PA
6888 if (sr_bp != NULL
6889 && sr_bp->loc->permanent
af48d08f
PA
6890 && sr_bp->type == bp_hp_step_resume
6891 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6892 {
1eb8556f 6893 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
6894 delete_step_resume_breakpoint (ecs->event_thread);
6895 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6896 }
6897 }
6898
cdaa5b73
PA
6899 /* We come here if we hit a breakpoint but should not stop for it.
6900 Possibly we also were stepping and should stop for that. So fall
6901 through and test for stepping. But, if not stepping, do not
6902 stop. */
c906108c 6903
a7212384
UW
6904 /* In all-stop mode, if we're currently stepping but have stopped in
6905 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6906 if (switch_back_to_stepped_thread (ecs))
6907 return;
776f04fa 6908
8358c15c 6909 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6910 {
1eb8556f 6911 infrun_debug_printf ("step-resume breakpoint is inserted");
527159b7 6912
488f131b 6913 /* Having a step-resume breakpoint overrides anything
dda83cd7
SM
6914 else having to do with stepping commands until
6915 that breakpoint is reached. */
488f131b
JB
6916 keep_going (ecs);
6917 return;
6918 }
c5aa993b 6919
16c381f0 6920 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6921 {
1eb8556f 6922 infrun_debug_printf ("no stepping, continue");
488f131b 6923 /* Likewise if we aren't even stepping. */
488f131b
JB
6924 keep_going (ecs);
6925 return;
6926 }
c5aa993b 6927
4b7703ad
JB
6928 /* Re-fetch current thread's frame in case the code above caused
6929 the frame cache to be re-initialized, making our FRAME variable
6930 a dangling pointer. */
6931 frame = get_current_frame ();
628fe4e4 6932 gdbarch = get_frame_arch (frame);
7e324e48 6933 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6934
488f131b 6935 /* If stepping through a line, keep going if still within it.
c906108c 6936
488f131b
JB
6937 Note that step_range_end is the address of the first instruction
6938 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6939 within it!
6940
6941 Note also that during reverse execution, we may be stepping
6942 through a function epilogue and therefore must detect when
6943 the current-frame changes in the middle of a line. */
6944
f2ffa92b
PA
6945 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6946 ecs->event_thread)
31410e84 6947 && (execution_direction != EXEC_REVERSE
388a8562 6948 || frame_id_eq (get_frame_id (frame),
16c381f0 6949 ecs->event_thread->control.step_frame_id)))
488f131b 6950 {
1eb8556f
SM
6951 infrun_debug_printf
6952 ("stepping inside range [%s-%s]",
6953 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6954 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6955
c1e36e3e
PA
6956 /* Tentatively re-enable range stepping; `resume' disables it if
6957 necessary (e.g., if we're stepping over a breakpoint or we
6958 have software watchpoints). */
6959 ecs->event_thread->control.may_range_step = 1;
6960
b2175913
MS
6961 /* When stepping backward, stop at beginning of line range
6962 (unless it's the function entry point, in which case
6963 keep going back to the call point). */
f2ffa92b 6964 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6965 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6966 && stop_pc != ecs->stop_func_start
6967 && execution_direction == EXEC_REVERSE)
bdc36728 6968 end_stepping_range (ecs);
b2175913
MS
6969 else
6970 keep_going (ecs);
6971
488f131b
JB
6972 return;
6973 }
c5aa993b 6974
488f131b 6975 /* We stepped out of the stepping range. */
c906108c 6976
488f131b 6977 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6978 loader dynamic symbol resolution code...
6979
6980 EXEC_FORWARD: we keep on single stepping until we exit the run
6981 time loader code and reach the callee's address.
6982
6983 EXEC_REVERSE: we've already executed the callee (backward), and
6984 the runtime loader code is handled just like any other
6985 undebuggable function call. Now we need only keep stepping
6986 backward through the trampoline code, and that's handled further
6987 down, so there is nothing for us to do here. */
6988
6989 if (execution_direction != EXEC_REVERSE
16c381f0 6990 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6991 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6992 {
4c8c40e6 6993 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6994 gdbarch_skip_solib_resolver (gdbarch,
6995 ecs->event_thread->suspend.stop_pc);
c906108c 6996
1eb8556f 6997 infrun_debug_printf ("stepped into dynsym resolve code");
527159b7 6998
488f131b
JB
6999 if (pc_after_resolver)
7000 {
7001 /* Set up a step-resume breakpoint at the address
7002 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 7003 symtab_and_line sr_sal;
488f131b 7004 sr_sal.pc = pc_after_resolver;
6c95b8df 7005 sr_sal.pspace = get_frame_program_space (frame);
488f131b 7006
a6d9a66e
UW
7007 insert_step_resume_breakpoint_at_sal (gdbarch,
7008 sr_sal, null_frame_id);
c5aa993b 7009 }
c906108c 7010
488f131b
JB
7011 keep_going (ecs);
7012 return;
7013 }
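/* Typical ELF/glibc illustration: the first "step" into a
   shared-library call such as printf () actually lands in the dynamic
   linker's lazy resolver, reached through the PLT, rather than in
   printf itself.  gdbarch_skip_solib_resolver reports where control
   will end up once resolution completes, so the step-resume breakpoint
   set above carries the user straight to the callee instead of
   single-stepping through ld.so internals.  */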
c906108c 7014
1d509aa6
MM
7015 /* Step through an indirect branch thunk. */
7016 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
7017 && gdbarch_in_indirect_branch_thunk (gdbarch,
7018 ecs->event_thread->suspend.stop_pc))
1d509aa6 7019 {
1eb8556f 7020 infrun_debug_printf ("stepped into indirect branch thunk");
1d509aa6
MM
7021 keep_going (ecs);
7022 return;
7023 }
7024
16c381f0
JK
7025 if (ecs->event_thread->control.step_range_end != 1
7026 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7027 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 7028 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 7029 {
1eb8556f 7030 infrun_debug_printf ("stepped into signal trampoline");
42edda50 7031 /* The inferior, while doing a "step" or "next", has ended up in
dda83cd7
SM
7032 a signal trampoline (either by a signal being delivered or by
7033 the signal handler returning). Just single-step until the
7034 inferior leaves the trampoline (either by calling the handler
7035 or returning). */
488f131b
JB
7036 keep_going (ecs);
7037 return;
7038 }
c906108c 7039
14132e89
MR
7040 /* If we're in the return path from a shared library trampoline,
7041 we want to proceed through the trampoline when stepping. */
7042 /* macro/2012-04-25: This needs to come before the subroutine
7043 call check below as on some targets return trampolines look
7044 like subroutine calls (MIPS16 return thunks). */
7045 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
7046 ecs->event_thread->suspend.stop_pc,
7047 ecs->stop_func_name)
14132e89
MR
7048 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7049 {
7050 /* Determine where this trampoline returns. */
f2ffa92b
PA
7051 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7052 CORE_ADDR real_stop_pc
7053 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 7054
1eb8556f 7055 infrun_debug_printf ("stepped into solib return tramp");
14132e89
MR
7056
7057 /* Only proceed through if we know where it's going. */
7058 if (real_stop_pc)
7059 {
7060 /* And put the step-breakpoint there and go until there. */
51abb421 7061 symtab_and_line sr_sal;
14132e89
MR
7062 sr_sal.pc = real_stop_pc;
7063 sr_sal.section = find_pc_overlay (sr_sal.pc);
7064 sr_sal.pspace = get_frame_program_space (frame);
7065
7066 /* Do not specify what the fp should be when we stop since
7067 on some machines the prologue is where the new fp value
7068 is established. */
7069 insert_step_resume_breakpoint_at_sal (gdbarch,
7070 sr_sal, null_frame_id);
7071
7072 /* Restart without fiddling with the step ranges or
7073 other state. */
7074 keep_going (ecs);
7075 return;
7076 }
7077 }
7078
c17eaafe
DJ
7079 /* Check for subroutine calls. The check for the current frame
7080 equalling the step ID is not necessary - the check of the
7081 previous frame's ID is sufficient - but it is a common case and
7082 cheaper than checking the previous frame's ID.
14e60db5
DJ
7083
7084 NOTE: frame_id_eq will never report two invalid frame IDs as
7085 being equal, so to get into this block, both the current and
7086 previous frame must have valid frame IDs. */
005ca36a
JB
7087 /* The outer_frame_id check is a heuristic to detect stepping
7088 through startup code. If we step over an instruction which
7089 sets the stack pointer from an invalid value to a valid value,
7090 we may detect that as a subroutine call from the mythical
7091 "outermost" function. This could be fixed by marking
7092 outermost frames as !stack_p,code_p,special_p. Then the
7093 initial outermost frame, before sp was valid, would
ce6cca6d 7094 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 7095 for more. */
edb3359d 7096 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 7097 ecs->event_thread->control.step_stack_frame_id)
005ca36a 7098 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
7099 ecs->event_thread->control.step_stack_frame_id)
7100 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 7101 outer_frame_id)
885eeb5b 7102 || (ecs->event_thread->control.step_start_function
f2ffa92b 7103 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 7104 {
f2ffa92b 7105 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 7106 CORE_ADDR real_stop_pc;
8fb3e588 7107
1eb8556f 7108 infrun_debug_printf ("stepped into subroutine");
527159b7 7109
b7a084be 7110 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
7111 {
7112 /* I presume that step_over_calls is only 0 when we're
7113 supposed to be stepping at the assembly language level
7114 ("stepi"). Just stop. */
388a8562 7115 /* And this works the same backward as frontward. MVS */
bdc36728 7116 end_stepping_range (ecs);
95918acb
AC
7117 return;
7118 }
8fb3e588 7119
388a8562
MS
7120 /* Reverse stepping through solib trampolines. */
7121
7122 if (execution_direction == EXEC_REVERSE
16c381f0 7123 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
7124 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7125 || (ecs->stop_func_start == 0
7126 && in_solib_dynsym_resolve_code (stop_pc))))
7127 {
7128 /* Any solib trampoline code can be handled in reverse
7129 by simply continuing to single-step. We have already
7130 executed the solib function (backwards), and a few
7131 steps will take us back through the trampoline to the
7132 caller. */
7133 keep_going (ecs);
7134 return;
7135 }
7136
16c381f0 7137 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 7138 {
b2175913
MS
7139 /* We're doing a "next".
7140
7141 Normal (forward) execution: set a breakpoint at the
7142 callee's return address (the address at which the caller
7143 will resume).
7144
7145 Reverse (backward) execution: set the step-resume
7146 breakpoint at the start of the function that we just
7147 stepped into (backwards), and continue to there. When we
6130d0b7 7148 get there, we'll need to single-step back to the caller. */
b2175913
MS
7149
7150 if (execution_direction == EXEC_REVERSE)
7151 {
acf9414f
JK
7152 /* If we're already at the start of the function, we've either
7153 just stepped backward into a single instruction function,
7154 or stepped back out of a signal handler to the first instruction
7155 of the function. Just keep going, which will single-step back
7156 to the caller. */
58c48e72 7157 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 7158 {
acf9414f 7159 /* Normal function call return (static or dynamic). */
51abb421 7160 symtab_and_line sr_sal;
acf9414f
JK
7161 sr_sal.pc = ecs->stop_func_start;
7162 sr_sal.pspace = get_frame_program_space (frame);
7163 insert_step_resume_breakpoint_at_sal (gdbarch,
7164 sr_sal, null_frame_id);
7165 }
b2175913
MS
7166 }
7167 else
568d6575 7168 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7169
8567c30f
AC
7170 keep_going (ecs);
7171 return;
7172 }
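/* For instance, if a "next" over a made-up call

       int r = compute (x);

   finds itself inside compute (), the forward path plants the
   step-resume breakpoint at the return address in the caller and lets
   the thread run freely to it; in reverse, the breakpoint goes at
   compute's entry instead, and one more backward single-step from
   there lands back on the call itself.  */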
a53c66de 7173
95918acb 7174 /* If we are in a function call trampoline (a stub between the
dda83cd7
SM
7175 calling routine and the real function), locate the real
7176 function. That's what tells us (a) whether we want to step
7177 into it at all, and (b) what prologue we want to run to the
7178 end of, if we do step into it. */
568d6575 7179 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 7180 if (real_stop_pc == 0)
568d6575 7181 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
7182 if (real_stop_pc != 0)
7183 ecs->stop_func_start = real_stop_pc;
8fb3e588 7184
db5f024e 7185 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 7186 {
51abb421 7187 symtab_and_line sr_sal;
1b2bfbb9 7188 sr_sal.pc = ecs->stop_func_start;
6c95b8df 7189 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 7190
a6d9a66e
UW
7191 insert_step_resume_breakpoint_at_sal (gdbarch,
7192 sr_sal, null_frame_id);
8fb3e588
AC
7193 keep_going (ecs);
7194 return;
1b2bfbb9
RC
7195 }
7196
95918acb 7197 /* If we have line number information for the function we are
1bfeeb0f
JL
7198 thinking of stepping into and the function isn't on the skip
7199 list, step into it.
95918acb 7200
dda83cd7
SM
7201 If there are several symtabs at that PC (e.g. with include
7202 files), we just want to know whether *any* of them have line
7203 numbers. find_pc_line handles this. */
95918acb
AC
7204 {
7205 struct symtab_and_line tmp_sal;
8fb3e588 7206
95918acb 7207 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 7208 if (tmp_sal.line != 0
85817405 7209 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
7210 tmp_sal)
7211 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 7212 {
b2175913 7213 if (execution_direction == EXEC_REVERSE)
568d6575 7214 handle_step_into_function_backward (gdbarch, ecs);
b2175913 7215 else
568d6575 7216 handle_step_into_function (gdbarch, ecs);
95918acb
AC
7217 return;
7218 }
7219 }
7220
7221 /* If we have no line number and the step-stop-if-no-debug is
dda83cd7
SM
7222 set, we stop the step so that the user has a chance to switch
7223 to assembly mode. */
16c381f0 7224 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 7225 && step_stop_if_no_debug)
95918acb 7226 {
bdc36728 7227 end_stepping_range (ecs);
95918acb
AC
7228 return;
7229 }
7230
b2175913
MS
7231 if (execution_direction == EXEC_REVERSE)
7232 {
acf9414f
JK
7233 /* If we're already at the start of the function, we've either just
7234 stepped backward into a single instruction function without line
7235 number info, or stepped back out of a signal handler to the first
7236 instruction of the function without line number info. Just keep
7237 going, which will single-step back to the caller. */
7238 if (ecs->stop_func_start != stop_pc)
7239 {
7240 /* Set a breakpoint at callee's start address.
7241 From there we can step once and be back in the caller. */
51abb421 7242 symtab_and_line sr_sal;
acf9414f
JK
7243 sr_sal.pc = ecs->stop_func_start;
7244 sr_sal.pspace = get_frame_program_space (frame);
7245 insert_step_resume_breakpoint_at_sal (gdbarch,
7246 sr_sal, null_frame_id);
7247 }
b2175913
MS
7248 }
7249 else
7250 /* Set a breakpoint at callee's return address (the address
7251 at which the caller will resume). */
568d6575 7252 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7253
95918acb 7254 keep_going (ecs);
488f131b 7255 return;
488f131b 7256 }
c906108c 7257
fdd654f3
MS
7258 /* Reverse stepping through solib trampolines. */
7259
7260 if (execution_direction == EXEC_REVERSE
16c381f0 7261 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 7262 {
f2ffa92b
PA
7263 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7264
fdd654f3
MS
7265 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7266 || (ecs->stop_func_start == 0
7267 && in_solib_dynsym_resolve_code (stop_pc)))
7268 {
7269 /* Any solib trampoline code can be handled in reverse
7270 by simply continuing to single-step. We have already
7271 executed the solib function (backwards), and a few
7272 steps will take us back through the trampoline to the
7273 caller. */
7274 keep_going (ecs);
7275 return;
7276 }
7277 else if (in_solib_dynsym_resolve_code (stop_pc))
7278 {
7279 /* Stepped backward into the solib dynsym resolver.
7280 Set a breakpoint at its start and continue, then
7281 one more step will take us out. */
51abb421 7282 symtab_and_line sr_sal;
fdd654f3 7283 sr_sal.pc = ecs->stop_func_start;
9d1807c3 7284 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
7285 insert_step_resume_breakpoint_at_sal (gdbarch,
7286 sr_sal, null_frame_id);
7287 keep_going (ecs);
7288 return;
7289 }
7290 }
7291
8c95582d
AB
7292 /* This always returns the sal for the inner-most frame when we are in a
7293 stack of inlined frames, even if GDB actually believes that it is in a
7294 more outer frame. This is checked for below by calls to
7295 inline_skipped_frames. */
f2ffa92b 7296 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 7297
1b2bfbb9
RC
7298 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7299 the trampoline processing logic, however, there are some trampolines
7300 that have no names, so we should do trampoline handling first. */
16c381f0 7301 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 7302 && ecs->stop_func_name == NULL
2afb61aa 7303 && stop_pc_sal.line == 0)
1b2bfbb9 7304 {
1eb8556f 7305 infrun_debug_printf ("stepped into undebuggable function");
527159b7 7306
1b2bfbb9 7307 /* The inferior just stepped into, or returned to, an
dda83cd7
SM
7308 undebuggable function (where there is no debugging information
7309 and no line number corresponding to the address where the
7310 inferior stopped). Since we want to skip this kind of code,
7311 we keep going until the inferior returns from this
7312 function - unless the user has asked us not to (via
7313 set step-mode) or we no longer know how to get back
7314 to the call site. */
14e60db5 7315 if (step_stop_if_no_debug
c7ce8faa 7316 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
7317 {
7318 /* If we have no line number and the step-stop-if-no-debug
7319 is set, we stop the step so that the user has a chance to
7320 switch in assembly mode. */
bdc36728 7321 end_stepping_range (ecs);
1b2bfbb9
RC
7322 return;
7323 }
7324 else
7325 {
7326 /* Set a breakpoint at callee's return address (the address
7327 at which the caller will resume). */
568d6575 7328 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
7329 keep_going (ecs);
7330 return;
7331 }
7332 }
7333
16c381f0 7334 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
7335 {
7336 /* It is stepi or nexti. We always want to stop stepping after
dda83cd7 7337 one instruction. */
1eb8556f 7338 infrun_debug_printf ("stepi/nexti");
bdc36728 7339 end_stepping_range (ecs);
1b2bfbb9
RC
7340 return;
7341 }
7342
2afb61aa 7343 if (stop_pc_sal.line == 0)
488f131b
JB
7344 {
7345 /* We have no line number information. That means we stop
dda83cd7
SM
7346 stepping (does this always happen right after one instruction,
7347 when we do "s" in a function with no line numbers,
7348 or can this happen as a result of a return or longjmp?). */
1eb8556f 7349 infrun_debug_printf ("no line number info");
bdc36728 7350 end_stepping_range (ecs);
488f131b
JB
7351 return;
7352 }
c906108c 7353
edb3359d
DJ
7354 /* Look for "calls" to inlined functions, part one. If the inline
7355 frame machinery detected some skipped call sites, we have entered
7356 a new inline function. */
7357
7358 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7359 ecs->event_thread->control.step_frame_id)
00431a78 7360 && inline_skipped_frames (ecs->event_thread))
edb3359d 7361 {
1eb8556f 7362 infrun_debug_printf ("stepped into inlined function");
edb3359d 7363
51abb421 7364 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 7365
16c381f0 7366 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
7367 {
7368 /* For "step", we're going to stop. But if the call site
7369 for this inlined function is on the same source line as
7370 we were previously stepping, go down into the function
7371 first. Otherwise stop at the call site. */
7372
7373 if (call_sal.line == ecs->event_thread->current_line
7374 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7375 {
7376 step_into_inline_frame (ecs->event_thread);
7377 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7378 {
7379 keep_going (ecs);
7380 return;
7381 }
7382 }
edb3359d 7383
bdc36728 7384 end_stepping_range (ecs);
edb3359d
DJ
7385 return;
7386 }
7387 else
7388 {
7389 /* For "next", we should stop at the call site if it is on a
7390 different source line. Otherwise continue through the
7391 inlined function. */
7392 if (call_sal.line == ecs->event_thread->current_line
7393 && call_sal.symtab == ecs->event_thread->current_symtab)
7394 keep_going (ecs);
7395 else
bdc36728 7396 end_stepping_range (ecs);
edb3359d
DJ
7397 return;
7398 }
7399 }
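/* Illustration, using a made-up inline callee compiled with
   optimization:

       static inline int sq (int v) { return v * v; }
       ...
       y = sq (x);

   There may be no call instruction at all; the inline frame machinery
   synthesizes the "call" once the PC reaches sq's inlined body.  Per
   the logic above, "step" descends into sq only when that synthetic
   call site shares the source line being stepped, while "next" stops
   at the call site only when it is on a different line.  */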
7400
7401 /* Look for "calls" to inlined functions, part two. If we are still
7402 in the same real function we were stepping through, but we have
7403 to go further up to find the exact frame ID, we are stepping
7404 through a more inlined call beyond its call site. */
7405
7406 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7407 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7408 ecs->event_thread->control.step_frame_id)
edb3359d 7409 && stepped_in_from (get_current_frame (),
16c381f0 7410 ecs->event_thread->control.step_frame_id))
edb3359d 7411 {
1eb8556f 7412 infrun_debug_printf ("stepping through inlined function");
edb3359d 7413
4a4c04f1
BE
7414 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7415 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7416 keep_going (ecs);
7417 else
bdc36728 7418 end_stepping_range (ecs);
edb3359d
DJ
7419 return;
7420 }
7421
8c95582d 7422 bool refresh_step_info = true;
f2ffa92b 7423 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea 7424 && (ecs->event_thread->current_line != stop_pc_sal.line
24b21115 7425 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7426 {
ebde6f2d
TV
7427 /* We are at a different line. */
7428
8c95582d
AB
7429 if (stop_pc_sal.is_stmt)
7430 {
ebde6f2d
TV
7431 /* We are at the start of a statement.
7432
7433 So stop. Note that we don't stop if we step into the middle of a
7434 statement. That is said to make things like for (;;) statements
7435 work better. */
1eb8556f 7436 infrun_debug_printf ("stepped to a different line");
8c95582d
AB
7437 end_stepping_range (ecs);
7438 return;
7439 }
7440 else if (frame_id_eq (get_frame_id (get_current_frame ()),
ebde6f2d 7441 ecs->event_thread->control.step_frame_id))
8c95582d 7442 {
ebde6f2d
TV
7443 /* We are not at the start of a statement, and we have not changed
7444 frame.
7445
7446 We ignore this line table entry, and continue stepping forward,
8c95582d
AB
7447 looking for a better place to stop. */
7448 refresh_step_info = false;
1eb8556f
SM
7449 infrun_debug_printf ("stepped to a different line, but "
7450 "it's not the start of a statement");
8c95582d 7451 }
ebde6f2d
TV
7452 else
7453 {
 7454 /* We are not at the start of a statement, and we have changed frame.
7455
7456 We ignore this line table entry, and continue stepping forward,
7457 looking for a better place to stop. Keep refresh_step_info at
7458 true to note that the frame has changed, but ignore the line
7459 number to make sure we don't ignore a subsequent entry with the
7460 same line number. */
7461 stop_pc_sal.line = 0;
7462 infrun_debug_printf ("stepped to a different frame, but "
7463 "it's not the start of a statement");
7464 }
488f131b 7465 }
c906108c 7466
488f131b 7467 /* We aren't done stepping.
c906108c 7468
488f131b
JB
7469 Optimize by setting the stepping range to the line.
7470 (We might not be in the original line, but if we entered a
7471 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7472 things like for(;;) statements work better.)
7473
7474 If we entered a SAL that indicates a non-statement line table entry,
7475 then we update the stepping range, but we don't update the step info,
7476 which includes things like the line number we are stepping away from.
7477 This means we will stop when we find a line table entry that is marked
7478 as is-statement, even if it matches the non-statement one we just
7479 stepped into. */
c906108c 7480
16c381f0
JK
7481 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7482 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7483 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7484 if (refresh_step_info)
7485 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7486
1eb8556f 7487 infrun_debug_printf ("keep going");
488f131b 7488 keep_going (ecs);
104c1213
JM
7489}
7490
408f6686
PA
7491static bool restart_stepped_thread (process_stratum_target *resume_target,
7492 ptid_t resume_ptid);
7493
c447ac0b
PA
7494/* In all-stop mode, if we're currently stepping but have stopped in
7495 some other thread, we may need to switch back to the stepped
 7496 thread. Returns true if we set the inferior running, false if we left
7497 it stopped (and the event needs further processing). */
7498
c4464ade 7499static bool
c447ac0b
PA
7500switch_back_to_stepped_thread (struct execution_control_state *ecs)
7501{
fbea99ea 7502 if (!target_is_non_stop_p ())
c447ac0b 7503 {
99619bea
PA
7504 /* If any thread is blocked on some internal breakpoint, and we
7505 simply need to step over that breakpoint to get it going
7506 again, do that first. */
7507
7508 /* However, if we see an event for the stepping thread, then we
7509 know all other threads have been moved past their breakpoints
7510 already. Let the caller check whether the step is finished,
7511 etc., before deciding to move it past a breakpoint. */
7512 if (ecs->event_thread->control.step_range_end != 0)
c4464ade 7513 return false;
99619bea
PA
7514
7515 /* Check if the current thread is blocked on an incomplete
7516 step-over, interrupted by a random signal. */
7517 if (ecs->event_thread->control.trap_expected
7518 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7519 {
1eb8556f
SM
7520 infrun_debug_printf
7521 ("need to finish step-over of [%s]",
7522 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea 7523 keep_going (ecs);
c4464ade 7524 return true;
99619bea 7525 }
2adfaa28 7526
99619bea
PA
7527 /* Check if the current thread is blocked by a single-step
7528 breakpoint of another thread. */
7529 if (ecs->hit_singlestep_breakpoint)
7530 {
1eb8556f
SM
7531 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7532 target_pid_to_str (ecs->ptid).c_str ());
99619bea 7533 keep_going (ecs);
c4464ade 7534 return true;
99619bea
PA
7535 }
7536
4d9d9d04
PA
7537 /* If this thread needs yet another step-over (e.g., stepping
7538 through a delay slot), do it first before moving on to
7539 another thread. */
7540 if (thread_still_needs_step_over (ecs->event_thread))
7541 {
1eb8556f
SM
7542 infrun_debug_printf
7543 ("thread [%s] still needs step-over",
7544 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04 7545 keep_going (ecs);
c4464ade 7546 return true;
4d9d9d04 7547 }
70509625 7548
483805cf
PA
7549 /* If scheduler locking applies even if not stepping, there's no
7550 need to walk over threads. Above we've checked whether the
7551 current thread is stepping. If some other thread not the
7552 event thread is stepping, then it must be that scheduler
7553 locking is not in effect. */
856e7dd6 7554 if (schedlock_applies (ecs->event_thread))
c4464ade 7555 return false;
483805cf 7556
4d9d9d04
PA
7557 /* Otherwise, we no longer expect a trap in the current thread.
7558 Clear the trap_expected flag before switching back -- this is
7559 what keep_going does as well, if we call it. */
7560 ecs->event_thread->control.trap_expected = 0;
7561
7562 /* Likewise, clear the signal if it should not be passed. */
7563 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7564 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7565
408f6686 7566 if (restart_stepped_thread (ecs->target, ecs->ptid))
4d9d9d04
PA
7567 {
7568 prepare_to_wait (ecs);
c4464ade 7569 return true;
4d9d9d04
PA
7570 }
7571
408f6686
PA
7572 switch_to_thread (ecs->event_thread);
7573 }
4d9d9d04 7574
408f6686
PA
7575 return false;
7576}
f3f8ece4 7577
408f6686
PA
7578/* Look for the thread that was stepping, and resume it.
7579 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7580 is resuming. Return true if a thread was started, false
7581 otherwise. */
483805cf 7582
408f6686
PA
7583static bool
7584restart_stepped_thread (process_stratum_target *resume_target,
7585 ptid_t resume_ptid)
7586{
7587 /* Do all pending step-overs before actually proceeding with
7588 step/next/etc. */
7589 if (start_step_over ())
7590 return true;
483805cf 7591
408f6686
PA
7592 for (thread_info *tp : all_threads_safe ())
7593 {
7594 if (tp->state == THREAD_EXITED)
7595 continue;
7596
7597 if (tp->suspend.waitstatus_pending_p)
7598 continue;
483805cf 7599
408f6686
PA
7600 /* Ignore threads of processes the caller is not
7601 resuming. */
7602 if (!sched_multi
7603 && (tp->inf->process_target () != resume_target
7604 || tp->inf->pid != resume_ptid.pid ()))
7605 continue;
483805cf 7606
408f6686
PA
7607 if (tp->control.trap_expected)
7608 {
7609 infrun_debug_printf ("switching back to stepped thread (step-over)");
483805cf 7610
408f6686
PA
7611 if (keep_going_stepped_thread (tp))
7612 return true;
99619bea 7613 }
408f6686
PA
7614 }
7615
7616 for (thread_info *tp : all_threads_safe ())
7617 {
7618 if (tp->state == THREAD_EXITED)
7619 continue;
7620
7621 if (tp->suspend.waitstatus_pending_p)
7622 continue;
99619bea 7623
408f6686
PA
7624 /* Ignore threads of processes the caller is not
7625 resuming. */
7626 if (!sched_multi
7627 && (tp->inf->process_target () != resume_target
7628 || tp->inf->pid != resume_ptid.pid ()))
7629 continue;
7630
7631 /* Did we find the stepping thread? */
7632 if (tp->control.step_range_end)
99619bea 7633 {
408f6686 7634 infrun_debug_printf ("switching back to stepped thread (stepping)");
c447ac0b 7635
408f6686
PA
7636 if (keep_going_stepped_thread (tp))
7637 return true;
2ac7589c
PA
7638 }
7639 }
2adfaa28 7640
c4464ade 7641 return false;
2ac7589c 7642}
2adfaa28 7643
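/* Note the ordering in restart_stepped_thread above: threads that
   were interrupted in the middle of a step-over (trap_expected) are
   restarted before the thread that owns the user-visible step range,
   so pending breakpoint step-overs are finished before the step
   itself is resumed.  */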
408f6686
PA
7644/* See infrun.h. */
7645
7646void
7647restart_after_all_stop_detach (process_stratum_target *proc_target)
7648{
7649 /* Note we don't check target_is_non_stop_p() here, because the
7650 current inferior may no longer have a process_stratum target
7651 pushed, as we just detached. */
7652
 7653 /* See if we have a THREAD_RUNNING thread that needs to be
 7654 re-resumed. If we have any thread that is already executing,
 7655 then we don't need to resume the target -- it has already been
7656 resumed. With the remote target (in all-stop), it's even
7657 impossible to issue another resumption if the target is already
7658 resumed, until the target reports a stop. */
7659 for (thread_info *thr : all_threads (proc_target))
7660 {
7661 if (thr->state != THREAD_RUNNING)
7662 continue;
7663
7664 /* If we have any thread that is already executing, then we
 7665 don't need to resume the target -- it has already been
7666 resumed. */
7667 if (thr->executing)
7668 return;
7669
7670 /* If we have a pending event to process, skip resuming the
7671 target and go straight to processing it. */
7672 if (thr->resumed && thr->suspend.waitstatus_pending_p)
7673 return;
7674 }
7675
7676 /* Alright, we need to re-resume the target. If a thread was
7677 stepping, we need to restart it stepping. */
7678 if (restart_stepped_thread (proc_target, minus_one_ptid))
7679 return;
7680
7681 /* Otherwise, find the first THREAD_RUNNING thread and resume
7682 it. */
7683 for (thread_info *thr : all_threads (proc_target))
7684 {
7685 if (thr->state != THREAD_RUNNING)
7686 continue;
7687
7688 execution_control_state ecs;
7689 reset_ecs (&ecs, thr);
7690 switch_to_thread (thr);
7691 keep_going (&ecs);
7692 return;
7693 }
7694}
7695
2ac7589c
PA
7696/* Set a previously stepped thread back to stepping. Returns true on
7697 success, false if the resume is not possible (e.g., the thread
7698 vanished). */
7699
c4464ade 7700static bool
2ac7589c
PA
7701keep_going_stepped_thread (struct thread_info *tp)
7702{
7703 struct frame_info *frame;
2ac7589c
PA
7704 struct execution_control_state ecss;
7705 struct execution_control_state *ecs = &ecss;
2adfaa28 7706
2ac7589c
PA
7707 /* If the stepping thread exited, then don't try to switch back and
7708 resume it, which could fail in several different ways depending
7709 on the target. Instead, just keep going.
2adfaa28 7710
2ac7589c
PA
7711 We can find a stepping dead thread in the thread list in two
7712 cases:
2adfaa28 7713
2ac7589c
PA
7714 - The target supports thread exit events, and when the target
7715 tries to delete the thread from the thread list, inferior_ptid
7716 pointed at the exiting thread. In such case, calling
7717 delete_thread does not really remove the thread from the list;
7718 instead, the thread is left listed, with 'exited' state.
64ce06e4 7719
2ac7589c
PA
7720 - The target's debug interface does not support thread exit
7721 events, and so we have no idea whatsoever if the previously
7722 stepping thread is still alive. For that reason, we need to
7723 synchronously query the target now. */
2adfaa28 7724
00431a78 7725 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 7726 {
1eb8556f
SM
7727 infrun_debug_printf ("not resuming previously stepped thread, it has "
7728 "vanished");
2ac7589c 7729
00431a78 7730 delete_thread (tp);
c4464ade 7731 return false;
c447ac0b 7732 }
2ac7589c 7733
1eb8556f 7734 infrun_debug_printf ("resuming previously stepped thread");
2ac7589c
PA
7735
7736 reset_ecs (ecs, tp);
00431a78 7737 switch_to_thread (tp);
2ac7589c 7738
f2ffa92b 7739 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7740 frame = get_current_frame ();
2ac7589c
PA
7741
7742 /* If the PC of the thread we were trying to single-step has
7743 changed, then that thread has trapped or been signaled, but the
7744 event has not been reported to GDB yet. Re-poll the target
7745 looking for this particular thread's event (i.e. temporarily
7746 enable schedlock) by:
7747
7748 - setting a break at the current PC
7749 - resuming that particular thread, only (by setting trap
7750 expected)
7751
 7752 This prevents us from continuously moving the single-step breakpoint
7753 forward, one instruction at a time, overstepping. */
7754
f2ffa92b 7755 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7756 {
7757 ptid_t resume_ptid;
7758
1eb8556f
SM
7759 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7760 paddress (target_gdbarch (), tp->prev_pc),
7761 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7762
7763 /* Clear the info of the previous step-over, as it's no longer
7764 valid (if the thread was trying to step over a breakpoint, it
7765 has already succeeded). It's what keep_going would do too,
7766 if we called it. Do this before trying to insert the sss
7767 breakpoint, otherwise if we were previously trying to step
7768 over this exact address in another thread, the breakpoint is
7769 skipped. */
7770 clear_step_over_info ();
7771 tp->control.trap_expected = 0;
7772
7773 insert_single_step_breakpoint (get_frame_arch (frame),
7774 get_frame_address_space (frame),
f2ffa92b 7775 tp->suspend.stop_pc);
2ac7589c 7776
719546c4 7777 tp->resumed = true;
fbea99ea 7778 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
c4464ade 7779 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2ac7589c
PA
7780 }
7781 else
7782 {
1eb8556f 7783 infrun_debug_printf ("expected thread still hasn't advanced");
2ac7589c
PA
7784
7785 keep_going_pass_signal (ecs);
7786 }
c4464ade
SM
7787
7788 return true;
c447ac0b
PA
7789}
7790
8b061563
PA
7791/* Is thread TP in the middle of (software or hardware)
7792 single-stepping? (Note the result of this function must never be
7793 passed directly as target_resume's STEP parameter.) */
104c1213 7794
c4464ade 7795static bool
b3444185 7796currently_stepping (struct thread_info *tp)
a7212384 7797{
8358c15c
JK
7798 return ((tp->control.step_range_end
7799 && tp->control.step_resume_breakpoint == NULL)
7800 || tp->control.trap_expected
af48d08f 7801 || tp->stepped_breakpoint
8358c15c 7802 || bpstat_should_step ());
a7212384
UW
7803}
7804
b2175913
MS
7805/* Inferior has stepped into a subroutine call with source code that
 7806 we should not step over. Step to the first line of code in
7807 it. */
c2c6d25f
JM
7808
7809static void
568d6575
UW
7810handle_step_into_function (struct gdbarch *gdbarch,
7811 struct execution_control_state *ecs)
c2c6d25f 7812{
7e324e48
GB
7813 fill_in_stop_func (gdbarch, ecs);
7814
f2ffa92b
PA
7815 compunit_symtab *cust
7816 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7817 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7818 ecs->stop_func_start
7819 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7820
51abb421 7821 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7822 /* Use the step_resume_break to step until the end of the prologue,
7823 even if that involves jumps (as it seems to on the vax under
7824 4.2). */
7825 /* If the prologue ends in the middle of a source line, continue to
7826 the end of that source line (if it is still within the function).
7827 Otherwise, just go to end of prologue. */
2afb61aa
PA
7828 if (stop_func_sal.end
7829 && stop_func_sal.pc != ecs->stop_func_start
7830 && stop_func_sal.end < ecs->stop_func_end)
7831 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7832
2dbd5e30
KB
7833 /* Architectures which require breakpoint adjustment might not be able
7834 to place a breakpoint at the computed address. If so, the test
7835 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7836 ecs->stop_func_start to an address at which a breakpoint may be
7837 legitimately placed.
8fb3e588 7838
2dbd5e30
KB
7839 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7840 made, GDB will enter an infinite loop when stepping through
7841 optimized code consisting of VLIW instructions which contain
7842 subinstructions corresponding to different source lines. On
7843 FR-V, it's not permitted to place a breakpoint on any but the
7844 first subinstruction of a VLIW instruction. When a breakpoint is
7845 set, GDB will adjust the breakpoint address to the beginning of
7846 the VLIW instruction. Thus, we need to make the corresponding
7847 adjustment here when computing the stop address. */
8fb3e588 7848
568d6575 7849 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7850 {
7851 ecs->stop_func_start
568d6575 7852 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7853 ecs->stop_func_start);
2dbd5e30
KB
7854 }
7855
f2ffa92b 7856 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7857 {
7858 /* We are already there: stop now. */
bdc36728 7859 end_stepping_range (ecs);
c2c6d25f
JM
7860 return;
7861 }
7862 else
7863 {
7864 /* Put the step-breakpoint there and go until there. */
51abb421 7865 symtab_and_line sr_sal;
c2c6d25f
JM
7866 sr_sal.pc = ecs->stop_func_start;
7867 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7868 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7869
c2c6d25f 7870 /* Do not specify what the fp should be when we stop since on
dda83cd7
SM
7871 some machines the prologue is where the new fp value is
7872 established. */
a6d9a66e 7873 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7874
7875 /* And make sure stepping stops right away then. */
16c381f0 7876 ecs->event_thread->control.step_range_end
dda83cd7 7877 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7878 }
7879 keep_going (ecs);
7880}
d4f3574e 7881
b2175913
MS
7882/* Inferior has stepped backward into a subroutine call with source
 7883 code that we should not step over. Step to the beginning of the
7884 last line of code in it. */
7885
7886static void
568d6575
UW
7887handle_step_into_function_backward (struct gdbarch *gdbarch,
7888 struct execution_control_state *ecs)
b2175913 7889{
43f3e411 7890 struct compunit_symtab *cust;
167e4384 7891 struct symtab_and_line stop_func_sal;
b2175913 7892
7e324e48
GB
7893 fill_in_stop_func (gdbarch, ecs);
7894
f2ffa92b 7895 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7896 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7897 ecs->stop_func_start
7898 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7899
f2ffa92b 7900 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7901
7902 /* OK, we're just going to keep stepping here. */
f2ffa92b 7903 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7904 {
7905 /* We're there already. Just stop stepping now. */
bdc36728 7906 end_stepping_range (ecs);
b2175913
MS
7907 }
7908 else
7909 {
7910 /* Else just reset the step range and keep going.
7911 No step-resume breakpoint, they don't work for
7912 epilogues, which can have multiple entry paths. */
16c381f0
JK
7913 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7914 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7915 keep_going (ecs);
7916 }
7917 return;
7918}
7919
d3169d93 7920/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
 7921 This is used both to skip over functions and to skip over code. */
7922
7923static void
2c03e5be
PA
7924insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7925 struct symtab_and_line sr_sal,
7926 struct frame_id sr_id,
7927 enum bptype sr_type)
44cbf7b5 7928{
611c83ae
PA
7929 /* There should never be more than one step-resume or longjmp-resume
7930 breakpoint per thread, so we should never be setting a new
44cbf7b5 7931 step_resume_breakpoint when one is already active. */
8358c15c 7932 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7933 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 7934
1eb8556f
SM
7935 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7936 paddress (gdbarch, sr_sal.pc));
d3169d93 7937
8358c15c 7938 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7939 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7940}
7941
9da8c2a0 7942void
2c03e5be
PA
7943insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7944 struct symtab_and_line sr_sal,
7945 struct frame_id sr_id)
7946{
7947 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7948 sr_sal, sr_id,
7949 bp_step_resume);
44cbf7b5 7950}
7ce450bd 7951
2c03e5be
PA
7952/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7953 This is used to skip a potential signal handler.
7ce450bd 7954
14e60db5
DJ
7955 This is called with the interrupted function's frame. The signal
7956 handler, when it returns, will resume the interrupted function at
7957 RETURN_FRAME.pc. */
d303a6c7
AC
7958
7959static void
2c03e5be 7960insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7961{
f4c1edd8 7962 gdb_assert (return_frame != NULL);
d303a6c7 7963
51abb421
PA
7964 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7965
7966 symtab_and_line sr_sal;
568d6575 7967 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7968 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7969 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7970
2c03e5be
PA
7971 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7972 get_stack_frame_id (return_frame),
7973 bp_hp_step_resume);
d303a6c7
AC
7974}
7975
2c03e5be
PA
7976/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7977 is used to skip a function after stepping into it (for "next" or if
7978 the called function has no debugging information).
14e60db5
DJ
7979
7980 The current function has almost always been reached by single
7981 stepping a call or return instruction. NEXT_FRAME belongs to the
7982 current function, and the breakpoint will be set at the caller's
7983 resume address.
7984
7985 This is a separate function rather than reusing
2c03e5be 7986 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7987 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7988 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7989
7990static void
7991insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7992{
14e60db5
DJ
7993 /* We shouldn't have gotten here if we don't know where the call site
7994 is. */
c7ce8faa 7995 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7996
51abb421 7997 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7998
51abb421 7999 symtab_and_line sr_sal;
c7ce8faa
DJ
8000 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8001 frame_unwind_caller_pc (next_frame));
14e60db5 8002 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 8003 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 8004
a6d9a66e 8005 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 8006 frame_unwind_caller_id (next_frame));
14e60db5
DJ
8007}
8008
611c83ae
PA
8009/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8010 new breakpoint at the target of a jmp_buf. The handling of
8011 longjmp-resume uses the same mechanisms used for handling
8012 "step-resume" breakpoints. */
8013
8014static void
a6d9a66e 8015insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 8016{
e81a37f7
TT
8017 /* There should never be more than one longjmp-resume breakpoint per
8018 thread, so we should never be setting a new
611c83ae 8019 longjmp_resume_breakpoint when one is already active. */
e81a37f7 8020 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae 8021
1eb8556f
SM
8022 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8023 paddress (gdbarch, pc));
611c83ae 8024
e81a37f7 8025 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 8026 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
8027}
8028
186c406b
TT
8029/* Insert an exception resume breakpoint. TP is the thread throwing
8030 the exception. The block B is the block of the unwinder debug hook
8031 function. FRAME is the frame corresponding to the call to this
8032 function. SYM is the symbol of the function argument holding the
8033 target PC of the exception. */
8034
8035static void
8036insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 8037 const struct block *b,
186c406b
TT
8038 struct frame_info *frame,
8039 struct symbol *sym)
8040{
a70b8144 8041 try
186c406b 8042 {
63e43d3a 8043 struct block_symbol vsym;
186c406b
TT
8044 struct value *value;
8045 CORE_ADDR handler;
8046 struct breakpoint *bp;
8047
987012b8 8048 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 8049 b, VAR_DOMAIN);
63e43d3a 8050 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
8051 /* If the value was optimized out, revert to the old behavior. */
8052 if (! value_optimized_out (value))
8053 {
8054 handler = value_as_address (value);
8055
1eb8556f
SM
8056 infrun_debug_printf ("exception resume at %lx",
8057 (unsigned long) handler);
186c406b
TT
8058
8059 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
8060 handler,
8061 bp_exception_resume).release ();
c70a6932
JK
8062
8063 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8064 frame = NULL;
8065
5d5658a1 8066 bp->thread = tp->global_num;
186c406b
TT
8067 inferior_thread ()->control.exception_resume_breakpoint = bp;
8068 }
8069 }
230d2906 8070 catch (const gdb_exception_error &e)
492d29ea
PA
8071 {
8072 /* We want to ignore errors here. */
8073 }
186c406b
TT
8074}
8075
28106bc2
SDJ
8076/* A helper for check_exception_resume that sets an
8077 exception-breakpoint based on a SystemTap probe. */
8078
8079static void
8080insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 8081 const struct bound_probe *probe,
28106bc2
SDJ
8082 struct frame_info *frame)
8083{
8084 struct value *arg_value;
8085 CORE_ADDR handler;
8086 struct breakpoint *bp;
8087
8088 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8089 if (!arg_value)
8090 return;
8091
8092 handler = value_as_address (arg_value);
8093
1eb8556f
SM
8094 infrun_debug_printf ("exception resume at %s",
8095 paddress (probe->objfile->arch (), handler));
28106bc2
SDJ
8096
8097 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 8098 handler, bp_exception_resume).release ();
5d5658a1 8099 bp->thread = tp->global_num;
28106bc2
SDJ
8100 inferior_thread ()->control.exception_resume_breakpoint = bp;
8101}
8102
186c406b
TT
8103/* This is called when an exception has been intercepted. Check to
8104 see whether the exception's destination is of interest, and if so,
8105 set an exception resume breakpoint there. */
8106
8107static void
8108check_exception_resume (struct execution_control_state *ecs,
28106bc2 8109 struct frame_info *frame)
186c406b 8110{
729662a5 8111 struct bound_probe probe;
28106bc2
SDJ
8112 struct symbol *func;
8113
8114 /* First see if this exception unwinding breakpoint was set via a
8115 SystemTap probe point. If so, the probe has two arguments: the
8116 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8117 set a breakpoint there. */
6bac7473 8118 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 8119 if (probe.prob)
28106bc2 8120 {
729662a5 8121 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
8122 return;
8123 }
8124
8125 func = get_frame_function (frame);
8126 if (!func)
8127 return;
186c406b 8128
a70b8144 8129 try
186c406b 8130 {
3977b71f 8131 const struct block *b;
8157b174 8132 struct block_iterator iter;
186c406b
TT
8133 struct symbol *sym;
8134 int argno = 0;
8135
8136 /* The exception breakpoint is a thread-specific breakpoint on
8137 the unwinder's debug hook, declared as:
8138
8139 void _Unwind_DebugHook (void *cfa, void *handler);
8140
8141 The CFA argument indicates the frame to which control is
8142 about to be transferred. HANDLER is the destination PC.
8143
8144 We ignore the CFA and set a temporary breakpoint at HANDLER.
8145 This is not extremely efficient but it avoids issues in gdb
8146 with computing the DWARF CFA, and it also works even in weird
8147 cases such as throwing an exception from inside a signal
8148 handler. */
8149
8150 b = SYMBOL_BLOCK_VALUE (func);
8151 ALL_BLOCK_SYMBOLS (b, iter, sym)
8152 {
8153 if (!SYMBOL_IS_ARGUMENT (sym))
8154 continue;
8155
8156 if (argno == 0)
8157 ++argno;
8158 else
8159 {
8160 insert_exception_resume_breakpoint (ecs->event_thread,
8161 b, frame, sym);
8162 break;
8163 }
8164 }
8165 }
230d2906 8166 catch (const gdb_exception_error &e)
492d29ea
PA
8167 {
8168 }
186c406b
TT
8169}
8170
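/* For reference, the unwinder debug hook that check_exception_resume
   keys off is declared (per the comment above; a sketch only, not a
   verbatim copy of the libgcc declaration) as:

     void _Unwind_DebugHook (void *cfa, void *handler);

   GDB ignores CFA and places a momentary, thread-specific breakpoint
   at the address carried in HANDLER.  */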
104c1213 8171static void
22bcd14b 8172stop_waiting (struct execution_control_state *ecs)
104c1213 8173{
1eb8556f 8174 infrun_debug_printf ("stop_waiting");
527159b7 8175
cd0fc7c3
SS
8176 /* Let callers know we don't want to wait for the inferior anymore. */
8177 ecs->wait_some_more = 0;
fbea99ea 8178
53cccef1 8179 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 8180 threads now that we're presenting the stop to the user. */
53cccef1 8181 if (!non_stop && exists_non_stop_target ())
3cebef98 8182 stop_all_threads ("presenting stop to user in all-stop");
cd0fc7c3
SS
8183}
8184
4d9d9d04
PA
8185/* Like keep_going, but passes the signal to the inferior, even if the
8186 signal is set to nopass. */
d4f3574e
SS
8187
8188static void
4d9d9d04 8189keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 8190{
d7e15655 8191 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 8192 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 8193
d4f3574e 8194 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 8195 ecs->event_thread->prev_pc
fc75c28b 8196 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 8197
4d9d9d04 8198 if (ecs->event_thread->control.trap_expected)
d4f3574e 8199 {
4d9d9d04
PA
8200 struct thread_info *tp = ecs->event_thread;
8201
1eb8556f
SM
8202 infrun_debug_printf ("%s has trap_expected set, "
8203 "resuming to collect trap",
8204 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 8205
a9ba6bae
PA
8206 /* We haven't yet gotten our trap, and either: intercepted a
8207 non-signal event (e.g., a fork); or took a signal which we
8208 are supposed to pass through to the inferior. Simply
8209 continue. */
64ce06e4 8210 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 8211 }
372316f1
PA
8212 else if (step_over_info_valid_p ())
8213 {
8214 /* Another thread is stepping over a breakpoint in-line. If
8215 this thread needs a step-over too, queue the request. In
8216 either case, this resume must be deferred for later. */
8217 struct thread_info *tp = ecs->event_thread;
8218
8219 if (ecs->hit_singlestep_breakpoint
8220 || thread_still_needs_step_over (tp))
8221 {
1eb8556f
SM
8222 infrun_debug_printf ("step-over already in progress: "
8223 "step-over for %s deferred",
8224 target_pid_to_str (tp->ptid).c_str ());
28d5518b 8225 global_thread_step_over_chain_enqueue (tp);
372316f1
PA
8226 }
8227 else
8228 {
1eb8556f
SM
8229 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8230 target_pid_to_str (tp->ptid).c_str ());
372316f1 8231 }
372316f1 8232 }
d4f3574e
SS
8233 else
8234 {
31e77af2 8235 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
8236 int remove_bp;
8237 int remove_wps;
8d297bbf 8238 step_over_what step_what;
31e77af2 8239
d4f3574e 8240 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
8241 anyway (if we got a signal, the user asked it be passed to
8242 the child)
8243 -- or --
8244 We got our expected trap, but decided we should resume from
8245 it.
d4f3574e 8246
a9ba6bae 8247 We're going to run this baby now!
d4f3574e 8248
c36b740a
VP
8249 Note that insert_breakpoints won't try to re-insert
8250 already inserted breakpoints. Therefore, we don't
8251 care if breakpoints were already inserted, or not. */
a9ba6bae 8252
31e77af2
PA
8253 /* If we need to step over a breakpoint, and we're not using
8254 displaced stepping to do so, insert all breakpoints
8255 (watchpoints, etc.) but the one we're stepping over, step one
8256 instruction, and then re-insert the breakpoint when that step
8257 is finished. */
963f9c80 8258
6c4cfb24
PA
8259 step_what = thread_still_needs_step_over (ecs->event_thread);
8260
963f9c80 8261 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
8262 || (step_what & STEP_OVER_BREAKPOINT));
8263 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 8264
cb71640d
PA
8265 /* We can't use displaced stepping if we need to step past a
8266 watchpoint. The instruction copied to the scratch pad would
8267 still trigger the watchpoint. */
8268 if (remove_bp
3fc8eb30 8269 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 8270 {
a01bda52 8271 set_step_over_info (regcache->aspace (),
21edc42f
YQ
8272 regcache_read_pc (regcache), remove_wps,
8273 ecs->event_thread->global_num);
45e8c884 8274 }
963f9c80 8275 else if (remove_wps)
21edc42f 8276 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
8277
8278 /* If we now need to do an in-line step-over, we need to stop
8279 all other threads. Note this must be done before
8280 insert_breakpoints below, because that removes the breakpoint
8281 we're about to step over, otherwise other threads could miss
8282 it. */
fbea99ea 8283 if (step_over_info_valid_p () && target_is_non_stop_p ())
3cebef98 8284 stop_all_threads ("starting in-line step-over");
abbb1732 8285
31e77af2 8286 /* Stop stepping if inserting breakpoints fails. */
a70b8144 8287 try
31e77af2
PA
8288 {
8289 insert_breakpoints ();
8290 }
230d2906 8291 catch (const gdb_exception_error &e)
31e77af2
PA
8292 {
8293 exception_print (gdb_stderr, e);
22bcd14b 8294 stop_waiting (ecs);
bdf2a94a 8295 clear_step_over_info ();
31e77af2 8296 return;
d4f3574e
SS
8297 }
8298
963f9c80 8299 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 8300
64ce06e4 8301 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
8302 }
8303
488f131b 8304 prepare_to_wait (ecs);
d4f3574e
SS
8305}
8306
4d9d9d04
PA
8307/* Called when we should continue running the inferior, because the
8308 current event doesn't cause a user visible stop. This does the
8309 resuming part; waiting for the next event is done elsewhere. */
8310
8311static void
8312keep_going (struct execution_control_state *ecs)
8313{
8314 if (ecs->event_thread->control.trap_expected
8315 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
8316 ecs->event_thread->control.trap_expected = 0;
8317
8318 if (!signal_program[ecs->event_thread->suspend.stop_signal])
8319 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
8320 keep_going_pass_signal (ecs);
8321}
8322
104c1213
JM
8323/* This function normally comes after a resume, before
8324 handle_inferior_event exits. It takes care of any last bits of
8325 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 8326
104c1213
JM
8327static void
8328prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 8329{
1eb8556f 8330 infrun_debug_printf ("prepare_to_wait");
104c1213 8331
104c1213 8332 ecs->wait_some_more = 1;
0b333c5e 8333
42bd97a6
PA
8334 /* If the target can't async, emulate it by marking the infrun event
8335 handler such that as soon as we get back to the event-loop, we
8336 immediately end up in fetch_inferior_event again calling
8337 target_wait. */
8338 if (!target_can_async_p ())
0b333c5e 8339 mark_infrun_async_event_handler ();
c906108c 8340}
11cf8741 8341
fd664c91 8342/* We are done with the step range of a step/next/si/ni command.
b57bacec 8343 Called once for each n of a "step n" operation. */
fd664c91
PA
8344
8345static void
bdc36728 8346end_stepping_range (struct execution_control_state *ecs)
fd664c91 8347{
bdc36728 8348 ecs->event_thread->control.stop_step = 1;
bdc36728 8349 stop_waiting (ecs);
fd664c91
PA
8350}
8351
33d62d64
JK
8352/* Several print_*_reason functions to print why the inferior has stopped.
8353 We always print something when the inferior exits, or receives a signal.
8354 The rest of the cases are dealt with later on in normal_stop and
8355 print_it_typical. Ideally there should be a call to one of these
8356 print_*_reason functions functions from handle_inferior_event each time
22bcd14b 8357 stop_waiting is called.
33d62d64 8358
fd664c91
PA
8359 Note that we don't call these directly, instead we delegate that to
8360 the interpreters, through observers. Interpreters then call these
8361 with whatever uiout is right. */
33d62d64 8362
fd664c91
PA
8363void
8364print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 8365{
fd664c91 8366 /* For CLI-like interpreters, print nothing. */
33d62d64 8367
112e8700 8368 if (uiout->is_mi_like_p ())
fd664c91 8369 {
112e8700 8370 uiout->field_string ("reason",
fd664c91
PA
8371 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8372 }
8373}
33d62d64 8374
fd664c91
PA
8375void
8376print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 8377{
33d62d64 8378 annotate_signalled ();
112e8700
SM
8379 if (uiout->is_mi_like_p ())
8380 uiout->field_string
8381 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
8382 uiout->text ("\nProgram terminated with signal ");
33d62d64 8383 annotate_signal_name ();
112e8700 8384 uiout->field_string ("signal-name",
2ea28649 8385 gdb_signal_to_name (siggnal));
33d62d64 8386 annotate_signal_name_end ();
112e8700 8387 uiout->text (", ");
33d62d64 8388 annotate_signal_string ();
112e8700 8389 uiout->field_string ("signal-meaning",
2ea28649 8390 gdb_signal_to_string (siggnal));
33d62d64 8391 annotate_signal_string_end ();
112e8700
SM
8392 uiout->text (".\n");
8393 uiout->text ("The program no longer exists.\n");
33d62d64
JK
8394}
8395
fd664c91
PA
8396void
8397print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 8398{
fda326dd 8399 struct inferior *inf = current_inferior ();
a068643d 8400 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 8401
33d62d64
JK
8402 annotate_exited (exitstatus);
8403 if (exitstatus)
8404 {
112e8700
SM
8405 if (uiout->is_mi_like_p ())
8406 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
8407 std::string exit_code_str
8408 = string_printf ("0%o", (unsigned int) exitstatus);
8409 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
8410 plongest (inf->num), pidstr.c_str (),
8411 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
8412 }
8413 else
11cf8741 8414 {
112e8700
SM
8415 if (uiout->is_mi_like_p ())
8416 uiout->field_string
8417 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
8418 uiout->message ("[Inferior %s (%s) exited normally]\n",
8419 plongest (inf->num), pidstr.c_str ());
33d62d64 8420 }
33d62d64
JK
8421}
8422
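/* Illustrative CLI output from print_exited_reason above (note the
   exit code field is formatted in octal):

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]
*/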
fd664c91
PA
8423void
8424print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 8425{
f303dbd6
PA
8426 struct thread_info *thr = inferior_thread ();
8427
33d62d64
JK
8428 annotate_signal ();
8429
112e8700 8430 if (uiout->is_mi_like_p ())
f303dbd6
PA
8431 ;
8432 else if (show_thread_that_caused_stop ())
33d62d64 8433 {
f303dbd6 8434 const char *name;
33d62d64 8435
112e8700 8436 uiout->text ("\nThread ");
33eca680 8437 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
8438
8439 name = thr->name != NULL ? thr->name : target_thread_name (thr);
8440 if (name != NULL)
8441 {
112e8700 8442 uiout->text (" \"");
33eca680 8443 uiout->field_string ("name", name);
112e8700 8444 uiout->text ("\"");
f303dbd6 8445 }
33d62d64 8446 }
f303dbd6 8447 else
112e8700 8448 uiout->text ("\nProgram");
f303dbd6 8449
112e8700
SM
8450 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8451 uiout->text (" stopped");
33d62d64
JK
8452 else
8453 {
112e8700 8454 uiout->text (" received signal ");
8b93c638 8455 annotate_signal_name ();
112e8700
SM
8456 if (uiout->is_mi_like_p ())
8457 uiout->field_string
8458 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8459 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8460 annotate_signal_name_end ();
112e8700 8461 uiout->text (", ");
8b93c638 8462 annotate_signal_string ();
112e8700 8463 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21 8464
272bb05c
JB
8465 struct regcache *regcache = get_current_regcache ();
8466 struct gdbarch *gdbarch = regcache->arch ();
8467 if (gdbarch_report_signal_info_p (gdbarch))
8468 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
8469
8b93c638 8470 annotate_signal_string_end ();
33d62d64 8471 }
112e8700 8472 uiout->text (".\n");
33d62d64 8473}
252fbfc8 8474
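/* Illustrative CLI output from print_signal_received_reason above:

     Thread 2 "worker" received signal SIGSEGV, Segmentation fault.

   or, when the stopping thread is not shown:

     Program received signal SIGINT, Interrupt.
*/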
fd664c91
PA
8475void
8476print_no_history_reason (struct ui_out *uiout)
33d62d64 8477{
112e8700 8478 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8479}
43ff13b4 8480
0c7e1a46
PA
8481/* Print current location without a level number, if we have changed
8482 functions or hit a breakpoint. Print source line if we have one.
8483 bpstat_print contains the logic deciding in detail what to print,
8484 based on the event(s) that just occurred. */
8485
243a9253
PA
8486static void
8487print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8488{
8489 int bpstat_ret;
f486487f 8490 enum print_what source_flag;
0c7e1a46
PA
8491 int do_frame_printing = 1;
8492 struct thread_info *tp = inferior_thread ();
8493
8494 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8495 switch (bpstat_ret)
8496 {
8497 case PRINT_UNKNOWN:
8498 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8499 should) carry around the function and does (or should) use
8500 that when doing a frame comparison. */
8501 if (tp->control.stop_step
8502 && frame_id_eq (tp->control.step_frame_id,
8503 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8504 && (tp->control.step_start_function
8505 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8506 {
8507 /* Finished step, just print source line. */
8508 source_flag = SRC_LINE;
8509 }
8510 else
8511 {
8512 /* Print location and source line. */
8513 source_flag = SRC_AND_LOC;
8514 }
8515 break;
8516 case PRINT_SRC_AND_LOC:
8517 /* Print location and source line. */
8518 source_flag = SRC_AND_LOC;
8519 break;
8520 case PRINT_SRC_ONLY:
8521 source_flag = SRC_LINE;
8522 break;
8523 case PRINT_NOTHING:
8524 /* Something bogus. */
8525 source_flag = SRC_LINE;
8526 do_frame_printing = 0;
8527 break;
8528 default:
8529 internal_error (__FILE__, __LINE__, _("Unknown value."));
8530 }
8531
8532 /* The behavior of this routine with respect to the source
8533 flag is:
8534 SRC_LINE: Print only source line
8535 LOCATION: Print only location
8536 SRC_AND_LOC: Print location and source line. */
8537 if (do_frame_printing)
8538 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8539}
8540
243a9253
PA
8541/* See infrun.h. */
8542
8543void
4c7d57e7 8544print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8545{
243a9253 8546 struct target_waitstatus last;
243a9253
PA
8547 struct thread_info *tp;
8548
5b6d1e4f 8549 get_last_target_status (nullptr, nullptr, &last);
243a9253 8550
67ad9399
TT
8551 {
8552 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8553
67ad9399 8554 print_stop_location (&last);
243a9253 8555
67ad9399 8556 /* Display the auto-display expressions. */
4c7d57e7
TT
8557 if (displays)
8558 do_displays ();
67ad9399 8559 }
243a9253
PA
8560
8561 tp = inferior_thread ();
8562 if (tp->thread_fsm != NULL
46e3ed7f 8563 && tp->thread_fsm->finished_p ())
243a9253
PA
8564 {
8565 struct return_value_info *rv;
8566
46e3ed7f 8567 rv = tp->thread_fsm->return_value ();
243a9253
PA
8568 if (rv != NULL)
8569 print_return_value (uiout, rv);
8570 }
0c7e1a46
PA
8571}
8572
388a7084
PA
8573/* See infrun.h. */
8574
8575void
8576maybe_remove_breakpoints (void)
8577{
55f6301a 8578 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
388a7084
PA
8579 {
8580 if (remove_breakpoints ())
8581 {
223ffa71 8582 target_terminal::ours_for_output ();
388a7084
PA
8583 printf_filtered (_("Cannot remove breakpoints because "
8584 "program is no longer writable.\nFurther "
8585 "execution is probably impossible.\n"));
8586 }
8587 }
8588}
8589
4c2f2a79
PA
8590/* The execution context that just caused a normal stop. */
8591
8592struct stop_context
8593{
2d844eaf 8594 stop_context ();
2d844eaf
TT
8595
8596 DISABLE_COPY_AND_ASSIGN (stop_context);
8597
8598 bool changed () const;
8599
4c2f2a79
PA
8600 /* The stop ID. */
8601 ULONGEST stop_id;
c906108c 8602
4c2f2a79 8603 /* The event PTID. */
c906108c 8604
4c2f2a79
PA
8605 ptid_t ptid;
8606
 8607 /* If stopped for a thread event, this is the thread that caused the
8608 stop. */
d634cd0b 8609 thread_info_ref thread;
4c2f2a79
PA
8610
8611 /* The inferior that caused the stop. */
8612 int inf_num;
8613};
8614
2d844eaf 8615/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8616 takes a strong reference to the thread. */
8617
2d844eaf 8618stop_context::stop_context ()
4c2f2a79 8619{
2d844eaf
TT
8620 stop_id = get_stop_id ();
8621 ptid = inferior_ptid;
8622 inf_num = current_inferior ()->num;
4c2f2a79 8623
d7e15655 8624 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8625 {
8626 /* Take a strong reference so that the thread can't be deleted
8627 yet. */
d634cd0b 8628 thread = thread_info_ref::new_reference (inferior_thread ());
4c2f2a79 8629 }
4c2f2a79
PA
8630}
8631
8632/* Return true if the current context no longer matches the saved stop
8633 context. */
8634
2d844eaf
TT
8635bool
8636stop_context::changed () const
8637{
8638 if (ptid != inferior_ptid)
8639 return true;
8640 if (inf_num != current_inferior ()->num)
8641 return true;
8642 if (thread != NULL && thread->state != THREAD_STOPPED)
8643 return true;
8644 if (get_stop_id () != stop_id)
8645 return true;
8646 return false;
4c2f2a79
PA
8647}
8648
8649/* See infrun.h. */
8650
8651int
96baa820 8652normal_stop (void)
c906108c 8653{
73b65bb0 8654 struct target_waitstatus last;
73b65bb0 8655
5b6d1e4f 8656 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8657
4c2f2a79
PA
8658 new_stop_id ();
8659
29f49a6a
PA
8660 /* If an exception is thrown from this point on, make sure to
8661 propagate GDB's knowledge of the executing state to the
8662 frontend/user running state. A QUIT is an easy exception to see
8663 here, so do this before any filtered output. */
731f534f 8664
5b6d1e4f 8665 ptid_t finish_ptid = null_ptid;
731f534f 8666
c35b1492 8667 if (!non_stop)
5b6d1e4f 8668 finish_ptid = minus_one_ptid;
e1316e60
PA
8669 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8670 || last.kind == TARGET_WAITKIND_EXITED)
8671 {
8672 /* On some targets, we may still have live threads in the
8673 inferior when we get a process exit event. E.g., for
8674 "checkpoint", when the current checkpoint/fork exits,
8675 linux-fork.c automatically switches to another fork from
8676 within target_mourn_inferior. */
731f534f 8677 if (inferior_ptid != null_ptid)
5b6d1e4f 8678 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8679 }
8680 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8681 finish_ptid = inferior_ptid;
8682
8683 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8684 if (finish_ptid != null_ptid)
8685 {
8686 maybe_finish_thread_state.emplace
8687 (user_visible_resume_target (finish_ptid), finish_ptid);
8688 }
29f49a6a 8689
b57bacec
PA
8690 /* As we're presenting a stop, and potentially removing breakpoints,
8691 update the thread list so we can tell whether there are threads
8692 running on the target. With target remote, for example, we can
8693 only learn about new threads when we explicitly update the thread
8694 list. Do this before notifying the interpreters about signal
8695 stops, end of stepping ranges, etc., so that the "new thread"
8696 output is emitted before e.g., "Program received signal FOO",
8697 instead of after. */
8698 update_thread_list ();
8699
8700 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8701 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8702
c906108c
SS
8703 /* As with the notification of thread events, we want to delay
8704 notifying the user that we've switched thread context until
8705 the inferior actually stops.
8706
73b65bb0
DJ
8707 There's no point in saying anything if the inferior has exited.
8708 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8709 "received a signal".
8710
8711 Also skip saying anything in non-stop mode. In that mode, as we
8712 don't want GDB to switch threads behind the user's back, to avoid
8713 races where the user is typing a command to apply to thread x,
8714 but GDB switches to thread y before the user finishes entering
8715 the command, fetch_inferior_event installs a cleanup to restore
8716 the current thread back to the thread the user had selected right
8717 after this event is handled, so we're not really switching, only
8718 informing of a stop. */
4f8d22e3 8719 if (!non_stop
731f534f 8720 && previous_inferior_ptid != inferior_ptid
55f6301a 8721 && target_has_execution ()
73b65bb0 8722 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8723 && last.kind != TARGET_WAITKIND_EXITED
8724 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8725 {
0e454242 8726 SWITCH_THRU_ALL_UIS ()
3b12939d 8727 {
223ffa71 8728 target_terminal::ours_for_output ();
3b12939d 8729 printf_filtered (_("[Switching to %s]\n"),
a068643d 8730 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8731 annotate_thread_changed ();
8732 }
39f77062 8733 previous_inferior_ptid = inferior_ptid;
c906108c 8734 }
c906108c 8735
0e5bf2a8
PA
8736 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8737 {
0e454242 8738 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8739 if (current_ui->prompt_state == PROMPT_BLOCKED)
8740 {
223ffa71 8741 target_terminal::ours_for_output ();
3b12939d
PA
8742 printf_filtered (_("No unwaited-for children left.\n"));
8743 }
0e5bf2a8
PA
8744 }
8745
b57bacec 8746 /* Note: this depends on the update_thread_list call above. */
388a7084 8747 maybe_remove_breakpoints ();
c906108c 8748
c906108c
SS
8749 /* If an auto-display called a function and that got a signal,
8750 delete that auto-display to avoid an infinite recursion. */
8751
8752 if (stopped_by_random_signal)
8753 disable_current_display ();
8754
0e454242 8755 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8756 {
8757 async_enable_stdin ();
8758 }
c906108c 8759
388a7084 8760 /* Let the user/frontend see the threads as stopped. */
731f534f 8761 maybe_finish_thread_state.reset ();
388a7084
PA
8762
8763 /* Select innermost stack frame - i.e., current frame is frame 0,
8764 and current location is based on that. Handle the case where the
8765 dummy call is returning after being stopped. E.g. the dummy call
8766 previously hit a breakpoint. (If the dummy call returns
8767 normally, we won't reach here.) Do this before the stop hook is
8768 run, so that it doesn't get to see the temporary dummy frame,
8769 which is not where we'll present the stop. */
8770 if (has_stack_frames ())
8771 {
8772 if (stop_stack_dummy == STOP_STACK_DUMMY)
8773 {
8774 /* Pop the empty frame that contains the stack dummy. This
8775 also restores inferior state prior to the call (struct
8776 infcall_suspend_state). */
8777 struct frame_info *frame = get_current_frame ();
8778
8779 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8780 frame_pop (frame);
8781 /* frame_pop calls reinit_frame_cache as the last thing it
8782 does which means there's now no selected frame. */
8783 }
8784
8785 select_frame (get_current_frame ());
8786
8787 /* Set the current source location. */
8788 set_current_sal_from_frame (get_current_frame ());
8789 }
dd7e2d2b
PA
8790
8791 /* Look up the hook_stop and run it (CLI internally handles problem
8792 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8793 if (stop_command != NULL)
8794 {
2d844eaf 8795 stop_context saved_context;
4c2f2a79 8796
a70b8144 8797 try
bf469271
PA
8798 {
8799 execute_cmd_pre_hook (stop_command);
8800 }
230d2906 8801 catch (const gdb_exception &ex)
bf469271
PA
8802 {
8803 exception_fprintf (gdb_stderr, ex,
8804 "Error while running hook_stop:\n");
8805 }
4c2f2a79
PA
8806
8807 /* If the stop hook resumes the target, then there's no point in
8808 trying to notify about the previous stop; its context is
8809 gone. Likewise if the command switches thread or inferior --
8810 the observers would print a stop for the wrong
8811 thread/inferior. */
2d844eaf
TT
8812 if (saved_context.changed ())
8813 return 1;
4c2f2a79 8814 }
dd7e2d2b 8815
388a7084
PA
8816 /* Notify observers about the stop. This is where the interpreters
8817 print the stop event. */
d7e15655 8818 if (inferior_ptid != null_ptid)
76727919 8819 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
24a7f1b5 8820 stop_print_frame);
388a7084 8821 else
76727919 8822 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8823
243a9253
PA
8824 annotate_stopped ();
8825
55f6301a 8826 if (target_has_execution ())
48844aa6
PA
8827 {
8828 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8829 && last.kind != TARGET_WAITKIND_EXITED
8830 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8831 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8832 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8833 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8834 }
6c95b8df
PA
8835
8836 /* Try to get rid of automatically added inferiors that are no
8837 longer needed. Keeping those around slows down things linearly.
8838 Note that this never removes the current inferior. */
8839 prune_inferiors ();
4c2f2a79
PA
8840
8841 return 0;
c906108c 8842}
c906108c 8843\f
c5aa993b 8844int
96baa820 8845signal_stop_state (int signo)
c906108c 8846{
d6b48e9c 8847 return signal_stop[signo];
c906108c
SS
8848}
8849
c5aa993b 8850int
96baa820 8851signal_print_state (int signo)
c906108c
SS
8852{
8853 return signal_print[signo];
8854}
8855
c5aa993b 8856int
96baa820 8857signal_pass_state (int signo)
c906108c
SS
8858{
8859 return signal_program[signo];
8860}
8861
2455069d
UW
8862static void
8863signal_cache_update (int signo)
8864{
8865 if (signo == -1)
8866 {
a493e3e2 8867 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8868 signal_cache_update (signo);
8869
8870 return;
8871 }
8872
8873 signal_pass[signo] = (signal_stop[signo] == 0
8874 && signal_print[signo] == 0
ab04a2af
TT
8875 && signal_program[signo] == 1
8876 && signal_catch[signo] == 0);
2455069d
UW
8877}
8878
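/* In other words, signal_pass[signo] ends up set only when GDB
   neither stops on, prints, nor catches the signal and the user wants
   it delivered to the program -- the one case in which the target may
   pass the signal straight through without reporting it.  */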
488f131b 8879int
7bda5e4a 8880signal_stop_update (int signo, int state)
d4f3574e
SS
8881{
8882 int ret = signal_stop[signo];
abbb1732 8883
d4f3574e 8884 signal_stop[signo] = state;
2455069d 8885 signal_cache_update (signo);
d4f3574e
SS
8886 return ret;
8887}
8888
488f131b 8889int
7bda5e4a 8890signal_print_update (int signo, int state)
d4f3574e
SS
8891{
8892 int ret = signal_print[signo];
abbb1732 8893
d4f3574e 8894 signal_print[signo] = state;
2455069d 8895 signal_cache_update (signo);
d4f3574e
SS
8896 return ret;
8897}
8898
488f131b 8899int
7bda5e4a 8900signal_pass_update (int signo, int state)
d4f3574e
SS
8901{
8902 int ret = signal_program[signo];
abbb1732 8903
d4f3574e 8904 signal_program[signo] = state;
2455069d 8905 signal_cache_update (signo);
d4f3574e
SS
8906 return ret;
8907}
8908
ab04a2af
TT
8909/* Update the global 'signal_catch' from INFO and notify the
8910 target. */
8911
8912void
8913signal_catch_update (const unsigned int *info)
8914{
8915 int i;
8916
8917 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8918 signal_catch[i] = info[i] > 0;
8919 signal_cache_update (-1);
adc6a863 8920 target_pass_signals (signal_pass);
ab04a2af
TT
8921}
8922
c906108c 8923static void
96baa820 8924sig_print_header (void)
c906108c 8925{
3e43a32a
MS
8926 printf_filtered (_("Signal Stop\tPrint\tPass "
8927 "to program\tDescription\n"));
c906108c
SS
8928}
8929
8930static void
2ea28649 8931sig_print_info (enum gdb_signal oursig)
c906108c 8932{
2ea28649 8933 const char *name = gdb_signal_to_name (oursig);
c906108c 8934 int name_padding = 13 - strlen (name);
96baa820 8935
c906108c
SS
8936 if (name_padding <= 0)
8937 name_padding = 0;
8938
8939 printf_filtered ("%s", name);
488f131b 8940 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8941 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8942 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8943 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8944 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8945}
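
/* For example, with the default settings established in
   _initialize_infrun below (SIGALRM is set to nostop, noprint, pass),
   the two functions above would print a row roughly like this
   (illustrative spacing):

     Signal        Stop      Print   Pass to program Description
     SIGALRM       No        No      Yes             Alarm clock  */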
8946
8947/* Specify how various signals in the inferior should be handled. */
8948
8949static void
0b39b52e 8950handle_command (const char *args, int from_tty)
c906108c 8951{
c906108c 8952 int digits, wordlen;
b926417a 8953 int sigfirst, siglast;
2ea28649 8954 enum gdb_signal oursig;
c906108c 8955 int allsigs;
c906108c
SS
8956
8957 if (args == NULL)
8958 {
e2e0b3e5 8959 error_no_arg (_("signal to handle"));
c906108c
SS
8960 }
8961
1777feb0 8962 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8963
adc6a863
PA
8964 const size_t nsigs = GDB_SIGNAL_LAST;
8965 unsigned char sigs[nsigs] {};
c906108c 8966
1777feb0 8967 /* Break the command line up into args. */
c906108c 8968
773a1edc 8969 gdb_argv built_argv (args);
c906108c
SS
8970
8971 /* Walk through the args, looking for signal oursigs, signal names, and
8972 actions. Signal numbers and signal names may be interspersed with
8973 actions, with the actions being performed for all signals cumulatively
1777feb0 8974 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8975
773a1edc 8976 for (char *arg : built_argv)
c906108c 8977 {
773a1edc
TT
8978 wordlen = strlen (arg);
8979 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8980 {;
8981 }
8982 allsigs = 0;
8983 sigfirst = siglast = -1;
8984
773a1edc 8985 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8986 {
8987 /* Apply action to all signals except those used by the
1777feb0 8988 debugger. Silently skip those. */
c906108c
SS
8989 allsigs = 1;
8990 sigfirst = 0;
8991 siglast = nsigs - 1;
8992 }
773a1edc 8993 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8994 {
8995 SET_SIGS (nsigs, sigs, signal_stop);
8996 SET_SIGS (nsigs, sigs, signal_print);
8997 }
773a1edc 8998 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8999 {
9000 UNSET_SIGS (nsigs, sigs, signal_program);
9001 }
773a1edc 9002 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
9003 {
9004 SET_SIGS (nsigs, sigs, signal_print);
9005 }
773a1edc 9006 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
9007 {
9008 SET_SIGS (nsigs, sigs, signal_program);
9009 }
773a1edc 9010 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
9011 {
9012 UNSET_SIGS (nsigs, sigs, signal_stop);
9013 }
773a1edc 9014 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
9015 {
9016 SET_SIGS (nsigs, sigs, signal_program);
9017 }
773a1edc 9018 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
9019 {
9020 UNSET_SIGS (nsigs, sigs, signal_print);
9021 UNSET_SIGS (nsigs, sigs, signal_stop);
9022 }
773a1edc 9023 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
9024 {
9025 UNSET_SIGS (nsigs, sigs, signal_program);
9026 }
9027 else if (digits > 0)
9028 {
9029 /* It is numeric. The numeric signal refers to our own
9030 internal signal numbering from target.h, not to host/target
9031 signal number. This is a feature; users really should be
9032 using symbolic names anyway, and the common ones like
9033 SIGHUP, SIGINT, SIGALRM, etc. will still work right. */
9034
9035 sigfirst = siglast = (int)
773a1edc
TT
9036 gdb_signal_from_command (atoi (arg));
9037 if (arg[digits] == '-')
c906108c
SS
9038 {
9039 siglast = (int)
773a1edc 9040 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
9041 }
9042 if (sigfirst > siglast)
9043 {
1777feb0 9044 /* The range was given in reverse order; swap the bounds. */
b926417a 9045 std::swap (sigfirst, siglast);
c906108c
SS
9046 }
9047 }
9048 else
9049 {
773a1edc 9050 oursig = gdb_signal_from_name (arg);
a493e3e2 9051 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
9052 {
9053 sigfirst = siglast = (int) oursig;
9054 }
9055 else
9056 {
9057 /* Not a number and not a recognized flag word => complain. */
773a1edc 9058 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
9059 }
9060 }
9061
9062 /* If any signal numbers or symbol names were found, set flags for
dda83cd7 9063 which signals to apply actions to. */
c906108c 9064
b926417a 9065 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 9066 {
2ea28649 9067 switch ((enum gdb_signal) signum)
c906108c 9068 {
a493e3e2
PA
9069 case GDB_SIGNAL_TRAP:
9070 case GDB_SIGNAL_INT:
c906108c
SS
9071 if (!allsigs && !sigs[signum])
9072 {
9e2f0ad4 9073 if (query (_("%s is used by the debugger.\n\
3e43a32a 9074Are you sure you want to change it? "),
2ea28649 9075 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
9076 {
9077 sigs[signum] = 1;
9078 }
9079 else
c119e040 9080 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
9081 }
9082 break;
a493e3e2
PA
9083 case GDB_SIGNAL_0:
9084 case GDB_SIGNAL_DEFAULT:
9085 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
9086 /* Make sure that "all" doesn't print these. */
9087 break;
9088 default:
9089 sigs[signum] = 1;
9090 break;
9091 }
9092 }
c906108c
SS
9093 }
9094
b926417a 9095 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
9096 if (sigs[signum])
9097 {
2455069d 9098 signal_cache_update (-1);
adc6a863
PA
9099 target_pass_signals (signal_pass);
9100 target_program_signals (signal_program);
c906108c 9101
3a031f65
PA
9102 if (from_tty)
9103 {
9104 /* Show the results. */
9105 sig_print_header ();
9106 for (; signum < nsigs; signum++)
9107 if (sigs[signum])
aead7601 9108 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
9109 }
9110
9111 break;
9112 }
c906108c
SS
9113}
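
/* Illustrative invocations, following the syntax accepted by the parser
   above and documented with the "handle" command in _initialize_infrun
   (signal names, gdb signal numbers 1-15, or a LOW-HIGH range, mixed
   with any of the action keywords):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGSEGV stop print
     (gdb) handle 14-15 noprint

   Actions accumulate across the argument list, and "info signals"
   shows the resulting tables.  */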
9114
de0bea00
MF
9115/* Complete the "handle" command. */
9116
eb3ff9a5 9117static void
de0bea00 9118handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 9119 completion_tracker &tracker,
6f937416 9120 const char *text, const char *word)
de0bea00 9121{
de0bea00
MF
9122 static const char * const keywords[] =
9123 {
9124 "all",
9125 "stop",
9126 "ignore",
9127 "print",
9128 "pass",
9129 "nostop",
9130 "noignore",
9131 "noprint",
9132 "nopass",
9133 NULL,
9134 };
9135
eb3ff9a5
PA
9136 signal_completer (ignore, tracker, text, word);
9137 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
9138}
9139
2ea28649
PA
9140enum gdb_signal
9141gdb_signal_from_command (int num)
ed01b82c
PA
9142{
9143 if (num >= 1 && num <= 15)
2ea28649 9144 return (enum gdb_signal) num;
ed01b82c
PA
9145 error (_("Only signals 1-15 are valid as numeric signals.\n\
9146Use \"info signals\" for a list of symbolic signals."));
9147}
9148
c906108c
SS
9149/* Print current contents of the tables set by the handle command.
9150 It is possible we should just be printing signals actually used
9151 by the current target (but for things to work right when switching
9152 targets, all signals should be in the signal tables). */
9153
9154static void
1d12d88f 9155info_signals_command (const char *signum_exp, int from_tty)
c906108c 9156{
2ea28649 9157 enum gdb_signal oursig;
abbb1732 9158
c906108c
SS
9159 sig_print_header ();
9160
9161 if (signum_exp)
9162 {
9163 /* First see if this is a symbol name. */
2ea28649 9164 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 9165 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
9166 {
9167 /* No, try numeric. */
9168 oursig =
2ea28649 9169 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
9170 }
9171 sig_print_info (oursig);
9172 return;
9173 }
9174
9175 printf_filtered ("\n");
9176 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
9177 for (oursig = GDB_SIGNAL_FIRST;
9178 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 9179 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
9180 {
9181 QUIT;
9182
a493e3e2
PA
9183 if (oursig != GDB_SIGNAL_UNKNOWN
9184 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
9185 sig_print_info (oursig);
9186 }
9187
3e43a32a
MS
9188 printf_filtered (_("\nUse the \"handle\" command "
9189 "to change these tables.\n"));
c906108c 9190}
4aa995e1
PA
9191
9192/* The $_siginfo convenience variable is a bit special. We don't know
9193 for sure the type of the value until we actually have a chance to
7a9dd1b2 9194 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
9195 also dependent on which thread you have selected. This is handled by:
9196
9197 1. making $_siginfo be an internalvar that creates a new value on
9198 access.
9199
9200 2. making the value of $_siginfo be an lval_computed value. */
9201
9202/* This function implements the lval_computed support for reading a
9203 $_siginfo value. */
9204
9205static void
9206siginfo_value_read (struct value *v)
9207{
9208 LONGEST transferred;
9209
a911d87a
PA
9210 /* If we can access registers, we can also access $_siginfo, and
9211 vice versa. */
9212 validate_registers_access ();
c709acd1 9213
4aa995e1 9214 transferred =
328d42d8
SM
9215 target_read (current_inferior ()->top_target (),
9216 TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
9217 NULL,
9218 value_contents_all_raw (v),
9219 value_offset (v),
9220 TYPE_LENGTH (value_type (v)));
9221
9222 if (transferred != TYPE_LENGTH (value_type (v)))
9223 error (_("Unable to read siginfo"));
9224}
9225
9226/* This function implements the lval_computed support for writing a
9227 $_siginfo value. */
9228
9229static void
9230siginfo_value_write (struct value *v, struct value *fromval)
9231{
9232 LONGEST transferred;
9233
a911d87a
PA
9234 /* If we can access registers, we can also access $_siginfo, and
9235 vice versa. */
9236 validate_registers_access ();
c709acd1 9237
328d42d8 9238 transferred = target_write (current_inferior ()->top_target (),
4aa995e1
PA
9239 TARGET_OBJECT_SIGNAL_INFO,
9240 NULL,
9241 value_contents_all_raw (fromval),
9242 value_offset (v),
9243 TYPE_LENGTH (value_type (fromval)));
9244
9245 if (transferred != TYPE_LENGTH (value_type (fromval)))
9246 error (_("Unable to write siginfo"));
9247}
9248
c8f2448a 9249static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
9250 {
9251 siginfo_value_read,
9252 siginfo_value_write
9253 };
9254
9255/* Return a new value with the correct type for the siginfo object of
78267919
UW
9256 the current thread using architecture GDBARCH. Return a void value
9257 if there's no object available. */
4aa995e1 9258
2c0b251b 9259static struct value *
22d2b532
SDJ
9260siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9261 void *ignore)
4aa995e1 9262{
841de120 9263 if (target_has_stack ()
d7e15655 9264 && inferior_ptid != null_ptid
78267919 9265 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 9266 {
78267919 9267 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 9268
78267919 9269 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
9270 }
9271
78267919 9272 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
9273}
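
/* Sketch of how the pieces above fit together (illustrative): the
   "_siginfo" convenience variable is registered near the bottom of this
   file with create_internalvar_type_lazy ("_siginfo", &siginfo_funcs,
   NULL); siginfo_make_value then builds an lval_computed value whose
   reads and writes go through siginfo_value_read / siginfo_value_write,
   i.e. through TARGET_OBJECT_SIGNAL_INFO.  On a target whose gdbarch
   defines a siginfo type, an expression such as

     (gdb) print $_siginfo.si_signo

   (assuming the target's siginfo type has an si_signo field, as on
   GNU/Linux) would therefore fetch the data from the target at access
   time.  */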
9274
c906108c 9275\f
16c381f0
JK
9276/* infcall_suspend_state contains state about the program itself like its
9277 registers and any signal it received when it last stopped.
9278 This state must be restored regardless of how the inferior function call
9279 ends (either successfully, or after it hits a breakpoint or signal)
9280 if the program is to properly continue where it left off. */
9281
6bf78e29 9282class infcall_suspend_state
7a292a7a 9283{
6bf78e29
AB
9284public:
9285 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9286 once the inferior function call has finished. */
9287 infcall_suspend_state (struct gdbarch *gdbarch,
dda83cd7
SM
9288 const struct thread_info *tp,
9289 struct regcache *regcache)
6bf78e29
AB
9290 : m_thread_suspend (tp->suspend),
9291 m_registers (new readonly_detached_regcache (*regcache))
9292 {
9293 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
9294
9295 if (gdbarch_get_siginfo_type_p (gdbarch))
9296 {
dda83cd7
SM
9297 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9298 size_t len = TYPE_LENGTH (type);
6bf78e29 9299
dda83cd7 9300 siginfo_data.reset ((gdb_byte *) xmalloc (len));
6bf78e29 9301
328d42d8
SM
9302 if (target_read (current_inferior ()->top_target (),
9303 TARGET_OBJECT_SIGNAL_INFO, NULL,
dda83cd7
SM
9304 siginfo_data.get (), 0, len) != len)
9305 {
9306 /* Errors ignored. */
9307 siginfo_data.reset (nullptr);
9308 }
6bf78e29
AB
9309 }
9310
9311 if (siginfo_data)
9312 {
dda83cd7
SM
9313 m_siginfo_gdbarch = gdbarch;
9314 m_siginfo_data = std::move (siginfo_data);
6bf78e29
AB
9315 }
9316 }
9317
9318 /* Return a pointer to the stored register state. */
16c381f0 9319
6bf78e29
AB
9320 readonly_detached_regcache *registers () const
9321 {
9322 return m_registers.get ();
9323 }
9324
9325 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9326
9327 void restore (struct gdbarch *gdbarch,
dda83cd7
SM
9328 struct thread_info *tp,
9329 struct regcache *regcache) const
6bf78e29
AB
9330 {
9331 tp->suspend = m_thread_suspend;
9332
9333 if (m_siginfo_gdbarch == gdbarch)
9334 {
dda83cd7 9335 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6bf78e29 9336
dda83cd7 9337 /* Errors ignored. */
328d42d8
SM
9338 target_write (current_inferior ()->top_target (),
9339 TARGET_OBJECT_SIGNAL_INFO, NULL,
dda83cd7 9340 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
6bf78e29
AB
9341 }
9342
9343 /* The inferior can be gone if the user types "print exit(0)"
9344 (and perhaps other times). */
55f6301a 9345 if (target_has_execution ())
6bf78e29
AB
9346 /* NB: The register write goes through to the target. */
9347 regcache->restore (registers ());
9348 }
9349
9350private:
9351 /* How the current thread stopped before the inferior function call was
9352 executed. */
9353 struct thread_suspend_state m_thread_suspend;
9354
9355 /* The registers before the inferior function call was executed. */
9356 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 9357
35515841 9358 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 9359 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
9360
9361 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
9362 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
9363 content would be invalid. */
6bf78e29 9364 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
9365};
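
/* Typical lifecycle of the class above (an illustrative sketch, not a
   description of the exact call sites): code that makes an inferior
   function call captures the state up front and later hands it back,
   roughly:

     infcall_suspend_state_up saved = save_infcall_suspend_state ();
     ... set up and run the inferior call ...
     restore_infcall_suspend_state (saved.release ());

   As the definitions below show, restore_infcall_suspend_state ends by
   discarding the object, while error paths may instead call
   discard_infcall_suspend_state directly.  */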
9366
cb524840
TT
9367infcall_suspend_state_up
9368save_infcall_suspend_state ()
b89667eb 9369{
b89667eb 9370 struct thread_info *tp = inferior_thread ();
1736ad11 9371 struct regcache *regcache = get_current_regcache ();
ac7936df 9372 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 9373
6bf78e29
AB
9374 infcall_suspend_state_up inf_state
9375 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 9376
6bf78e29
AB
9377 /* Having saved the current state, adjust the thread state, discarding
9378 any stop signal information. The stop signal is not useful when
9379 starting an inferior function call, and run_inferior_call will not use
9380 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 9381 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 9382
b89667eb
DE
9383 return inf_state;
9384}
9385
9386/* Restore inferior session state to INF_STATE. */
9387
9388void
16c381f0 9389restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
9390{
9391 struct thread_info *tp = inferior_thread ();
1736ad11 9392 struct regcache *regcache = get_current_regcache ();
ac7936df 9393 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 9394
6bf78e29 9395 inf_state->restore (gdbarch, tp, regcache);
16c381f0 9396 discard_infcall_suspend_state (inf_state);
b89667eb
DE
9397}
9398
b89667eb 9399void
16c381f0 9400discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 9401{
dd848631 9402 delete inf_state;
b89667eb
DE
9403}
9404
daf6667d 9405readonly_detached_regcache *
16c381f0 9406get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 9407{
6bf78e29 9408 return inf_state->registers ();
b89667eb
DE
9409}
9410
16c381f0
JK
9411/* infcall_control_state contains state regarding gdb's control of the
9412 inferior itself like stepping control. It also contains session state like
9413 the user's currently selected frame. */
b89667eb 9414
16c381f0 9415struct infcall_control_state
b89667eb 9416{
16c381f0
JK
9417 struct thread_control_state thread_control;
9418 struct inferior_control_state inferior_control;
d82142e2
JK
9419
9420 /* Other fields: */
ee841dd8
TT
9421 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
9422 int stopped_by_random_signal = 0;
7a292a7a 9423
79952e69
PA
9424 /* ID and level of the selected frame when the inferior function
9425 call was made. */
ee841dd8 9426 struct frame_id selected_frame_id {};
79952e69 9427 int selected_frame_level = -1;
7a292a7a
SS
9428};
9429
c906108c 9430/* Save all of the information associated with the inferior<==>gdb
b89667eb 9431 connection. */
c906108c 9432
cb524840
TT
9433infcall_control_state_up
9434save_infcall_control_state ()
c906108c 9435{
cb524840 9436 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 9437 struct thread_info *tp = inferior_thread ();
d6b48e9c 9438 struct inferior *inf = current_inferior ();
7a292a7a 9439
16c381f0
JK
9440 inf_status->thread_control = tp->control;
9441 inf_status->inferior_control = inf->control;
d82142e2 9442
8358c15c 9443 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9444 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9445
16c381f0
JK
9446 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9447 chain. If caller's caller is walking the chain, they'll be happier if we
9448 hand them back the original chain when restore_infcall_control_state is
9449 called. */
9450 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
9451
9452 /* Other fields: */
9453 inf_status->stop_stack_dummy = stop_stack_dummy;
9454 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9455
79952e69
PA
9456 save_selected_frame (&inf_status->selected_frame_id,
9457 &inf_status->selected_frame_level);
b89667eb 9458
7a292a7a 9459 return inf_status;
c906108c
SS
9460}
9461
b89667eb
DE
9462/* Restore inferior session state to INF_STATUS. */
9463
c906108c 9464void
16c381f0 9465restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9466{
4e1c45ea 9467 struct thread_info *tp = inferior_thread ();
d6b48e9c 9468 struct inferior *inf = current_inferior ();
4e1c45ea 9469
8358c15c
JK
9470 if (tp->control.step_resume_breakpoint)
9471 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9472
5b79abe7
TT
9473 if (tp->control.exception_resume_breakpoint)
9474 tp->control.exception_resume_breakpoint->disposition
9475 = disp_del_at_next_stop;
9476
d82142e2 9477 /* Handle the bpstat_copy of the chain. */
16c381f0 9478 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9479
16c381f0
JK
9480 tp->control = inf_status->thread_control;
9481 inf->control = inf_status->inferior_control;
d82142e2
JK
9482
9483 /* Other fields: */
9484 stop_stack_dummy = inf_status->stop_stack_dummy;
9485 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9486
841de120 9487 if (target_has_stack ())
c906108c 9488 {
79952e69
PA
9489 restore_selected_frame (inf_status->selected_frame_id,
9490 inf_status->selected_frame_level);
c906108c 9491 }
c906108c 9492
ee841dd8 9493 delete inf_status;
7a292a7a 9494}
c906108c
SS
9495
9496void
16c381f0 9497discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9498{
8358c15c
JK
9499 if (inf_status->thread_control.step_resume_breakpoint)
9500 inf_status->thread_control.step_resume_breakpoint->disposition
9501 = disp_del_at_next_stop;
9502
5b79abe7
TT
9503 if (inf_status->thread_control.exception_resume_breakpoint)
9504 inf_status->thread_control.exception_resume_breakpoint->disposition
9505 = disp_del_at_next_stop;
9506
1777feb0 9507 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9508 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9509
ee841dd8 9510 delete inf_status;
7a292a7a 9511}
b89667eb 9512\f
7f89fd65 9513/* See infrun.h. */
0c557179
SDJ
9514
9515void
9516clear_exit_convenience_vars (void)
9517{
9518 clear_internalvar (lookup_internalvar ("_exitsignal"));
9519 clear_internalvar (lookup_internalvar ("_exitcode"));
9520}
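
/* Note (per GDB's documented behavior): only one of $_exitcode and
   $_exitsignal is meaningful after an inferior terminates, $_exitcode
   when it exits normally and $_exitsignal when it is killed by a
   signal.  Clearing both, as the helper above does, keeps a stale value
   from a previous run from being misread.  */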
c5aa993b 9521\f
488f131b 9522
b2175913
MS
9523/* User interface for reverse debugging:
9524 Set exec-direction / show exec-direction commands
9525 (returns error unless target implements to_set_exec_direction method). */
9526
170742de 9527enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
9528static const char exec_forward[] = "forward";
9529static const char exec_reverse[] = "reverse";
9530static const char *exec_direction = exec_forward;
40478521 9531static const char *const exec_direction_names[] = {
b2175913
MS
9532 exec_forward,
9533 exec_reverse,
9534 NULL
9535};
9536
9537static void
eb4c3f4a 9538set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
9539 struct cmd_list_element *cmd)
9540{
05374cfd 9541 if (target_can_execute_reverse ())
b2175913
MS
9542 {
9543 if (!strcmp (exec_direction, exec_forward))
9544 execution_direction = EXEC_FORWARD;
9545 else if (!strcmp (exec_direction, exec_reverse))
9546 execution_direction = EXEC_REVERSE;
9547 }
8bbed405
MS
9548 else
9549 {
9550 exec_direction = exec_forward;
9551 error (_("Target does not support this operation."));
9552 }
b2175913
MS
9553}
9554
9555static void
9556show_exec_direction_func (struct ui_file *out, int from_tty,
9557 struct cmd_list_element *cmd, const char *value)
9558{
9559 switch (execution_direction) {
9560 case EXEC_FORWARD:
9561 fprintf_filtered (out, _("Forward.\n"));
9562 break;
9563 case EXEC_REVERSE:
9564 fprintf_filtered (out, _("Reverse.\n"));
9565 break;
b2175913 9566 default:
d8b34453
PA
9567 internal_error (__FILE__, __LINE__,
9568 _("bogus execution_direction value: %d"),
9569 (int) execution_direction);
b2175913
MS
9570 }
9571}
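
/* Illustrative use of the setting above: reverse execution is only
   accepted when the target can execute in reverse, for example under
   the process record target:

     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) step

   On targets without reverse support, set_exec_direction_func resets
   the setting to "forward" and reports "Target does not support this
   operation."  */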
9572
d4db2f36
PA
9573static void
9574show_schedule_multiple (struct ui_file *file, int from_tty,
9575 struct cmd_list_element *c, const char *value)
9576{
3e43a32a
MS
9577 fprintf_filtered (file, _("Resuming the execution of threads "
9578 "of all processes is %s.\n"), value);
d4db2f36 9579}
ad52ddc6 9580
22d2b532
SDJ
9581/* Implementation of `siginfo' variable. */
9582
9583static const struct internalvar_funcs siginfo_funcs =
9584{
9585 siginfo_make_value,
9586 NULL,
9587 NULL
9588};
9589
372316f1
PA
9590/* Callback for infrun's target events source. This is marked when a
9591 thread has a pending status to process. */
9592
9593static void
9594infrun_async_inferior_event_handler (gdb_client_data data)
9595{
6b36ddeb 9596 clear_async_event_handler (infrun_async_inferior_event_token);
b1a35af2 9597 inferior_event_handler (INF_REG_EVENT);
372316f1
PA
9598}
9599
8087c3fa 9600#if GDB_SELF_TEST
b161a60d
SM
9601namespace selftests
9602{
9603
9604/* Verify that when two threads with the same ptid exist (from two different
9605 targets) and one of them changes ptid, we only update inferior_ptid if
9606 it is appropriate. */
9607
9608static void
9609infrun_thread_ptid_changed ()
9610{
9611 gdbarch *arch = current_inferior ()->gdbarch;
9612
9613 /* The thread which inferior_ptid represents changes ptid. */
9614 {
9615 scoped_restore_current_pspace_and_thread restore;
9616
9617 scoped_mock_context<test_target_ops> target1 (arch);
9618 scoped_mock_context<test_target_ops> target2 (arch);
9619 target2.mock_inferior.next = &target1.mock_inferior;
9620
9621 ptid_t old_ptid (111, 222);
9622 ptid_t new_ptid (111, 333);
9623
9624 target1.mock_inferior.pid = old_ptid.pid ();
9625 target1.mock_thread.ptid = old_ptid;
9626 target2.mock_inferior.pid = old_ptid.pid ();
9627 target2.mock_thread.ptid = old_ptid;
9628
9629 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9630 set_current_inferior (&target1.mock_inferior);
9631
9632 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9633
9634 gdb_assert (inferior_ptid == new_ptid);
9635 }
9636
9637 /* A thread with the same ptid as inferior_ptid, but from another target,
9638 changes ptid. */
9639 {
9640 scoped_restore_current_pspace_and_thread restore;
9641
9642 scoped_mock_context<test_target_ops> target1 (arch);
9643 scoped_mock_context<test_target_ops> target2 (arch);
9644 target2.mock_inferior.next = &target1.mock_inferior;
9645
9646 ptid_t old_ptid (111, 222);
9647 ptid_t new_ptid (111, 333);
9648
9649 target1.mock_inferior.pid = old_ptid.pid ();
9650 target1.mock_thread.ptid = old_ptid;
9651 target2.mock_inferior.pid = old_ptid.pid ();
9652 target2.mock_thread.ptid = old_ptid;
9653
9654 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9655 set_current_inferior (&target2.mock_inferior);
9656
9657 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9658
9659 gdb_assert (inferior_ptid == old_ptid);
9660 }
9661}
9662
9663} /* namespace selftests */
9664
8087c3fa
JB
9665#endif /* GDB_SELF_TEST */
9666
6c265988 9667void _initialize_infrun ();
c906108c 9668void
6c265988 9669_initialize_infrun ()
c906108c 9670{
de0bea00 9671 struct cmd_list_element *c;
c906108c 9672
372316f1
PA
9673 /* Register extra event sources in the event loop. */
9674 infrun_async_inferior_event_token
db20ebdf
SM
9675 = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
9676 "infrun");
372316f1 9677
e0f25bd9
SM
9678 cmd_list_element *info_signals_cmd
9679 = add_info ("signals", info_signals_command, _("\
1bedd215
AC
9680What debugger does when program gets various signals.\n\
9681Specify a signal as argument to print info on that signal only."));
e0f25bd9 9682 add_info_alias ("handle", info_signals_cmd, 0);
c906108c 9683
de0bea00 9684 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9685Specify how to handle signals.\n\
486c7739 9686Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9687Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9688If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
9689will be displayed instead.\n\
9690\n\
c906108c
SS
9691Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9692from 1-15 are allowed for compatibility with old versions of GDB.\n\
9693Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9694The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9695used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9696\n\
1bedd215 9697Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
9698\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9699Stop means reenter debugger if this signal happens (implies print).\n\
9700Print means print a message if this signal happens.\n\
9701Pass means let program see this signal; otherwise program doesn't know.\n\
9702Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
9703Pass and Stop may be combined.\n\
9704\n\
9705Multiple signals may be specified. Signal numbers and signal names\n\
9706may be interspersed with actions, with the actions being performed for\n\
9707all signals cumulatively specified."));
de0bea00 9708 set_cmd_completer (c, handle_completer);
486c7739 9709
c906108c 9710 if (!dbx_commands)
1a966eab
AC
9711 stop_command = add_cmd ("stop", class_obscure,
9712 not_just_help_class_command, _("\
9713There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9714This allows you to set a list of commands to be run each time execution\n\
1a966eab 9715of the program stops."), &cmdlist);
c906108c 9716
94ba44a6
SM
9717 add_setshow_boolean_cmd
9718 ("infrun", class_maintenance, &debug_infrun,
9719 _("Set inferior debugging."),
9720 _("Show inferior debugging."),
9721 _("When non-zero, inferior specific debugging is enabled."),
9722 NULL, show_debug_infrun, &setdebuglist, &showdebuglist);
527159b7 9723
ad52ddc6
PA
9724 add_setshow_boolean_cmd ("non-stop", no_class,
9725 &non_stop_1, _("\
9726Set whether gdb controls the inferior in non-stop mode."), _("\
9727Show whether gdb controls the inferior in non-stop mode."), _("\
9728When debugging a multi-threaded program and this setting is\n\
9729off (the default, also called all-stop mode), when one thread stops\n\
9730(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9731all other threads in the program while you interact with the thread of\n\
9732interest. When you continue or step a thread, you can allow the other\n\
9733threads to run, or have them remain stopped, but while you inspect any\n\
9734thread's state, all threads stop.\n\
9735\n\
9736In non-stop mode, when one thread stops, other threads can continue\n\
9737to run freely. You'll be able to step each thread independently,\n\
9738leave it stopped or free to run as needed."),
9739 set_non_stop,
9740 show_non_stop,
9741 &setlist,
9742 &showlist);
9743
adc6a863 9744 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
9745 {
9746 signal_stop[i] = 1;
9747 signal_print[i] = 1;
9748 signal_program[i] = 1;
ab04a2af 9749 signal_catch[i] = 0;
c906108c
SS
9750 }
9751
4d9d9d04
PA
9752 /* Signals caused by debugger's own actions should not be given to
9753 the program afterwards.
9754
9755 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9756 explicitly specifies that it should be delivered to the target
9757 program. Typically, that would occur when a user is debugging a
9758 target monitor on a simulator: the target monitor sets a
9759 breakpoint; the simulator encounters this breakpoint and halts
9760 the simulation handing control to GDB; GDB, noting that the stop
9761 address doesn't map to any known breakpoint, returns control back
9762 to the simulator; the simulator then delivers the hardware
9763 equivalent of a GDB_SIGNAL_TRAP to the program being
9764 debugged. */
a493e3e2
PA
9765 signal_program[GDB_SIGNAL_TRAP] = 0;
9766 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9767
9768 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9769 signal_stop[GDB_SIGNAL_ALRM] = 0;
9770 signal_print[GDB_SIGNAL_ALRM] = 0;
9771 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9772 signal_print[GDB_SIGNAL_VTALRM] = 0;
9773 signal_stop[GDB_SIGNAL_PROF] = 0;
9774 signal_print[GDB_SIGNAL_PROF] = 0;
9775 signal_stop[GDB_SIGNAL_CHLD] = 0;
9776 signal_print[GDB_SIGNAL_CHLD] = 0;
9777 signal_stop[GDB_SIGNAL_IO] = 0;
9778 signal_print[GDB_SIGNAL_IO] = 0;
9779 signal_stop[GDB_SIGNAL_POLL] = 0;
9780 signal_print[GDB_SIGNAL_POLL] = 0;
9781 signal_stop[GDB_SIGNAL_URG] = 0;
9782 signal_print[GDB_SIGNAL_URG] = 0;
9783 signal_stop[GDB_SIGNAL_WINCH] = 0;
9784 signal_print[GDB_SIGNAL_WINCH] = 0;
9785 signal_stop[GDB_SIGNAL_PRIO] = 0;
9786 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9787
cd0fc7c3
SS
9788 /* These signals are used internally by user-level thread
9789 implementations. (See signal(5) on Solaris.) Like the above
9790 signals, a healthy program receives and handles them as part of
9791 its normal operation. */
a493e3e2
PA
9792 signal_stop[GDB_SIGNAL_LWP] = 0;
9793 signal_print[GDB_SIGNAL_LWP] = 0;
9794 signal_stop[GDB_SIGNAL_WAITING] = 0;
9795 signal_print[GDB_SIGNAL_WAITING] = 0;
9796 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9797 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9798 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9799 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9800
2455069d
UW
9801 /* Update cached state. */
9802 signal_cache_update (-1);
9803
85c07804
AC
9804 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9805 &stop_on_solib_events, _("\
9806Set stopping for shared library events."), _("\
9807Show stopping for shared library events."), _("\
c906108c
SS
9808If nonzero, gdb will give control to the user when the dynamic linker\n\
9809notifies gdb of shared library events. The most common event of interest\n\
85c07804 9810to the user would be loading/unloading of a new library."),
f9e14852 9811 set_stop_on_solib_events,
920d2a44 9812 show_stop_on_solib_events,
85c07804 9813 &setlist, &showlist);
c906108c 9814
7ab04401
AC
9815 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9816 follow_fork_mode_kind_names,
9817 &follow_fork_mode_string, _("\
9818Set debugger response to a program call of fork or vfork."), _("\
9819Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9820A fork or vfork creates a new process. follow-fork-mode can be:\n\
9821 parent - the original process is debugged after a fork\n\
9822 child - the new process is debugged after a fork\n\
ea1dd7bc 9823The unfollowed process will continue to run.\n\
7ab04401
AC
9824By default, the debugger will follow the parent process."),
9825 NULL,
920d2a44 9826 show_follow_fork_mode_string,
7ab04401
AC
9827 &setlist, &showlist);
9828
6c95b8df
PA
9829 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9830 follow_exec_mode_names,
9831 &follow_exec_mode_string, _("\
9832Set debugger response to a program call of exec."), _("\
9833Show debugger response to a program call of exec."), _("\
9834An exec call replaces the program image of a process.\n\
9835\n\
9836follow-exec-mode can be:\n\
9837\n\
cce7e648 9838 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9839to this new inferior. The program the process was running before\n\
9840the exec call can be restarted afterwards by restarting the original\n\
9841inferior.\n\
9842\n\
9843 same - the debugger keeps the process bound to the same inferior.\n\
9844The new executable image replaces the previous executable loaded in\n\
9845the inferior. Restarting the inferior after the exec call restarts\n\
9846the executable the process was running after the exec call.\n\
9847\n\
9848By default, the debugger will use the same inferior."),
9849 NULL,
9850 show_follow_exec_mode_string,
9851 &setlist, &showlist);
9852
7ab04401
AC
9853 add_setshow_enum_cmd ("scheduler-locking", class_run,
9854 scheduler_enums, &scheduler_mode, _("\
9855Set mode for locking scheduler during execution."), _("\
9856Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9857off == no locking (threads may preempt at any time)\n\
9858on == full locking (no thread except the current thread may run)\n\
dda83cd7 9859 This applies to both normal execution and replay mode.\n\
f2665db5 9860step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
dda83cd7
SM
9861 In this mode, other threads may run during other commands.\n\
9862 This applies to both normal execution and replay mode.\n\
f2665db5 9863replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9864 set_schedlock_func, /* traps on target vector */
920d2a44 9865 show_scheduler_mode,
7ab04401 9866 &setlist, &showlist);
5fbbeb29 9867
d4db2f36
PA
9868 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9869Set mode for resuming threads of all processes."), _("\
9870Show mode for resuming threads of all processes."), _("\
9871When on, execution commands (such as 'continue' or 'next') resume all\n\
9872threads of all processes. When off (which is the default), execution\n\
9873commands only resume the threads of the current process. The set of\n\
9874threads that are resumed is further refined by the scheduler-locking\n\
9875mode (see help set scheduler-locking)."),
9876 NULL,
9877 show_schedule_multiple,
9878 &setlist, &showlist);
9879
5bf193a2
AC
9880 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9881Set mode of the step operation."), _("\
9882Show mode of the step operation."), _("\
9883When set, doing a step over a function without debug line information\n\
9884will stop at the first instruction of that function. Otherwise, the\n\
9885function is skipped and the step command stops at a different source line."),
9886 NULL,
920d2a44 9887 show_step_stop_if_no_debug,
5bf193a2 9888 &setlist, &showlist);
ca6724c1 9889
72d0e2c5
YQ
9890 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9891 &can_use_displaced_stepping, _("\
237fc4c9
PA
9892Set debugger's willingness to use displaced stepping."), _("\
9893Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9894If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9895supported by the target architecture. If off, gdb will not use displaced\n\
9896stepping to step over breakpoints, even if such is supported by the target\n\
9897architecture. If auto (which is the default), gdb will use displaced stepping\n\
9898if the target architecture supports it and non-stop mode is active, but will not\n\
9899use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9900 NULL,
9901 show_can_use_displaced_stepping,
9902 &setlist, &showlist);
237fc4c9 9903
b2175913
MS
9904 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9905 &exec_direction, _("Set direction of execution.\n\
9906Options are 'forward' or 'reverse'."),
9907 _("Show direction of execution (forward/reverse)."),
9908 _("Tells gdb whether to execute forward or backward."),
9909 set_exec_direction_func, show_exec_direction_func,
9910 &setlist, &showlist);
9911
6c95b8df
PA
9912 /* Set/show detach-on-fork: user-settable mode. */
9913
9914 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9915Set whether gdb will detach the child of a fork."), _("\
9916Show whether gdb will detach the child of a fork."), _("\
9917Tells gdb whether to detach the child of a fork."),
9918 NULL, NULL, &setlist, &showlist);
9919
03583c20
UW
9920 /* Set/show disable address space randomization mode. */
9921
9922 add_setshow_boolean_cmd ("disable-randomization", class_support,
9923 &disable_randomization, _("\
9924Set disabling of debuggee's virtual address space randomization."), _("\
9925Show disabling of debuggee's virtual address space randomization."), _("\
9926When this mode is on (which is the default), randomization of the virtual\n\
9927address space is disabled. Standalone programs run with the randomization\n\
9928enabled by default on some platforms."),
9929 &set_disable_randomization,
9930 &show_disable_randomization,
9931 &setlist, &showlist);
9932
ca6724c1 9933 /* ptid initializations */
ca6724c1
KB
9934 inferior_ptid = null_ptid;
9935 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9936
c90e7d63
SM
9937 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
9938 "infrun");
9939 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
9940 "infrun");
9941 gdb::observers::thread_exit.attach (infrun_thread_thread_exit, "infrun");
9942 gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
9943 gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");
4aa995e1
PA
9944
9945 /* Explicitly create without lookup, since that tries to create a
9946 void-typed value, and when we get here, gdbarch
9947 isn't initialized yet. At this point, we're quite sure there
9948 isn't another convenience variable of the same name. */
22d2b532 9949 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9950
9951 add_setshow_boolean_cmd ("observer", no_class,
9952 &observer_mode_1, _("\
9953Set whether gdb controls the inferior in observer mode."), _("\
9954Show whether gdb controls the inferior in observer mode."), _("\
9955In observer mode, GDB can get data from the inferior, but not\n\
9956affect its execution. Registers and memory may not be changed,\n\
9957breakpoints may not be set, and the program cannot be interrupted\n\
9958or signalled."),
9959 set_observer_mode,
9960 show_observer_mode,
9961 &setlist,
9962 &showlist);
b161a60d
SM
9963
9964#if GDB_SELF_TEST
9965 selftests::register_test ("infrun_thread_ptid_changed",
9966 selftests::infrun_thread_ptid_changed);
9967#endif
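  /* In builds with GDB_SELF_TEST enabled, the check registered above can
     be run by hand with the "maintenance selftest" command, e.g.
     "maintenance selftest infrun_thread_ptid_changed" (illustrative; see
     the maintenance commands for the exact syntax).  */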
c906108c 9968}