gdb: stop trying to prepare displaced steps for an inferior when it returns _UNAVAILABLE
[deliverable/binutils-gdb.git] / gdb / infrun.c
1/* Target-struct-independent code to start (run) and stop an inferior
2 process.
8926118c 3
b811d2c2 4 Copyright (C) 1986-2020 Free Software Foundation, Inc.
c906108c 5
c5aa993b 6 This file is part of GDB.
c906108c 7
c5aa993b
JM
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
c5aa993b 11 (at your option) any later version.
c906108c 12
c5aa993b
JM
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
c906108c 17
c5aa993b 18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
c906108c
SS
20
21#include "defs.h"
9844051a 22#include "displaced-stepping.h"
edbcda09
SM
23#include "gdbsupport/common-defs.h"
24#include "gdbsupport/common-utils.h"
45741a9c 25#include "infrun.h"
c906108c
SS
26#include <ctype.h>
27#include "symtab.h"
28#include "frame.h"
29#include "inferior.h"
30#include "breakpoint.h"
c906108c
SS
31#include "gdbcore.h"
32#include "gdbcmd.h"
33#include "target.h"
2f4fcf00 34#include "target-connection.h"
c906108c
SS
35#include "gdbthread.h"
36#include "annotate.h"
1adeb98a 37#include "symfile.h"
7a292a7a 38#include "top.h"
2acceee2 39#include "inf-loop.h"
4e052eda 40#include "regcache.h"
9844051a 41#include "utils.h"
fd0407d6 42#include "value.h"
76727919 43#include "observable.h"
f636b87d 44#include "language.h"
a77053c2 45#include "solib.h"
f17517ea 46#include "main.h"
186c406b 47#include "block.h"
034dad6f 48#include "mi/mi-common.h"
4f8d22e3 49#include "event-top.h"
96429cc8 50#include "record.h"
d02ed0bb 51#include "record-full.h"
edb3359d 52#include "inline-frame.h"
4efc6507 53#include "jit.h"
06cd862c 54#include "tracepoint.h"
1bfeeb0f 55#include "skip.h"
28106bc2
SDJ
56#include "probe.h"
57#include "objfiles.h"
de0bea00 58#include "completer.h"
9107fc8d 59#include "target-descriptions.h"
f15cb84a 60#include "target-dcache.h"
d83ad864 61#include "terminal.h"
ff862be4 62#include "solist.h"
400b5eca 63#include "gdbsupport/event-loop.h"
243a9253 64#include "thread-fsm.h"
268a13a5 65#include "gdbsupport/enum-flags.h"
5ed8105e 66#include "progspace-and-thread.h"
268a13a5 67#include "gdbsupport/gdb_optional.h"
46a62268 68#include "arch-utils.h"
268a13a5
TT
69#include "gdbsupport/scope-exit.h"
70#include "gdbsupport/forward-scope-exit.h"
06cc9596 71#include "gdbsupport/gdb_select.h"
5b6d1e4f 72#include <unordered_map>
93b54c8e 73#include "async-event.h"
c906108c
SS
74
75/* Prototypes for local functions */
76
2ea28649 77static void sig_print_info (enum gdb_signal);
c906108c 78
96baa820 79static void sig_print_header (void);
c906108c 80
d83ad864
DB
81static void follow_inferior_reset_breakpoints (void);
82
a289b8f6
JK
83static int currently_stepping (struct thread_info *tp);
84
2c03e5be 85static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
2484c66b
UW
86
87static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
88
2484c66b
UW
89static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
90
8550d3b3
YQ
91static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
92
aff4e175
AB
93static void resume (gdb_signal sig);
94
5b6d1e4f
PA
95static void wait_for_inferior (inferior *inf);
96
372316f1
PA
97/* Asynchronous signal handler registered as event loop source for
98 when we have pending events ready to be passed to the core. */
99static struct async_event_handler *infrun_async_inferior_event_token;
100
101/* Stores whether infrun_async was previously enabled or disabled.
102 Starts off as -1, indicating "never enabled/disabled". */
103static int infrun_is_async = -1;
104
edbcda09
SM
105#define infrun_log_debug(fmt, args...) \
106 infrun_log_debug_1 (__LINE__, __func__, fmt, ##args)
107
108static void ATTRIBUTE_PRINTF(3, 4)
109infrun_log_debug_1 (int line, const char *func,
110 const char *fmt, ...)
111{
112 if (debug_infrun)
113 {
114 va_list args;
115 va_start (args, fmt);
116 std::string msg = string_vprintf (fmt, args);
117 va_end (args);
118
119 fprintf_unfiltered (gdb_stdout, "infrun: %s: %s\n", func, msg.c_str ());
120 }
121}
122
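/* Example use of the macro above (illustrative only; TP is assumed to
   be a 'thread_info *' in scope at the call site):

     infrun_log_debug ("resuming thread %s, step=%d",
		       target_pid_to_str (tp->ptid).c_str (), step);

   The macro supplies __LINE__ and __func__ automatically, so call
   sites only pass the printf-style format and its arguments.  */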
372316f1
PA
123/* See infrun.h. */
124
125void
126infrun_async (int enable)
127{
128 if (infrun_is_async != enable)
129 {
130 infrun_is_async = enable;
131
edbcda09 132 infrun_log_debug ("enable=%d", enable);
372316f1
PA
133
134 if (enable)
135 mark_async_event_handler (infrun_async_inferior_event_token);
136 else
137 clear_async_event_handler (infrun_async_inferior_event_token);
138 }
139}
140
0b333c5e
PA
141/* See infrun.h. */
142
143void
144mark_infrun_async_event_handler (void)
145{
146 mark_async_event_handler (infrun_async_inferior_event_token);
147}
148
5fbbeb29
CF
149/* When set, stop the 'step' command if we enter a function which has
150 no line number information. The normal behavior is that we step
151 over such functions. */
491144b5 152bool step_stop_if_no_debug = false;
920d2a44
AC
153static void
154show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
155 struct cmd_list_element *c, const char *value)
156{
157 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
158}
5fbbeb29 159
b9f437de
PA
160/* proceed and normal_stop use this to notify the user when the
161 inferior stopped in a different thread than it had been running
162 in. */
96baa820 163
39f77062 164static ptid_t previous_inferior_ptid;
7a292a7a 165
07107ca6
LM
166/* If set (default for legacy reasons), when following a fork, GDB
167 will detach from one of the fork branches, child or parent.
168 Exactly which branch is detached depends on the 'set follow-fork-mode'
169 setting. */
170
491144b5 171static bool detach_fork = true;
6c95b8df 172
491144b5 173bool debug_displaced = false;
237fc4c9
PA
174static void
175show_debug_displaced (struct ui_file *file, int from_tty,
176 struct cmd_list_element *c, const char *value)
177{
178 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
179}
180
ccce17b0 181unsigned int debug_infrun = 0;
920d2a44
AC
182static void
183show_debug_infrun (struct ui_file *file, int from_tty,
184 struct cmd_list_element *c, const char *value)
185{
186 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
187}
527159b7 188
03583c20
UW
189
190/* Support for disabling address space randomization. */
191
491144b5 192bool disable_randomization = true;
03583c20
UW
193
194static void
195show_disable_randomization (struct ui_file *file, int from_tty,
196 struct cmd_list_element *c, const char *value)
197{
198 if (target_supports_disable_randomization ())
199 fprintf_filtered (file,
200 _("Disabling randomization of debuggee's "
201 "virtual address space is %s.\n"),
202 value);
203 else
204 fputs_filtered (_("Disabling randomization of debuggee's "
205 "virtual address space is unsupported on\n"
206 "this platform.\n"), file);
207}
208
209static void
eb4c3f4a 210set_disable_randomization (const char *args, int from_tty,
03583c20
UW
211 struct cmd_list_element *c)
212{
213 if (!target_supports_disable_randomization ())
214 error (_("Disabling randomization of debuggee's "
215 "virtual address space is unsupported on\n"
216 "this platform."));
217}
218
d32dc48e
PA
219/* User interface for non-stop mode. */
220
491144b5
CB
221bool non_stop = false;
222static bool non_stop_1 = false;
d32dc48e
PA
223
224static void
eb4c3f4a 225set_non_stop (const char *args, int from_tty,
d32dc48e
PA
226 struct cmd_list_element *c)
227{
228 if (target_has_execution)
229 {
230 non_stop_1 = non_stop;
231 error (_("Cannot change this setting while the inferior is running."));
232 }
233
234 non_stop = non_stop_1;
235}
236
237static void
238show_non_stop (struct ui_file *file, int from_tty,
239 struct cmd_list_element *c, const char *value)
240{
241 fprintf_filtered (file,
242 _("Controlling the inferior in non-stop mode is %s.\n"),
243 value);
244}
245
d914c394
SS
246/* "Observer mode" is somewhat like a more extreme version of
247 non-stop, in which all GDB operations that might affect the
248 target's execution have been disabled. */
249
491144b5
CB
250bool observer_mode = false;
251static bool observer_mode_1 = false;
d914c394
SS
252
253static void
eb4c3f4a 254set_observer_mode (const char *args, int from_tty,
d914c394
SS
255 struct cmd_list_element *c)
256{
d914c394
SS
257 if (target_has_execution)
258 {
259 observer_mode_1 = observer_mode;
260 error (_("Cannot change this setting while the inferior is running."));
261 }
262
263 observer_mode = observer_mode_1;
264
265 may_write_registers = !observer_mode;
266 may_write_memory = !observer_mode;
267 may_insert_breakpoints = !observer_mode;
268 may_insert_tracepoints = !observer_mode;
269 /* We can insert fast tracepoints in or out of observer mode,
270 but enable them if we're going into this mode. */
271 if (observer_mode)
491144b5 272 may_insert_fast_tracepoints = true;
d914c394
SS
273 may_stop = !observer_mode;
274 update_target_permissions ();
275
276 /* Going *into* observer mode we must force non-stop, then
277 going out we leave it that way. */
278 if (observer_mode)
279 {
d914c394 280 pagination_enabled = 0;
491144b5 281 non_stop = non_stop_1 = true;
d914c394
SS
282 }
283
284 if (from_tty)
285 printf_filtered (_("Observer mode is now %s.\n"),
286 (observer_mode ? "on" : "off"));
287}
288
289static void
290show_observer_mode (struct ui_file *file, int from_tty,
291 struct cmd_list_element *c, const char *value)
292{
293 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
294}
295
296/* This updates the value of observer mode based on changes in
297 permissions. Note that we are deliberately ignoring the values of
298 may-write-registers and may-write-memory, since the user may have
299 reason to enable these during a session, for instance to turn on a
300 debugging-related global. */
301
302void
303update_observer_mode (void)
304{
491144b5
CB
305 bool newval = (!may_insert_breakpoints
306 && !may_insert_tracepoints
307 && may_insert_fast_tracepoints
308 && !may_stop
309 && non_stop);
d914c394
SS
310
311 /* Let the user know if things change. */
312 if (newval != observer_mode)
313 printf_filtered (_("Observer mode is now %s.\n"),
314 (newval ? "on" : "off"));
315
316 observer_mode = observer_mode_1 = newval;
317}
c2c6d25f 318
c906108c
SS
319/* Tables of how to react to signals; the user sets them. */
320
adc6a863
PA
321static unsigned char signal_stop[GDB_SIGNAL_LAST];
322static unsigned char signal_print[GDB_SIGNAL_LAST];
323static unsigned char signal_program[GDB_SIGNAL_LAST];
c906108c 324
ab04a2af
TT
325/* Table of signals that are registered with "catch signal". A
326 non-zero entry indicates that the signal is caught by some "catch
adc6a863
PA
327 signal" command. */
328static unsigned char signal_catch[GDB_SIGNAL_LAST];
ab04a2af 329
2455069d
UW
330/* Table of signals that the target may silently handle.
331 This is automatically determined from the flags above,
332 and simply cached here. */
adc6a863 333static unsigned char signal_pass[GDB_SIGNAL_LAST];
2455069d 334
c906108c
SS
335#define SET_SIGS(nsigs,sigs,flags) \
336 do { \
337 int signum = (nsigs); \
338 while (signum-- > 0) \
339 if ((sigs)[signum]) \
340 (flags)[signum] = 1; \
341 } while (0)
342
343#define UNSET_SIGS(nsigs,sigs,flags) \
344 do { \
345 int signum = (nsigs); \
346 while (signum-- > 0) \
347 if ((sigs)[signum]) \
348 (flags)[signum] = 0; \
349 } while (0)
350
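/* Illustrative use of the helpers above (SIGS here is assumed to be a
   caller-built array of GDB_SIGNAL_LAST flags selecting which signals
   to affect):

     SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_stop);
     UNSET_SIGS (GDB_SIGNAL_LAST, sigs, signal_print);  */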
9b224c5e
PA
351/* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
352 this function is to avoid exporting `signal_program'. */
353
354void
355update_signals_program_target (void)
356{
adc6a863 357 target_program_signals (signal_program);
9b224c5e
PA
358}
359
1777feb0 360/* Value to pass to target_resume() to cause all threads to resume. */
39f77062 361
edb3359d 362#define RESUME_ALL minus_one_ptid
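/* For illustration, a call such as
   "target_resume (RESUME_ALL, 0, GDB_SIGNAL_0);" would resume every
   thread, without stepping and without delivering a signal.  */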
c906108c
SS
363
364/* Command list pointer for the "stop" placeholder. */
365
366static struct cmd_list_element *stop_command;
367
c906108c
SS
368/* Nonzero if we want to give control to the user when we're notified
369 of shared library events by the dynamic linker. */
628fe4e4 370int stop_on_solib_events;
f9e14852
GB
371
372/* Enable or disable optional shared library event breakpoints
373 as appropriate when the above flag is changed. */
374
375static void
eb4c3f4a
TT
376set_stop_on_solib_events (const char *args,
377 int from_tty, struct cmd_list_element *c)
f9e14852
GB
378{
379 update_solib_breakpoints ();
380}
381
920d2a44
AC
382static void
383show_stop_on_solib_events (struct ui_file *file, int from_tty,
384 struct cmd_list_element *c, const char *value)
385{
386 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
387 value);
388}
c906108c 389
c906108c
SS
390/* Nonzero after stop if current stack frame should be printed. */
391
392static int stop_print_frame;
393
5b6d1e4f
PA
394/* This is a cached copy of the target/ptid/waitstatus of the last
395 event returned by target_wait()/deprecated_target_wait_hook().
396 This information is returned by get_last_target_status(). */
397static process_stratum_target *target_last_proc_target;
39f77062 398static ptid_t target_last_wait_ptid;
e02bc4cc
DS
399static struct target_waitstatus target_last_waitstatus;
400
4e1c45ea 401void init_thread_stepping_state (struct thread_info *tss);
0d1e5fa7 402
53904c9e
AC
403static const char follow_fork_mode_child[] = "child";
404static const char follow_fork_mode_parent[] = "parent";
405
40478521 406static const char *const follow_fork_mode_kind_names[] = {
53904c9e
AC
407 follow_fork_mode_child,
408 follow_fork_mode_parent,
409 NULL
ef346e04 410};
c906108c 411
53904c9e 412static const char *follow_fork_mode_string = follow_fork_mode_parent;
920d2a44
AC
413static void
414show_follow_fork_mode_string (struct ui_file *file, int from_tty,
415 struct cmd_list_element *c, const char *value)
416{
3e43a32a
MS
417 fprintf_filtered (file,
418 _("Debugger response to a program "
419 "call of fork or vfork is \"%s\".\n"),
920d2a44
AC
420 value);
421}
c906108c
SS
422\f
423
d83ad864
DB
424/* Handle changes to the inferior list based on the type of fork,
425 which process is being followed, and whether the other process
426 should be detached. On entry inferior_ptid must be the ptid of
427 the fork parent. At return inferior_ptid is the ptid of the
428 followed inferior. */
429
5ab2fbf1
SM
430static bool
431follow_fork_inferior (bool follow_child, bool detach_fork)
d83ad864
DB
432{
433 int has_vforked;
79639e11 434 ptid_t parent_ptid, child_ptid;
d83ad864
DB
435
436 has_vforked = (inferior_thread ()->pending_follow.kind
437 == TARGET_WAITKIND_VFORKED);
79639e11
PA
438 parent_ptid = inferior_ptid;
439 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
d83ad864
DB
440
441 if (has_vforked
442 && !non_stop /* Non-stop always resumes both branches. */
3b12939d 443 && current_ui->prompt_state == PROMPT_BLOCKED
d83ad864
DB
444 && !(follow_child || detach_fork || sched_multi))
445 {
446 /* The parent stays blocked inside the vfork syscall until the
447 child execs or exits. If we don't let the child run, then
448 the parent stays blocked. If we're telling the parent to run
449 in the foreground, the user will not be able to ctrl-c to get
450 back the terminal, effectively hanging the debug session. */
451 fprintf_filtered (gdb_stderr, _("\
452Can not resume the parent process over vfork in the foreground while\n\
453holding the child stopped. Try \"set detach-on-fork\" or \
454\"set schedule-multiple\".\n"));
d83ad864
DB
455 return true;
456 }
457
458 if (!follow_child)
459 {
460 /* Detach new forked process? */
461 if (detach_fork)
462 {
d83ad864
DB
463 /* Before detaching from the child, remove all breakpoints
464 from it. If we forked, then this has already been taken
465 care of by infrun.c. If we vforked however, any
466 breakpoint inserted in the parent is visible in the
467 child, even those added while stopped in a vfork
468 catchpoint. This will remove the breakpoints from the
469 parent also, but they'll be reinserted below. */
470 if (has_vforked)
471 {
472 /* Keep breakpoints list in sync. */
00431a78 473 remove_breakpoints_inf (current_inferior ());
d83ad864
DB
474 }
475
f67c0c91 476 if (print_inferior_events)
d83ad864 477 {
8dd06f7a 478 /* Ensure that we have a process ptid. */
e99b03dc 479 ptid_t process_ptid = ptid_t (child_ptid.pid ());
8dd06f7a 480
223ffa71 481 target_terminal::ours_for_output ();
d83ad864 482 fprintf_filtered (gdb_stdlog,
f67c0c91 483 _("[Detaching after %s from child %s]\n"),
6f259a23 484 has_vforked ? "vfork" : "fork",
a068643d 485 target_pid_to_str (process_ptid).c_str ());
d83ad864
DB
486 }
487 }
488 else
489 {
490 struct inferior *parent_inf, *child_inf;
d83ad864
DB
491
492 /* Add process to GDB's tables. */
e99b03dc 493 child_inf = add_inferior (child_ptid.pid ());
d83ad864
DB
494
495 parent_inf = current_inferior ();
496 child_inf->attach_flag = parent_inf->attach_flag;
497 copy_terminal_info (child_inf, parent_inf);
498 child_inf->gdbarch = parent_inf->gdbarch;
499 copy_inferior_target_desc_info (child_inf, parent_inf);
500
5ed8105e 501 scoped_restore_current_pspace_and_thread restore_pspace_thread;
d83ad864 502
2a00d7ce 503 set_current_inferior (child_inf);
5b6d1e4f 504 switch_to_no_thread ();
d83ad864 505 child_inf->symfile_flags = SYMFILE_NO_READ;
5b6d1e4f 506 push_target (parent_inf->process_target ());
18493a00
PA
507 thread_info *child_thr
508 = add_thread_silent (child_inf->process_target (), child_ptid);
d83ad864
DB
509
510 /* If this is a vfork child, then the address-space is
511 shared with the parent. */
512 if (has_vforked)
513 {
514 child_inf->pspace = parent_inf->pspace;
515 child_inf->aspace = parent_inf->aspace;
516
5b6d1e4f
PA
517 exec_on_vfork ();
518
d83ad864
DB
519 /* The parent will be frozen until the child is done
520 with the shared region. Keep track of the
521 parent. */
522 child_inf->vfork_parent = parent_inf;
523 child_inf->pending_detach = 0;
524 parent_inf->vfork_child = child_inf;
525 parent_inf->pending_detach = 0;
18493a00
PA
526
527 /* Now that the inferiors and program spaces are all
528 wired up, we can switch to the child thread (which
529 switches inferior and program space too). */
530 switch_to_thread (child_thr);
d83ad864
DB
531 }
532 else
533 {
534 child_inf->aspace = new_address_space ();
564b1e3f 535 child_inf->pspace = new program_space (child_inf->aspace);
d83ad864
DB
536 child_inf->removable = 1;
537 set_current_program_space (child_inf->pspace);
538 clone_program_space (child_inf->pspace, parent_inf->pspace);
539
18493a00
PA
540 /* solib_create_inferior_hook relies on the current
541 thread. */
542 switch_to_thread (child_thr);
543
d83ad864
DB
544 /* Let the shared library layer (e.g., solib-svr4) learn
545 about this new process, relocate the cloned exec, pull
546 in shared libraries, and install the solib event
547 breakpoint. If a "cloned-VM" event was propagated
548 better throughout the core, this wouldn't be
549 required. */
550 solib_create_inferior_hook (0);
551 }
d83ad864
DB
552 }
553
554 if (has_vforked)
555 {
556 struct inferior *parent_inf;
557
558 parent_inf = current_inferior ();
559
560 /* If we detached from the child, then we have to be careful
561 to not insert breakpoints in the parent until the child
562 is done with the shared memory region. However, if we're
563 staying attached to the child, then we can and should
564 insert breakpoints, so that we can debug it. A
565 subsequent child exec or exit is enough to know when the
566 child stops using the parent's address space. */
567 parent_inf->waiting_for_vfork_done = detach_fork;
568 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
569 }
570 }
571 else
572 {
573 /* Follow the child. */
574 struct inferior *parent_inf, *child_inf;
575 struct program_space *parent_pspace;
576
f67c0c91 577 if (print_inferior_events)
d83ad864 578 {
f67c0c91
SDJ
579 std::string parent_pid = target_pid_to_str (parent_ptid);
580 std::string child_pid = target_pid_to_str (child_ptid);
581
223ffa71 582 target_terminal::ours_for_output ();
6f259a23 583 fprintf_filtered (gdb_stdlog,
f67c0c91
SDJ
584 _("[Attaching after %s %s to child %s]\n"),
585 parent_pid.c_str (),
6f259a23 586 has_vforked ? "vfork" : "fork",
f67c0c91 587 child_pid.c_str ());
d83ad864
DB
588 }
589
590 /* Add the new inferior first, so that the target_detach below
591 doesn't unpush the target. */
592
e99b03dc 593 child_inf = add_inferior (child_ptid.pid ());
d83ad864
DB
594
595 parent_inf = current_inferior ();
596 child_inf->attach_flag = parent_inf->attach_flag;
597 copy_terminal_info (child_inf, parent_inf);
598 child_inf->gdbarch = parent_inf->gdbarch;
599 copy_inferior_target_desc_info (child_inf, parent_inf);
600
601 parent_pspace = parent_inf->pspace;
602
5b6d1e4f 603 process_stratum_target *target = parent_inf->process_target ();
d83ad864 604
5b6d1e4f
PA
605 {
606 /* Hold a strong reference to the target while (maybe)
607 detaching the parent. Otherwise detaching could close the
608 target. */
609 auto target_ref = target_ops_ref::new_reference (target);
610
611 /* If we're vforking, we want to hold on to the parent until
612 the child exits or execs. At child exec or exit time we
613 can remove the old breakpoints from the parent and detach
614 or resume debugging it. Otherwise, detach the parent now;
615 we'll want to reuse its program/address spaces, but we
616 can't set them to the child before removing breakpoints
617 from the parent, otherwise, the breakpoints module could
618 decide to remove breakpoints from the wrong process (since
619 they'd be assigned to the same address space). */
620
621 if (has_vforked)
622 {
623 gdb_assert (child_inf->vfork_parent == NULL);
624 gdb_assert (parent_inf->vfork_child == NULL);
625 child_inf->vfork_parent = parent_inf;
626 child_inf->pending_detach = 0;
627 parent_inf->vfork_child = child_inf;
628 parent_inf->pending_detach = detach_fork;
629 parent_inf->waiting_for_vfork_done = 0;
630 }
631 else if (detach_fork)
632 {
633 if (print_inferior_events)
634 {
635 /* Ensure that we have a process ptid. */
636 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
637
638 target_terminal::ours_for_output ();
639 fprintf_filtered (gdb_stdlog,
640 _("[Detaching after fork from "
641 "parent %s]\n"),
642 target_pid_to_str (process_ptid).c_str ());
643 }
8dd06f7a 644
5b6d1e4f
PA
645 target_detach (parent_inf, 0);
646 parent_inf = NULL;
647 }
6f259a23 648
5b6d1e4f 649 /* Note that the detach above makes PARENT_INF dangling. */
d83ad864 650
5b6d1e4f
PA
651 /* Add the child thread to the appropriate lists, and switch
652 to this new thread, before cloning the program space, and
653 informing the solib layer about this new process. */
d83ad864 654
5b6d1e4f
PA
655 set_current_inferior (child_inf);
656 push_target (target);
657 }
d83ad864 658
18493a00 659 thread_info *child_thr = add_thread_silent (target, child_ptid);
d83ad864
DB
660
661 /* If this is a vfork child, then the address-space is shared
662 with the parent. If we detached from the parent, then we can
663 reuse the parent's program/address spaces. */
664 if (has_vforked || detach_fork)
665 {
666 child_inf->pspace = parent_pspace;
667 child_inf->aspace = child_inf->pspace->aspace;
5b6d1e4f
PA
668
669 exec_on_vfork ();
d83ad864
DB
670 }
671 else
672 {
673 child_inf->aspace = new_address_space ();
564b1e3f 674 child_inf->pspace = new program_space (child_inf->aspace);
d83ad864
DB
675 child_inf->removable = 1;
676 child_inf->symfile_flags = SYMFILE_NO_READ;
677 set_current_program_space (child_inf->pspace);
678 clone_program_space (child_inf->pspace, parent_pspace);
679
680 /* Let the shared library layer (e.g., solib-svr4) learn
681 about this new process, relocate the cloned exec, pull in
682 shared libraries, and install the solib event breakpoint.
683 If a "cloned-VM" event was propagated better throughout
684 the core, this wouldn't be required. */
685 solib_create_inferior_hook (0);
686 }
18493a00
PA
687
688 switch_to_thread (child_thr);
d83ad864
DB
689 }
690
691 return target_follow_fork (follow_child, detach_fork);
692}
693
e58b0e63
PA
694/* Tell the target to follow the fork we're stopped at. Returns true
695 if the inferior should be resumed; false, if the target for some
696 reason decided it's best not to resume. */
697
5ab2fbf1
SM
698static bool
699follow_fork ()
c906108c 700{
5ab2fbf1
SM
701 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
702 bool should_resume = true;
e58b0e63
PA
703 struct thread_info *tp;
704
705 /* Copy user stepping state to the new inferior thread. FIXME: the
706 followed fork child thread should have a copy of most of the
4e3990f4
DE
707 parent thread structure's run control related fields, not just these.
708 Initialized to avoid "may be used uninitialized" warnings from gcc. */
709 struct breakpoint *step_resume_breakpoint = NULL;
186c406b 710 struct breakpoint *exception_resume_breakpoint = NULL;
4e3990f4
DE
711 CORE_ADDR step_range_start = 0;
712 CORE_ADDR step_range_end = 0;
bf4cb9be
TV
713 int current_line = 0;
714 symtab *current_symtab = NULL;
4e3990f4 715 struct frame_id step_frame_id = { 0 };
8980e177 716 struct thread_fsm *thread_fsm = NULL;
e58b0e63
PA
717
718 if (!non_stop)
719 {
5b6d1e4f 720 process_stratum_target *wait_target;
e58b0e63
PA
721 ptid_t wait_ptid;
722 struct target_waitstatus wait_status;
723
724 /* Get the last target status returned by target_wait(). */
5b6d1e4f 725 get_last_target_status (&wait_target, &wait_ptid, &wait_status);
e58b0e63
PA
726
727 /* If not stopped at a fork event, then there's nothing else to
728 do. */
729 if (wait_status.kind != TARGET_WAITKIND_FORKED
730 && wait_status.kind != TARGET_WAITKIND_VFORKED)
731 return true;
732
733 /* Check if we switched over from WAIT_PTID, since the event was
734 reported. */
00431a78 735 if (wait_ptid != minus_one_ptid
5b6d1e4f
PA
736 && (current_inferior ()->process_target () != wait_target
737 || inferior_ptid != wait_ptid))
e58b0e63
PA
738 {
739 /* We did. Switch back to WAIT_PTID thread, to tell the
740 target to follow it (in either direction). We'll
741 afterwards refuse to resume, and inform the user what
742 happened. */
5b6d1e4f 743 thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
00431a78 744 switch_to_thread (wait_thread);
5ab2fbf1 745 should_resume = false;
e58b0e63
PA
746 }
747 }
748
749 tp = inferior_thread ();
750
751 /* If there were any forks/vforks that were caught and are now to be
752 followed, then do so now. */
753 switch (tp->pending_follow.kind)
754 {
755 case TARGET_WAITKIND_FORKED:
756 case TARGET_WAITKIND_VFORKED:
757 {
758 ptid_t parent, child;
759
760 /* If the user did a next/step, etc, over a fork call,
761 preserve the stepping state in the fork child. */
762 if (follow_child && should_resume)
763 {
8358c15c
JK
764 step_resume_breakpoint = clone_momentary_breakpoint
765 (tp->control.step_resume_breakpoint);
16c381f0
JK
766 step_range_start = tp->control.step_range_start;
767 step_range_end = tp->control.step_range_end;
bf4cb9be
TV
768 current_line = tp->current_line;
769 current_symtab = tp->current_symtab;
16c381f0 770 step_frame_id = tp->control.step_frame_id;
186c406b
TT
771 exception_resume_breakpoint
772 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
8980e177 773 thread_fsm = tp->thread_fsm;
e58b0e63
PA
774
775 /* For now, delete the parent's sr breakpoint, otherwise,
776 parent/child sr breakpoints are considered duplicates,
777 and the child version will not be installed. Remove
778 this when the breakpoints module becomes aware of
779 inferiors and address spaces. */
780 delete_step_resume_breakpoint (tp);
16c381f0
JK
781 tp->control.step_range_start = 0;
782 tp->control.step_range_end = 0;
783 tp->control.step_frame_id = null_frame_id;
186c406b 784 delete_exception_resume_breakpoint (tp);
8980e177 785 tp->thread_fsm = NULL;
e58b0e63
PA
786 }
787
788 parent = inferior_ptid;
789 child = tp->pending_follow.value.related_pid;
790
5b6d1e4f 791 process_stratum_target *parent_targ = tp->inf->process_target ();
d83ad864
DB
792 /* Set up inferior(s) as specified by the caller, and tell the
793 target to do whatever is necessary to follow either parent
794 or child. */
795 if (follow_fork_inferior (follow_child, detach_fork))
e58b0e63
PA
796 {
797 /* Target refused to follow, or there's some other reason
798 we shouldn't resume. */
799 should_resume = false;
800 }
801 else
802 {
803 /* This pending follow fork event is now handled, one way
804 or another. The previous selected thread may be gone
805 from the lists by now, but if it is still around, need
806 to clear the pending follow request. */
5b6d1e4f 807 tp = find_thread_ptid (parent_targ, parent);
e58b0e63
PA
808 if (tp)
809 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
810
811 /* This makes sure we don't try to apply the "Switched
812 over from WAIT_PID" logic above. */
813 nullify_last_target_wait_ptid ();
814
1777feb0 815 /* If we followed the child, switch to it... */
e58b0e63
PA
816 if (follow_child)
817 {
5b6d1e4f 818 thread_info *child_thr = find_thread_ptid (parent_targ, child);
00431a78 819 switch_to_thread (child_thr);
e58b0e63
PA
820
821 /* ... and preserve the stepping state, in case the
822 user was stepping over the fork call. */
823 if (should_resume)
824 {
825 tp = inferior_thread ();
8358c15c
JK
826 tp->control.step_resume_breakpoint
827 = step_resume_breakpoint;
16c381f0
JK
828 tp->control.step_range_start = step_range_start;
829 tp->control.step_range_end = step_range_end;
bf4cb9be
TV
830 tp->current_line = current_line;
831 tp->current_symtab = current_symtab;
16c381f0 832 tp->control.step_frame_id = step_frame_id;
186c406b
TT
833 tp->control.exception_resume_breakpoint
834 = exception_resume_breakpoint;
8980e177 835 tp->thread_fsm = thread_fsm;
e58b0e63
PA
836 }
837 else
838 {
839 /* If we get here, it was because we're trying to
840 resume from a fork catchpoint, but, the user
841 has switched threads away from the thread that
842 forked. In that case, the resume command
843 issued is most likely not applicable to the
844 child, so just warn, and refuse to resume. */
3e43a32a 845 warning (_("Not resuming: switched threads "
fd7dcb94 846 "before following fork child."));
e58b0e63
PA
847 }
848
849 /* Reset breakpoints in the child as appropriate. */
850 follow_inferior_reset_breakpoints ();
851 }
e58b0e63
PA
852 }
853 }
854 break;
855 case TARGET_WAITKIND_SPURIOUS:
856 /* Nothing to follow. */
857 break;
858 default:
859 internal_error (__FILE__, __LINE__,
860 "Unexpected pending_follow.kind %d\n",
861 tp->pending_follow.kind);
862 break;
863 }
c906108c 864
e58b0e63 865 return should_resume;
c906108c
SS
866}
867
d83ad864 868static void
6604731b 869follow_inferior_reset_breakpoints (void)
c906108c 870{
4e1c45ea
PA
871 struct thread_info *tp = inferior_thread ();
872
6604731b
DJ
873 /* Was there a step_resume breakpoint? (There was if the user
874 did a "next" at the fork() call.) If so, explicitly reset its
a1aa2221
LM
875 thread number. Cloned step_resume breakpoints are disabled on
876 creation, so enable it here now that it is associated with the
877 correct thread.
6604731b
DJ
878
879 step_resumes are a form of bp that are made to be per-thread.
880 Since we created the step_resume bp when the parent process
881 was being debugged, and now are switching to the child process,
882 from the breakpoint package's viewpoint, that's a switch of
883 "threads". We must update the bp's notion of which thread
884 it is for, or it'll be ignored when it triggers. */
885
8358c15c 886 if (tp->control.step_resume_breakpoint)
a1aa2221
LM
887 {
888 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
889 tp->control.step_resume_breakpoint->loc->enabled = 1;
890 }
6604731b 891
a1aa2221 892 /* Treat exception_resume breakpoints like step_resume breakpoints. */
186c406b 893 if (tp->control.exception_resume_breakpoint)
a1aa2221
LM
894 {
895 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
896 tp->control.exception_resume_breakpoint->loc->enabled = 1;
897 }
186c406b 898
6604731b
DJ
899 /* Reinsert all breakpoints in the child. The user may have set
900 breakpoints after catching the fork, in which case those
901 were never set in the child, but only in the parent. This makes
902 sure the inserted breakpoints match the breakpoint list. */
903
904 breakpoint_re_set ();
905 insert_breakpoints ();
c906108c 906}
c906108c 907
6c95b8df
PA
908/* The child has exited or execed: resume threads of the parent the
909 user wanted to be executing. */
910
911static int
912proceed_after_vfork_done (struct thread_info *thread,
913 void *arg)
914{
915 int pid = * (int *) arg;
916
00431a78
PA
917 if (thread->ptid.pid () == pid
918 && thread->state == THREAD_RUNNING
919 && !thread->executing
6c95b8df 920 && !thread->stop_requested
a493e3e2 921 && thread->suspend.stop_signal == GDB_SIGNAL_0)
6c95b8df 922 {
edbcda09
SM
923 infrun_log_debug ("resuming vfork parent thread %s",
924 target_pid_to_str (thread->ptid).c_str ());
6c95b8df 925
00431a78 926 switch_to_thread (thread);
70509625 927 clear_proceed_status (0);
64ce06e4 928 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
6c95b8df
PA
929 }
930
931 return 0;
932}
933
934/* Called whenever we notice an exec or exit event, to handle
935 detaching or resuming a vfork parent. */
936
937static void
938handle_vfork_child_exec_or_exit (int exec)
939{
940 struct inferior *inf = current_inferior ();
941
942 if (inf->vfork_parent)
943 {
944 int resume_parent = -1;
945
946 /* This exec or exit marks the end of the shared memory region
b73715df
TV
947 between the parent and the child. Break the bonds. */
948 inferior *vfork_parent = inf->vfork_parent;
949 inf->vfork_parent->vfork_child = NULL;
950 inf->vfork_parent = NULL;
6c95b8df 951
b73715df
TV
952 /* If the user wanted to detach from the parent, now is the
953 time. */
954 if (vfork_parent->pending_detach)
6c95b8df 955 {
6c95b8df
PA
956 struct program_space *pspace;
957 struct address_space *aspace;
958
1777feb0 959 /* follow-fork child, detach-on-fork on. */
6c95b8df 960
b73715df 961 vfork_parent->pending_detach = 0;
68c9da30 962
18493a00 963 scoped_restore_current_pspace_and_thread restore_thread;
6c95b8df
PA
964
965 /* We're letting loose of the parent. */
18493a00 966 thread_info *tp = any_live_thread_of_inferior (vfork_parent);
00431a78 967 switch_to_thread (tp);
6c95b8df
PA
968
969 /* We're about to detach from the parent, which implicitly
970 removes breakpoints from its address space. There's a
971 catch here: we want to reuse the spaces for the child,
972 but, parent/child are still sharing the pspace at this
973 point, although the exec in reality makes the kernel give
974 the child a fresh set of new pages. The problem here is
975 that the breakpoints module being unaware of this, would
976 likely choose the child process to write to the parent
977 address space. Swapping the child temporarily away from
978 the spaces has the desired effect. Yes, this is "sort
979 of" a hack. */
980
981 pspace = inf->pspace;
982 aspace = inf->aspace;
983 inf->aspace = NULL;
984 inf->pspace = NULL;
985
f67c0c91 986 if (print_inferior_events)
6c95b8df 987 {
a068643d 988 std::string pidstr
b73715df 989 = target_pid_to_str (ptid_t (vfork_parent->pid));
f67c0c91 990
223ffa71 991 target_terminal::ours_for_output ();
6c95b8df
PA
992
993 if (exec)
6f259a23
DB
994 {
995 fprintf_filtered (gdb_stdlog,
f67c0c91 996 _("[Detaching vfork parent %s "
a068643d 997 "after child exec]\n"), pidstr.c_str ());
6f259a23 998 }
6c95b8df 999 else
6f259a23
DB
1000 {
1001 fprintf_filtered (gdb_stdlog,
f67c0c91 1002 _("[Detaching vfork parent %s "
a068643d 1003 "after child exit]\n"), pidstr.c_str ());
6f259a23 1004 }
6c95b8df
PA
1005 }
1006
b73715df 1007 target_detach (vfork_parent, 0);
6c95b8df
PA
1008
1009 /* Put it back. */
1010 inf->pspace = pspace;
1011 inf->aspace = aspace;
6c95b8df
PA
1012 }
1013 else if (exec)
1014 {
1015 /* We're staying attached to the parent, so, really give the
1016 child a new address space. */
564b1e3f 1017 inf->pspace = new program_space (maybe_new_address_space ());
6c95b8df
PA
1018 inf->aspace = inf->pspace->aspace;
1019 inf->removable = 1;
1020 set_current_program_space (inf->pspace);
1021
b73715df 1022 resume_parent = vfork_parent->pid;
6c95b8df
PA
1023 }
1024 else
1025 {
6c95b8df
PA
1026 /* If this is a vfork child exiting, then the pspace and
1027 aspaces were shared with the parent. Since we're
1028 reporting the process exit, we'll be mourning all that is
1029 found in the address space, and switching to null_ptid,
1030 preparing to start a new inferior. But, since we don't
1031 want to clobber the parent's address/program spaces, we
1032 go ahead and create a new one for this exiting
1033 inferior. */
1034
18493a00 1035 /* Switch to no-thread while running clone_program_space, so
5ed8105e
PA
1036 that clone_program_space doesn't want to read the
1037 selected frame of a dead process. */
18493a00
PA
1038 scoped_restore_current_thread restore_thread;
1039 switch_to_no_thread ();
6c95b8df 1040
53af73bf
PA
1041 inf->pspace = new program_space (maybe_new_address_space ());
1042 inf->aspace = inf->pspace->aspace;
1043 set_current_program_space (inf->pspace);
6c95b8df 1044 inf->removable = 1;
7dcd53a0 1045 inf->symfile_flags = SYMFILE_NO_READ;
53af73bf 1046 clone_program_space (inf->pspace, vfork_parent->pspace);
6c95b8df 1047
b73715df 1048 resume_parent = vfork_parent->pid;
6c95b8df
PA
1049 }
1050
6c95b8df
PA
1051 gdb_assert (current_program_space == inf->pspace);
1052
1053 if (non_stop && resume_parent != -1)
1054 {
1055 /* If the user wanted the parent to be running, let it go
1056 free now. */
5ed8105e 1057 scoped_restore_current_thread restore_thread;
6c95b8df 1058
edbcda09
SM
1059 infrun_log_debug ("resuming vfork parent process %d",
1060 resume_parent);
6c95b8df
PA
1061
1062 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
6c95b8df
PA
1063 }
1064 }
1065}
1066
eb6c553b 1067/* Enum strings for "set|show follow-exec-mode". */
6c95b8df
PA
1068
1069static const char follow_exec_mode_new[] = "new";
1070static const char follow_exec_mode_same[] = "same";
40478521 1071static const char *const follow_exec_mode_names[] =
6c95b8df
PA
1072{
1073 follow_exec_mode_new,
1074 follow_exec_mode_same,
1075 NULL,
1076};
1077
1078static const char *follow_exec_mode_string = follow_exec_mode_same;
1079static void
1080show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1081 struct cmd_list_element *c, const char *value)
1082{
1083 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1084}
1085
ecf45d2c 1086/* EXEC_FILE_TARGET is assumed to be non-NULL. */
1adeb98a 1087
c906108c 1088static void
4ca51187 1089follow_exec (ptid_t ptid, const char *exec_file_target)
c906108c 1090{
6c95b8df 1091 struct inferior *inf = current_inferior ();
e99b03dc 1092 int pid = ptid.pid ();
94585166 1093 ptid_t process_ptid;
7a292a7a 1094
65d2b333
PW
1095 /* Switch terminal for any messages produced e.g. by
1096 breakpoint_re_set. */
1097 target_terminal::ours_for_output ();
1098
c906108c
SS
1099 /* This is an exec event that we actually wish to pay attention to.
1100 Refresh our symbol table to the newly exec'd program, remove any
1101 momentary bp's, etc.
1102
1103 If there are breakpoints, they aren't really inserted now,
1104 since the exec() transformed our inferior into a fresh set
1105 of instructions.
1106
1107 We want to preserve symbolic breakpoints on the list, since
1108 we have hopes that they can be reset after the new a.out's
1109 symbol table is read.
1110
1111 However, any "raw" breakpoints must be removed from the list
1112 (e.g., the solib bp's), since their address is probably invalid
1113 now.
1114
1115 And, we DON'T want to call delete_breakpoints() here, since
1116 that may write the bp's "shadow contents" (the instruction
85102364 1117 value that was overwritten with a TRAP instruction). Since
1777feb0 1118 we now have a new a.out, those shadow contents aren't valid. */
6c95b8df
PA
1119
1120 mark_breakpoints_out ();
1121
95e50b27
PA
1122 /* The target reports the exec event to the main thread, even if
1123 some other thread does the exec, and even if the main thread was
1124 stopped or already gone. We may still have non-leader threads of
1125 the process on our list. E.g., on targets that don't have thread
1126 exit events (like remote); or on native Linux in non-stop mode if
1127 there were only two threads in the inferior and the non-leader
1128 one is the one that execs (and nothing forces an update of the
1129 thread list up to here). When debugging remotely, it's best to
1130 avoid extra traffic, when possible, so avoid syncing the thread
1131 list with the target, and instead go ahead and delete all threads
1132 of the process but the one that reported the event. Note this must
1133 be done before calling update_breakpoints_after_exec, as
1134 otherwise clearing the threads' resources would reference stale
1135 thread breakpoints -- it may have been one of these threads that
1136 stepped across the exec. We could just clear their stepping
1137 states, but as long as we're iterating, might as well delete
1138 them. Deleting them now rather than at the next user-visible
1139 stop provides a nicer sequence of events for user and MI
1140 notifications. */
08036331 1141 for (thread_info *th : all_threads_safe ())
d7e15655 1142 if (th->ptid.pid () == pid && th->ptid != ptid)
00431a78 1143 delete_thread (th);
95e50b27
PA
1144
1145 /* We also need to clear any left over stale state for the
1146 leader/event thread. E.g., if there was any step-resume
1147 breakpoint or similar, it's gone now. We cannot truly
1148 step-to-next statement through an exec(). */
08036331 1149 thread_info *th = inferior_thread ();
8358c15c 1150 th->control.step_resume_breakpoint = NULL;
186c406b 1151 th->control.exception_resume_breakpoint = NULL;
34b7e8a6 1152 th->control.single_step_breakpoints = NULL;
16c381f0
JK
1153 th->control.step_range_start = 0;
1154 th->control.step_range_end = 0;
c906108c 1155
95e50b27
PA
1156 /* The user may have had the main thread held stopped in the
1157 previous image (e.g., schedlock on, or non-stop). Release
1158 it now. */
a75724bc
PA
1159 th->stop_requested = 0;
1160
95e50b27
PA
1161 update_breakpoints_after_exec ();
1162
1777feb0 1163 /* What is this a.out's name? */
f2907e49 1164 process_ptid = ptid_t (pid);
6c95b8df 1165 printf_unfiltered (_("%s is executing new program: %s\n"),
a068643d 1166 target_pid_to_str (process_ptid).c_str (),
ecf45d2c 1167 exec_file_target);
c906108c
SS
1168
1169 /* We've followed the inferior through an exec. Therefore, the
1777feb0 1170 inferior has essentially been killed & reborn. */
7a292a7a 1171
6ca15a4b 1172 breakpoint_init_inferior (inf_execd);
e85a822c 1173
797bc1cb
TT
1174 gdb::unique_xmalloc_ptr<char> exec_file_host
1175 = exec_file_find (exec_file_target, NULL);
ff862be4 1176
ecf45d2c
SL
1177 /* If we were unable to map the executable target pathname onto a host
1178 pathname, tell the user that. Otherwise GDB's subsequent behavior
1179 is confusing. Maybe it would even be better to stop at this point
1180 so that the user can specify a file manually before continuing. */
1181 if (exec_file_host == NULL)
1182 warning (_("Could not load symbols for executable %s.\n"
1183 "Do you need \"set sysroot\"?"),
1184 exec_file_target);
c906108c 1185
cce9b6bf
PA
1186 /* Reset the shared library package. This ensures that we get a
1187 shlib event when the child reaches "_start", at which point the
1188 dld will have had a chance to initialize the child. */
1189 /* Also, loading a symbol file below may trigger symbol lookups, and
1190 we don't want those to be satisfied by the libraries of the
1191 previous incarnation of this process. */
1192 no_shared_libraries (NULL, 0);
1193
6c95b8df
PA
1194 if (follow_exec_mode_string == follow_exec_mode_new)
1195 {
6c95b8df
PA
1196 /* The user wants to keep the old inferior and program spaces
1197 around. Create a new fresh one, and switch to it. */
1198
35ed81d4
SM
1199 /* Do exit processing for the original inferior before setting the new
1200 inferior's pid. Having two inferiors with the same pid would confuse
1201 find_inferior_p(t)id. Transfer the terminal state and info from the
1202 old to the new inferior. */
1203 inf = add_inferior_with_spaces ();
1204 swap_terminal_info (inf, current_inferior ());
057302ce 1205 exit_inferior_silent (current_inferior ());
17d8546e 1206
94585166 1207 inf->pid = pid;
ecf45d2c 1208 target_follow_exec (inf, exec_file_target);
6c95b8df 1209
5b6d1e4f
PA
1210 inferior *org_inferior = current_inferior ();
1211 switch_to_inferior_no_thread (inf);
1212 push_target (org_inferior->process_target ());
1213 thread_info *thr = add_thread (inf->process_target (), ptid);
1214 switch_to_thread (thr);
6c95b8df 1215 }
9107fc8d
PA
1216 else
1217 {
1218 /* The old description may no longer be fit for the new image.
1219 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1220 old description; we'll read a new one below. No need to do
1221 this on "follow-exec-mode new", as the old inferior stays
1222 around (its description is later cleared/refetched on
1223 restart). */
1224 target_clear_description ();
1225 }
6c95b8df
PA
1226
1227 gdb_assert (current_program_space == inf->pspace);
1228
ecf45d2c
SL
1229 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1230 because the proper displacement for a PIE (Position Independent
1231 Executable) main symbol file will only be computed by
1232 solib_create_inferior_hook below. breakpoint_re_set would fail
1233 to insert the breakpoints with the zero displacement. */
797bc1cb 1234 try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);
c906108c 1235
9107fc8d
PA
1236 /* If the target can specify a description, read it. Must do this
1237 after flipping to the new executable (because the target supplied
1238 description must be compatible with the executable's
1239 architecture, and the old executable may e.g., be 32-bit, while
1240 the new one 64-bit), and before anything involving memory or
1241 registers. */
1242 target_find_description ();
1243
268a4a75 1244 solib_create_inferior_hook (0);
c906108c 1245
4efc6507
DE
1246 jit_inferior_created_hook ();
1247
c1e56572
JK
1248 breakpoint_re_set ();
1249
c906108c
SS
1250 /* Reinsert all breakpoints. (Those which were symbolic have
1251 been reset to the proper address in the new a.out, thanks
1777feb0 1252 to symbol_file_command...). */
c906108c
SS
1253 insert_breakpoints ();
1254
1255 /* The next resume of this inferior should bring it to the shlib
1256 startup breakpoints. (If the user had also set bp's on
1257 "main" from the old (parent) process, then they'll auto-
1777feb0 1258 matically get reset there in the new process.). */
c906108c
SS
1259}
1260
c2829269
PA
1261/* The queue of threads that need to do a step-over operation to get
1262 past e.g., a breakpoint. What technique is used to step over the
1263 breakpoint/watchpoint does not matter -- all threads end up in the
1264 same queue, to maintain rough temporal order of execution, in order
1265 to avoid starvation, otherwise, we could e.g., find ourselves
1266 constantly stepping the same couple of threads past their breakpoints
1267 over and over, if the single-step finishes fast enough. */
7bd43605 1268struct thread_info *global_thread_step_over_chain_head;
c2829269 1269
6c4cfb24
PA
1270/* Bit flags indicating what the thread needs to step over. */
1271
8d297bbf 1272enum step_over_what_flag
6c4cfb24
PA
1273 {
1274 /* Step over a breakpoint. */
1275 STEP_OVER_BREAKPOINT = 1,
1276
1277 /* Step past a non-continuable watchpoint, in order to let the
1278 instruction execute so we can evaluate the watchpoint
1279 expression. */
1280 STEP_OVER_WATCHPOINT = 2
1281 };
8d297bbf 1282DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
6c4cfb24 1283
963f9c80 1284/* Info about an instruction that is being stepped over. */
31e77af2
PA
1285
1286struct step_over_info
1287{
963f9c80
PA
1288 /* If we're stepping past a breakpoint, this is the address space
1289 and address of the instruction the breakpoint is set at. We'll
1290 skip inserting all breakpoints here. Valid iff ASPACE is
1291 non-NULL. */
8b86c959 1292 const address_space *aspace;
31e77af2 1293 CORE_ADDR address;
963f9c80
PA
1294
1295 /* The instruction being stepped over triggers a nonsteppable
1296 watchpoint. If true, we'll skip inserting watchpoints. */
1297 int nonsteppable_watchpoint_p;
21edc42f
YQ
1298
1299 /* The thread's global number. */
1300 int thread;
31e77af2
PA
1301};
1302
1303/* The step-over info of the location that is being stepped over.
1304
1305 Note that with async/breakpoint always-inserted mode, a user might
1306 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1307 being stepped over. As setting a new breakpoint inserts all
1308 breakpoints, we need to make sure the breakpoint being stepped over
1309 isn't inserted then. We do that by only clearing the step-over
1310 info when the step-over is actually finished (or aborted).
1311
1312 Presently GDB can only step over one breakpoint at any given time.
1313 Given that threads which can't run code in the same address space as
1314 the breakpoint can't really miss the breakpoint, GDB could be taught
1315 to step-over at most one breakpoint per address space (so this info
1316 could move to the address space object if/when GDB is extended).
1317 The set of breakpoints being stepped over will normally be much
1318 smaller than the set of all breakpoints, so a flag in the
1319 breakpoint location structure would be wasteful. A separate list
1320 also saves complexity and run-time, as otherwise we'd have to go
1321 through all breakpoint locations clearing their flag whenever we
1322 start a new sequence. Similar considerations weigh against storing
1323 this info in the thread object. Plus, not all step overs actually
1324 have breakpoint locations -- e.g., stepping past a single-step
1325 breakpoint, or stepping to complete a non-continuable
1326 watchpoint. */
1327static struct step_over_info step_over_info;
1328
1329/* Record the address of the breakpoint/instruction we're currently
ce0db137
DE
1330 stepping over.
1331 N.B. We record the aspace and address now, instead of say just the thread,
1332 because when we need the info later the thread may be running. */
31e77af2
PA
1333
1334static void
8b86c959 1335set_step_over_info (const address_space *aspace, CORE_ADDR address,
21edc42f
YQ
1336 int nonsteppable_watchpoint_p,
1337 int thread)
31e77af2
PA
1338{
1339 step_over_info.aspace = aspace;
1340 step_over_info.address = address;
963f9c80 1341 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
21edc42f 1342 step_over_info.thread = thread;
31e77af2
PA
1343}
1344
1345/* Called when we're no longer stepping over a breakpoint / an
1346 instruction, so all breakpoints are free to be (re)inserted. */
1347
1348static void
1349clear_step_over_info (void)
1350{
edbcda09 1351 infrun_log_debug ("clearing step over info");
31e77af2
PA
1352 step_over_info.aspace = NULL;
1353 step_over_info.address = 0;
963f9c80 1354 step_over_info.nonsteppable_watchpoint_p = 0;
21edc42f 1355 step_over_info.thread = -1;
31e77af2
PA
1356}
1357
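/* Typical protocol, for illustration: the step-over machinery records
   the location just before resuming a thread TP past its breakpoint,
   e.g.

     set_step_over_info (aspace, pc, 0, tp->global_num);

   and calls clear_step_over_info () once the step-over finishes or is
   aborted, so that breakpoints are free to be (re)inserted again.  */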
7f89fd65 1358/* See infrun.h. */
31e77af2
PA
1359
1360int
1361stepping_past_instruction_at (struct address_space *aspace,
1362 CORE_ADDR address)
1363{
1364 return (step_over_info.aspace != NULL
1365 && breakpoint_address_match (aspace, address,
1366 step_over_info.aspace,
1367 step_over_info.address));
1368}
1369
963f9c80
PA
1370/* See infrun.h. */
1371
21edc42f
YQ
1372int
1373thread_is_stepping_over_breakpoint (int thread)
1374{
1375 return (step_over_info.thread != -1
1376 && thread == step_over_info.thread);
1377}
1378
1379/* See infrun.h. */
1380
963f9c80
PA
1381int
1382stepping_past_nonsteppable_watchpoint (void)
1383{
1384 return step_over_info.nonsteppable_watchpoint_p;
1385}
1386
6cc83d2a
PA
1387/* Returns true if step-over info is valid. */
1388
1389static int
1390step_over_info_valid_p (void)
1391{
963f9c80
PA
1392 return (step_over_info.aspace != NULL
1393 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1394}
1395
c906108c 1396\f
237fc4c9
PA
1397/* Displaced stepping. */
1398
1399/* In non-stop debugging mode, we must take special care to manage
1400 breakpoints properly; in particular, the traditional strategy for
1401 stepping a thread past a breakpoint it has hit is unsuitable.
1402 'Displaced stepping' is a tactic for stepping one thread past a
1403 breakpoint it has hit while ensuring that other threads running
1404 concurrently will hit the breakpoint as they should.
1405
1406 The traditional way to step a thread T off a breakpoint in a
1407 multi-threaded program in all-stop mode is as follows:
1408
1409 a0) Initially, all threads are stopped, and breakpoints are not
1410 inserted.
1411 a1) We single-step T, leaving breakpoints uninserted.
1412 a2) We insert breakpoints, and resume all threads.
1413
1414 In non-stop debugging, however, this strategy is unsuitable: we
1415 don't want to have to stop all threads in the system in order to
1416 continue or step T past a breakpoint. Instead, we use displaced
1417 stepping:
1418
1419 n0) Initially, T is stopped, other threads are running, and
1420 breakpoints are inserted.
1421 n1) We copy the instruction "under" the breakpoint to a separate
1422 location, outside the main code stream, making any adjustments
1423 to the instruction, register, and memory state as directed by
1424 T's architecture.
1425 n2) We single-step T over the instruction at its new location.
1426 n3) We adjust the resulting register and memory state as directed
1427 by T's architecture. This includes resetting T's PC to point
1428 back into the main instruction stream.
1429 n4) We resume T.
1430
1431 This approach depends on the following gdbarch methods:
1432
1433 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1434 indicate where to copy the instruction, and how much space must
1435 be reserved there. We use these in step n1.
1436
1437 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1438 address, and makes any necessary adjustments to the instruction,
1439 register contents, and memory. We use this in step n1.
1440
1441 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1442 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1443 same effect the instruction would have had if we had executed it
1444 at its original address. We use this in step n3.
1445
237fc4c9
PA
1446 The gdbarch_displaced_step_copy_insn and
1447 gdbarch_displaced_step_fixup functions must be written so that
1448 copying an instruction with gdbarch_displaced_step_copy_insn,
1449 single-stepping across the copied instruction, and then applying
1450 gdbarch_displaced_step_fixup should have the same effects on the
1451 thread's memory and registers as stepping the instruction in place
1452 would have. Exactly which responsibilities fall to the copy and
1453 which fall to the fixup is up to the author of those functions.
1454
1455 See the comments in gdbarch.sh for details.
1456
1457 Note that displaced stepping and software single-step cannot
1458 currently be used in combination, although with some care I think
1459 they could be made to. Software single-step works by placing
1460 breakpoints on all possible subsequent instructions; if the
1461 displaced instruction is a PC-relative jump, those breakpoints
1462 could fall in very strange places --- on pages that aren't
1463 executable, or at addresses that are not proper instruction
1464 boundaries. (We do generally let other threads run while we wait
1465 to hit the software single-step breakpoint, and they might
1466 encounter such a corrupted instruction.) One way to work around
1467 this would be to have gdbarch_displaced_step_copy_insn fully
1468 simulate the effect of PC-relative instructions (and return NULL)
1469 on architectures that use software single-stepping.
1470
1471 In non-stop mode, we can have independent and simultaneous step
1472 requests, so more than one thread may need to simultaneously step
1473 over a breakpoint. The current implementation assumes there is
1474 only one scratch space per process. In this case, we have to
1475 serialize access to the scratch space. If thread A wants to step
1476 over a breakpoint, but we are currently waiting for some other
1477 thread to complete a displaced step, we leave thread A stopped and
1478 place it in the global step-over chain. Whenever a displaced
1479 step finishes, we pick the next thread in the chain and start a new
1480 displaced step operation on it. See displaced_step_prepare and
1481 displaced_step_finish for details. */
1482
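/* Purely as an illustration of the n0-n4 cycle above -- this sketch is
   not part of infrun.c, the real flow is spread across
   displaced_step_prepare, resume_1, handle_inferior_event and
   displaced_step_finish, and the exact gdbarch hook signatures vary
   between GDB versions and architectures -- the sequence could read
   roughly like this:  */
#if 0
static void
displaced_step_one_insn_sketch (thread_info *t)
{
  regcache *regs = get_thread_regcache (t);
  gdbarch *arch = regs->arch ();
  CORE_ADDR orig_pc = regcache_read_pc (regs);

  /* n1: copy the instruction "under" the breakpoint out of line,
     letting the architecture adjust it as needed.  */
  CORE_ADDR scratch = gdbarch_displaced_step_location (arch);
  auto closure = gdbarch_displaced_step_copy_insn (arch, orig_pc, scratch, regs);

  /* n2: single-step the thread over the copy.  */
  regcache_write_pc (regs, scratch);
  single_step_and_wait (t);	/* hypothetical helper */

  /* n3: undo the side effects of running the copy, including pointing
     the PC back into the mainline instruction stream.  */
  gdbarch_displaced_step_fixup (arch, closure.get (), orig_pc, scratch, regs);

  /* n4: resume the thread normally; breakpoints stay inserted.  */
  resume_thread (t);		/* hypothetical helper */
}
#endif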
9844051a 1483/* Get the displaced stepping state of inferior INF. */
fc1cf338 1484
39a36629 1485static displaced_step_inferior_state *
00431a78 1486get_displaced_stepping_state (inferior *inf)
fc1cf338 1487{
d20172fc 1488 return &inf->displaced_step_state;
fc1cf338
PA
1489}
1490
9844051a 1491/* Get the displaced stepping state of thread THREAD. */
372316f1 1492
9844051a
SM
1493static displaced_step_thread_state *
1494get_displaced_stepping_state (thread_info *thread)
372316f1 1495{
9844051a 1496 return &thread->displaced_step_state;
372316f1
PA
1497}
1498
9844051a 1499/* Return true if the given thread is doing a displaced step. */
c0987663 1500
9844051a
SM
1501static bool
1502displaced_step_in_progress (thread_info *thread)
c0987663 1503{
00431a78 1504 gdb_assert (thread != NULL);
c0987663 1505
9844051a 1506 return get_displaced_stepping_state (thread)->in_progress ();
c0987663
YQ
1507}
1508
9844051a 1509/* Return true if any thread of this inferior is doing a displaced step. */
8f572e5c 1510
9844051a 1511static bool
00431a78 1512displaced_step_in_progress (inferior *inf)
8f572e5c 1513{
9844051a
SM
1514 for (thread_info *thread : inf->non_exited_threads ())
1515 {
1516 if (displaced_step_in_progress (thread))
1517 return true;
1518 }
1519
1520 return false;
1521}
1522
1523/* Return true if any thread is doing a displaced step. */
1524
1525static bool
1526displaced_step_in_progress_any_thread ()
1527{
1528 for (thread_info *thread : all_non_exited_threads ())
1529 {
1530 if (displaced_step_in_progress (thread))
1531 return true;
1532 }
1533
1534 return false;
fc1cf338
PA
1535}
1536
a42244db 1537/* If the inferior is displaced stepping, and ADDR equals the starting address
7ccba087 1538 of the copy area, return the corresponding displaced_step_copy_insn_closure.
a42244db
YQ
 1539 Otherwise, return NULL. */
1540
7ccba087
SM
1541struct displaced_step_copy_insn_closure *
1542get_displaced_step_copy_insn_closure_by_addr (CORE_ADDR addr)
a42244db 1543{
9844051a
SM
1544// FIXME: implement me (only needed on ARM).
1545// displaced_step_inferior_state *displaced
1546// = get_displaced_stepping_state (current_inferior ());
1547//
1548// /* If checking the mode of displaced instruction in copy area. */
1549// if (displaced->step_thread != nullptr
1550// && displaced->step_copy == addr)
1551// return displaced->step_closure.get ();
1552//
a42244db
YQ
1553 return NULL;
1554}
1555
fc1cf338
PA
1556static void
1557infrun_inferior_exit (struct inferior *inf)
1558{
d20172fc 1559 inf->displaced_step_state.reset ();
fc1cf338 1560}
237fc4c9 1561
fff08868
HZ
1562/* If ON, and the architecture supports it, GDB will use displaced
1563 stepping to step over breakpoints. If OFF, or if the architecture
1564 doesn't support it, GDB will instead use the traditional
1565 hold-and-step approach. If AUTO (which is the default), GDB will
1566 decide which technique to use to step over breakpoints depending on
9822cb57 1567 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1568
72d0e2c5 1569static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1570
237fc4c9
PA
1571static void
1572show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1573 struct cmd_list_element *c,
1574 const char *value)
1575{
72d0e2c5 1576 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1577 fprintf_filtered (file,
1578 _("Debugger's willingness to use displaced stepping "
1579 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1580 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1581 else
3e43a32a
MS
1582 fprintf_filtered (file,
1583 _("Debugger's willingness to use displaced stepping "
1584 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1585}
1586
9822cb57
SM
1587/* Return true if the gdbarch implements the required methods to use
1588 displaced stepping. */
1589
1590static bool
1591gdbarch_supports_displaced_stepping (gdbarch *arch)
1592{
9844051a
SM
1593 /* Only check for the presence of copy_insn. The gdbarch validation
1594 checks that the other required methods are provided whenever
1595 copy_insn is provided. */
9822cb57
SM
1596 return gdbarch_displaced_step_copy_insn_p (arch);
1597}
1598
fff08868 1599/* Return true if displaced stepping can/should be used to step
3fc8eb30 1600 over breakpoints of thread TP. */
fff08868 1601
9822cb57
SM
1602static bool
1603use_displaced_stepping (thread_info *tp)
237fc4c9 1604{
9822cb57
SM
1605 /* If the user disabled it explicitly, don't use displaced stepping. */
1606 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1607 return false;
1608
1609 /* If "auto", only use displaced stepping if the target operates in a non-stop
1610 way. */
1611 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1612 && !target_is_non_stop_p ())
1613 return false;
1614
1615 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1616
1617 /* If the architecture doesn't implement displaced stepping, don't use
1618 it. */
1619 if (!gdbarch_supports_displaced_stepping (gdbarch))
1620 return false;
1621
1622 /* If recording, don't use displaced stepping. */
1623 if (find_record_target () != nullptr)
1624 return false;
1625
d20172fc
SM
1626 displaced_step_inferior_state *displaced_state
1627 = get_displaced_stepping_state (tp->inf);
3fc8eb30 1628
9822cb57
SM
1629 /* If displaced stepping failed before for this inferior, don't bother trying
1630 again. */
1631 if (displaced_state->failed_before)
1632 return false;
1633
1634 return true;
237fc4c9
PA
1635}
1636
9844051a 1637/* Simple function wrapper around displaced_step_thread_state::reset. */
d8d83535 1638
237fc4c9 1639static void
9844051a 1640displaced_step_reset (displaced_step_thread_state *displaced)
237fc4c9 1641{
d8d83535 1642 displaced->reset ();
237fc4c9
PA
1643}
1644
d8d83535
SM
1645/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1646 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1647
1648using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9
PA
1649
1650/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1651void
1652displaced_step_dump_bytes (struct ui_file *file,
1653 const gdb_byte *buf,
1654 size_t len)
1655{
1656 int i;
1657
1658 for (i = 0; i < len; i++)
1659 fprintf_unfiltered (file, "%02x ", buf[i]);
1660 fputs_unfiltered ("\n", file);
1661}
1662
1663/* Prepare to single-step, using displaced stepping.
1664
1665 Note that we cannot use displaced stepping when we have a signal to
1666 deliver. If we have a signal to deliver and an instruction to step
1667 over, then after the step, there will be no indication from the
1668 target whether the thread entered a signal handler or ignored the
1669 signal and stepped over the instruction successfully --- both cases
1670 result in a simple SIGTRAP. In the first case we mustn't do a
1671 fixup, and in the second case we must --- but we can't tell which.
1672 Comments in the code for 'random signals' in handle_inferior_event
1673 explain how we handle this case instead.
1674
1675 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
7f03bd92
PA
 1676 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE if
 1677 the step was deferred for lack of resources; or DISPLACED_STEP_PREPARE_STATUS_ERROR if this instruction can't be displaced stepped. */
1678
9844051a 1679static displaced_step_prepare_status
00431a78 1680displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1681{
00431a78 1682 regcache *regcache = get_thread_regcache (tp);
ac7936df 1683 struct gdbarch *gdbarch = regcache->arch ();
9844051a
SM
1684 displaced_step_thread_state *thread_disp_step_state
1685 = get_displaced_stepping_state (tp);
237fc4c9
PA
1686
1687 /* We should never reach this function if the architecture does not
1688 support displaced stepping. */
9822cb57 1689 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1690
c2829269
PA
1691 /* Nor if the thread isn't meant to step over a breakpoint. */
1692 gdb_assert (tp->control.trap_expected);
1693
c1e36e3e
PA
1694 /* Disable range stepping while executing in the scratch pad. We
1695 want a single-step even if executing the displaced instruction in
1696 the scratch buffer lands within the stepping range (e.g., a
1697 jump/branch). */
1698 tp->control.may_range_step = 0;
1699
9844051a
SM
1700 /* We are about to start a displaced step for this thread; if one is already
1701 in progress, we goofed up somewhere. */
1702 gdb_assert (!thread_disp_step_state->in_progress ());
237fc4c9 1703
9844051a 1704 scoped_restore_current_thread restore_thread;
fc1cf338 1705
9844051a
SM
1706 switch_to_thread (tp);
1707
1708 CORE_ADDR original_pc = regcache_read_pc (regcache);
1709
1710 displaced_step_prepare_status status =
1711 gdbarch_displaced_step_prepare (gdbarch, tp);
237fc4c9 1712
9844051a
SM
1713 if (status == DISPLACED_STEP_PREPARE_STATUS_ERROR)
1714 {
237fc4c9
PA
1715 if (debug_displaced)
1716 fprintf_unfiltered (gdb_stdlog,
9844051a 1717 "displaced: failed to prepare (%s)",
a068643d 1718 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1719
9844051a 1720 return DISPLACED_STEP_PREPARE_STATUS_ERROR;
237fc4c9 1721 }
9844051a 1722 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
237fc4c9 1723 {
9844051a
SM
1724 /* Not enough displaced stepping resources available, defer this
1725 request by placing it in the queue. */
1726
237fc4c9
PA
1727 if (debug_displaced)
1728 fprintf_unfiltered (gdb_stdlog,
9844051a
SM
1729 "displaced: not enough resources available, "
1730 "deferring step of %s\n",
a068643d 1731 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1732
9844051a 1733 global_thread_step_over_chain_enqueue (tp);
effb9843 1734 tp->inf->displaced_step_state.unavailable = true;
d35ae833 1735
9844051a 1736 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
d35ae833
PA
1737 }
1738
9844051a
SM
1739 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1740
1741// FIXME: Should probably be replicated in the arch implementation now.
1742//
1743// if (breakpoint_in_range_p (aspace, copy, len))
1744// {
1745// /* There's a breakpoint set in the scratch pad location range
1746// (which is usually around the entry point). We'd either
1747// install it before resuming, which would overwrite/corrupt the
1748// scratch pad, or if it was already inserted, this displaced
1749// step would overwrite it. The latter is OK in the sense that
1750// we already assume that no thread is going to execute the code
1751// in the scratch pad range (after initial startup) anyway, but
1752// the former is unacceptable. Simply punt and fallback to
1753// stepping over this breakpoint in-line. */
1754// if (debug_displaced)
1755// {
1756// fprintf_unfiltered (gdb_stdlog,
1757// "displaced: breakpoint set in scratch pad. "
1758// "Stepping over breakpoint in-line instead.\n");
1759// }
1760//
1761// gdb_assert (false);
1762// gdbarch_displaced_step_release_location (gdbarch, copy);
1763//
1764// return -1;
1765// }
237fc4c9 1766
9f5a595d
UW
1767 /* Save the information we need to fix things up if the step
1768 succeeds. */
9844051a 1769 thread_disp_step_state->set (gdbarch);
9f5a595d 1770
9844051a
SM
1771 // FIXME: get it from _prepare?
1772 CORE_ADDR displaced_pc = 0;
ad53cd71 1773
237fc4c9 1774 if (debug_displaced)
9844051a
SM
1775 fprintf_unfiltered (gdb_stdlog,
1776 "displaced: prepared successfully thread=%s, "
1777 "original_pc=%s, displaced_pc=%s\n",
1778 target_pid_to_str (tp->ptid).c_str (),
1779 paddress (gdbarch, original_pc),
1780 paddress (gdbarch, displaced_pc));
1781
1782 return DISPLACED_STEP_PREPARE_STATUS_OK;
237fc4c9
PA
1783}
1784
3fc8eb30
PA
1785/* Wrapper for displaced_step_prepare_throw that disables further
1786 attempts at displaced stepping if we get a memory error. */
1787
9844051a 1788static displaced_step_prepare_status
00431a78 1789displaced_step_prepare (thread_info *thread)
3fc8eb30 1790{
9844051a
SM
1791 displaced_step_prepare_status status
1792 = DISPLACED_STEP_PREPARE_STATUS_ERROR;
3fc8eb30 1793
a70b8144 1794 try
3fc8eb30 1795 {
9844051a 1796 status = displaced_step_prepare_throw (thread);
3fc8eb30 1797 }
230d2906 1798 catch (const gdb_exception_error &ex)
3fc8eb30
PA
1799 {
1800 struct displaced_step_inferior_state *displaced_state;
1801
16b41842
PA
1802 if (ex.error != MEMORY_ERROR
1803 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1804 throw;
3fc8eb30 1805
edbcda09
SM
1806 infrun_log_debug ("caught exception, disabling displaced stepping: %s",
1807 ex.what ());
3fc8eb30
PA
1808
1809 /* Be verbose if "set displaced-stepping" is "on", silent if
1810 "auto". */
1811 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1812 {
fd7dcb94 1813 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1814 ex.what ());
3fc8eb30
PA
1815 }
1816
1817 /* Disable further displaced stepping attempts. */
1818 displaced_state
00431a78 1819 = get_displaced_stepping_state (thread->inf);
3fc8eb30
PA
1820 displaced_state->failed_before = 1;
1821 }
3fc8eb30 1822
9844051a 1823 return status;
e2d96639
YQ
1824}
1825
372316f1
PA
1826/* If we displaced stepped an instruction successfully, adjust
1827 registers and memory to yield the same effect the instruction would
1828 have had if we had executed it at its original address, and return
1829 1. If the instruction didn't complete, relocate the PC and return
1830 -1. If the thread wasn't displaced stepping, return 0. */
1831
1832static int
9844051a 1833displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1834{
9844051a
SM
1835 displaced_step_thread_state *displaced
1836 = get_displaced_stepping_state (event_thread);
fc1cf338 1837
9844051a
SM
1838 /* Was this thread performing a displaced step? */
1839 if (!displaced->in_progress ())
372316f1 1840 return 0;
237fc4c9 1841
9844051a
SM
1842 displaced_step_reset_cleanup cleanup (displaced);
1843
cb71640d
PA
1844 /* Fixup may need to read memory/registers. Switch to the thread
1845 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d
TBA
1846 the current thread, and displaced_step_restore performs ptid-dependent
1847 memory accesses using current_inferior() and current_top_target(). */
00431a78 1848 switch_to_thread (event_thread);
cb71640d 1849
9844051a
SM
1850 /* Do the fixup, and release the resources acquired to do the displaced
1851 step. */
1852 displaced_step_finish_status finish_status =
1853 gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1854 event_thread, signal);
d43b7a2d 1855
9844051a
SM
1856 if (finish_status == DISPLACED_STEP_FINISH_STATUS_OK)
1857 return 1;
237fc4c9 1858 else
9844051a 1859 return -1;
c2829269 1860}
1c5cfe86 1861
4d9d9d04
PA
1862/* Data to be passed around while handling an event. This data is
1863 discarded between events. */
1864struct execution_control_state
1865{
5b6d1e4f 1866 process_stratum_target *target;
4d9d9d04
PA
1867 ptid_t ptid;
1868 /* The thread that got the event, if this was a thread event; NULL
1869 otherwise. */
1870 struct thread_info *event_thread;
1871
1872 struct target_waitstatus ws;
1873 int stop_func_filled_in;
1874 CORE_ADDR stop_func_start;
1875 CORE_ADDR stop_func_end;
1876 const char *stop_func_name;
1877 int wait_some_more;
1878
1879 /* True if the event thread hit the single-step breakpoint of
1880 another thread. Thus the event doesn't cause a stop, the thread
1881 needs to be single-stepped past the single-step breakpoint before
1882 we can switch back to the original stepping thread. */
1883 int hit_singlestep_breakpoint;
1884};
1885
1886/* Clear ECS and set it to point at TP. */
c2829269
PA
1887
1888static void
4d9d9d04
PA
1889reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1890{
1891 memset (ecs, 0, sizeof (*ecs));
1892 ecs->event_thread = tp;
1893 ecs->ptid = tp->ptid;
1894}
1895
1896static void keep_going_pass_signal (struct execution_control_state *ecs);
1897static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 1898static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1899static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1900
1901/* Are there any pending step-over requests? If so, run all we can
1902 now and return true. Otherwise, return false. */
1903
1904static int
c2829269
PA
1905start_step_over (void)
1906{
1907 struct thread_info *tp, *next;
9844051a 1908 int started = 0;
c2829269 1909
372316f1
PA
1910 /* Don't start a new step-over if we already have an in-line
1911 step-over operation ongoing. */
1912 if (step_over_info_valid_p ())
9844051a
SM
1913 return started;
1914
1915 /* Steal the global thread step over chain. */
1916 thread_info *threads_to_step = global_thread_step_over_chain_head;
1917 global_thread_step_over_chain_head = NULL;
1918
1919 if (debug_infrun)
1920 fprintf_unfiltered (gdb_stdlog,
1921 "infrun: stealing list of %d threads to step from global queue\n",
1922 thread_step_over_chain_length (threads_to_step));
372316f1 1923
effb9843
SM
1924 for (inferior *inf : all_inferiors ())
1925 inf->displaced_step_state.unavailable = false;
1926
9844051a 1927 for (tp = threads_to_step; tp != NULL; tp = next)
237fc4c9 1928 {
4d9d9d04
PA
1929 struct execution_control_state ecss;
1930 struct execution_control_state *ecs = &ecss;
8d297bbf 1931 step_over_what step_what;
372316f1 1932 int must_be_in_line;
c2829269 1933
c65d6b55
PA
1934 gdb_assert (!tp->stop_requested);
1935
9844051a 1936 next = thread_step_over_chain_next (threads_to_step, tp);
c2829269 1937
372316f1
PA
1938 step_what = thread_still_needs_step_over (tp);
1939 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1940 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1941 && !use_displaced_stepping (tp)));
372316f1
PA
1942
1943 /* We currently stop all threads of all processes to step-over
1944 in-line. If we need to start a new in-line step-over, let
1945 any pending displaced steps finish first. */
9844051a
SM
1946 if (must_be_in_line && displaced_step_in_progress_any_thread ())
1947 continue;
c2829269 1948
9844051a 1949 thread_step_over_chain_remove (&threads_to_step, tp);
c2829269 1950
372316f1
PA
1951 if (tp->control.trap_expected
1952 || tp->resumed
1953 || tp->executing)
ad53cd71 1954 {
4d9d9d04
PA
1955 internal_error (__FILE__, __LINE__,
1956 "[%s] has inconsistent state: "
372316f1 1957 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 1958 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 1959 tp->control.trap_expected,
372316f1 1960 tp->resumed,
4d9d9d04 1961 tp->executing);
ad53cd71 1962 }
1c5cfe86 1963
edbcda09
SM
1964 infrun_log_debug ("resuming [%s] for step-over",
1965 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
1966
1967 /* keep_going_pass_signal skips the step-over if the breakpoint
1968 is no longer inserted. In all-stop, we want to keep looking
1969 for a thread that needs a step-over instead of resuming TP,
1970 because we wouldn't be able to resume anything else until the
1971 target stops again. In non-stop, the resume always resumes
1972 only TP, so it's OK to let the thread resume freely. */
fbea99ea 1973 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 1974 continue;
8550d3b3 1975
effb9843
SM
1976 if (tp->inf->displaced_step_state.unavailable)
1977 {
1978 global_thread_step_over_chain_enqueue (tp);
1979 continue;
1980 }
1981
00431a78 1982 switch_to_thread (tp);
4d9d9d04
PA
1983 reset_ecs (ecs, tp);
1984 keep_going_pass_signal (ecs);
1c5cfe86 1985
4d9d9d04
PA
1986 if (!ecs->wait_some_more)
1987 error (_("Command aborted."));
1c5cfe86 1988
9844051a
SM
1989 /* If the thread's step over could not be initiated, it was re-added
1990 to the global step over chain. */
1991 if (tp->resumed)
1992 {
1993 infrun_log_debug ("start_step_over: [%s] was resumed.\n",
1994 target_pid_to_str (tp->ptid).c_str ());
1995 gdb_assert (!thread_is_in_step_over_chain (tp));
1996 }
1997 else
1998 {
1999 infrun_log_debug ("start_step_over: [%s] was NOT resumed.\n",
2000 target_pid_to_str (tp->ptid).c_str ());
2001 gdb_assert (thread_is_in_step_over_chain (tp));
2002
2003 }
372316f1
PA
2004
2005 /* If we started a new in-line step-over, we're done. */
2006 if (step_over_info_valid_p ())
2007 {
2008 gdb_assert (tp->control.trap_expected);
9844051a
SM
2009 started = 1;
2010 break;
372316f1
PA
2011 }
2012
fbea99ea 2013 if (!target_is_non_stop_p ())
4d9d9d04
PA
2014 {
2015 /* On all-stop, shouldn't have resumed unless we needed a
2016 step over. */
2017 gdb_assert (tp->control.trap_expected
2018 || tp->step_after_step_resume_breakpoint);
2019
2020 /* With remote targets (at least), in all-stop, we can't
2021 issue any further remote commands until the program stops
2022 again. */
9844051a
SM
2023 started = 1;
2024 break;
1c5cfe86 2025 }
c2829269 2026
4d9d9d04
PA
2027 /* Either the thread no longer needed a step-over, or a new
2028 displaced stepping sequence started. Even in the latter
2029 case, continue looking. Maybe we can also start another
2030 displaced step on a thread of other process. */
237fc4c9 2031 }
4d9d9d04 2032
9844051a
SM
2033 /* If there are threads left in the THREADS_TO_STEP list, but we have
2034 detected that we can't start anything more, put these threads back
2035 in the global list. */
2036 if (threads_to_step == NULL)
2037 {
2038 if (debug_infrun)
2039 fprintf_unfiltered (gdb_stdlog,
2040 "infrun: step-over queue now empty\n");
2041 }
2042 else
2043 {
2044 if (debug_infrun)
2045 fprintf_unfiltered (gdb_stdlog,
2046 "infrun: putting back %d threads to step in global queue\n",
2047 thread_step_over_chain_length (threads_to_step));
2048 while (threads_to_step != nullptr)
2049 {
2050 thread_info *thread = threads_to_step;
2051
2052 /* Remove from that list. */
2053 thread_step_over_chain_remove (&threads_to_step, thread);
2054
2055 /* Add to global list. */
2056 global_thread_step_over_chain_enqueue (thread);
2057
2058 }
2059 }
2060
2061 return started;
237fc4c9
PA
2062}
2063
5231c1fd
PA
2064/* Update global variables holding ptids to hold NEW_PTID if they were
2065 holding OLD_PTID. */
2066static void
2067infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2068{
d7e15655 2069 if (inferior_ptid == old_ptid)
5231c1fd 2070 inferior_ptid = new_ptid;
5231c1fd
PA
2071}
2072
237fc4c9 2073\f
c906108c 2074
53904c9e
AC
2075static const char schedlock_off[] = "off";
2076static const char schedlock_on[] = "on";
2077static const char schedlock_step[] = "step";
f2665db5 2078static const char schedlock_replay[] = "replay";
40478521 2079static const char *const scheduler_enums[] = {
ef346e04
AC
2080 schedlock_off,
2081 schedlock_on,
2082 schedlock_step,
f2665db5 2083 schedlock_replay,
ef346e04
AC
2084 NULL
2085};
f2665db5 2086static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2087static void
2088show_scheduler_mode (struct ui_file *file, int from_tty,
2089 struct cmd_list_element *c, const char *value)
2090{
3e43a32a
MS
2091 fprintf_filtered (file,
2092 _("Mode for locking scheduler "
2093 "during execution is \"%s\".\n"),
920d2a44
AC
2094 value);
2095}
c906108c
SS
2096
2097static void
eb4c3f4a 2098set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2099{
eefe576e
AC
2100 if (!target_can_lock_scheduler)
2101 {
2102 scheduler_mode = schedlock_off;
2103 error (_("Target '%s' cannot support this command."), target_shortname);
2104 }
c906108c
SS
2105}
2106
d4db2f36
PA
2107/* True if execution commands resume all threads of all processes by
2108 default; otherwise, resume only threads of the current inferior
2109 process. */
491144b5 2110bool sched_multi = false;
d4db2f36 2111
2facfe5c
DD
2112/* Try to set up for software single stepping over the specified location.
2113 Return 1 if target_resume() should use hardware single step.
2114
2115 GDBARCH the current gdbarch.
2116 PC the location to step over. */
2117
2118static int
2119maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2120{
2121 int hw_step = 1;
2122
f02253f1 2123 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2124 && gdbarch_software_single_step_p (gdbarch))
2125 hw_step = !insert_single_step_breakpoints (gdbarch);
2126
2facfe5c
DD
2127 return hw_step;
2128}
c906108c 2129
f3263aa4
PA
2130/* See infrun.h. */
2131
09cee04b
PA
2132ptid_t
2133user_visible_resume_ptid (int step)
2134{
f3263aa4 2135 ptid_t resume_ptid;
09cee04b 2136
09cee04b
PA
2137 if (non_stop)
2138 {
2139 /* With non-stop mode on, threads are always handled
2140 individually. */
2141 resume_ptid = inferior_ptid;
2142 }
2143 else if ((scheduler_mode == schedlock_on)
03d46957 2144 || (scheduler_mode == schedlock_step && step))
09cee04b 2145 {
f3263aa4
PA
2146 /* User-settable 'scheduler' mode requires solo thread
2147 resume. */
09cee04b
PA
2148 resume_ptid = inferior_ptid;
2149 }
f2665db5
MM
2150 else if ((scheduler_mode == schedlock_replay)
2151 && target_record_will_replay (minus_one_ptid, execution_direction))
2152 {
2153 /* User-settable 'scheduler' mode requires solo thread resume in replay
2154 mode. */
2155 resume_ptid = inferior_ptid;
2156 }
f3263aa4
PA
2157 else if (!sched_multi && target_supports_multi_process ())
2158 {
2159 /* Resume all threads of the current process (and none of other
2160 processes). */
e99b03dc 2161 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2162 }
2163 else
2164 {
2165 /* Resume all threads of all processes. */
2166 resume_ptid = RESUME_ALL;
2167 }
09cee04b
PA
2168
2169 return resume_ptid;
2170}
2171
5b6d1e4f
PA
2172/* See infrun.h. */
2173
2174process_stratum_target *
2175user_visible_resume_target (ptid_t resume_ptid)
2176{
2177 return (resume_ptid == minus_one_ptid && sched_multi
2178 ? NULL
2179 : current_inferior ()->process_target ());
2180}
2181
fbea99ea
PA
2182/* Return a ptid representing the set of threads that we will resume,
2183 from the perspective of the target, assuming run control handling
2184 does not require leaving some threads stopped (e.g., stepping past
2185 breakpoint). USER_STEP indicates whether we're about to start the
2186 target for a stepping command. */
2187
2188static ptid_t
2189internal_resume_ptid (int user_step)
2190{
2191 /* In non-stop, we always control threads individually. Note that
2192 the target may always work in non-stop mode even with "set
2193 non-stop off", in which case user_visible_resume_ptid could
2194 return a wildcard ptid. */
2195 if (target_is_non_stop_p ())
2196 return inferior_ptid;
2197 else
2198 return user_visible_resume_ptid (user_step);
2199}
2200
64ce06e4
PA
2201/* Wrapper for target_resume, that handles infrun-specific
2202 bookkeeping. */
2203
2204static void
2205do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2206{
2207 struct thread_info *tp = inferior_thread ();
2208
c65d6b55
PA
2209 gdb_assert (!tp->stop_requested);
2210
64ce06e4 2211 /* Install inferior's terminal modes. */
223ffa71 2212 target_terminal::inferior ();
64ce06e4
PA
2213
2214 /* Avoid confusing the next resume, if the next stop/resume
2215 happens to apply to another thread. */
2216 tp->suspend.stop_signal = GDB_SIGNAL_0;
2217
8f572e5c
PA
2218 /* Advise target which signals may be handled silently.
2219
2220 If we have removed breakpoints because we are stepping over one
2221 in-line (in any thread), we need to receive all signals to avoid
2222 accidentally skipping a breakpoint during execution of a signal
2223 handler.
2224
2225 Likewise if we're displaced stepping, otherwise a trap for a
2226 breakpoint in a signal handler might be confused with the
2227 displaced step finishing. We don't make the displaced_step_fixup
2228 step distinguish the cases instead, because:
2229
2230 - a backtrace while stopped in the signal handler would show the
2231 scratch pad as frame older than the signal handler, instead of
2232 the real mainline code.
2233
2234 - when the thread is later resumed, the signal handler would
2235 return to the scratch pad area, which would no longer be
2236 valid. */
2237 if (step_over_info_valid_p ()
00431a78 2238 || displaced_step_in_progress (tp->inf))
adc6a863 2239 target_pass_signals ({});
64ce06e4 2240 else
adc6a863 2241 target_pass_signals (signal_pass);
64ce06e4
PA
2242
2243 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2244
2245 target_commit_resume ();
5b6d1e4f
PA
2246
2247 if (target_can_async_p ())
2248 target_async (1);
64ce06e4
PA
2249}
2250
d930703d 2251/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2252 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2253 call 'resume', which handles exceptions. */
c906108c 2254
71d378ae
PA
2255static void
2256resume_1 (enum gdb_signal sig)
c906108c 2257{
515630c5 2258 struct regcache *regcache = get_current_regcache ();
ac7936df 2259 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2260 struct thread_info *tp = inferior_thread ();
8b86c959 2261 const address_space *aspace = regcache->aspace ();
b0f16a3e 2262 ptid_t resume_ptid;
856e7dd6
PA
2263 /* This represents the user's step vs continue request. When
2264 deciding whether "set scheduler-locking step" applies, it's the
2265 user's intention that counts. */
2266 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2267 /* This represents what we'll actually request the target to do.
2268 This can decay from a step to a continue, if e.g., we need to
2269 implement single-stepping with breakpoints (software
2270 single-step). */
6b403daa 2271 int step;
c7e8a53c 2272
c65d6b55 2273 gdb_assert (!tp->stop_requested);
c2829269
PA
2274 gdb_assert (!thread_is_in_step_over_chain (tp));
2275
372316f1
PA
2276 if (tp->suspend.waitstatus_pending_p)
2277 {
edbcda09
SM
2278 infrun_log_debug
2279 ("thread %s has pending wait "
2280 "status %s (currently_stepping=%d).",
2281 target_pid_to_str (tp->ptid).c_str (),
2282 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2283 currently_stepping (tp));
372316f1 2284
5b6d1e4f 2285 tp->inf->process_target ()->threads_executing = true;
719546c4 2286 tp->resumed = true;
372316f1
PA
2287
2288 /* FIXME: What should we do if we are supposed to resume this
2289 thread with a signal? Maybe we should maintain a queue of
2290 pending signals to deliver. */
2291 if (sig != GDB_SIGNAL_0)
2292 {
fd7dcb94 2293 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2294 gdb_signal_to_name (sig),
2295 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2296 }
2297
2298 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2299
2300 if (target_can_async_p ())
9516f85a
AB
2301 {
2302 target_async (1);
2303 /* Tell the event loop we have an event to process. */
2304 mark_async_event_handler (infrun_async_inferior_event_token);
2305 }
372316f1
PA
2306 return;
2307 }
2308
2309 tp->stepped_breakpoint = 0;
2310
6b403daa
PA
2311 /* Depends on stepped_breakpoint. */
2312 step = currently_stepping (tp);
2313
74609e71
YQ
2314 if (current_inferior ()->waiting_for_vfork_done)
2315 {
48f9886d
PA
2316 /* Don't try to single-step a vfork parent that is waiting for
2317 the child to get out of the shared memory region (by exec'ing
2318 or exiting). This is particularly important on software
2319 single-step archs, as the child process would trip on the
2320 software single step breakpoint inserted for the parent
2321 process. Since the parent will not actually execute any
2322 instruction until the child is out of the shared region (such
2323 are vfork's semantics), it is safe to simply continue it.
2324 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2325 the parent, and tell it to `keep_going', which automatically
2326 re-sets it stepping. */
edbcda09 2327 infrun_log_debug ("resume : clear step");
a09dd441 2328 step = 0;
74609e71
YQ
2329 }
2330
7ca9b62a
TBA
2331 CORE_ADDR pc = regcache_read_pc (regcache);
2332
edbcda09
SM
2333 infrun_log_debug ("step=%d, signal=%s, trap_expected=%d, "
2334 "current thread [%s] at %s",
2335 step, gdb_signal_to_symbol_string (sig),
2336 tp->control.trap_expected,
2337 target_pid_to_str (inferior_ptid).c_str (),
2338 paddress (gdbarch, pc));
c906108c 2339
c2c6d25f
JM
2340 /* Normally, by the time we reach `resume', the breakpoints are either
2341 removed or inserted, as appropriate. The exception is if we're sitting
2342 at a permanent breakpoint; we need to step over it, but permanent
2343 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2344 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2345 {
af48d08f
PA
2346 if (sig != GDB_SIGNAL_0)
2347 {
2348 /* We have a signal to pass to the inferior. The resume
2349 may, or may not take us to the signal handler. If this
2350 is a step, we'll need to stop in the signal handler, if
2351 there's one, (if the target supports stepping into
2352 handlers), or in the next mainline instruction, if
2353 there's no handler. If this is a continue, we need to be
2354 sure to run the handler with all breakpoints inserted.
2355 In all cases, set a breakpoint at the current address
2356 (where the handler returns to), and once that breakpoint
2357 is hit, resume skipping the permanent breakpoint. If
2358 that breakpoint isn't hit, then we've stepped into the
2359 signal handler (or hit some other event). We'll delete
2360 the step-resume breakpoint then. */
2361
edbcda09
SM
2362 infrun_log_debug ("resume: skipping permanent breakpoint, "
2363 "deliver signal first");
af48d08f
PA
2364
2365 clear_step_over_info ();
2366 tp->control.trap_expected = 0;
2367
2368 if (tp->control.step_resume_breakpoint == NULL)
2369 {
2370 /* Set a "high-priority" step-resume, as we don't want
2371 user breakpoints at PC to trigger (again) when this
2372 hits. */
2373 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2374 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2375
2376 tp->step_after_step_resume_breakpoint = step;
2377 }
2378
2379 insert_breakpoints ();
2380 }
2381 else
2382 {
2383 /* There's no signal to pass, we can go ahead and skip the
2384 permanent breakpoint manually. */
edbcda09 2385 infrun_log_debug ("skipping permanent breakpoint");
af48d08f
PA
2386 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2387 /* Update pc to reflect the new address from which we will
2388 execute instructions. */
2389 pc = regcache_read_pc (regcache);
2390
2391 if (step)
2392 {
2393 /* We've already advanced the PC, so the stepping part
2394 is done. Now we need to arrange for a trap to be
2395 reported to handle_inferior_event. Set a breakpoint
2396 at the current PC, and run to it. Don't update
2397 prev_pc, because if we end in
44a1ee51
PA
2398 switch_back_to_stepped_thread, we want the "expected
2399 thread advanced also" branch to be taken. IOW, we
2400 don't want this thread to step further from PC
af48d08f 2401 (overstep). */
1ac806b8 2402 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2403 insert_single_step_breakpoint (gdbarch, aspace, pc);
2404 insert_breakpoints ();
2405
fbea99ea 2406 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2407 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
719546c4 2408 tp->resumed = true;
af48d08f
PA
2409 return;
2410 }
2411 }
6d350bb5 2412 }
c2c6d25f 2413
c1e36e3e
PA
2414 /* If we have a breakpoint to step over, make sure to do a single
2415 step only. Same if we have software watchpoints. */
2416 if (tp->control.trap_expected || bpstat_should_step ())
2417 tp->control.may_range_step = 0;
2418
7da6a5b9
LM
2419 /* If displaced stepping is enabled, step over breakpoints by executing a
2420 copy of the instruction at a different address.
237fc4c9
PA
2421
2422 We can't use displaced stepping when we have a signal to deliver;
2423 the comments for displaced_step_prepare explain why. The
2424 comments in handle_inferior_event for dealing with 'random
74609e71
YQ
2425 signals' explain what we do instead.
2426
2427 We can't use displaced stepping when we are waiting for vfork_done
2428 event, displaced stepping breaks the vfork child similarly as single
2429 step software breakpoint. */
3fc8eb30
PA
2430 if (tp->control.trap_expected
2431 && use_displaced_stepping (tp)
cb71640d 2432 && !step_over_info_valid_p ()
a493e3e2 2433 && sig == GDB_SIGNAL_0
74609e71 2434 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2435 {
9844051a
SM
2436 displaced_step_prepare_status prepare_status
2437 = displaced_step_prepare (tp);
fc1cf338 2438
9844051a 2439 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
d56b7306 2440 {
edbcda09 2441 infrun_log_debug ("Got placed in step-over queue");
4d9d9d04
PA
2442
2443 tp->control.trap_expected = 0;
d56b7306
VP
2444 return;
2445 }
9844051a 2446 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_ERROR)
3fc8eb30
PA
2447 {
2448 /* Fallback to stepping over the breakpoint in-line. */
2449
2450 if (target_is_non_stop_p ())
2451 stop_all_threads ();
2452
a01bda52 2453 set_step_over_info (regcache->aspace (),
21edc42f 2454 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2455
2456 step = maybe_software_singlestep (gdbarch, pc);
2457
2458 insert_breakpoints ();
2459 }
9844051a 2460 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
3fc8eb30 2461 {
9844051a 2462 step = gdbarch_displaced_step_hw_singlestep (gdbarch, NULL);
3fc8eb30 2463 }
9844051a
SM
2464 else
2465 gdb_assert_not_reached ("invalid displaced_step_prepare_status value");
237fc4c9
PA
2466 }
2467
2facfe5c 2468 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2469 else if (step)
2facfe5c 2470 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2471
30852783
UW
2472 /* Currently, our software single-step implementation leads to different
2473 results than hardware single-stepping in one situation: when stepping
2474 into delivering a signal which has an associated signal handler,
2475 hardware single-step will stop at the first instruction of the handler,
2476 while software single-step will simply skip execution of the handler.
2477
2478 For now, this difference in behavior is accepted since there is no
2479 easy way to actually implement single-stepping into a signal handler
2480 without kernel support.
2481
2482 However, there is one scenario where this difference leads to follow-on
2483 problems: if we're stepping off a breakpoint by removing all breakpoints
2484 and then single-stepping. In this case, the software single-step
2485 behavior means that even if there is a *breakpoint* in the signal
2486 handler, GDB still would not stop.
2487
2488 Fortunately, we can at least fix this particular issue. We detect
2489 here the case where we are about to deliver a signal while software
2490 single-stepping with breakpoints removed. In this situation, we
2491 revert the decisions to remove all breakpoints and insert single-
2492 step breakpoints, and instead we install a step-resume breakpoint
2493 at the current address, deliver the signal without stepping, and
2494 once we arrive back at the step-resume breakpoint, actually step
2495 over the breakpoint we originally wanted to step over. */
34b7e8a6 2496 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2497 && sig != GDB_SIGNAL_0
2498 && step_over_info_valid_p ())
30852783
UW
2499 {
2500 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2501 immediately after a handler returns, we might already have
30852783
UW
2502 a step-resume breakpoint set on the earlier handler. We cannot
2503 set another step-resume breakpoint; just continue on until the
2504 original breakpoint is hit. */
2505 if (tp->control.step_resume_breakpoint == NULL)
2506 {
2c03e5be 2507 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2508 tp->step_after_step_resume_breakpoint = 1;
2509 }
2510
34b7e8a6 2511 delete_single_step_breakpoints (tp);
30852783 2512
31e77af2 2513 clear_step_over_info ();
30852783 2514 tp->control.trap_expected = 0;
31e77af2
PA
2515
2516 insert_breakpoints ();
30852783
UW
2517 }
2518
b0f16a3e
SM
2519 /* If STEP is set, it's a request to use hardware stepping
2520 facilities. But in that case, we should never
2521 use singlestep breakpoint. */
34b7e8a6 2522 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2523
fbea99ea 2524 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2525 if (tp->control.trap_expected)
b0f16a3e
SM
2526 {
2527 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2528 hit, either by single-stepping the thread with the breakpoint
2529 removed, or by displaced stepping, with the breakpoint inserted.
2530 In the former case, we need to single-step only this thread,
2531 and keep others stopped, as they can miss this breakpoint if
2532 allowed to run. That's not really a problem for displaced
2533 stepping, but, we still keep other threads stopped, in case
2534 another thread is also stopped for a breakpoint waiting for
2535 its turn in the displaced stepping queue. */
b0f16a3e
SM
2536 resume_ptid = inferior_ptid;
2537 }
fbea99ea
PA
2538 else
2539 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2540
7f5ef605
PA
2541 if (execution_direction != EXEC_REVERSE
2542 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2543 {
372316f1
PA
2544 /* There are two cases where we currently need to step a
2545 breakpoint instruction when we have a signal to deliver:
2546
2547 - See handle_signal_stop where we handle random signals that
2548 could take out us out of the stepping range. Normally, in
2549 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2550 signal handler with a breakpoint at PC, but there are cases
2551 where we should _always_ single-step, even if we have a
2552 step-resume breakpoint, like when a software watchpoint is
2553 set. Assuming single-stepping and delivering a signal at the
2554 same time would takes us to the signal handler, then we could
2555 have removed the breakpoint at PC to step over it. However,
2556 some hardware step targets (like e.g., Mac OS) can't step
2557 into signal handlers, and for those, we need to leave the
2558 breakpoint at PC inserted, as otherwise if the handler
2559 recurses and executes PC again, it'll miss the breakpoint.
2560 So we leave the breakpoint inserted anyway, but we need to
2561 record that we tried to step a breakpoint instruction, so
372316f1
PA
2562 that adjust_pc_after_break doesn't end up confused.
2563
2564 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2565 in one thread after another thread that was stepping had been
2566 momentarily paused for a step-over. When we re-resume the
2567 stepping thread, it may be resumed from that address with a
2568 breakpoint that hasn't trapped yet. Seen with
2569 gdb.threads/non-stop-fair-events.exp, on targets that don't
2570 do displaced stepping. */
2571
edbcda09
SM
2572 infrun_log_debug ("resume: [%s] stepped breakpoint",
2573 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2574
2575 tp->stepped_breakpoint = 1;
2576
b0f16a3e
SM
2577 /* Most targets can step a breakpoint instruction, thus
2578 executing it normally. But if this one cannot, just
2579 continue and we will hit it anyway. */
7f5ef605 2580 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2581 step = 0;
2582 }
ef5cf84e 2583
b0f16a3e 2584 if (debug_displaced
cb71640d 2585 && tp->control.trap_expected
3fc8eb30 2586 && use_displaced_stepping (tp)
cb71640d 2587 && !step_over_info_valid_p ())
b0f16a3e 2588 {
00431a78 2589 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2590 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2591 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2592 gdb_byte buf[4];
2593
2594 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2595 paddress (resume_gdbarch, actual_pc));
2596 read_memory (actual_pc, buf, sizeof (buf));
2597 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2598 }
237fc4c9 2599
b0f16a3e
SM
2600 if (tp->control.may_range_step)
2601 {
2602 /* If we're resuming a thread with the PC out of the step
2603 range, then we're doing some nested/finer run control
2604 operation, like stepping the thread out of the dynamic
2605 linker or the displaced stepping scratch pad. We
2606 shouldn't have allowed a range step then. */
2607 gdb_assert (pc_in_thread_step_range (pc, tp));
2608 }
c1e36e3e 2609
64ce06e4 2610 do_target_resume (resume_ptid, step, sig);
719546c4 2611 tp->resumed = true;
c906108c 2612}
71d378ae
PA
2613
2614/* Resume the inferior. SIG is the signal to give the inferior
2615 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2616 rolls back state on error. */
2617
aff4e175 2618static void
71d378ae
PA
2619resume (gdb_signal sig)
2620{
a70b8144 2621 try
71d378ae
PA
2622 {
2623 resume_1 (sig);
2624 }
230d2906 2625 catch (const gdb_exception &ex)
71d378ae
PA
2626 {
2627 /* If resuming is being aborted for any reason, delete any
2628 single-step breakpoint resume_1 may have created, to avoid
2629 confusing the following resumption, and to avoid leaving
2630 single-step breakpoints perturbing other threads, in case
2631 we're running in non-stop mode. */
2632 if (inferior_ptid != null_ptid)
2633 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2634 throw;
71d378ae 2635 }
71d378ae
PA
2636}
2637
c906108c 2638\f
237fc4c9 2639/* Proceeding. */
c906108c 2640
4c2f2a79
PA
2641/* See infrun.h. */
2642
2643/* Counter that tracks number of user visible stops. This can be used
2644 to tell whether a command has proceeded the inferior past the
2645 current location. This allows e.g., inferior function calls in
2646 breakpoint commands to not interrupt the command list. When the
2647 call finishes successfully, the inferior is standing at the same
2648 breakpoint as if nothing happened (and so we don't call
2649 normal_stop). */
2650static ULONGEST current_stop_id;
2651
2652/* See infrun.h. */
2653
2654ULONGEST
2655get_stop_id (void)
2656{
2657 return current_stop_id;
2658}
2659
2660/* Called when we report a user visible stop. */
2661
2662static void
2663new_stop_id (void)
2664{
2665 current_stop_id++;
2666}
2667
c906108c
SS
2668/* Clear out all variables saying what to do when inferior is continued.
2669 First do this, then set the ones you want, then call `proceed'. */
2670
a7212384
UW
2671static void
2672clear_proceed_status_thread (struct thread_info *tp)
c906108c 2673{
edbcda09 2674 infrun_log_debug ("%s", target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2675
372316f1
PA
2676 /* If we're starting a new sequence, then the previous finished
2677 single-step is no longer relevant. */
2678 if (tp->suspend.waitstatus_pending_p)
2679 {
2680 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2681 {
edbcda09
SM
2682 infrun_log_debug ("pending event of %s was a finished step. "
2683 "Discarding.",
2684 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2685
2686 tp->suspend.waitstatus_pending_p = 0;
2687 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2688 }
edbcda09 2689 else
372316f1 2690 {
edbcda09
SM
2691 infrun_log_debug
2692 ("thread %s has pending wait status %s (currently_stepping=%d).",
2693 target_pid_to_str (tp->ptid).c_str (),
2694 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2695 currently_stepping (tp));
372316f1
PA
2696 }
2697 }
2698
70509625
PA
2699 /* If this signal should not be seen by program, give it zero.
2700 Used for debugging signals. */
2701 if (!signal_pass_state (tp->suspend.stop_signal))
2702 tp->suspend.stop_signal = GDB_SIGNAL_0;
2703
46e3ed7f 2704 delete tp->thread_fsm;
243a9253
PA
2705 tp->thread_fsm = NULL;
2706
16c381f0
JK
2707 tp->control.trap_expected = 0;
2708 tp->control.step_range_start = 0;
2709 tp->control.step_range_end = 0;
c1e36e3e 2710 tp->control.may_range_step = 0;
16c381f0
JK
2711 tp->control.step_frame_id = null_frame_id;
2712 tp->control.step_stack_frame_id = null_frame_id;
2713 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2714 tp->control.step_start_function = NULL;
a7212384 2715 tp->stop_requested = 0;
4e1c45ea 2716
16c381f0 2717 tp->control.stop_step = 0;
32400beb 2718
16c381f0 2719 tp->control.proceed_to_finish = 0;
414c69f7 2720
856e7dd6 2721 tp->control.stepping_command = 0;
17b2616c 2722
a7212384 2723 /* Discard any remaining commands or status from previous stop. */
16c381f0 2724 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2725}
32400beb 2726
a7212384 2727void
70509625 2728clear_proceed_status (int step)
a7212384 2729{
f2665db5
MM
2730 /* With scheduler-locking replay, stop replaying other threads if we're
2731 not replaying the user-visible resume ptid.
2732
2733 This is a convenience feature to not require the user to explicitly
2734 stop replaying the other threads. We're assuming that the user's
2735 intent is to resume tracing the recorded process. */
2736 if (!non_stop && scheduler_mode == schedlock_replay
2737 && target_record_is_replaying (minus_one_ptid)
2738 && !target_record_will_replay (user_visible_resume_ptid (step),
2739 execution_direction))
2740 target_record_stop_replaying ();
2741
08036331 2742 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2743 {
08036331 2744 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2745 process_stratum_target *resume_target
2746 = user_visible_resume_target (resume_ptid);
70509625
PA
2747
2748 /* In all-stop mode, delete the per-thread status of all threads
2749 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2750 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2751 clear_proceed_status_thread (tp);
6c95b8df
PA
2752 }
2753
d7e15655 2754 if (inferior_ptid != null_ptid)
a7212384
UW
2755 {
2756 struct inferior *inferior;
2757
2758 if (non_stop)
2759 {
6c95b8df
PA
2760 /* If in non-stop mode, only delete the per-thread status of
2761 the current thread. */
a7212384
UW
2762 clear_proceed_status_thread (inferior_thread ());
2763 }
6c95b8df 2764
d6b48e9c 2765 inferior = current_inferior ();
16c381f0 2766 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2767 }
2768
76727919 2769 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2770}
2771
99619bea
PA
2772/* Returns true if TP is still stopped at a breakpoint that needs
2773 stepping-over in order to make progress. If the breakpoint is gone
2774 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2775
2776static int
6c4cfb24 2777thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2778{
2779 if (tp->stepping_over_breakpoint)
2780 {
00431a78 2781 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2782
a01bda52 2783 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2784 regcache_read_pc (regcache))
2785 == ordinary_breakpoint_here)
99619bea
PA
2786 return 1;
2787
2788 tp->stepping_over_breakpoint = 0;
2789 }
2790
2791 return 0;
2792}
2793
6c4cfb24
PA
2794/* Check whether thread TP still needs to start a step-over in order
2795 to make progress when resumed. Returns an bitwise or of enum
2796 step_over_what bits, indicating what needs to be stepped over. */
2797
8d297bbf 2798static step_over_what
6c4cfb24
PA
2799thread_still_needs_step_over (struct thread_info *tp)
2800{
8d297bbf 2801 step_over_what what = 0;
6c4cfb24
PA
2802
2803 if (thread_still_needs_step_over_bp (tp))
2804 what |= STEP_OVER_BREAKPOINT;
2805
2806 if (tp->stepping_over_watchpoint
2807 && !target_have_steppable_watchpoint)
2808 what |= STEP_OVER_WATCHPOINT;
2809
2810 return what;
2811}
2812
483805cf
PA
2813/* Returns true if scheduler locking applies. STEP indicates whether
2814 we're about to do a step/next-like command to a thread. */
2815
2816static int
856e7dd6 2817schedlock_applies (struct thread_info *tp)
483805cf
PA
2818{
2819 return (scheduler_mode == schedlock_on
2820 || (scheduler_mode == schedlock_step
f2665db5
MM
2821 && tp->control.stepping_command)
2822 || (scheduler_mode == schedlock_replay
2823 && target_record_will_replay (minus_one_ptid,
2824 execution_direction)));
483805cf
PA
2825}
2826
5b6d1e4f
PA
2827/* Calls target_commit_resume on all targets. */
2828
2829static void
2830commit_resume_all_targets ()
2831{
2832 scoped_restore_current_thread restore_thread;
2833
2834 /* Map between process_target and a representative inferior. This
2835 is to avoid committing a resume in the same target more than
2836 once. Resumptions must be idempotent, so this is an
2837 optimization. */
2838 std::unordered_map<process_stratum_target *, inferior *> conn_inf;
2839
2840 for (inferior *inf : all_non_exited_inferiors ())
2841 if (inf->has_execution ())
2842 conn_inf[inf->process_target ()] = inf;
2843
2844 for (const auto &ci : conn_inf)
2845 {
2846 inferior *inf = ci.second;
2847 switch_to_inferior_no_thread (inf);
2848 target_commit_resume ();
2849 }
2850}
2851
2f4fcf00
PA
2852/* Check that all the targets we're about to resume are in non-stop
2853 mode. Ideally, we'd only care whether all targets support
2854 target-async, but we're not there yet. E.g., stop_all_threads
2855 doesn't know how to handle all-stop targets. Also, the remote
2856 protocol in all-stop mode is synchronous, irrespective of
2857 target-async, which means that things like a breakpoint re-set
2858 triggered by one target would try to read memory from all targets
2859 and fail. */
2860
2861static void
2862check_multi_target_resumption (process_stratum_target *resume_target)
2863{
2864 if (!non_stop && resume_target == nullptr)
2865 {
2866 scoped_restore_current_thread restore_thread;
2867
2868 /* This is used to track whether we're resuming more than one
2869 target. */
2870 process_stratum_target *first_connection = nullptr;
2871
2872 /* The first inferior we see with a target that does not work in
2873 always-non-stop mode. */
2874 inferior *first_not_non_stop = nullptr;
2875
2876 for (inferior *inf : all_non_exited_inferiors (resume_target))
2877 {
2878 switch_to_inferior_no_thread (inf);
2879
2880 if (!target_has_execution)
2881 continue;
2882
2883 process_stratum_target *proc_target
2884 = current_inferior ()->process_target();
2885
2886 if (!target_is_non_stop_p ())
2887 first_not_non_stop = inf;
2888
2889 if (first_connection == nullptr)
2890 first_connection = proc_target;
2891 else if (first_connection != proc_target
2892 && first_not_non_stop != nullptr)
2893 {
2894 switch_to_inferior_no_thread (first_not_non_stop);
2895
2896 proc_target = current_inferior ()->process_target();
2897
2898 error (_("Connection %d (%s) does not support "
2899 "multi-target resumption."),
2900 proc_target->connection_number,
2901 make_target_connection_string (proc_target).c_str ());
2902 }
2903 }
2904 }
2905}
2906
c906108c
SS
2907/* Basic routine for continuing the program in various fashions.
2908
2909 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2910 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2911 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2912
2913 You should call clear_proceed_status before calling proceed. */
2914
2915void
64ce06e4 2916proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2917{
e58b0e63
PA
2918 struct regcache *regcache;
2919 struct gdbarch *gdbarch;
e58b0e63 2920 CORE_ADDR pc;
4d9d9d04
PA
2921 struct execution_control_state ecss;
2922 struct execution_control_state *ecs = &ecss;
4d9d9d04 2923 int started;
c906108c 2924
e58b0e63
PA
2925 /* If we're stopped at a fork/vfork, follow the branch set by the
2926 "set follow-fork-mode" command; otherwise, we'll just proceed
2927 resuming the current thread. */
2928 if (!follow_fork ())
2929 {
2930 /* The target for some reason decided not to resume. */
2931 normal_stop ();
f148b27e 2932 if (target_can_async_p ())
b1a35af2 2933 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
2934 return;
2935 }
2936
842951eb
PA
2937 /* We'll update this if & when we switch to a new thread. */
2938 previous_inferior_ptid = inferior_ptid;
2939
e58b0e63 2940 regcache = get_current_regcache ();
ac7936df 2941 gdbarch = regcache->arch ();
8b86c959
YQ
2942 const address_space *aspace = regcache->aspace ();
2943
fc75c28b
TBA
2944 pc = regcache_read_pc_protected (regcache);
2945
08036331 2946 thread_info *cur_thr = inferior_thread ();
e58b0e63 2947
99619bea 2948 /* Fill in with reasonable starting values. */
08036331 2949 init_thread_stepping_state (cur_thr);
99619bea 2950
08036331 2951 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2952
5b6d1e4f
PA
2953 ptid_t resume_ptid
2954 = user_visible_resume_ptid (cur_thr->control.stepping_command);
2955 process_stratum_target *resume_target
2956 = user_visible_resume_target (resume_ptid);
2957
2f4fcf00
PA
2958 check_multi_target_resumption (resume_target);
2959
2acceee2 2960 if (addr == (CORE_ADDR) -1)
c906108c 2961 {
08036331 2962 if (pc == cur_thr->suspend.stop_pc
af48d08f 2963 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2964 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2965 /* There is a breakpoint at the address we will resume at,
2966 step one instruction before inserting breakpoints so that
2967 we do not stop right away (and report a second hit at this
b2175913
MS
2968 breakpoint).
2969
2970 Note, we don't do this in reverse, because we won't
2971 actually be executing the breakpoint insn anyway.
2972 We'll be (un-)executing the previous instruction. */
08036331 2973 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
2974 else if (gdbarch_single_step_through_delay_p (gdbarch)
2975 && gdbarch_single_step_through_delay (gdbarch,
2976 get_current_frame ()))
3352ef37
AC
2977 /* We stepped onto an instruction that needs to be stepped
2978 again before re-inserting the breakpoint, do so. */
08036331 2979 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
2980 }
2981 else
2982 {
515630c5 2983 regcache_write_pc (regcache, addr);
c906108c
SS
2984 }
2985
70509625 2986 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 2987 cur_thr->suspend.stop_signal = siggnal;
70509625 2988
4d9d9d04
PA
2989 /* If an exception is thrown from this point on, make sure to
2990 propagate GDB's knowledge of the executing state to the
2991 frontend/user running state. */
5b6d1e4f 2992 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
2993
2994 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2995 threads (e.g., we might need to set threads stepping over
2996 breakpoints first), from the user/frontend's point of view, all
2997 threads in RESUME_PTID are now running. Unless we're calling an
2998 inferior function, as in that case we pretend the inferior
2999 doesn't run at all. */
08036331 3000 if (!cur_thr->control.in_infcall)
719546c4 3001 set_running (resume_target, resume_ptid, true);
17b2616c 3002
edbcda09
SM
3003 infrun_log_debug ("addr=%s, signal=%s", paddress (gdbarch, addr),
3004 gdb_signal_to_symbol_string (siggnal));
527159b7 3005
4d9d9d04
PA
3006 annotate_starting ();
3007
3008 /* Make sure that output from GDB appears before output from the
3009 inferior. */
3010 gdb_flush (gdb_stdout);
3011
d930703d
PA
3012 /* Since we've marked the inferior running, give it the terminal. A
3013 QUIT/Ctrl-C from here on is forwarded to the target (which can
3014 still detect attempts to unblock a stuck connection with repeated
3015 Ctrl-C from within target_pass_ctrlc). */
3016 target_terminal::inferior ();
3017
4d9d9d04
PA
3018 /* In a multi-threaded task we may select another thread and
3019 then continue or step.
3020
3021 But if a thread that we're resuming had stopped at a breakpoint,
3022 it will immediately cause another breakpoint stop without any
3023 execution (i.e. it will report a breakpoint hit incorrectly). So
3024 we must step over it first.
3025
3026 Look for threads other than the current (TP) that reported a
3027 breakpoint hit and haven't been resumed yet since. */
3028
3029 /* If scheduler locking applies, we can avoid iterating over all
3030 threads. */
08036331 3031 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3032 {
5b6d1e4f
PA
3033 for (thread_info *tp : all_non_exited_threads (resume_target,
3034 resume_ptid))
08036331 3035 {
f3f8ece4
PA
3036 switch_to_thread_no_regs (tp);
3037
4d9d9d04
PA
3038 /* Ignore the current thread here. It's handled
3039 afterwards. */
08036331 3040 if (tp == cur_thr)
4d9d9d04 3041 continue;
c906108c 3042
4d9d9d04
PA
3043 if (!thread_still_needs_step_over (tp))
3044 continue;
3045
3046 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3047
edbcda09
SM
3048 infrun_log_debug ("need to step-over [%s] first",
3049 target_pid_to_str (tp->ptid).c_str ());
99619bea 3050
7bd43605 3051 global_thread_step_over_chain_enqueue (tp);
2adfaa28 3052 }
f3f8ece4
PA
3053
3054 switch_to_thread (cur_thr);
30852783
UW
3055 }
3056
4d9d9d04
PA
3057 /* Enqueue the current thread last, so that we move all other
3058 threads over their breakpoints first. */
08036331 3059 if (cur_thr->stepping_over_breakpoint)
7bd43605 3060 global_thread_step_over_chain_enqueue (cur_thr);
30852783 3061
4d9d9d04
PA
3062 /* If the thread isn't started, we'll still need to set its prev_pc,
3063 so that switch_back_to_stepped_thread knows the thread hasn't
3064 advanced. Must do this before resuming any thread, as in
3065 all-stop/remote, once we resume we can't send any other packet
3066 until the target stops again. */
fc75c28b 3067 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3068
a9bc57b9
TT
3069 {
3070 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 3071
a9bc57b9 3072 started = start_step_over ();
c906108c 3073
a9bc57b9
TT
3074 if (step_over_info_valid_p ())
3075 {
3076 /* Either this thread started a new in-line step over, or some
3077 other thread was already doing one. In either case, don't
3078 resume anything else until the step-over is finished. */
3079 }
3080 else if (started && !target_is_non_stop_p ())
3081 {
3082 /* A new displaced stepping sequence was started. In all-stop,
3083 we can't talk to the target anymore until it next stops. */
3084 }
3085 else if (!non_stop && target_is_non_stop_p ())
3086 {
3087 /* In all-stop, but the target is always in non-stop mode.
3088 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3089 for (thread_info *tp : all_non_exited_threads (resume_target,
3090 resume_ptid))
3091 {
3092 switch_to_thread_no_regs (tp);
3093
f9fac3c8
SM
3094 if (!tp->inf->has_execution ())
3095 {
edbcda09
SM
3096 infrun_log_debug ("[%s] target has no execution",
3097 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3098 continue;
3099 }
f3f8ece4 3100
f9fac3c8
SM
3101 if (tp->resumed)
3102 {
edbcda09
SM
3103 infrun_log_debug ("[%s] resumed",
3104 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3105 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3106 continue;
3107 }
fbea99ea 3108
f9fac3c8
SM
3109 if (thread_is_in_step_over_chain (tp))
3110 {
edbcda09
SM
3111 infrun_log_debug ("[%s] needs step-over",
3112 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3113 continue;
3114 }
fbea99ea 3115
edbcda09
SM
3116 infrun_log_debug ("resuming %s",
3117 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3118
f9fac3c8
SM
3119 reset_ecs (ecs, tp);
3120 switch_to_thread (tp);
3121 keep_going_pass_signal (ecs);
3122 if (!ecs->wait_some_more)
3123 error (_("Command aborted."));
3124 }
a9bc57b9 3125 }
08036331 3126 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3127 {
3128 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3129 reset_ecs (ecs, cur_thr);
3130 switch_to_thread (cur_thr);
a9bc57b9
TT
3131 keep_going_pass_signal (ecs);
3132 if (!ecs->wait_some_more)
3133 error (_("Command aborted."));
3134 }
3135 }
c906108c 3136
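 /* Let each target commit the resumptions deferred above, now that we
    know everything we want to resume; a target may batch these (e.g.,
    a remote target coalescing them into a single vCont request).  */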
5b6d1e4f 3137 commit_resume_all_targets ();
85ad3aaf 3138
731f534f 3139 finish_state.release ();
c906108c 3140
873657b9
PA
3141 /* If we've switched threads above, switch back to the previously
3142 current thread. We don't want the user to see a different
3143 selected thread. */
3144 switch_to_thread (cur_thr);
3145
0b333c5e
PA
3146 /* Tell the event loop to wait for it to stop. If the target
3147 supports asynchronous execution, it'll do this from within
3148 target_resume. */
362646f5 3149 if (!target_can_async_p ())
0b333c5e 3150 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3151}
c906108c
SS
3152\f
3153
3154/* Start remote-debugging of a machine over a serial link. */
96baa820 3155
c906108c 3156void
8621d6a9 3157start_remote (int from_tty)
c906108c 3158{
5b6d1e4f
PA
3159 inferior *inf = current_inferior ();
3160 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3161
1777feb0 3162 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3163 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3164 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3165 nothing is returned (instead of just blocking). Because of this,
3166 targets expecting an immediate response need to, internally, set
3167 things up so that the target_wait() is forced to eventually
1777feb0 3168 timeout. */
6426a772
JM
3169 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3170 differentiate to its caller what the state of the target is after
3171 the initial open has been performed. Here we're assuming that
3172 the target has stopped. It should be possible to eventually have
3173 target_open() return to the caller an indication that the target
3174 is currently running and GDB state should be set to the same as
1777feb0 3175 for an async run. */
5b6d1e4f 3176 wait_for_inferior (inf);
8621d6a9
DJ
3177
3178 /* Now that the inferior has stopped, do any bookkeeping like
3179 loading shared libraries. We want to do this before normal_stop,
3180 so that the displayed frame is up to date. */
8b88a78e 3181 post_create_inferior (current_top_target (), from_tty);
8621d6a9 3182
6426a772 3183 normal_stop ();
c906108c
SS
3184}
3185
3186/* Initialize static vars when a new inferior begins. */
3187
3188void
96baa820 3189init_wait_for_inferior (void)
c906108c
SS
3190{
3191 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3192
c906108c
SS
3193 breakpoint_init_inferior (inf_starting);
3194
70509625 3195 clear_proceed_status (0);
9f976b41 3196
ab1ddbcf 3197 nullify_last_target_wait_ptid ();
237fc4c9 3198
842951eb 3199 previous_inferior_ptid = inferior_ptid;
c906108c 3200}
237fc4c9 3201
c906108c 3202\f
488f131b 3203
ec9499be 3204static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3205
568d6575
UW
3206static void handle_step_into_function (struct gdbarch *gdbarch,
3207 struct execution_control_state *ecs);
3208static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3209 struct execution_control_state *ecs);
4f5d7f63 3210static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3211static void check_exception_resume (struct execution_control_state *,
28106bc2 3212 struct frame_info *);
611c83ae 3213
bdc36728 3214static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3215static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3216static void keep_going (struct execution_control_state *ecs);
94c57d6a 3217static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3218static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3219
252fbfc8
PA
3220/* This function is attached as a "thread_stop_requested" observer.
3221 Cleanup local state that assumed the PTID was to be resumed, and
3222 report the stop to the frontend. */
3223
2c0b251b 3224static void
252fbfc8
PA
3225infrun_thread_stop_requested (ptid_t ptid)
3226{
5b6d1e4f
PA
3227 process_stratum_target *curr_target = current_inferior ()->process_target ();
3228
c65d6b55
PA
3229 /* PTID was requested to stop. If the thread was already stopped,
3230 but the user/frontend doesn't know about that yet (e.g., the
3231 thread had been temporarily paused for some step-over), set up
3232 for reporting the stop now. */
5b6d1e4f 3233 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3234 {
3235 if (tp->state != THREAD_RUNNING)
3236 continue;
3237 if (tp->executing)
3238 continue;
c65d6b55 3239
08036331
PA
3240 /* Remove matching threads from the step-over queue, so
3241 start_step_over doesn't try to resume them
3242 automatically. */
3243 if (thread_is_in_step_over_chain (tp))
7bd43605 3244 global_thread_step_over_chain_remove (tp);
c65d6b55 3245
08036331
PA
3246 /* If the thread is stopped, but the user/frontend doesn't
3247 know about that yet, queue a pending event, as if the
3248 thread had just stopped now. Unless the thread already had
3249 a pending event. */
3250 if (!tp->suspend.waitstatus_pending_p)
3251 {
3252 tp->suspend.waitstatus_pending_p = 1;
3253 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3254 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3255 }
c65d6b55 3256
08036331
PA
3257 /* Clear the inline-frame state, since we're re-processing the
3258 stop. */
5b6d1e4f 3259 clear_inline_frame_state (tp);
c65d6b55 3260
08036331
PA
3261 /* If this thread was paused because some other thread was
3262 doing an inline-step over, let that finish first. Once
3263 that happens, we'll restart all threads and consume pending
3264 stop events then. */
3265 if (step_over_info_valid_p ())
3266 continue;
3267
3268 /* Otherwise we can process the (new) pending event now. Set
3269 it so this pending event is considered by
3270 do_target_wait. */
719546c4 3271 tp->resumed = true;
08036331 3272 }
252fbfc8
PA
3273}
3274
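/* If TP is the thread whose wait event we last reported, forget the
   cached last-wait ptid so later code doesn't refer to a thread that
   no longer exists.  Runs when a thread exits.  */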
a07daef3
PA
3275static void
3276infrun_thread_thread_exit (struct thread_info *tp, int silent)
3277{
5b6d1e4f
PA
3278 if (target_last_proc_target == tp->inf->process_target ()
3279 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3280 nullify_last_target_wait_ptid ();
3281}
3282
0cbcdb96
PA
3283/* Delete the step resume, single-step and longjmp/exception resume
3284 breakpoints of TP. */
4e1c45ea 3285
0cbcdb96
PA
3286static void
3287delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3288{
0cbcdb96
PA
3289 delete_step_resume_breakpoint (tp);
3290 delete_exception_resume_breakpoint (tp);
34b7e8a6 3291 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3292}
3293
0cbcdb96
PA
3294/* If the target still has execution, call FUNC for each thread that
3295 just stopped. In all-stop, that's all the non-exited threads; in
3296 non-stop, that's the current thread, only. */
3297
3298typedef void (*for_each_just_stopped_thread_callback_func)
3299 (struct thread_info *tp);
4e1c45ea
PA
3300
3301static void
0cbcdb96 3302for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3303{
d7e15655 3304 if (!target_has_execution || inferior_ptid == null_ptid)
4e1c45ea
PA
3305 return;
3306
fbea99ea 3307 if (target_is_non_stop_p ())
4e1c45ea 3308 {
0cbcdb96
PA
3309 /* If in non-stop mode, only the current thread stopped. */
3310 func (inferior_thread ());
4e1c45ea
PA
3311 }
3312 else
0cbcdb96 3313 {
0cbcdb96 3314 /* In all-stop mode, all threads have stopped. */
08036331
PA
3315 for (thread_info *tp : all_non_exited_threads ())
3316 func (tp);
0cbcdb96
PA
3317 }
3318}
3319
3320/* Delete the step resume and longjmp/exception resume breakpoints of
3321 the threads that just stopped. */
3322
3323static void
3324delete_just_stopped_threads_infrun_breakpoints (void)
3325{
3326 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3327}
3328
3329/* Delete the single-step breakpoints of the threads that just
3330 stopped. */
7c16b83e 3331
34b7e8a6
PA
3332static void
3333delete_just_stopped_threads_single_step_breakpoints (void)
3334{
3335 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3336}
3337
221e1a37 3338/* See infrun.h. */
223698f8 3339
221e1a37 3340void
223698f8
DE
3341print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3342 const struct target_waitstatus *ws)
3343{
23fdd69e 3344 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3345 string_file stb;
223698f8
DE
3346
3347 /* The text is split over several lines because it was getting too long.
3348 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3349 output as a unit; we want only one timestamp printed if debug_timestamp
3350 is set. */
3351
d7e74731 3352 stb.printf ("infrun: target_wait (%d.%ld.%ld",
e99b03dc 3353 waiton_ptid.pid (),
e38504b3 3354 waiton_ptid.lwp (),
cc6bcb54 3355 waiton_ptid.tid ());
e99b03dc 3356 if (waiton_ptid.pid () != -1)
a068643d 3357 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731
PA
3358 stb.printf (", status) =\n");
3359 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
e99b03dc 3360 result_ptid.pid (),
e38504b3 3361 result_ptid.lwp (),
cc6bcb54 3362 result_ptid.tid (),
a068643d 3363 target_pid_to_str (result_ptid).c_str ());
23fdd69e 3364 stb.printf ("infrun: %s\n", status_string.c_str ());
223698f8
DE
3365
3366 /* This uses %s in part to handle %'s in the text, but also to avoid
3367 a gcc error: the format attribute requires a string literal. */
d7e74731 3368 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3369}
3370
372316f1
PA
3371/* Select a thread at random, out of those which are resumed and have
3372 had events. */
3373
3374static struct thread_info *
5b6d1e4f 3375random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3376{
372316f1 3377 int num_events = 0;
08036331 3378
5b6d1e4f 3379 auto has_event = [&] (thread_info *tp)
08036331 3380 {
5b6d1e4f
PA
3381 return (tp->ptid.matches (waiton_ptid)
3382 && tp->resumed
08036331
PA
3383 && tp->suspend.waitstatus_pending_p);
3384 };
372316f1
PA
3385
3386 /* First see how many events we have. Count only resumed threads
3387 that have an event pending. */
5b6d1e4f 3388 for (thread_info *tp : inf->non_exited_threads ())
08036331 3389 if (has_event (tp))
372316f1
PA
3390 num_events++;
3391
3392 if (num_events == 0)
3393 return NULL;
3394
3395 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3396 int random_selector = (int) ((num_events * (double) rand ())
3397 / (RAND_MAX + 1.0));
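 /* For instance, with num_events == 3, rand () / (RAND_MAX + 1.0) is
    uniform in [0, 1), so random_selector comes out 0, 1 or 2 with
    (roughly) equal probability.  */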
372316f1 3398
edbcda09
SM
3399 if (num_events > 1)
3400 infrun_log_debug ("Found %d events, selecting #%d",
3401 num_events, random_selector);
372316f1
PA
3402
3403 /* Select the Nth thread that has had an event. */
5b6d1e4f 3404 for (thread_info *tp : inf->non_exited_threads ())
08036331 3405 if (has_event (tp))
372316f1 3406 if (random_selector-- == 0)
08036331 3407 return tp;
372316f1 3408
08036331 3409 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3410}
3411
3412/* Wrapper for target_wait that first checks whether threads have
3413 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3414 more events. INF is the inferior we're using to call target_wait
3415 on. */
372316f1
PA
3416
3417static ptid_t
5b6d1e4f
PA
3418do_target_wait_1 (inferior *inf, ptid_t ptid,
3419 target_waitstatus *status, int options)
372316f1
PA
3420{
3421 ptid_t event_ptid;
3422 struct thread_info *tp;
3423
24ed6739
AB
3424 /* We know that we are looking for an event in the target of inferior
3425 INF, but we don't know which thread the event might come from. As
3426 such we want to make sure that INFERIOR_PTID is reset so that none of
3427 the wait code relies on it - doing so is always a mistake. */
3428 switch_to_inferior_no_thread (inf);
3429
372316f1
PA
3430 /* First check if there is a resumed thread with a wait status
3431 pending. */
d7e15655 3432 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3433 {
5b6d1e4f 3434 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3435 }
3436 else
3437 {
edbcda09
SM
3438 infrun_log_debug ("Waiting for specific thread %s.",
3439 target_pid_to_str (ptid).c_str ());
372316f1
PA
3440
3441 /* We have a specific thread to check. */
5b6d1e4f 3442 tp = find_thread_ptid (inf, ptid);
372316f1
PA
3443 gdb_assert (tp != NULL);
3444 if (!tp->suspend.waitstatus_pending_p)
3445 tp = NULL;
3446 }
3447
3448 if (tp != NULL
3449 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3450 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3451 {
00431a78 3452 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3453 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3454 CORE_ADDR pc;
3455 int discard = 0;
3456
3457 pc = regcache_read_pc (regcache);
3458
3459 if (pc != tp->suspend.stop_pc)
3460 {
edbcda09
SM
3461 infrun_log_debug ("PC of %s changed. was=%s, now=%s",
3462 target_pid_to_str (tp->ptid).c_str (),
3463 paddress (gdbarch, tp->suspend.stop_pc),
3464 paddress (gdbarch, pc));
372316f1
PA
3465 discard = 1;
3466 }
a01bda52 3467 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1 3468 {
edbcda09
SM
3469 infrun_log_debug ("previous breakpoint of %s, at %s gone",
3470 target_pid_to_str (tp->ptid).c_str (),
3471 paddress (gdbarch, pc));
372316f1
PA
3472
3473 discard = 1;
3474 }
3475
3476 if (discard)
3477 {
edbcda09
SM
3478 infrun_log_debug ("pending event of %s cancelled.",
3479 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3480
3481 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3482 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3483 }
3484 }
3485
3486 if (tp != NULL)
3487 {
edbcda09
SM
3488 infrun_log_debug ("Using pending wait status %s for %s.",
3489 target_waitstatus_to_string
3490 (&tp->suspend.waitstatus).c_str (),
3491 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3492
3493 /* Now that we've selected our final event LWP, un-adjust its PC
3494 if it was a software breakpoint (and the target doesn't
3495 always adjust the PC itself). */
3496 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3497 && !target_supports_stopped_by_sw_breakpoint ())
3498 {
3499 struct regcache *regcache;
3500 struct gdbarch *gdbarch;
3501 int decr_pc;
3502
00431a78 3503 regcache = get_thread_regcache (tp);
ac7936df 3504 gdbarch = regcache->arch ();
372316f1
PA
3505
3506 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3507 if (decr_pc != 0)
3508 {
3509 CORE_ADDR pc;
3510
3511 pc = regcache_read_pc (regcache);
3512 regcache_write_pc (regcache, pc + decr_pc);
3513 }
3514 }
3515
3516 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3517 *status = tp->suspend.waitstatus;
3518 tp->suspend.waitstatus_pending_p = 0;
3519
3520 /* Wake up the event loop again, until all pending events are
3521 processed. */
3522 if (target_is_async_p ())
3523 mark_async_event_handler (infrun_async_inferior_event_token);
3524 return tp->ptid;
3525 }
3526
3527 /* But if we don't find one, we'll have to wait. */
3528
3529 if (deprecated_target_wait_hook)
3530 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3531 else
3532 event_ptid = target_wait (ptid, status, options);
3533
3534 return event_ptid;
3535}
3536
5b6d1e4f
PA
3537/* Wrapper for target_wait that first checks whether threads have
3538 pending statuses to report before actually asking the target for
cad90433 3539 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
3540
3541static bool
3542do_target_wait (ptid_t wait_ptid, execution_control_state *ecs, int options)
3543{
3544 int num_inferiors = 0;
3545 int random_selector;
3546
cad90433
SM
3547 /* For fairness, we pick the first inferior/target to poll at random
3548 out of all inferiors that may report events, and then continue
3549 polling the rest of the inferior list starting from that one in a
3550 circular fashion until the whole list is polled once. */
5b6d1e4f
PA
3551
3552 auto inferior_matches = [&wait_ptid] (inferior *inf)
3553 {
3554 return (inf->process_target () != NULL
5b6d1e4f
PA
3555 && ptid_t (inf->pid).matches (wait_ptid));
3556 };
3557
cad90433 3558 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
3559 for (inferior *inf : all_inferiors ())
3560 if (inferior_matches (inf))
3561 num_inferiors++;
3562
3563 if (num_inferiors == 0)
3564 {
3565 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3566 return false;
3567 }
3568
cad90433 3569 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
3570 random_selector = (int)
3571 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3572
edbcda09
SM
3573 if (num_inferiors > 1)
3574 infrun_log_debug ("Found %d inferiors, starting at #%d",
3575 num_inferiors, random_selector);
5b6d1e4f 3576
cad90433 3577 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
3578
3579 inferior *selected = nullptr;
3580
3581 for (inferior *inf : all_inferiors ())
3582 if (inferior_matches (inf))
3583 if (random_selector-- == 0)
3584 {
3585 selected = inf;
3586 break;
3587 }
3588
cad90433 3589 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
3590 targets, starting from the selected one. */
3591
3592 auto do_wait = [&] (inferior *inf)
3593 {
5b6d1e4f
PA
3594 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
3595 ecs->target = inf->process_target ();
3596 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3597 };
3598
cad90433
SM
3599 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3600 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
3601 reported the stop to the user, polling for events. */
3602 scoped_restore_current_thread restore_thread;
3603
3604 int inf_num = selected->num;
3605 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3606 if (inferior_matches (inf))
3607 if (do_wait (inf))
3608 return true;
3609
3610 for (inferior *inf = inferior_list;
3611 inf != NULL && inf->num < inf_num;
3612 inf = inf->next)
3613 if (inferior_matches (inf))
3614 if (do_wait (inf))
3615 return true;
3616
3617 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3618 return false;
3619}
3620
24291992
PA
3621/* Prepare and stabilize the inferior for detaching it. E.g.,
3622 detaching while a thread is displaced stepping is a recipe for
3623 crashing it, as nothing would readjust the PC out of the scratch
3624 pad. */
3625
3626void
3627prepare_for_detach (void)
3628{
3629 struct inferior *inf = current_inferior ();
f2907e49 3630 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3631
9844051a 3632 // displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);
24291992
PA
3633
3634 /* Is any thread of this process displaced stepping? If not,
3635 there's nothing else to do. */
9844051a 3636 if (!displaced_step_in_progress (inf))
24291992
PA
3637 return;
3638
edbcda09 3639 infrun_log_debug ("displaced-stepping in progress while detaching");
24291992 3640
9bcb1f16 3641 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3642
9844051a
SM
3643 // FIXME
3644 while (false)
24291992 3645 {
24291992
PA
3646 struct execution_control_state ecss;
3647 struct execution_control_state *ecs;
3648
3649 ecs = &ecss;
3650 memset (ecs, 0, sizeof (*ecs));
3651
3652 overlay_cache_invalid = 1;
f15cb84a
YQ
3653 /* Flush target cache before starting to handle each event.
3654 Target was running and cache could be stale. This is just a
3655 heuristic. Running threads may modify target memory, but we
3656 don't get any event. */
3657 target_dcache_invalidate ();
24291992 3658
5b6d1e4f 3659 do_target_wait (pid_ptid, ecs, 0);
24291992
PA
3660
3661 if (debug_infrun)
3662 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3663
3664 /* If an error happens while handling the event, propagate GDB's
3665 knowledge of the executing state to the frontend/user running
3666 state. */
5b6d1e4f
PA
3667 scoped_finish_thread_state finish_state (inf->process_target (),
3668 minus_one_ptid);
24291992
PA
3669
3670 /* Now figure out what to do with the result. */
3671 handle_inferior_event (ecs);
3672
3673 /* No error, don't finish the state yet. */
731f534f 3674 finish_state.release ();
24291992
PA
3675
3676 /* Breakpoints and watchpoints are not installed on the target
3677 at this point, and signals are passed directly to the
3678 inferior, so this must mean the process is gone. */
3679 if (!ecs->wait_some_more)
3680 {
9bcb1f16 3681 restore_detaching.release ();
24291992
PA
3682 error (_("Program exited while detaching"));
3683 }
3684 }
3685
9bcb1f16 3686 restore_detaching.release ();
24291992
PA
3687}
3688
cd0fc7c3 3689/* Wait for control to return from inferior to debugger.
ae123ec6 3690
cd0fc7c3
SS
3691 If inferior gets a signal, we may decide to start it up again
3692 instead of returning. That is why there is a loop in this function.
3693 When this function actually returns it means the inferior
3694 should be left stopped and GDB should read more commands. */
3695
5b6d1e4f
PA
3696static void
3697wait_for_inferior (inferior *inf)
cd0fc7c3 3698{
edbcda09 3699 infrun_log_debug ("wait_for_inferior ()");
527159b7 3700
4c41382a 3701 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3702
e6f5c25b
PA
3703 /* If an error happens while handling the event, propagate GDB's
3704 knowledge of the executing state to the frontend/user running
3705 state. */
5b6d1e4f
PA
3706 scoped_finish_thread_state finish_state
3707 (inf->process_target (), minus_one_ptid);
e6f5c25b 3708
c906108c
SS
3709 while (1)
3710 {
ae25568b
PA
3711 struct execution_control_state ecss;
3712 struct execution_control_state *ecs = &ecss;
29f49a6a 3713
ae25568b
PA
3714 memset (ecs, 0, sizeof (*ecs));
3715
ec9499be 3716 overlay_cache_invalid = 1;
ec9499be 3717
f15cb84a
YQ
3718 /* Flush target cache before starting to handle each event.
3719 Target was running and cache could be stale. This is just a
3720 heuristic. Running threads may modify target memory, but we
3721 don't get any event. */
3722 target_dcache_invalidate ();
3723
5b6d1e4f
PA
3724 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3725 ecs->target = inf->process_target ();
c906108c 3726
f00150c9 3727 if (debug_infrun)
5b6d1e4f 3728 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 3729
cd0fc7c3
SS
3730 /* Now figure out what to do with the result. */
3731 handle_inferior_event (ecs);
c906108c 3732
cd0fc7c3
SS
3733 if (!ecs->wait_some_more)
3734 break;
3735 }
4e1c45ea 3736
e6f5c25b 3737 /* No error, don't finish the state yet. */
731f534f 3738 finish_state.release ();
cd0fc7c3 3739}
c906108c 3740
d3d4baed
PA
3741/* Cleanup that reinstalls the readline callback handler, if the
3742 target is running in the background. If while handling the target
3743 event something triggered a secondary prompt, like e.g., a
3744 pagination prompt, we'll have removed the callback handler (see
3745 gdb_readline_wrapper_line). Need to do this as we go back to the
3746 event loop, ready to process further input. Note this has no
3747 effect if the handler hasn't actually been removed, because calling
3748 rl_callback_handler_install resets the line buffer, thus losing
3749 input. */
3750
3751static void
d238133d 3752reinstall_readline_callback_handler_cleanup ()
d3d4baed 3753{
3b12939d
PA
3754 struct ui *ui = current_ui;
3755
3756 if (!ui->async)
6c400b59
PA
3757 {
3758 /* We're not going back to the top level event loop yet. Don't
3759 install the readline callback, as it'd prep the terminal,
3760 readline-style (raw, noecho) (e.g., --batch). We'll install
3761 it the next time the prompt is displayed, when we're ready
3762 for input. */
3763 return;
3764 }
3765
3b12939d 3766 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3767 gdb_rl_callback_handler_reinstall ();
3768}
3769
243a9253
PA
3770/* Clean up the FSMs of threads that are now stopped. In non-stop,
3771 that's just the event thread. In all-stop, that's all threads. */
3772
3773static void
3774clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3775{
08036331
PA
3776 if (ecs->event_thread != NULL
3777 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3778 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3779
3780 if (!non_stop)
3781 {
08036331 3782 for (thread_info *thr : all_non_exited_threads ())
243a9253
PA
3783 {
3784 if (thr->thread_fsm == NULL)
3785 continue;
3786 if (thr == ecs->event_thread)
3787 continue;
3788
00431a78 3789 switch_to_thread (thr);
46e3ed7f 3790 thr->thread_fsm->clean_up (thr);
243a9253
PA
3791 }
3792
3793 if (ecs->event_thread != NULL)
00431a78 3794 switch_to_thread (ecs->event_thread);
243a9253
PA
3795 }
3796}
3797
3b12939d
PA
3798/* Helper for all_uis_check_sync_execution_done that works on the
3799 current UI. */
3800
3801static void
3802check_curr_ui_sync_execution_done (void)
3803{
3804 struct ui *ui = current_ui;
3805
3806 if (ui->prompt_state == PROMPT_NEEDED
3807 && ui->async
3808 && !gdb_in_secondary_prompt_p (ui))
3809 {
223ffa71 3810 target_terminal::ours ();
76727919 3811 gdb::observers::sync_execution_done.notify ();
3eb7562a 3812 ui_register_input_event_handler (ui);
3b12939d
PA
3813 }
3814}
3815
3816/* See infrun.h. */
3817
3818void
3819all_uis_check_sync_execution_done (void)
3820{
0e454242 3821 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3822 {
3823 check_curr_ui_sync_execution_done ();
3824 }
3825}
3826
a8836c93
PA
3827/* See infrun.h. */
3828
3829void
3830all_uis_on_sync_execution_starting (void)
3831{
0e454242 3832 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3833 {
3834 if (current_ui->prompt_state == PROMPT_NEEDED)
3835 async_disable_stdin ();
3836 }
3837}
3838
1777feb0 3839/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3840 event loop whenever a change of state is detected on the file
1777feb0
MS
3841 descriptor corresponding to the target. It can be called more than
3842 once to complete a single execution command. In such cases we need
3843 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3844 that this function is called for a single execution command, then
3845 report to the user that the inferior has stopped, and do the
1777feb0 3846 necessary cleanups. */
43ff13b4
JM
3847
3848void
b1a35af2 3849fetch_inferior_event ()
43ff13b4 3850{
0d1e5fa7 3851 struct execution_control_state ecss;
a474d7c2 3852 struct execution_control_state *ecs = &ecss;
0f641c01 3853 int cmd_done = 0;
43ff13b4 3854
0d1e5fa7
PA
3855 memset (ecs, 0, sizeof (*ecs));
3856
c61db772
PA
3857 /* Events are always processed with the main UI as current UI. This
3858 way, warnings, debug output, etc. are always consistently sent to
3859 the main console. */
4b6749b9 3860 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3861
d3d4baed 3862 /* End up with readline processing input, if necessary. */
d238133d
TT
3863 {
3864 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3865
3866 /* We're handling a live event, so make sure we're doing live
3867 debugging. If we're looking at traceframes while the target is
3868 running, we're going to need to get back to that mode after
3869 handling the event. */
3870 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3871 if (non_stop)
3872 {
3873 maybe_restore_traceframe.emplace ();
3874 set_current_traceframe (-1);
3875 }
43ff13b4 3876
873657b9
PA
3877 /* The user/frontend should not notice a thread switch due to
3878 internal events. Make sure we revert to the user selected
3879 thread and frame after handling the event and running any
3880 breakpoint commands. */
3881 scoped_restore_current_thread restore_thread;
d238133d
TT
3882
3883 overlay_cache_invalid = 1;
3884 /* Flush target cache before starting to handle each event. Target
3885 was running and cache could be stale. This is just a heuristic.
3886 Running threads may modify target memory, but we don't get any
3887 event. */
3888 target_dcache_invalidate ();
3889
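 /* Handle the event using the direction the target is actually
    executing in (e.g. EXEC_REVERSE while replaying a recording),
    restoring the previous direction afterwards.  */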
3890 scoped_restore save_exec_dir
3891 = make_scoped_restore (&execution_direction,
3892 target_execution_direction ());
3893
5b6d1e4f
PA
3894 if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
3895 return;
3896
3897 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3898
3899 /* Switch to the target that generated the event, so we can do
3900 target calls. Any inferior bound to the target will do, so we
3901 just switch to the first we find. */
3902 for (inferior *inf : all_inferiors (ecs->target))
3903 {
3904 switch_to_inferior_no_thread (inf);
3905 break;
3906 }
d238133d
TT
3907
3908 if (debug_infrun)
5b6d1e4f 3909 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
3910
3911 /* If an error happens while handling the event, propagate GDB's
3912 knowledge of the executing state to the frontend/user running
3913 state. */
3914 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 3915 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 3916
979a0d13 3917 /* Declared after the scoped_restore_current_thread above, so these run
d238133d
TT
3918 before it on scope exit and thus still apply to the thread that threw the exception. */
3919 auto defer_bpstat_clear
3920 = make_scope_exit (bpstat_clear_actions);
3921 auto defer_delete_threads
3922 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
3923
3924 /* Now figure out what to do with the result. */
3925 handle_inferior_event (ecs);
3926
3927 if (!ecs->wait_some_more)
3928 {
5b6d1e4f 3929 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
d238133d
TT
3930 int should_stop = 1;
3931 struct thread_info *thr = ecs->event_thread;
d6b48e9c 3932
d238133d 3933 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3934
d238133d
TT
3935 if (thr != NULL)
3936 {
3937 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 3938
d238133d 3939 if (thread_fsm != NULL)
46e3ed7f 3940 should_stop = thread_fsm->should_stop (thr);
d238133d 3941 }
243a9253 3942
d238133d
TT
3943 if (!should_stop)
3944 {
3945 keep_going (ecs);
3946 }
3947 else
3948 {
46e3ed7f 3949 bool should_notify_stop = true;
d238133d 3950 int proceeded = 0;
1840d81a 3951
d238133d 3952 clean_up_just_stopped_threads_fsms (ecs);
243a9253 3953
d238133d 3954 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 3955 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 3956
d238133d
TT
3957 if (should_notify_stop)
3958 {
3959 /* We may not find an inferior if this was a process exit. */
3960 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3961 proceeded = normal_stop ();
3962 }
243a9253 3963
d238133d
TT
3964 if (!proceeded)
3965 {
b1a35af2 3966 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
3967 cmd_done = 1;
3968 }
873657b9
PA
3969
3970 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
3971 previously selected thread is gone. We have two
3972 choices - switch to no thread selected, or restore the
3973 previously selected thread (now exited). We chose the
3974 latter, just because that's what GDB used to do. After
3975 this, "info threads" says "The current thread <Thread
3976 ID 2> has terminated." instead of "No thread
3977 selected.". */
3978 if (!non_stop
3979 && cmd_done
3980 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3981 restore_thread.dont_restore ();
d238133d
TT
3982 }
3983 }
4f8d22e3 3984
d238133d
TT
3985 defer_delete_threads.release ();
3986 defer_bpstat_clear.release ();
29f49a6a 3987
d238133d
TT
3988 /* No error, don't finish the thread states yet. */
3989 finish_state.release ();
731f534f 3990
d238133d
TT
3991 /* This scope is used to ensure that readline callbacks are
3992 reinstalled here. */
3993 }
4f8d22e3 3994
3b12939d
PA
3995 /* If a UI was in sync execution mode, and now isn't, restore its
3996 prompt (a synchronous execution command has finished, and we're
3997 ready for input). */
3998 all_uis_check_sync_execution_done ();
0f641c01
PA
3999
4000 if (cmd_done
0f641c01 4001 && exec_done_display_p
00431a78
PA
4002 && (inferior_ptid == null_ptid
4003 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 4004 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
4005}
4006
29734269
SM
4007/* See infrun.h. */
4008
edb3359d 4009void
29734269
SM
4010set_step_info (thread_info *tp, struct frame_info *frame,
4011 struct symtab_and_line sal)
edb3359d 4012{
29734269
SM
4013 /* This can be removed once this function no longer implicitly relies on the
4014 inferior_ptid value. */
4015 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4016
16c381f0
JK
4017 tp->control.step_frame_id = get_frame_id (frame);
4018 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4019
4020 tp->current_symtab = sal.symtab;
4021 tp->current_line = sal.line;
4022}
4023
0d1e5fa7
PA
4024/* Clear context switchable stepping state. */
4025
4026void
4e1c45ea 4027init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4028{
7f5ef605 4029 tss->stepped_breakpoint = 0;
0d1e5fa7 4030 tss->stepping_over_breakpoint = 0;
963f9c80 4031 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4032 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4033}
4034
ab1ddbcf 4035/* See infrun.h. */
c32c64b7 4036
6efcd9a8 4037void
5b6d1e4f
PA
4038set_last_target_status (process_stratum_target *target, ptid_t ptid,
4039 target_waitstatus status)
c32c64b7 4040{
5b6d1e4f 4041 target_last_proc_target = target;
c32c64b7
DE
4042 target_last_wait_ptid = ptid;
4043 target_last_waitstatus = status;
4044}
4045
ab1ddbcf 4046/* See infrun.h. */
e02bc4cc
DS
4047
4048void
5b6d1e4f
PA
4049get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4050 target_waitstatus *status)
e02bc4cc 4051{
5b6d1e4f
PA
4052 if (target != nullptr)
4053 *target = target_last_proc_target;
ab1ddbcf
PA
4054 if (ptid != nullptr)
4055 *ptid = target_last_wait_ptid;
4056 if (status != nullptr)
4057 *status = target_last_waitstatus;
e02bc4cc
DS
4058}
4059
ab1ddbcf
PA
4060/* See infrun.h. */
4061
ac264b3b
MS
4062void
4063nullify_last_target_wait_ptid (void)
4064{
5b6d1e4f 4065 target_last_proc_target = nullptr;
ac264b3b 4066 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4067 target_last_waitstatus = {};
ac264b3b
MS
4068}
4069
dcf4fbde 4070/* Switch thread contexts. */
dd80620e
MS
4071
4072static void
00431a78 4073context_switch (execution_control_state *ecs)
dd80620e 4074{
edbcda09 4075 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4076 && (inferior_ptid == null_ptid
4077 || ecs->event_thread != inferior_thread ()))
fd48f117 4078 {
edbcda09
SM
4079 infrun_log_debug ("Switching context from %s to %s",
4080 target_pid_to_str (inferior_ptid).c_str (),
4081 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4082 }
4083
00431a78 4084 switch_to_thread (ecs->event_thread);
dd80620e
MS
4085}
4086
d8dd4d5f
PA
4087/* If the target can't tell whether we've hit breakpoints
4088 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4089 check whether that could have been caused by a breakpoint. If so,
4090 adjust the PC, per gdbarch_decr_pc_after_break. */
4091
4fa8626c 4092static void
d8dd4d5f
PA
4093adjust_pc_after_break (struct thread_info *thread,
4094 struct target_waitstatus *ws)
4fa8626c 4095{
24a73cce
UW
4096 struct regcache *regcache;
4097 struct gdbarch *gdbarch;
118e6252 4098 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4099
4fa8626c
DJ
4100 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4101 we aren't, just return.
9709f61c
DJ
4102
4103 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4104 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4105 implemented by software breakpoints should be handled through the normal
4106 breakpoint layer.
8fb3e588 4107
4fa8626c
DJ
4108 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4109 different signals (SIGILL or SIGEMT for instance), but it is less
4110 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4111 gdbarch_decr_pc_after_break. I don't know any specific target that
4112 generates these signals at breakpoints (the code has been in GDB since at
4113 least 1992) so I can not guess how to handle them here.
8fb3e588 4114
e6cf7916
UW
4115 In earlier versions of GDB, a target with
4116 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4117 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4118 target with both of these set in GDB history, and it seems unlikely to be
4119 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4120
d8dd4d5f 4121 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4122 return;
4123
d8dd4d5f 4124 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4125 return;
4126
4058b839
PA
4127 /* In reverse execution, when a breakpoint is hit, the instruction
4128 under it has already been de-executed. The reported PC always
4129 points at the breakpoint address, so adjusting it further would
4130 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4131 architecture:
4132
4133 B1 0x08000000 : INSN1
4134 B2 0x08000001 : INSN2
4135 0x08000002 : INSN3
4136 PC -> 0x08000003 : INSN4
4137
4138 Say you're stopped at 0x08000003 as above. Reverse continuing
4139 from that point should hit B2 as below. Reading the PC when the
4140 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4141 been de-executed already.
4142
4143 B1 0x08000000 : INSN1
4144 B2 PC -> 0x08000001 : INSN2
4145 0x08000002 : INSN3
4146 0x08000003 : INSN4
4147
4148 We can't apply the same logic as for forward execution, because
4149 we would wrongly adjust the PC to 0x08000000, since there's a
4150 breakpoint at PC - 1. We'd then report a hit on B1, although
4151 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4152 behaviour. */
4153 if (execution_direction == EXEC_REVERSE)
4154 return;
4155
1cf4d951
PA
4156 /* If the target can tell whether the thread hit a SW breakpoint,
4157 trust it. Targets that can tell also adjust the PC
4158 themselves. */
4159 if (target_supports_stopped_by_sw_breakpoint ())
4160 return;
4161
4162 /* Note that relying on whether a breakpoint is planted in memory to
4163 determine this can fail. E.g., the breakpoint could have been
4164 removed since. Or the thread could have been told to step an
4165 instruction the size of a breakpoint instruction, and only
4166 _after_ was a breakpoint inserted at its address. */
4167
24a73cce
UW
4168 /* If this target does not decrement the PC after breakpoints, then
4169 we have nothing to do. */
00431a78 4170 regcache = get_thread_regcache (thread);
ac7936df 4171 gdbarch = regcache->arch ();
118e6252 4172
527a273a 4173 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4174 if (decr_pc == 0)
24a73cce
UW
4175 return;
4176
8b86c959 4177 const address_space *aspace = regcache->aspace ();
6c95b8df 4178
8aad930b
AC
4179 /* Find the location where (if we've hit a breakpoint) the
4180 breakpoint would be. */
118e6252 4181 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
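 /* E.g., on x86, where gdbarch_decr_pc_after_break is 1, a trap on a
    breakpoint planted at 0x1000 reports PC == 0x1001, so the address
    to check for a breakpoint is 0x1000.  */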
8aad930b 4182
1cf4d951
PA
4183 /* If the target can't tell whether a software breakpoint triggered,
4184 fall back to figuring it out based on breakpoints we think were
4185 inserted in the target, and on whether the thread was stepped or
4186 continued. */
4187
1c5cfe86
PA
4188 /* Check whether there actually is a software breakpoint inserted at
4189 that location.
4190
4191 If in non-stop mode, a race condition is possible where we've
4192 removed a breakpoint, but stop events for that breakpoint were
4193 already queued and arrive later. To suppress those spurious
4194 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4195 and retire them after a number of stop events are reported. Note
4196 this is a heuristic and can thus get confused. The real fix is
4197 to get the "stopped by SW BP and needs adjustment" info out of
4198 the target/kernel (and thus never reach here; see above). */
6c95b8df 4199 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4200 || (target_is_non_stop_p ()
4201 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4202 {
07036511 4203 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4204
8213266a 4205 if (record_full_is_used ())
07036511
TT
4206 restore_operation_disable.emplace
4207 (record_full_gdb_operation_disable_set ());
96429cc8 4208
1c0fdd0e
UW
4209 /* When using hardware single-step, a SIGTRAP is reported for both
4210 a completed single-step and a software breakpoint. Need to
4211 differentiate between the two, as the latter needs adjusting
4212 but the former does not.
4213
4214 The SIGTRAP can be due to a completed hardware single-step only if
4215 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4216 - this thread is currently being stepped
4217
4218 If any of these events did not occur, we must have stopped due
4219 to hitting a software breakpoint, and have to back up to the
4220 breakpoint address.
4221
4222 As a special case, we could have hardware single-stepped a
4223 software breakpoint. In this case (prev_pc == breakpoint_pc),
4224 we also need to back up to the breakpoint address. */
4225
d8dd4d5f
PA
4226 if (thread_has_single_step_breakpoints_set (thread)
4227 || !currently_stepping (thread)
4228 || (thread->stepped_breakpoint
4229 && thread->prev_pc == breakpoint_pc))
515630c5 4230 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4231 }
4fa8626c
DJ
4232}
4233
edb3359d
DJ
4234static int
4235stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4236{
4237 for (frame = get_prev_frame (frame);
4238 frame != NULL;
4239 frame = get_prev_frame (frame))
4240 {
4241 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4242 return 1;
4243 if (get_frame_type (frame) != INLINE_FRAME)
4244 break;
4245 }
4246
4247 return 0;
4248}
4249
4a4c04f1
BE
4250/* Look for an inline frame that is marked for skip.
4251 If PREV_FRAME is TRUE start at the previous frame,
4252 otherwise start at the current frame. Stop at the
4253 first non-inline frame, or at the frame where the
4254 step started. */
4255
4256static bool
4257inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4258{
4259 struct frame_info *frame = get_current_frame ();
4260
4261 if (prev_frame)
4262 frame = get_prev_frame (frame);
4263
4264 for (; frame != NULL; frame = get_prev_frame (frame))
4265 {
4266 const char *fn = NULL;
4267 symtab_and_line sal;
4268 struct symbol *sym;
4269
4270 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4271 break;
4272 if (get_frame_type (frame) != INLINE_FRAME)
4273 break;
4274
4275 sal = find_frame_sal (frame);
4276 sym = get_frame_function (frame);
4277
4278 if (sym != NULL)
4279 fn = sym->print_name ();
4280
4281 if (sal.line != 0
4282 && function_name_is_marked_for_skip (fn, sal))
4283 return true;
4284 }
4285
4286 return false;
4287}
4288
c65d6b55
PA
4289/* If the event thread has the stop requested flag set, pretend it
4290 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4291 target_stop). */
4292
4293static bool
4294handle_stop_requested (struct execution_control_state *ecs)
4295{
4296 if (ecs->event_thread->stop_requested)
4297 {
4298 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4299 ecs->ws.value.sig = GDB_SIGNAL_0;
4300 handle_signal_stop (ecs);
4301 return true;
4302 }
4303 return false;
4304}
4305
a96d9b2e
SDJ
4306/* Auxiliary function that handles syscall entry/return events.
4307 It returns 1 if the inferior should keep going (and GDB
4308 should ignore the event), or 0 if the event deserves to be
4309 processed. */
ca2163eb 4310
a96d9b2e 4311static int
ca2163eb 4312handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4313{
ca2163eb 4314 struct regcache *regcache;
ca2163eb
PA
4315 int syscall_number;
4316
00431a78 4317 context_switch (ecs);
ca2163eb 4318
00431a78 4319 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4320 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4321 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4322
a96d9b2e
SDJ
4323 if (catch_syscall_enabled () > 0
4324 && catching_syscall_number (syscall_number) > 0)
4325 {
edbcda09 4326 infrun_log_debug ("syscall number=%d", syscall_number);
a96d9b2e 4327
16c381f0 4328 ecs->event_thread->control.stop_bpstat
a01bda52 4329 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4330 ecs->event_thread->suspend.stop_pc,
4331 ecs->event_thread, &ecs->ws);
ab04a2af 4332
c65d6b55
PA
4333 if (handle_stop_requested (ecs))
4334 return 0;
4335
ce12b012 4336 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4337 {
4338 /* Catchpoint hit. */
ca2163eb
PA
4339 return 0;
4340 }
a96d9b2e 4341 }
ca2163eb 4342
c65d6b55
PA
4343 if (handle_stop_requested (ecs))
4344 return 0;
4345
ca2163eb 4346 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4347 keep_going (ecs);
4348 return 1;
a96d9b2e
SDJ
4349}
4350
7e324e48
GB
4351/* Lazily fill in the execution_control_state's stop_func_* fields. */
4352
4353static void
4354fill_in_stop_func (struct gdbarch *gdbarch,
4355 struct execution_control_state *ecs)
4356{
4357 if (!ecs->stop_func_filled_in)
4358 {
98a617f8
KB
4359 const block *block;
4360
7e324e48
GB
4361 /* Don't care about return value; stop_func_start and stop_func_name
4362 will both be 0 if it doesn't work. */
98a617f8
KB
4363 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4364 &ecs->stop_func_name,
4365 &ecs->stop_func_start,
4366 &ecs->stop_func_end,
4367 &block);
4368
4369 /* The call to find_pc_partial_function, above, will set
4370 stop_func_start and stop_func_end to the start and end
4371 of the range containing the stop pc. If this range
4372 contains the entry pc for the block (which is always the
4373 case for contiguous blocks), advance stop_func_start past
4374 the function's start offset and entrypoint. Note that
4375 stop_func_start is NOT advanced when in a range of a
4376 non-contiguous block that does not contain the entry pc. */
4377 if (block != nullptr
4378 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4379 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4380 {
4381 ecs->stop_func_start
4382 += gdbarch_deprecated_function_start_offset (gdbarch);
4383
4384 if (gdbarch_skip_entrypoint_p (gdbarch))
4385 ecs->stop_func_start
4386 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4387 }
591a12a1 4388
7e324e48
GB
4389 ecs->stop_func_filled_in = 1;
4390 }
4391}
4392
4f5d7f63 4393
00431a78 4394/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4395
4396static enum stop_kind
00431a78 4397get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4398{
5b6d1e4f 4399 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4400
4401 gdb_assert (inf != NULL);
4402 return inf->control.stop_soon;
4403}
4404
5b6d1e4f
PA
4405/* Poll for one event out of the current target. Store the resulting
4406 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4407
4408static ptid_t
5b6d1e4f 4409poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4410{
4411 ptid_t event_ptid;
372316f1
PA
4412
4413 overlay_cache_invalid = 1;
4414
4415 /* Flush target cache before starting to handle each event.
4416 Target was running and cache could be stale. This is just a
4417 heuristic. Running threads may modify target memory, but we
4418 don't get any event. */
4419 target_dcache_invalidate ();
4420
4421 if (deprecated_target_wait_hook)
5b6d1e4f 4422 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4423 else
5b6d1e4f 4424 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4425
4426 if (debug_infrun)
5b6d1e4f 4427 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4428
4429 return event_ptid;
4430}
4431
5b6d1e4f
PA
4432/* An event reported by wait_one. */
4433
4434struct wait_one_event
4435{
4436 /* The target the event came out of. */
4437 process_stratum_target *target;
4438
4439 /* The PTID the event was for. */
4440 ptid_t ptid;
4441
4442 /* The waitstatus. */
4443 target_waitstatus ws;
4444};
4445
4446/* Wait for one event out of any target. */
4447
4448static wait_one_event
4449wait_one ()
4450{
4451 while (1)
4452 {
4453 for (inferior *inf : all_inferiors ())
4454 {
4455 process_stratum_target *target = inf->process_target ();
4456 if (target == NULL
4457 || !target->is_async_p ()
4458 || !target->threads_executing)
4459 continue;
4460
4461 switch_to_inferior_no_thread (inf);
4462
4463 wait_one_event event;
4464 event.target = target;
4465 event.ptid = poll_one_curr_target (&event.ws);
4466
4467 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4468 {
4469 /* If nothing is resumed, remove the target from the
4470 event loop. */
4471 target_async (0);
4472 }
4473 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4474 return event;
4475 }
4476
4477 /* Block waiting for some event. */
4478
4479 fd_set readfds;
4480 int nfds = 0;
4481
4482 FD_ZERO (&readfds);
4483
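 /* Collect the event file descriptor of every async target that
    still has executing threads, so the select below can block on
    all of them at once.  */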
4484 for (inferior *inf : all_inferiors ())
4485 {
4486 process_stratum_target *target = inf->process_target ();
4487 if (target == NULL
4488 || !target->is_async_p ()
4489 || !target->threads_executing)
4490 continue;
4491
4492 int fd = target->async_wait_fd ();
4493 FD_SET (fd, &readfds);
4494 if (nfds <= fd)
4495 nfds = fd + 1;
4496 }
4497
4498 if (nfds == 0)
4499 {
4500 /* No waitable targets left. All must be stopped. */
4501 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4502 }
4503
4504 QUIT;
4505
4506 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4507 if (numfds < 0)
4508 {
4509 if (errno == EINTR)
4510 continue;
4511 else
4512 perror_with_name ("interruptible_select");
4513 }
4514 }
4515}
4516
372316f1
PA
4517/* Save the thread's event and stop reason to process it later. */
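/* The status is stored in TP->suspend.waitstatus and
   TP->suspend.waitstatus_pending_p is set, so the event can be picked
   up and reported later.  For GDB_SIGNAL_TRAP stops, the stop reason
   (watchpoint, software/hardware breakpoint, or single-step) is also
   recorded now, while the target can still be queried about it.  */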
4518
4519static void
5b6d1e4f 4520save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4521{
edbcda09
SM
4522 infrun_log_debug ("saving status %s for %d.%ld.%ld",
4523 target_waitstatus_to_string (ws).c_str (),
4524 tp->ptid.pid (),
4525 tp->ptid.lwp (),
4526 tp->ptid.tid ());
372316f1
PA
4527
4528 /* Record for later. */
4529 tp->suspend.waitstatus = *ws;
4530 tp->suspend.waitstatus_pending_p = 1;
4531
00431a78 4532 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4533 const address_space *aspace = regcache->aspace ();
372316f1
PA
4534
4535 if (ws->kind == TARGET_WAITKIND_STOPPED
4536 && ws->value.sig == GDB_SIGNAL_TRAP)
4537 {
4538 CORE_ADDR pc = regcache_read_pc (regcache);
4539
4540 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4541
18493a00
PA
4542 scoped_restore_current_thread restore_thread;
4543 switch_to_thread (tp);
4544
4545 if (target_stopped_by_watchpoint ())
372316f1
PA
4546 {
4547 tp->suspend.stop_reason
4548 = TARGET_STOPPED_BY_WATCHPOINT;
4549 }
4550 else if (target_supports_stopped_by_sw_breakpoint ()
18493a00 4551 && target_stopped_by_sw_breakpoint ())
372316f1
PA
4552 {
4553 tp->suspend.stop_reason
4554 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4555 }
4556 else if (target_supports_stopped_by_hw_breakpoint ()
18493a00 4557 && target_stopped_by_hw_breakpoint ())
372316f1
PA
4558 {
4559 tp->suspend.stop_reason
4560 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4561 }
4562 else if (!target_supports_stopped_by_hw_breakpoint ()
4563 && hardware_breakpoint_inserted_here_p (aspace,
4564 pc))
4565 {
4566 tp->suspend.stop_reason
4567 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4568 }
4569 else if (!target_supports_stopped_by_sw_breakpoint ()
4570 && software_breakpoint_inserted_here_p (aspace,
4571 pc))
4572 {
4573 tp->suspend.stop_reason
4574 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4575 }
4576 else if (!thread_has_single_step_breakpoints_set (tp)
4577 && currently_stepping (tp))
4578 {
4579 tp->suspend.stop_reason
4580 = TARGET_STOPPED_BY_SINGLE_STEP;
4581 }
4582 }
4583}
4584
293b3ebc
TBA
4585/* Mark the non-executing threads accordingly. In all-stop, all
4586 threads of all processes are stopped when we get any event
4587 reported. In non-stop mode, only the event thread stops. */
4588
4589static void
4590mark_non_executing_threads (process_stratum_target *target,
4591 ptid_t event_ptid,
4592 struct target_waitstatus ws)
4593{
4594 ptid_t mark_ptid;
4595
4596 if (!target_is_non_stop_p ())
4597 mark_ptid = minus_one_ptid;
4598 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4599 || ws.kind == TARGET_WAITKIND_EXITED)
4600 {
4601 /* If we're handling a process exit in non-stop mode, even
4602 though threads haven't been deleted yet, one would think
4603 that there is nothing to do, as threads of the dead process
4604 will be soon deleted, and threads of any other process were
4605 left running. However, on some targets, threads survive a
4606 process exit event. E.g., for the "checkpoint" command,
4607 when the current checkpoint/fork exits, linux-fork.c
4608 automatically switches to another fork from within
4609 target_mourn_inferior, by associating the same
4610 inferior/thread to another fork. We haven't mourned yet at
4611 this point, but we must mark any threads left in the
4612 process as not-executing so that finish_thread_state marks
 4613	 them stopped (from the user's perspective) if/when we present
4614 the stop to the user. */
4615 mark_ptid = ptid_t (event_ptid.pid ());
4616 }
4617 else
4618 mark_ptid = event_ptid;
4619
4620 set_executing (target, mark_ptid, false);
4621
4622 /* Likewise the resumed flag. */
4623 set_resumed (target, mark_ptid, false);
4624}
4625
6efcd9a8 4626/* See infrun.h. */
372316f1 4627
6efcd9a8 4628void
372316f1
PA
4629stop_all_threads (void)
4630{
4631 /* We may need multiple passes to discover all threads. */
4632 int pass;
4633 int iterations = 0;
372316f1 4634
53cccef1 4635 gdb_assert (exists_non_stop_target ());
372316f1 4636
edbcda09 4637 infrun_log_debug ("stop_all_threads");
372316f1 4638
00431a78 4639 scoped_restore_current_thread restore_thread;
372316f1 4640
6ad82919
TBA
4641 /* Enable thread events of all targets. */
4642 for (auto *target : all_non_exited_process_targets ())
4643 {
4644 switch_to_target_no_thread (target);
4645 target_thread_events (true);
4646 }
4647
4648 SCOPE_EXIT
4649 {
4650 /* Disable thread events of all targets. */
4651 for (auto *target : all_non_exited_process_targets ())
4652 {
4653 switch_to_target_no_thread (target);
4654 target_thread_events (false);
4655 }
4656
edbcda09
SM
4657
4658 infrun_log_debug ("stop_all_threads done");
6ad82919 4659 };
65706a29 4660
372316f1
PA
4661 /* Request threads to stop, and then wait for the stops. Because
4662 threads we already know about can spawn more threads while we're
4663 trying to stop them, and we only learn about new threads when we
4664 update the thread list, do this in a loop, and keep iterating
4665 until two passes find no threads that need to be stopped. */
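  /* Note that PASS is reset below whenever the second pass still finds
     threads that need to be stopped, so the loop only terminates after
     a complete pass in which every known thread was already stopped.  */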
4666 for (pass = 0; pass < 2; pass++, iterations++)
4667 {
edbcda09
SM
4668 infrun_log_debug ("stop_all_threads, pass=%d, iterations=%d",
4669 pass, iterations);
372316f1
PA
4670 while (1)
4671 {
29d6859f 4672 int waits_needed = 0;
372316f1 4673
a05575d3
TBA
4674 for (auto *target : all_non_exited_process_targets ())
4675 {
4676 switch_to_target_no_thread (target);
4677 update_thread_list ();
4678 }
372316f1
PA
4679
4680 /* Go through all threads looking for threads that we need
4681 to tell the target to stop. */
08036331 4682 for (thread_info *t : all_non_exited_threads ())
372316f1 4683 {
53cccef1
TBA
4684 /* For a single-target setting with an all-stop target,
4685 we would not even arrive here. For a multi-target
4686 setting, until GDB is able to handle a mixture of
4687 all-stop and non-stop targets, simply skip all-stop
4688 targets' threads. This should be fine due to the
4689 protection of 'check_multi_target_resumption'. */
4690
4691 switch_to_thread_no_regs (t);
4692 if (!target_is_non_stop_p ())
4693 continue;
4694
372316f1
PA
4695 if (t->executing)
4696 {
4697 /* If already stopping, don't request a stop again.
4698 We just haven't seen the notification yet. */
4699 if (!t->stop_requested)
4700 {
edbcda09
SM
4701 infrun_log_debug (" %s executing, need stop",
4702 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4703 target_stop (t->ptid);
4704 t->stop_requested = 1;
4705 }
4706 else
4707 {
edbcda09
SM
4708 infrun_log_debug (" %s executing, already stopping",
4709 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4710 }
4711
4712 if (t->stop_requested)
29d6859f 4713 waits_needed++;
372316f1
PA
4714 }
4715 else
4716 {
edbcda09
SM
4717 infrun_log_debug (" %s not executing",
4718 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4719
4720 /* The thread may be not executing, but still be
4721 resumed with a pending status to process. */
719546c4 4722 t->resumed = false;
372316f1
PA
4723 }
4724 }
4725
29d6859f 4726 if (waits_needed == 0)
372316f1
PA
4727 break;
4728
4729 /* If we find new threads on the second iteration, restart
4730 over. We want to see two iterations in a row with all
4731 threads stopped. */
4732 if (pass > 0)
4733 pass = -1;
4734
29d6859f 4735 for (int i = 0; i < waits_needed; i++)
c29705b7 4736 {
29d6859f 4737 wait_one_event event = wait_one ();
a05575d3 4738
edbcda09
SM
4739 infrun_log_debug ("%s %s\n",
4740 target_waitstatus_to_string (&event.ws).c_str (),
4741 target_pid_to_str (event.ptid).c_str ());
a05575d3 4742
29d6859f 4743 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
a05575d3 4744 {
29d6859f
LM
4745 /* All resumed threads exited. */
4746 break;
a05575d3 4747 }
29d6859f
LM
4748 else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4749 || event.ws.kind == TARGET_WAITKIND_EXITED
4750 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
6efcd9a8 4751 {
29d6859f 4752 /* One thread/process exited/signalled. */
6efcd9a8 4753
29d6859f 4754 thread_info *t = nullptr;
372316f1 4755
29d6859f
LM
4756 /* The target may have reported just a pid. If so, try
4757 the first non-exited thread. */
4758 if (event.ptid.is_pid ())
372316f1 4759 {
29d6859f
LM
4760 int pid = event.ptid.pid ();
4761 inferior *inf = find_inferior_pid (event.target, pid);
4762 for (thread_info *tp : inf->non_exited_threads ())
372316f1 4763 {
29d6859f
LM
4764 t = tp;
4765 break;
372316f1 4766 }
29d6859f
LM
4767
4768 /* If there is no available thread, the event would
4769 have to be appended to a per-inferior event list,
4770 which does not exist (and if it did, we'd have
 4771	 to adjust the run control commands to be able to
4772 resume such an inferior). We assert here instead
4773 of going into an infinite loop. */
4774 gdb_assert (t != nullptr);
4775
edbcda09
SM
4776 infrun_log_debug ("using %s\n",
4777 target_pid_to_str (t->ptid).c_str ());
29d6859f
LM
4778 }
4779 else
4780 {
4781 t = find_thread_ptid (event.target, event.ptid);
4782 /* Check if this is the first time we see this thread.
4783 Don't bother adding if it individually exited. */
4784 if (t == nullptr
4785 && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
4786 t = add_thread (event.target, event.ptid);
4787 }
4788
4789 if (t != nullptr)
4790 {
4791 /* Set the threads as non-executing to avoid
4792 another stop attempt on them. */
4793 switch_to_thread_no_regs (t);
4794 mark_non_executing_threads (event.target, event.ptid,
4795 event.ws);
4796 save_waitstatus (t, &event.ws);
4797 t->stop_requested = false;
372316f1
PA
4798 }
4799 }
4800 else
4801 {
29d6859f
LM
4802 thread_info *t = find_thread_ptid (event.target, event.ptid);
4803 if (t == NULL)
4804 t = add_thread (event.target, event.ptid);
372316f1 4805
29d6859f
LM
4806 t->stop_requested = 0;
4807 t->executing = 0;
4808 t->resumed = false;
4809 t->control.may_range_step = 0;
4810
4811 /* This may be the first time we see the inferior report
4812 a stop. */
4813 inferior *inf = find_inferior_ptid (event.target, event.ptid);
4814 if (inf->needs_setup)
372316f1 4815 {
29d6859f
LM
4816 switch_to_thread_no_regs (t);
4817 setup_inferior (0);
372316f1
PA
4818 }
4819
29d6859f
LM
4820 if (event.ws.kind == TARGET_WAITKIND_STOPPED
4821 && event.ws.value.sig == GDB_SIGNAL_0)
372316f1 4822 {
29d6859f
LM
4823 /* We caught the event that we intended to catch, so
4824 there's no event pending. */
4825 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4826 t->suspend.waitstatus_pending_p = 0;
4827
9844051a 4828 if (displaced_step_finish (t, GDB_SIGNAL_0) < 0)
29d6859f
LM
4829 {
4830 /* Add it back to the step-over queue. */
edbcda09
SM
4831 infrun_log_debug ("displaced-step of %s "
4832 "canceled: adding back to the "
4833 "step-over queue\n",
4834 target_pid_to_str (t->ptid).c_str ());
4835
29d6859f 4836 t->control.trap_expected = 0;
7bd43605 4837 global_thread_step_over_chain_enqueue (t);
29d6859f 4838 }
372316f1 4839 }
29d6859f
LM
4840 else
4841 {
4842 enum gdb_signal sig;
4843 struct regcache *regcache;
372316f1 4844
29d6859f
LM
4845 if (debug_infrun)
4846 {
4847 std::string statstr = target_waitstatus_to_string (&event.ws);
372316f1 4848
edbcda09
SM
4849 infrun_log_debug ("target_wait %s, saving "
4850 "status for %d.%ld.%ld\n",
4851 statstr.c_str (),
4852 t->ptid.pid (),
4853 t->ptid.lwp (),
4854 t->ptid.tid ());
29d6859f
LM
4855 }
4856
4857 /* Record for later. */
4858 save_waitstatus (t, &event.ws);
4859
4860 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
4861 ? event.ws.value.sig : GDB_SIGNAL_0);
4862
9844051a 4863 if (displaced_step_finish (t, sig) < 0)
29d6859f
LM
4864 {
4865 /* Add it back to the step-over queue. */
4866 t->control.trap_expected = 0;
7bd43605 4867 global_thread_step_over_chain_enqueue (t);
29d6859f
LM
4868 }
4869
4870 regcache = get_thread_regcache (t);
4871 t->suspend.stop_pc = regcache_read_pc (regcache);
4872
edbcda09
SM
4873 infrun_log_debug ("saved stop_pc=%s for %s "
4874 "(currently_stepping=%d)\n",
4875 paddress (target_gdbarch (),
4876 t->suspend.stop_pc),
4877 target_pid_to_str (t->ptid).c_str (),
4878 currently_stepping (t));
372316f1
PA
4879 }
4880 }
4881 }
4882 }
4883 }
372316f1
PA
4884}
4885
f4836ba9
PA
4886/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
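/* Returns 1 if the event was consumed here (the caller should return
   to the event loop and wait for more events), or 0 if the no-resumed
   stop should be reported to the user.  */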
4887
4888static int
4889handle_no_resumed (struct execution_control_state *ecs)
4890{
3b12939d 4891 if (target_can_async_p ())
f4836ba9 4892 {
3b12939d 4893 int any_sync = 0;
f4836ba9 4894
2dab0c7b 4895 for (ui *ui : all_uis ())
3b12939d
PA
4896 {
4897 if (ui->prompt_state == PROMPT_BLOCKED)
4898 {
4899 any_sync = 1;
4900 break;
4901 }
4902 }
4903 if (!any_sync)
4904 {
 4905	 /* There were no unwaited-for children left in the target, but
4906 we're not synchronously waiting for events either. Just
4907 ignore. */
4908
edbcda09 4909 infrun_log_debug ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
3b12939d
PA
4910 prepare_to_wait (ecs);
4911 return 1;
4912 }
f4836ba9
PA
4913 }
4914
4915 /* Otherwise, if we were running a synchronous execution command, we
4916 may need to cancel it and give the user back the terminal.
4917
4918 In non-stop mode, the target can't tell whether we've already
4919 consumed previous stop events, so it can end up sending us a
4920 no-resumed event like so:
4921
4922 #0 - thread 1 is left stopped
4923
4924 #1 - thread 2 is resumed and hits breakpoint
4925 -> TARGET_WAITKIND_STOPPED
4926
4927 #2 - thread 3 is resumed and exits
4928 this is the last resumed thread, so
4929 -> TARGET_WAITKIND_NO_RESUMED
4930
4931 #3 - gdb processes stop for thread 2 and decides to re-resume
4932 it.
4933
4934 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4935 thread 2 is now resumed, so the event should be ignored.
4936
4937 IOW, if the stop for thread 2 doesn't end a foreground command,
4938 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4939 event. But it could be that the event meant that thread 2 itself
4940 (or whatever other thread was the last resumed thread) exited.
4941
4942 To address this we refresh the thread list and check whether we
4943 have resumed threads _now_. In the example above, this removes
4944 thread 3 from the thread list. If thread 2 was re-resumed, we
4945 ignore this event. If we find no thread resumed, then we cancel
2ec0f7ff
PA
 4946	 the synchronous command and show "no unwaited-for children
 4947	 left" to the user.  */
f4836ba9 4948
aecd6cb8 4949 inferior *curr_inf = current_inferior ();
2ec0f7ff 4950
aecd6cb8
PA
4951 scoped_restore_current_thread restore_thread;
4952
4953 for (auto *target : all_non_exited_process_targets ())
4954 {
4955 switch_to_target_no_thread (target);
4956 update_thread_list ();
4957 }
4958
4959 /* If:
4960
4961 - the current target has no thread executing, and
4962 - the current inferior is native, and
4963 - the current inferior is the one which has the terminal, and
4964 - we did nothing,
4965
4966 then a Ctrl-C from this point on would remain stuck in the
4967 kernel, until a thread resumes and dequeues it. That would
4968 result in the GDB CLI not reacting to Ctrl-C, not able to
4969 interrupt the program. To address this, if the current inferior
4970 no longer has any thread executing, we give the terminal to some
4971 other inferior that has at least one thread executing. */
4972 bool swap_terminal = true;
4973
4974 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
4975 whether to report it to the user. */
4976 bool ignore_event = false;
2ec0f7ff
PA
4977
4978 for (thread_info *thread : all_non_exited_threads ())
f4836ba9 4979 {
aecd6cb8
PA
4980 if (swap_terminal && thread->executing)
4981 {
4982 if (thread->inf != curr_inf)
4983 {
4984 target_terminal::ours ();
4985
4986 switch_to_thread (thread);
4987 target_terminal::inferior ();
4988 }
4989 swap_terminal = false;
4990 }
4991
4992 if (!ignore_event
4993 && (thread->executing
4994 || thread->suspend.waitstatus_pending_p))
f4836ba9 4995 {
2ec0f7ff
PA
4996 /* Either there were no unwaited-for children left in the
4997 target at some point, but there are now, or some target
4998 other than the eventing one has unwaited-for children
4999 left. Just ignore. */
edbcda09
SM
5000 infrun_log_debug ("TARGET_WAITKIND_NO_RESUMED "
5001 "(ignoring: found resumed)\n");
aecd6cb8
PA
5002
5003 ignore_event = true;
f4836ba9 5004 }
aecd6cb8
PA
5005
5006 if (ignore_event && !swap_terminal)
5007 break;
5008 }
5009
5010 if (ignore_event)
5011 {
5012 switch_to_inferior_no_thread (curr_inf);
5013 prepare_to_wait (ecs);
5014 return 1;
f4836ba9
PA
5015 }
5016
5017 /* Go ahead and report the event. */
5018 return 0;
5019}
5020
05ba8510
PA
5021/* Given an execution control state that has been freshly filled in by
5022 an event from the inferior, figure out what it means and take
5023 appropriate action.
5024
5025 The alternatives are:
5026
22bcd14b 5027 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5028 debugger.
5029
5030 2) keep_going and return; to wait for the next event (set
5031 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5032 once). */
c906108c 5033
ec9499be 5034static void
595915c1 5035handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5036{
595915c1
TT
5037 /* Make sure that all temporary struct value objects that were
5038 created during the handling of the event get deleted at the
5039 end. */
5040 scoped_value_mark free_values;
5041
d6b48e9c
PA
5042 enum stop_kind stop_soon;
5043
edbcda09 5044 infrun_log_debug ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
c29705b7 5045
28736962
PA
5046 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5047 {
5048 /* We had an event in the inferior, but we are not interested in
5049 handling it at this level. The lower layers have already
5050 done what needs to be done, if anything.
5051
5052 One of the possible circumstances for this is when the
5053 inferior produces output for the console. The inferior has
5054 not stopped, and we are ignoring the event. Another possible
5055 circumstance is any event which the lower level knows will be
5056 reported multiple times without an intervening resume. */
28736962
PA
5057 prepare_to_wait (ecs);
5058 return;
5059 }
5060
65706a29
PA
5061 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5062 {
65706a29
PA
5063 prepare_to_wait (ecs);
5064 return;
5065 }
5066
0e5bf2a8 5067 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5068 && handle_no_resumed (ecs))
5069 return;
0e5bf2a8 5070
5b6d1e4f
PA
5071 /* Cache the last target/ptid/waitstatus. */
5072 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5073
ca005067 5074 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5075 stop_stack_dummy = STOP_NONE;
ca005067 5076
0e5bf2a8
PA
5077 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5078 {
5079 /* No unwaited-for children left. IOW, all resumed children
5080 have exited. */
0e5bf2a8 5081 stop_print_frame = 0;
22bcd14b 5082 stop_waiting (ecs);
0e5bf2a8
PA
5083 return;
5084 }
5085
8c90c137 5086 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5087 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5088 {
5b6d1e4f 5089 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5090 /* If it's a new thread, add it to the thread database. */
5091 if (ecs->event_thread == NULL)
5b6d1e4f 5092 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5093
5094 /* Disable range stepping. If the next step request could use a
5095 range, this will be end up re-enabled then. */
5096 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5097 }
88ed393a
JK
5098
5099 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5100 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5101
5102 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5103 reinit_frame_cache ();
5104
28736962
PA
5105 breakpoint_retire_moribund ();
5106
2b009048
DJ
5107 /* First, distinguish signals caused by the debugger from signals
5108 that have to do with the program's own actions. Note that
5109 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5110 on the operating system version. Here we detect when a SIGILL or
5111 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5112 something similar for SIGSEGV, since a SIGSEGV will be generated
5113 when we're trying to execute a breakpoint instruction on a
5114 non-executable stack. This happens for call dummy breakpoints
5115 for architectures like SPARC that place call dummies on the
5116 stack. */
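  /* Only rewrite the signal if a breakpoint instruction is actually
     inserted at the reported PC; a genuine SIGILL/SIGSEGV/SIGEMT raised
     by the program itself is left untouched.  */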
2b009048 5117 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
5118 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5119 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5120 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5121 {
00431a78 5122 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5123
a01bda52 5124 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5125 regcache_read_pc (regcache)))
5126 {
edbcda09 5127 infrun_log_debug ("Treating signal as SIGTRAP");
a493e3e2 5128 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5129 }
2b009048
DJ
5130 }
5131
293b3ebc 5132 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 5133
488f131b
JB
5134 switch (ecs->ws.kind)
5135 {
5136 case TARGET_WAITKIND_LOADED:
00431a78 5137 context_switch (ecs);
b0f4b84b
DJ
5138 /* Ignore gracefully during startup of the inferior, as it might
5139 be the shell which has just loaded some objects, otherwise
5140 add the symbols for the newly loaded objects. Also ignore at
5141 the beginning of an attach or remote session; we will query
5142 the full list of libraries once the connection is
5143 established. */
4f5d7f63 5144
00431a78 5145 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 5146 if (stop_soon == NO_STOP_QUIETLY)
488f131b 5147 {
edcc5120
TT
5148 struct regcache *regcache;
5149
00431a78 5150 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
5151
5152 handle_solib_event ();
5153
5154 ecs->event_thread->control.stop_bpstat
a01bda52 5155 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
5156 ecs->event_thread->suspend.stop_pc,
5157 ecs->event_thread, &ecs->ws);
ab04a2af 5158
c65d6b55
PA
5159 if (handle_stop_requested (ecs))
5160 return;
5161
ce12b012 5162 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
5163 {
5164 /* A catchpoint triggered. */
94c57d6a
PA
5165 process_event_stop_test (ecs);
5166 return;
edcc5120 5167 }
488f131b 5168
b0f4b84b
DJ
5169 /* If requested, stop when the dynamic linker notifies
5170 gdb of events. This allows the user to get control
5171 and place breakpoints in initializer routines for
5172 dynamically loaded objects (among other things). */
a493e3e2 5173 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
5174 if (stop_on_solib_events)
5175 {
55409f9d
DJ
5176 /* Make sure we print "Stopped due to solib-event" in
5177 normal_stop. */
5178 stop_print_frame = 1;
5179
22bcd14b 5180 stop_waiting (ecs);
b0f4b84b
DJ
5181 return;
5182 }
488f131b 5183 }
b0f4b84b
DJ
5184
5185 /* If we are skipping through a shell, or through shared library
5186 loading that we aren't interested in, resume the program. If
5c09a2c5 5187 we're running the program normally, also resume. */
b0f4b84b
DJ
5188 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5189 {
74960c60
VP
5190 /* Loading of shared libraries might have changed breakpoint
5191 addresses. Make sure new breakpoints are inserted. */
a25a5a45 5192 if (stop_soon == NO_STOP_QUIETLY)
74960c60 5193 insert_breakpoints ();
64ce06e4 5194 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
5195 prepare_to_wait (ecs);
5196 return;
5197 }
5198
5c09a2c5
PA
5199 /* But stop if we're attaching or setting up a remote
5200 connection. */
5201 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5202 || stop_soon == STOP_QUIETLY_REMOTE)
5203 {
edbcda09 5204 infrun_log_debug ("quietly stopped");
22bcd14b 5205 stop_waiting (ecs);
5c09a2c5
PA
5206 return;
5207 }
5208
5209 internal_error (__FILE__, __LINE__,
5210 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 5211
488f131b 5212 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5213 if (handle_stop_requested (ecs))
5214 return;
00431a78 5215 context_switch (ecs);
64ce06e4 5216 resume (GDB_SIGNAL_0);
488f131b
JB
5217 prepare_to_wait (ecs);
5218 return;
c5aa993b 5219
65706a29 5220 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5221 if (handle_stop_requested (ecs))
5222 return;
00431a78 5223 context_switch (ecs);
65706a29
PA
5224 if (!switch_back_to_stepped_thread (ecs))
5225 keep_going (ecs);
5226 return;
5227
488f131b 5228 case TARGET_WAITKIND_EXITED:
940c3c06 5229 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
5230 {
5231 /* Depending on the system, ecs->ptid may point to a thread or
5232 to a process. On some targets, target_mourn_inferior may
5233 need to have access to the just-exited thread. That is the
5234 case of GNU/Linux's "checkpoint" support, for example.
5235 Call the switch_to_xxx routine as appropriate. */
5236 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5237 if (thr != nullptr)
5238 switch_to_thread (thr);
5239 else
5240 {
5241 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5242 switch_to_inferior_no_thread (inf);
5243 }
5244 }
6c95b8df 5245 handle_vfork_child_exec_or_exit (0);
223ffa71 5246 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5247
0c557179
SDJ
5248 /* Clearing any previous state of convenience variables. */
5249 clear_exit_convenience_vars ();
5250
940c3c06
PA
5251 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5252 {
5253 /* Record the exit code in the convenience variable $_exitcode, so
5254 that the user can inspect this again later. */
5255 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5256 (LONGEST) ecs->ws.value.integer);
5257
5258 /* Also record this in the inferior itself. */
5259 current_inferior ()->has_exit_code = 1;
5260 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5261
98eb56a4
PA
5262 /* Support the --return-child-result option. */
5263 return_child_result_value = ecs->ws.value.integer;
5264
76727919 5265 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5266 }
5267 else
0c557179 5268 {
00431a78 5269 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5270
5271 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5272 {
5273 /* Set the value of the internal variable $_exitsignal,
5274 which holds the signal uncaught by the inferior. */
5275 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5276 gdbarch_gdb_signal_to_target (gdbarch,
5277 ecs->ws.value.sig));
5278 }
5279 else
5280 {
5281 /* We don't have access to the target's method used for
5282 converting between signal numbers (GDB's internal
5283 representation <-> target's representation).
5284 Therefore, we cannot do a good job at displaying this
5285 information to the user. It's better to just warn
5286 her about it (if infrun debugging is enabled), and
5287 give up. */
edbcda09
SM
5288 infrun_log_debug ("Cannot fill $_exitsignal with the correct "
5289 "signal number.");
0c557179
SDJ
5290 }
5291
76727919 5292 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5293 }
8cf64490 5294
488f131b 5295 gdb_flush (gdb_stdout);
bc1e6c81 5296 target_mourn_inferior (inferior_ptid);
488f131b 5297 stop_print_frame = 0;
22bcd14b 5298 stop_waiting (ecs);
488f131b 5299 return;
c5aa993b 5300
488f131b 5301 case TARGET_WAITKIND_FORKED:
deb3b17b 5302 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5303 /* Check whether the inferior is displaced stepping. */
5304 {
00431a78 5305 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5306 struct gdbarch *gdbarch = regcache->arch ();
e2d96639
YQ
5307
 5308	 /* Check whether the event thread is in the middle of a displaced
 5309	 step; if so, fix up the parent's (and child's) state below. */
9844051a 5310 if (displaced_step_in_progress (ecs->event_thread))
e2d96639
YQ
5311 {
5312 struct inferior *parent_inf
5b6d1e4f 5313 = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639
YQ
5314 struct regcache *child_regcache;
5315 CORE_ADDR parent_pc;
5316
d8d83535
SM
5317 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
5318 {
9844051a
SM
5319 // struct displaced_step_inferior_state *displaced
5320 // = get_displaced_stepping_state (parent_inf);
d8d83535
SM
5321
5322 /* Restore scratch pad for child process. */
9844051a
SM
5323 //displaced_step_restore (displaced, ecs->ws.value.related_pid);
5324 // FIXME: we should restore all the buffers that were currently in use
d8d83535
SM
5325 }
5326
e2d96639
YQ
5327 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
 5328	 indicating that the displaced stepping of the syscall instruction
 5329	 has been done.  Perform cleanup for the parent process here.  Note
5330 that this operation also cleans up the child process for vfork,
5331 because their pages are shared. */
9844051a 5332 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5333 /* Start a new step-over in another thread if there's one
5334 that needs it. */
5335 start_step_over ();
e2d96639 5336
e2d96639
YQ
5337 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5338 the child's PC is also within the scratchpad. Set the child's PC
5339 to the parent's PC value, which has already been fixed up.
5340 FIXME: we use the parent's aspace here, although we're touching
5341 the child, because the child hasn't been added to the inferior
5342 list yet at this point. */
5343
5344 child_regcache
5b6d1e4f
PA
5345 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5346 ecs->ws.value.related_pid,
e2d96639
YQ
5347 gdbarch,
5348 parent_inf->aspace);
5349 /* Read PC value of parent process. */
5350 parent_pc = regcache_read_pc (regcache);
5351
5352 if (debug_displaced)
5353 fprintf_unfiltered (gdb_stdlog,
5354 "displaced: write child pc from %s to %s\n",
5355 paddress (gdbarch,
5356 regcache_read_pc (child_regcache)),
5357 paddress (gdbarch, parent_pc));
5358
5359 regcache_write_pc (child_regcache, parent_pc);
5360 }
5361 }
5362
00431a78 5363 context_switch (ecs);
5a2901d9 5364
b242c3c2
PA
5365 /* Immediately detach breakpoints from the child before there's
5366 any chance of letting the user delete breakpoints from the
5367 breakpoint lists. If we don't do this early, it's easy to
 5368	 leave leftover traps in the child, viz: "break foo; catch
5369 fork; c; <fork>; del; c; <child calls foo>". We only follow
5370 the fork on the last `continue', and by that time the
5371 breakpoint at "foo" is long gone from the breakpoint table.
5372 If we vforked, then we don't need to unpatch here, since both
5373 parent and child are sharing the same memory pages; we'll
5374 need to unpatch at follow/detach time instead to be certain
5375 that new breakpoints added between catchpoint hit time and
5376 vfork follow are detached. */
5377 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5378 {
b242c3c2
PA
5379 /* This won't actually modify the breakpoint list, but will
5380 physically remove the breakpoints from the child. */
d80ee84f 5381 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5382 }
5383
34b7e8a6 5384 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5385
e58b0e63
PA
5386 /* In case the event is caught by a catchpoint, remember that
5387 the event is to be followed at the next resume of the thread,
5388 and not immediately. */
5389 ecs->event_thread->pending_follow = ecs->ws;
5390
f2ffa92b
PA
5391 ecs->event_thread->suspend.stop_pc
5392 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5393
16c381f0 5394 ecs->event_thread->control.stop_bpstat
a01bda52 5395 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5396 ecs->event_thread->suspend.stop_pc,
5397 ecs->event_thread, &ecs->ws);
675bf4cb 5398
c65d6b55
PA
5399 if (handle_stop_requested (ecs))
5400 return;
5401
ce12b012
PA
5402 /* If no catchpoint triggered for this, then keep going. Note
5403 that we're interested in knowing the bpstat actually causes a
5404 stop, not just if it may explain the signal. Software
5405 watchpoints, for example, always appear in the bpstat. */
5406 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5407 {
5ab2fbf1 5408 bool follow_child
3e43a32a 5409 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5410
a493e3e2 5411 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5412
5b6d1e4f
PA
5413 process_stratum_target *targ
5414 = ecs->event_thread->inf->process_target ();
5415
5ab2fbf1 5416 bool should_resume = follow_fork ();
e58b0e63 5417
5b6d1e4f
PA
5418 /* Note that one of these may be an invalid pointer,
5419 depending on detach_fork. */
00431a78 5420 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5421 thread_info *child
5422 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5423
a2077e25
PA
5424 /* At this point, the parent is marked running, and the
5425 child is marked stopped. */
5426
5427 /* If not resuming the parent, mark it stopped. */
5428 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5429 parent->set_running (false);
a2077e25
PA
5430
5431 /* If resuming the child, mark it running. */
5432 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5433 child->set_running (true);
a2077e25 5434
6c95b8df 5435 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5436 if (!detach_fork && (non_stop
5437 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5438 {
5439 if (follow_child)
5440 switch_to_thread (parent);
5441 else
5442 switch_to_thread (child);
5443
5444 ecs->event_thread = inferior_thread ();
5445 ecs->ptid = inferior_ptid;
5446 keep_going (ecs);
5447 }
5448
5449 if (follow_child)
5450 switch_to_thread (child);
5451 else
5452 switch_to_thread (parent);
5453
e58b0e63
PA
5454 ecs->event_thread = inferior_thread ();
5455 ecs->ptid = inferior_ptid;
5456
5457 if (should_resume)
5458 keep_going (ecs);
5459 else
22bcd14b 5460 stop_waiting (ecs);
04e68871
DJ
5461 return;
5462 }
94c57d6a
PA
5463 process_event_stop_test (ecs);
5464 return;
488f131b 5465
6c95b8df
PA
5466 case TARGET_WAITKIND_VFORK_DONE:
5467 /* Done with the shared memory region. Re-insert breakpoints in
5468 the parent, and keep going. */
5469
00431a78 5470 context_switch (ecs);
6c95b8df
PA
5471
5472 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5473 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5474
5475 if (handle_stop_requested (ecs))
5476 return;
5477
6c95b8df
PA
5478 /* This also takes care of reinserting breakpoints in the
5479 previously locked inferior. */
5480 keep_going (ecs);
5481 return;
5482
488f131b 5483 case TARGET_WAITKIND_EXECD:
488f131b 5484
cbd2b4e3
PA
5485 /* Note we can't read registers yet (the stop_pc), because we
5486 don't yet know the inferior's post-exec architecture.
5487 'stop_pc' is explicitly read below instead. */
00431a78 5488 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5489
6c95b8df
PA
5490 /* Do whatever is necessary to the parent branch of the vfork. */
5491 handle_vfork_child_exec_or_exit (1);
5492
795e548f
PA
5493 /* This causes the eventpoints and symbol table to be reset.
5494 Must do this now, before trying to determine whether to
5495 stop. */
71b43ef8 5496 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5497
17d8546e
DB
5498 /* In follow_exec we may have deleted the original thread and
5499 created a new one. Make sure that the event thread is the
5500 execd thread for that case (this is a nop otherwise). */
5501 ecs->event_thread = inferior_thread ();
5502
f2ffa92b
PA
5503 ecs->event_thread->suspend.stop_pc
5504 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5505
16c381f0 5506 ecs->event_thread->control.stop_bpstat
a01bda52 5507 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5508 ecs->event_thread->suspend.stop_pc,
5509 ecs->event_thread, &ecs->ws);
795e548f 5510
71b43ef8
PA
5511 /* Note that this may be referenced from inside
5512 bpstat_stop_status above, through inferior_has_execd. */
5513 xfree (ecs->ws.value.execd_pathname);
5514 ecs->ws.value.execd_pathname = NULL;
5515
c65d6b55
PA
5516 if (handle_stop_requested (ecs))
5517 return;
5518
04e68871 5519 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5520 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5521 {
a493e3e2 5522 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5523 keep_going (ecs);
5524 return;
5525 }
94c57d6a
PA
5526 process_event_stop_test (ecs);
5527 return;
488f131b 5528
b4dc5ffa
MK
5529 /* Be careful not to try to gather much state about a thread
5530 that's in a syscall. It's frequently a losing proposition. */
488f131b 5531 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5532 /* Getting the current syscall number. */
94c57d6a
PA
5533 if (handle_syscall_event (ecs) == 0)
5534 process_event_stop_test (ecs);
5535 return;
c906108c 5536
488f131b
JB
5537 /* Before examining the threads further, step this thread to
5538 get it entirely out of the syscall. (We get notice of the
5539 event when the thread is just on the verge of exiting a
5540 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5541 into user code.) */
488f131b 5542 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5543 if (handle_syscall_event (ecs) == 0)
5544 process_event_stop_test (ecs);
5545 return;
c906108c 5546
488f131b 5547 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5548 handle_signal_stop (ecs);
5549 return;
c906108c 5550
b2175913
MS
5551 case TARGET_WAITKIND_NO_HISTORY:
5552 /* Reverse execution: target ran out of history info. */
eab402df 5553
d1988021 5554 /* Switch to the stopped thread. */
00431a78 5555 context_switch (ecs);
edbcda09 5556 infrun_log_debug ("stopped");
d1988021 5557
34b7e8a6 5558 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5559 ecs->event_thread->suspend.stop_pc
5560 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5561
5562 if (handle_stop_requested (ecs))
5563 return;
5564
76727919 5565 gdb::observers::no_history.notify ();
22bcd14b 5566 stop_waiting (ecs);
b2175913 5567 return;
488f131b 5568 }
4f5d7f63
PA
5569}
5570
372316f1
PA
5571/* Restart threads back to what they were trying to do back when we
5572 paused them for an in-line step-over. The EVENT_THREAD thread is
5573 ignored. */
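/* Threads that are already resumed, still need a step-over, or have a
   pending wait status are left alone; every other thread that was
   meant to be running is re-resumed, either by continuing an ongoing
   step or via keep_going_pass_signal.  */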
4d9d9d04
PA
5574
5575static void
372316f1
PA
5576restart_threads (struct thread_info *event_thread)
5577{
372316f1
PA
5578 /* In case the instruction just stepped spawned a new thread. */
5579 update_thread_list ();
5580
08036331 5581 for (thread_info *tp : all_non_exited_threads ())
372316f1 5582 {
f3f8ece4
PA
5583 switch_to_thread_no_regs (tp);
5584
372316f1
PA
5585 if (tp == event_thread)
5586 {
edbcda09
SM
5587 infrun_log_debug ("restart threads: [%s] is event thread",
5588 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5589 continue;
5590 }
5591
5592 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5593 {
edbcda09
SM
5594 infrun_log_debug ("restart threads: [%s] not meant to be running",
5595 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5596 continue;
5597 }
5598
5599 if (tp->resumed)
5600 {
edbcda09
SM
5601 infrun_log_debug ("restart threads: [%s] resumed",
5602 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5603 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5604 continue;
5605 }
5606
5607 if (thread_is_in_step_over_chain (tp))
5608 {
edbcda09
SM
5609 infrun_log_debug ("restart threads: [%s] needs step-over",
5610 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5611 gdb_assert (!tp->resumed);
5612 continue;
5613 }
5614
5615
5616 if (tp->suspend.waitstatus_pending_p)
5617 {
edbcda09
SM
5618 infrun_log_debug ("restart threads: [%s] has pending status",
5619 target_pid_to_str (tp->ptid).c_str ());
719546c4 5620 tp->resumed = true;
372316f1
PA
5621 continue;
5622 }
5623
c65d6b55
PA
5624 gdb_assert (!tp->stop_requested);
5625
372316f1
PA
5626 /* If some thread needs to start a step-over at this point, it
5627 should still be in the step-over queue, and thus skipped
5628 above. */
5629 if (thread_still_needs_step_over (tp))
5630 {
5631 internal_error (__FILE__, __LINE__,
5632 "thread [%s] needs a step-over, but not in "
5633 "step-over queue\n",
a068643d 5634 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5635 }
5636
5637 if (currently_stepping (tp))
5638 {
edbcda09
SM
5639 infrun_log_debug ("restart threads: [%s] was stepping",
5640 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5641 keep_going_stepped_thread (tp);
5642 }
5643 else
5644 {
5645 struct execution_control_state ecss;
5646 struct execution_control_state *ecs = &ecss;
5647
edbcda09
SM
5648 infrun_log_debug ("restart threads: [%s] continuing",
5649 target_pid_to_str (tp->ptid).c_str ());
372316f1 5650 reset_ecs (ecs, tp);
00431a78 5651 switch_to_thread (tp);
372316f1
PA
5652 keep_going_pass_signal (ecs);
5653 }
5654 }
5655}
5656
5657/* Callback for iterate_over_threads. Find a resumed thread that has
5658 a pending waitstatus. */
5659
5660static int
5661resumed_thread_with_pending_status (struct thread_info *tp,
5662 void *arg)
5663{
5664 return (tp->resumed
5665 && tp->suspend.waitstatus_pending_p);
5666}
5667
5668/* Called when we get an event that may finish an in-line or
5669 out-of-line (displaced stepping) step-over started previously.
5670 Return true if the event is processed and we should go back to the
5671 event loop; false if the caller should continue processing the
5672 event. */
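/* This finishes any displaced step of the event thread, clears the
   in-line step-over state if this thread owned it and, on non-stop
   targets, starts a queued step-over and restarts the threads that
   had been paused for the in-line step-over.  */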
5673
5674static int
4d9d9d04
PA
5675finish_step_over (struct execution_control_state *ecs)
5676{
372316f1
PA
5677 int had_step_over_info;
5678
9844051a
SM
5679 displaced_step_finish (ecs->event_thread,
5680 ecs->event_thread->suspend.stop_signal);
4d9d9d04 5681
372316f1
PA
5682 had_step_over_info = step_over_info_valid_p ();
5683
5684 if (had_step_over_info)
4d9d9d04
PA
5685 {
5686 /* If we're stepping over a breakpoint with all threads locked,
5687 then only the thread that was stepped should be reporting
5688 back an event. */
5689 gdb_assert (ecs->event_thread->control.trap_expected);
5690
c65d6b55 5691 clear_step_over_info ();
4d9d9d04
PA
5692 }
5693
fbea99ea 5694 if (!target_is_non_stop_p ())
372316f1 5695 return 0;
4d9d9d04
PA
5696
5697 /* Start a new step-over in another thread if there's one that
5698 needs it. */
5699 start_step_over ();
372316f1
PA
5700
5701 /* If we were stepping over a breakpoint before, and haven't started
5702 a new in-line step-over sequence, then restart all other threads
5703 (except the event thread). We can't do this in all-stop, as then
5704 e.g., we wouldn't be able to issue any other remote packet until
5705 these other threads stop. */
5706 if (had_step_over_info && !step_over_info_valid_p ())
5707 {
5708 struct thread_info *pending;
5709
5710 /* If we only have threads with pending statuses, the restart
5711 below won't restart any thread and so nothing re-inserts the
5712 breakpoint we just stepped over. But we need it inserted
5713 when we later process the pending events, otherwise if
5714 another thread has a pending event for this breakpoint too,
5715 we'd discard its event (because the breakpoint that
5716 originally caused the event was no longer inserted). */
00431a78 5717 context_switch (ecs);
372316f1
PA
5718 insert_breakpoints ();
5719
5720 restart_threads (ecs->event_thread);
5721
5722 /* If we have events pending, go through handle_inferior_event
5723 again, picking up a pending event at random. This avoids
5724 thread starvation. */
5725
5726 /* But not if we just stepped over a watchpoint in order to let
5727 the instruction execute so we can evaluate its expression.
5728 The set of watchpoints that triggered is recorded in the
5729 breakpoint objects themselves (see bp->watchpoint_triggered).
5730 If we processed another event first, that other event could
5731 clobber this info. */
5732 if (ecs->event_thread->stepping_over_watchpoint)
5733 return 0;
5734
5735 pending = iterate_over_threads (resumed_thread_with_pending_status,
5736 NULL);
5737 if (pending != NULL)
5738 {
5739 struct thread_info *tp = ecs->event_thread;
5740 struct regcache *regcache;
5741
edbcda09
SM
5742 infrun_log_debug ("found resumed threads with "
5743 "pending events, saving status");
372316f1
PA
5744
5745 gdb_assert (pending != tp);
5746
5747 /* Record the event thread's event for later. */
5748 save_waitstatus (tp, &ecs->ws);
5749 /* This was cleared early, by handle_inferior_event. Set it
5750 so this pending event is considered by
5751 do_target_wait. */
719546c4 5752 tp->resumed = true;
372316f1
PA
5753
5754 gdb_assert (!tp->executing);
5755
00431a78 5756 regcache = get_thread_regcache (tp);
372316f1
PA
5757 tp->suspend.stop_pc = regcache_read_pc (regcache);
5758
edbcda09
SM
5759 infrun_log_debug ("saved stop_pc=%s for %s "
5760 "(currently_stepping=%d)\n",
5761 paddress (target_gdbarch (),
5762 tp->suspend.stop_pc),
5763 target_pid_to_str (tp->ptid).c_str (),
5764 currently_stepping (tp));
372316f1
PA
5765
5766 /* This in-line step-over finished; clear this so we won't
5767 start a new one. This is what handle_signal_stop would
5768 do, if we returned false. */
5769 tp->stepping_over_breakpoint = 0;
5770
5771 /* Wake up the event loop again. */
5772 mark_async_event_handler (infrun_async_inferior_event_token);
5773
5774 prepare_to_wait (ecs);
5775 return 1;
5776 }
5777 }
5778
5779 return 0;
4d9d9d04
PA
5780}
5781
4f5d7f63
PA
5782/* Come here when the program has stopped with a signal. */
5783
5784static void
5785handle_signal_stop (struct execution_control_state *ecs)
5786{
5787 struct frame_info *frame;
5788 struct gdbarch *gdbarch;
5789 int stopped_by_watchpoint;
5790 enum stop_kind stop_soon;
5791 int random_signal;
c906108c 5792
f0407826
DE
5793 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5794
c65d6b55
PA
5795 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5796
f0407826
DE
5797 /* Do we need to clean up the state of a thread that has
5798 completed a displaced single-step? (Doing so usually affects
5799 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5800 if (finish_step_over (ecs))
5801 return;
f0407826
DE
5802
5803 /* If we either finished a single-step or hit a breakpoint, but
5804 the user wanted this thread to be stopped, pretend we got a
5805 SIG0 (generic unsignaled stop). */
5806 if (ecs->event_thread->stop_requested
5807 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5808 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5809
f2ffa92b
PA
5810 ecs->event_thread->suspend.stop_pc
5811 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5812
527159b7 5813 if (debug_infrun)
237fc4c9 5814 {
00431a78 5815 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5816 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5817
f3f8ece4 5818 switch_to_thread (ecs->event_thread);
5af949e3 5819
edbcda09
SM
5820 infrun_log_debug ("stop_pc=%s",
5821 paddress (reg_gdbarch,
5822 ecs->event_thread->suspend.stop_pc));
d92524f1 5823 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5824 {
5825 CORE_ADDR addr;
abbb1732 5826
edbcda09 5827 infrun_log_debug ("stopped by watchpoint");
237fc4c9 5828
8b88a78e 5829 if (target_stopped_data_address (current_top_target (), &addr))
edbcda09
SM
5830 infrun_log_debug ("stopped data address=%s",
5831 paddress (reg_gdbarch, addr));
237fc4c9 5832 else
edbcda09 5833 infrun_log_debug ("(no data address available)");
237fc4c9
PA
5834 }
5835 }
527159b7 5836
36fa8042
PA
5837 /* This is originated from start_remote(), start_inferior() and
5838 shared libraries hook functions. */
00431a78 5839 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5840 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5841 {
00431a78 5842 context_switch (ecs);
edbcda09 5843 infrun_log_debug ("quietly stopped");
36fa8042 5844 stop_print_frame = 1;
22bcd14b 5845 stop_waiting (ecs);
36fa8042
PA
5846 return;
5847 }
5848
36fa8042
PA
5849 /* This originates from attach_command(). We need to overwrite
5850 the stop_signal here, because some kernels don't ignore a
5851 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5852 See more comments in inferior.h. On the other hand, if we
5853 get a non-SIGSTOP, report it to the user - assume the backend
5854 will handle the SIGSTOP if it should show up later.
5855
5856 Also consider that the attach is complete when we see a
5857 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5858 target extended-remote report it instead of a SIGSTOP
5859 (e.g. gdbserver). We already rely on SIGTRAP being our
5860 signal, so this is no exception.
5861
5862 Also consider that the attach is complete when we see a
5863 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5864 the target to stop all threads of the inferior, in case the
5865 low level attach operation doesn't stop them implicitly. If
5866 they weren't stopped implicitly, then the stub will report a
5867 GDB_SIGNAL_0, meaning: stopped for no particular reason
5868 other than GDB's request. */
5869 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5870 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5871 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5872 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5873 {
5874 stop_print_frame = 1;
22bcd14b 5875 stop_waiting (ecs);
36fa8042
PA
5876 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5877 return;
5878 }
5879
488f131b 5880 /* See if something interesting happened to the non-current thread. If
b40c7d58 5881 so, then switch to that thread. */
d7e15655 5882 if (ecs->ptid != inferior_ptid)
488f131b 5883 {
edbcda09 5884 infrun_log_debug ("context switch");
527159b7 5885
00431a78 5886 context_switch (ecs);
c5aa993b 5887
9a4105ab 5888 if (deprecated_context_hook)
00431a78 5889 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5890 }
c906108c 5891
568d6575
UW
5892 /* At this point, get hold of the now-current thread's frame. */
5893 frame = get_current_frame ();
5894 gdbarch = get_frame_arch (frame);
5895
2adfaa28 5896 /* Pull the single step breakpoints out of the target. */
af48d08f 5897 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5898 {
af48d08f 5899 struct regcache *regcache;
af48d08f 5900 CORE_ADDR pc;
2adfaa28 5901
00431a78 5902 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5903 const address_space *aspace = regcache->aspace ();
5904
af48d08f 5905 pc = regcache_read_pc (regcache);
34b7e8a6 5906
af48d08f
PA
5907 /* However, before doing so, if this single-step breakpoint was
5908 actually for another thread, set this thread up for moving
5909 past it. */
5910 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5911 aspace, pc))
5912 {
5913 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 5914 {
edbcda09
SM
5915 infrun_log_debug ("[%s] hit another thread's single-step "
5916 "breakpoint",
5917 target_pid_to_str (ecs->ptid).c_str ());
af48d08f
PA
5918 ecs->hit_singlestep_breakpoint = 1;
5919 }
5920 }
5921 else
5922 {
edbcda09
SM
5923 infrun_log_debug ("[%s] hit its single-step breakpoint",
5924 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5925 }
488f131b 5926 }
af48d08f 5927 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5928
963f9c80
PA
5929 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5930 && ecs->event_thread->control.trap_expected
5931 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5932 stopped_by_watchpoint = 0;
5933 else
5934 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5935
5936 /* If necessary, step over this watchpoint. We'll be back to display
5937 it in a moment. */
5938 if (stopped_by_watchpoint
d92524f1 5939 && (target_have_steppable_watchpoint
568d6575 5940 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5941 {
488f131b
JB
5942 /* At this point, we are stopped at an instruction which has
5943 attempted to write to a piece of memory under control of
5944 a watchpoint. The instruction hasn't actually executed
5945 yet. If we were to evaluate the watchpoint expression
5946 now, we would get the old value, and therefore no change
5947 would seem to have occurred.
5948
5949 In order to make watchpoints work `right', we really need
5950 to complete the memory write, and then evaluate the
d983da9c
DJ
5951 watchpoint expression. We do this by single-stepping the
5952 target.
5953
7f89fd65 5954 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5955 it. For example, the PA can (with some kernel cooperation)
5956 single step over a watchpoint without disabling the watchpoint.
5957
5958 It is far more common to need to disable a watchpoint to step
5959 the inferior over it. If we have non-steppable watchpoints,
5960 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5961 disable all watchpoints.
5962
5963 Any breakpoint at PC must also be stepped over -- if there's
5964 one, it will have already triggered before the watchpoint
5965 triggered, and we either already reported it to the user, or
5966 it didn't cause a stop and we called keep_going. In either
5967 case, if there was a breakpoint at PC, we must be trying to
5968 step past it. */
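      /* Setting stepping_over_watchpoint makes the keep_going machinery
         single-step this thread, removing watchpoints first on targets
         whose watchpoints are not steppable, so that the write completes
         and the watchpoint expression sees the new value.  */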
5969 ecs->event_thread->stepping_over_watchpoint = 1;
5970 keep_going (ecs);
488f131b
JB
5971 return;
5972 }
5973
4e1c45ea 5974 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5975 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5976 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5977 ecs->event_thread->control.stop_step = 0;
488f131b 5978 stop_print_frame = 1;
488f131b 5979 stopped_by_random_signal = 0;
ddfe970e 5980 bpstat stop_chain = NULL;
488f131b 5981
edb3359d
DJ
5982 /* Hide inlined functions starting here, unless we just performed stepi or
5983 nexti. After stepi and nexti, always show the innermost frame (not any
5984 inline function call sites). */
16c381f0 5985 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 5986 {
00431a78
PA
5987 const address_space *aspace
5988 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
5989
5990 /* skip_inline_frames is expensive, so we avoid it if we can
5991 determine that the address is one where functions cannot have
5992 been inlined. This improves performance with inferiors that
5993 load a lot of shared libraries, because the solib event
5994 breakpoint is defined as the address of a function (i.e. not
5995 inline). Note that we have to check the previous PC as well
5996 as the current one to catch cases when we have just
5997 single-stepped off a breakpoint prior to reinstating it.
5998 Note that we're assuming that the code we single-step to is
5999 not inline, but that's not definitive: there's nothing
6000 preventing the event breakpoint function from containing
6001 inlined code, and the single-step ending up there. If the
6002 user had set a breakpoint on that inlined code, the missing
6003 skip_inline_frames call would break things. Fortunately
6004 that's an extremely unlikely scenario. */
f2ffa92b
PA
6005 if (!pc_at_non_inline_function (aspace,
6006 ecs->event_thread->suspend.stop_pc,
6007 &ecs->ws)
a210c238
MR
6008 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6009 && ecs->event_thread->control.trap_expected
6010 && pc_at_non_inline_function (aspace,
6011 ecs->event_thread->prev_pc,
09ac7c10 6012 &ecs->ws)))
1c5a993e 6013 {
f2ffa92b
PA
6014 stop_chain = build_bpstat_chain (aspace,
6015 ecs->event_thread->suspend.stop_pc,
6016 &ecs->ws);
00431a78 6017 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
6018
6019 /* Re-fetch current thread's frame in case that invalidated
6020 the frame cache. */
6021 frame = get_current_frame ();
6022 gdbarch = get_frame_arch (frame);
6023 }
0574c78f 6024 }
edb3359d 6025
a493e3e2 6026 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 6027 && ecs->event_thread->control.trap_expected
568d6575 6028 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6029 && currently_stepping (ecs->event_thread))
3352ef37 6030 {
b50d7442 6031 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6032 also on an instruction that needs to be stepped multiple
1777feb0 6033	 times before it's been fully executed.  E.g., architectures
3352ef37
AC
6034 with a delay slot. It needs to be stepped twice, once for
6035 the instruction and once for the delay slot. */
6036 int step_through_delay
568d6575 6037 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6038
edbcda09
SM
6039 if (step_through_delay)
6040 infrun_log_debug ("step through delay");
6041
16c381f0
JK
6042 if (ecs->event_thread->control.step_range_end == 0
6043 && step_through_delay)
3352ef37
AC
6044 {
6045 /* The user issued a continue when stopped at a breakpoint.
6046 Set up for another trap and get out of here. */
4e1c45ea 6047 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6048 keep_going (ecs);
6049 return;
6050 }
6051 else if (step_through_delay)
6052 {
6053 /* The user issued a step when stopped at a breakpoint.
6054 Maybe we should stop, maybe we should not - the delay
6055 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6056 case, don't decide that here, just set
6057 ecs->stepping_over_breakpoint, making sure we
6058 single-step again before breakpoints are re-inserted. */
4e1c45ea 6059 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6060 }
6061 }
6062
ab04a2af
TT
6063 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6064 handles this event. */
6065 ecs->event_thread->control.stop_bpstat
a01bda52 6066 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
6067 ecs->event_thread->suspend.stop_pc,
6068 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6069
ab04a2af
TT
 6070 /* Set this again, in case evaluating the break condition called a
 6071 function. */
6072 stop_print_frame = 1;
73dd234f 6073
ab04a2af
TT
6074 /* This is where we handle "moribund" watchpoints. Unlike
 6075 software breakpoint traps, hardware watchpoint traps are
6076 always distinguishable from random traps. If no high-level
6077 watchpoint is associated with the reported stop data address
6078 anymore, then the bpstat does not explain the signal ---
6079 simply make sure to ignore it if `stopped_by_watchpoint' is
6080 set. */
6081
edbcda09 6082 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6083 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6084 GDB_SIGNAL_TRAP)
ab04a2af 6085 && stopped_by_watchpoint)
edbcda09
SM
6086 {
6087 infrun_log_debug ("no user watchpoint explains watchpoint SIGTRAP, "
6088 "ignoring");
6089 }
73dd234f 6090
bac7d97b 6091 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6092 at one stage in the past included checks for an inferior
6093 function call's call dummy's return breakpoint. The original
6094 comment, that went with the test, read:
03cebad2 6095
ab04a2af
TT
6096 ``End of a stack dummy. Some systems (e.g. Sony news) give
6097 another signal besides SIGTRAP, so check here as well as
6098 above.''
73dd234f 6099
ab04a2af
TT
 6100 If someone ever tries to get call dummies on a
6101 non-executable stack to work (where the target would stop
6102 with something like a SIGSEGV), then those tests might need
6103 to be re-instated. Given, however, that the tests were only
6104 enabled when momentary breakpoints were not being used, I
6105 suspect that it won't be the case.
488f131b 6106
ab04a2af
TT
6107 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6108 be necessary for call dummies on a non-executable stack on
6109 SPARC. */
488f131b 6110
bac7d97b 6111 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6112 random_signal
6113 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6114 ecs->event_thread->suspend.stop_signal);
bac7d97b 6115
1cf4d951
PA
6116 /* Maybe this was a trap for a software breakpoint that has since
6117 been removed. */
6118 if (random_signal && target_stopped_by_sw_breakpoint ())
6119 {
5133a315
LM
6120 if (gdbarch_program_breakpoint_here_p (gdbarch,
6121 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6122 {
6123 struct regcache *regcache;
6124 int decr_pc;
6125
6126 /* Re-adjust PC to what the program would see if GDB was not
6127 debugging it. */
00431a78 6128 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6129 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6130 if (decr_pc != 0)
6131 {
07036511
TT
6132 gdb::optional<scoped_restore_tmpl<int>>
6133 restore_operation_disable;
1cf4d951
PA
6134
6135 if (record_full_is_used ())
07036511
TT
6136 restore_operation_disable.emplace
6137 (record_full_gdb_operation_disable_set ());
1cf4d951 6138
f2ffa92b
PA
6139 regcache_write_pc (regcache,
6140 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6141 }
6142 }
6143 else
6144 {
6145 /* A delayed software breakpoint event. Ignore the trap. */
edbcda09 6146 infrun_log_debug ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
6147 random_signal = 0;
6148 }
6149 }
6150
6151 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6152 has since been removed. */
6153 if (random_signal && target_stopped_by_hw_breakpoint ())
6154 {
6155 /* A delayed hardware breakpoint event. Ignore the trap. */
edbcda09
SM
6156 infrun_log_debug ("delayed hardware breakpoint/watchpoint "
6157 "trap, ignoring");
1cf4d951
PA
6158 random_signal = 0;
6159 }
6160
bac7d97b
PA
6161 /* If not, perhaps stepping/nexting can. */
6162 if (random_signal)
6163 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6164 && currently_stepping (ecs->event_thread));
ab04a2af 6165
2adfaa28
PA
6166 /* Perhaps the thread hit a single-step breakpoint of _another_
6167 thread. Single-step breakpoints are transparent to the
6168 breakpoints module. */
6169 if (random_signal)
6170 random_signal = !ecs->hit_singlestep_breakpoint;
6171
bac7d97b
PA
6172 /* No? Perhaps we got a moribund watchpoint. */
6173 if (random_signal)
6174 random_signal = !stopped_by_watchpoint;
ab04a2af 6175
c65d6b55
PA
6176 /* Always stop if the user explicitly requested this thread to
6177 remain stopped. */
6178 if (ecs->event_thread->stop_requested)
6179 {
6180 random_signal = 1;
edbcda09 6181 infrun_log_debug ("user-requested stop");
c65d6b55
PA
6182 }
6183
488f131b
JB
6184 /* For the program's own signals, act according to
6185 the signal handling tables. */
6186
ce12b012 6187 if (random_signal)
488f131b
JB
6188 {
6189 /* Signal not for debugging purposes. */
5b6d1e4f 6190 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
c9737c08 6191 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6192
edbcda09
SM
6193 infrun_log_debug ("random signal (%s)",
6194 gdb_signal_to_symbol_string (stop_signal));
527159b7 6195
488f131b
JB
6196 stopped_by_random_signal = 1;
6197
252fbfc8
PA
6198 /* Always stop on signals if we're either just gaining control
6199 of the program, or the user explicitly requested this thread
6200 to remain stopped. */
d6b48e9c 6201 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6202 || ecs->event_thread->stop_requested
24291992 6203 || (!inf->detaching
16c381f0 6204 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 6205 {
22bcd14b 6206 stop_waiting (ecs);
488f131b
JB
6207 return;
6208 }
b57bacec
PA
6209
6210 /* Notify observers the signal has "handle print" set. Note we
6211 returned early above if stopping; normal_stop handles the
6212 printing in that case. */
6213 if (signal_print[ecs->event_thread->suspend.stop_signal])
6214 {
6215 /* The signal table tells us to print about this signal. */
223ffa71 6216 target_terminal::ours_for_output ();
76727919 6217 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6218 target_terminal::inferior ();
b57bacec 6219 }
488f131b
JB
6220
6221 /* Clear the signal if it should not be passed. */
16c381f0 6222 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6223 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6224
f2ffa92b 6225 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6226 && ecs->event_thread->control.trap_expected
8358c15c 6227 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6228 {
6229 /* We were just starting a new sequence, attempting to
6230 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6231 Instead this signal arrives. This signal will take us out
68f53502
AC
6232 of the stepping range so GDB needs to remember to, when
6233 the signal handler returns, resume stepping off that
6234 breakpoint. */
6235 /* To simplify things, "continue" is forced to use the same
6236 code paths as single-step - set a breakpoint at the
6237 signal return address and then, once hit, step off that
6238 breakpoint. */
edbcda09 6239 infrun_log_debug ("signal arrived while stepping over breakpoint");
d3169d93 6240
2c03e5be 6241 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6242 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6243 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6244 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6245
6246 /* If we were nexting/stepping some other thread, switch to
6247 it, so that we don't continue it, losing control. */
6248 if (!switch_back_to_stepped_thread (ecs))
6249 keep_going (ecs);
9d799f85 6250 return;
68f53502 6251 }
9d799f85 6252
e5f8a7cc 6253 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6254 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6255 ecs->event_thread)
e5f8a7cc 6256 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6257 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6258 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6259 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6260 {
6261 /* The inferior is about to take a signal that will take it
6262 out of the single step range. Set a breakpoint at the
6263 current PC (which is presumably where the signal handler
6264 will eventually return) and then allow the inferior to
6265 run free.
6266
6267 Note that this is only needed for a signal delivered
6268 while in the single-step range. Nested signals aren't a
6269 problem as they eventually all return. */
edbcda09 6270 infrun_log_debug ("signal may take us out of single-step range");
237fc4c9 6271
372316f1 6272 clear_step_over_info ();
2c03e5be 6273 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6274 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6275 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6276 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6277 keep_going (ecs);
6278 return;
d303a6c7 6279 }
9d799f85 6280
85102364 6281 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6282 when either there's a nested signal, or when there's a
6283 pending signal enabled just as the signal handler returns
6284 (leaving the inferior at the step-resume-breakpoint without
6285 actually executing it). Either way continue until the
6286 breakpoint is really hit. */
c447ac0b
PA
6287
6288 if (!switch_back_to_stepped_thread (ecs))
6289 {
edbcda09 6290 infrun_log_debug ("random signal, keep going");
c447ac0b
PA
6291
6292 keep_going (ecs);
6293 }
6294 return;
488f131b 6295 }
94c57d6a
PA
6296
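 /* The event is explained by a breakpoint or by stepping; now test
 whether it should actually cause a stop. */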
6297 process_event_stop_test (ecs);
6298}
6299
6300/* Come here when we've got some debug event / signal we can explain
6301 (IOW, not a random signal), and test whether it should cause a
6302 stop, or whether we should resume the inferior (transparently).
6303 E.g., could be a breakpoint whose condition evaluates false; we
6304 could be still stepping within the line; etc. */
6305
6306static void
6307process_event_stop_test (struct execution_control_state *ecs)
6308{
6309 struct symtab_and_line stop_pc_sal;
6310 struct frame_info *frame;
6311 struct gdbarch *gdbarch;
cdaa5b73
PA
6312 CORE_ADDR jmp_buf_pc;
6313 struct bpstat_what what;
94c57d6a 6314
cdaa5b73 6315 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6316
cdaa5b73
PA
6317 frame = get_current_frame ();
6318 gdbarch = get_frame_arch (frame);
fcf3daef 6319
cdaa5b73 6320 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6321
cdaa5b73
PA
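 /* Remember whether we stopped at a call-dummy breakpoint (the
 return of an inferior function call). */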
6322 if (what.call_dummy)
6323 {
6324 stop_stack_dummy = what.call_dummy;
6325 }
186c406b 6326
243a9253
PA
6327 /* A few breakpoint types have callbacks associated (e.g.,
6328 bp_jit_event). Run them now. */
6329 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6330
cdaa5b73
PA
6331 /* If we hit an internal event that triggers symbol changes, the
6332 current frame will be invalidated within bpstat_what (e.g., if we
6333 hit an internal solib event). Re-fetch it. */
6334 frame = get_current_frame ();
6335 gdbarch = get_frame_arch (frame);
e2e4d78b 6336
cdaa5b73
PA
6337 switch (what.main_action)
6338 {
6339 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6340 /* If we hit the breakpoint at longjmp while stepping, we
6341 install a momentary breakpoint at the target of the
6342 jmp_buf. */
186c406b 6343
edbcda09 6344 infrun_log_debug ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 6345
cdaa5b73 6346 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6347
cdaa5b73
PA
6348 if (what.is_longjmp)
6349 {
6350 struct value *arg_value;
6351
6352 /* If we set the longjmp breakpoint via a SystemTap probe,
6353 then use it to extract the arguments. The destination PC
6354 is the third argument to the probe. */
6355 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6356 if (arg_value)
8fa0c4f8
AA
6357 {
6358 jmp_buf_pc = value_as_address (arg_value);
6359 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6360 }
cdaa5b73
PA
6361 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6362 || !gdbarch_get_longjmp_target (gdbarch,
6363 frame, &jmp_buf_pc))
e2e4d78b 6364 {
edbcda09
SM
6365 infrun_log_debug ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6366 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
6367 keep_going (ecs);
6368 return;
e2e4d78b 6369 }
e2e4d78b 6370
cdaa5b73
PA
6371 /* Insert a breakpoint at resume address. */
6372 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6373 }
6374 else
6375 check_exception_resume (ecs, frame);
6376 keep_going (ecs);
6377 return;
e81a37f7 6378
cdaa5b73
PA
6379 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6380 {
6381 struct frame_info *init_frame;
e81a37f7 6382
cdaa5b73 6383 /* There are several cases to consider.
c906108c 6384
cdaa5b73
PA
6385 1. The initiating frame no longer exists. In this case we
6386 must stop, because the exception or longjmp has gone too
6387 far.
2c03e5be 6388
cdaa5b73
PA
6389 2. The initiating frame exists, and is the same as the
6390 current frame. We stop, because the exception or longjmp
6391 has been caught.
2c03e5be 6392
cdaa5b73
PA
6393 3. The initiating frame exists and is different from the
6394 current frame. This means the exception or longjmp has
6395 been caught beneath the initiating frame, so keep going.
c906108c 6396
cdaa5b73
PA
6397 4. longjmp breakpoint has been placed just to protect
6398 against stale dummy frames and user is not interested in
6399 stopping around longjmps. */
c5aa993b 6400
edbcda09 6401 infrun_log_debug ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 6402
cdaa5b73
PA
6403 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6404 != NULL);
6405 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6406
cdaa5b73
PA
6407 if (what.is_longjmp)
6408 {
b67a2c6f 6409 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6410
cdaa5b73 6411 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6412 {
cdaa5b73
PA
6413 /* Case 4. */
6414 keep_going (ecs);
6415 return;
e5ef252a 6416 }
cdaa5b73 6417 }
c5aa993b 6418
cdaa5b73 6419 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6420
cdaa5b73
PA
6421 if (init_frame)
6422 {
6423 struct frame_id current_id
6424 = get_frame_id (get_current_frame ());
6425 if (frame_id_eq (current_id,
6426 ecs->event_thread->initiating_frame))
6427 {
6428 /* Case 2. Fall through. */
6429 }
6430 else
6431 {
6432 /* Case 3. */
6433 keep_going (ecs);
6434 return;
6435 }
68f53502 6436 }
488f131b 6437
cdaa5b73
PA
6438 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6439 exists. */
6440 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6441
bdc36728 6442 end_stepping_range (ecs);
cdaa5b73
PA
6443 }
6444 return;
e5ef252a 6445
cdaa5b73 6446 case BPSTAT_WHAT_SINGLE:
edbcda09 6447 infrun_log_debug ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
6448 ecs->event_thread->stepping_over_breakpoint = 1;
6449 /* Still need to check other stuff, at least the case where we
6450 are stepping and step out of the right range. */
6451 break;
e5ef252a 6452
cdaa5b73 6453 case BPSTAT_WHAT_STEP_RESUME:
edbcda09 6454 infrun_log_debug ("BPSTAT_WHAT_STEP_RESUME");
e5ef252a 6455
cdaa5b73
PA
6456 delete_step_resume_breakpoint (ecs->event_thread);
6457 if (ecs->event_thread->control.proceed_to_finish
6458 && execution_direction == EXEC_REVERSE)
6459 {
6460 struct thread_info *tp = ecs->event_thread;
6461
6462 /* We are finishing a function in reverse, and just hit the
6463 step-resume breakpoint at the start address of the
6464 function, and we're almost there -- just need to back up
6465 by one more single-step, which should take us back to the
6466 function call. */
6467 tp->control.step_range_start = tp->control.step_range_end = 1;
6468 keep_going (ecs);
e5ef252a 6469 return;
cdaa5b73
PA
6470 }
6471 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6472 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6473 && execution_direction == EXEC_REVERSE)
6474 {
6475 /* We are stepping over a function call in reverse, and just
6476 hit the step-resume breakpoint at the start address of
6477 the function. Go back to single-stepping, which should
6478 take us back to the function call. */
6479 ecs->event_thread->stepping_over_breakpoint = 1;
6480 keep_going (ecs);
6481 return;
6482 }
6483 break;
e5ef252a 6484
cdaa5b73 6485 case BPSTAT_WHAT_STOP_NOISY:
edbcda09 6486 infrun_log_debug ("BPSTAT_WHAT_STOP_NOISY");
cdaa5b73 6487 stop_print_frame = 1;
e5ef252a 6488
99619bea
PA
 6489 /* Assume the thread stopped for a breakpoint. We'll still check
6490 whether a/the breakpoint is there when the thread is next
6491 resumed. */
6492 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6493
22bcd14b 6494 stop_waiting (ecs);
cdaa5b73 6495 return;
e5ef252a 6496
cdaa5b73 6497 case BPSTAT_WHAT_STOP_SILENT:
edbcda09 6498 infrun_log_debug ("BPSTAT_WHAT_STOP_SILENT");
cdaa5b73 6499 stop_print_frame = 0;
e5ef252a 6500
99619bea
PA
 6501 /* Assume the thread stopped for a breakpoint. We'll still check
6502 whether a/the breakpoint is there when the thread is next
6503 resumed. */
6504 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6505 stop_waiting (ecs);
cdaa5b73
PA
6506 return;
6507
6508 case BPSTAT_WHAT_HP_STEP_RESUME:
edbcda09 6509 infrun_log_debug ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
6510
6511 delete_step_resume_breakpoint (ecs->event_thread);
6512 if (ecs->event_thread->step_after_step_resume_breakpoint)
6513 {
6514 /* Back when the step-resume breakpoint was inserted, we
6515 were trying to single-step off a breakpoint. Go back to
6516 doing that. */
6517 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6518 ecs->event_thread->stepping_over_breakpoint = 1;
6519 keep_going (ecs);
6520 return;
e5ef252a 6521 }
cdaa5b73
PA
6522 break;
6523
6524 case BPSTAT_WHAT_KEEP_CHECKING:
6525 break;
e5ef252a 6526 }
c906108c 6527
af48d08f
PA
6528 /* If we stepped a permanent breakpoint and we had a high priority
6529 step-resume breakpoint for the address we stepped, but we didn't
6530 hit it, then we must have stepped into the signal handler. The
6531 step-resume was only necessary to catch the case of _not_
6532 stepping into the handler, so delete it, and fall through to
6533 checking whether the step finished. */
6534 if (ecs->event_thread->stepped_breakpoint)
6535 {
6536 struct breakpoint *sr_bp
6537 = ecs->event_thread->control.step_resume_breakpoint;
6538
8d707a12
PA
6539 if (sr_bp != NULL
6540 && sr_bp->loc->permanent
af48d08f
PA
6541 && sr_bp->type == bp_hp_step_resume
6542 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6543 {
edbcda09 6544 infrun_log_debug ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
6545 delete_step_resume_breakpoint (ecs->event_thread);
6546 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6547 }
6548 }
6549
cdaa5b73
PA
6550 /* We come here if we hit a breakpoint but should not stop for it.
6551 Possibly we also were stepping and should stop for that. So fall
6552 through and test for stepping. But, if not stepping, do not
6553 stop. */
c906108c 6554
a7212384
UW
6555 /* In all-stop mode, if we're currently stepping but have stopped in
6556 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6557 if (switch_back_to_stepped_thread (ecs))
6558 return;
776f04fa 6559
8358c15c 6560 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6561 {
edbcda09 6562 infrun_log_debug ("step-resume breakpoint is inserted");
527159b7 6563
488f131b
JB
6564 /* Having a step-resume breakpoint overrides anything
6565 else having to do with stepping commands until
6566 that breakpoint is reached. */
488f131b
JB
6567 keep_going (ecs);
6568 return;
6569 }
c5aa993b 6570
16c381f0 6571 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6572 {
edbcda09 6573 infrun_log_debug ("no stepping, continue");
488f131b 6574 /* Likewise if we aren't even stepping. */
488f131b
JB
6575 keep_going (ecs);
6576 return;
6577 }
c5aa993b 6578
4b7703ad
JB
6579 /* Re-fetch current thread's frame in case the code above caused
6580 the frame cache to be re-initialized, making our FRAME variable
6581 a dangling pointer. */
6582 frame = get_current_frame ();
628fe4e4 6583 gdbarch = get_frame_arch (frame);
7e324e48 6584 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6585
488f131b 6586 /* If stepping through a line, keep going if still within it.
c906108c 6587
488f131b
JB
6588 Note that step_range_end is the address of the first instruction
6589 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6590 within it!
6591
6592 Note also that during reverse execution, we may be stepping
6593 through a function epilogue and therefore must detect when
6594 the current-frame changes in the middle of a line. */
6595
f2ffa92b
PA
6596 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6597 ecs->event_thread)
31410e84 6598 && (execution_direction != EXEC_REVERSE
388a8562 6599 || frame_id_eq (get_frame_id (frame),
16c381f0 6600 ecs->event_thread->control.step_frame_id)))
488f131b 6601 {
edbcda09
SM
6602 infrun_log_debug
6603 ("stepping inside range [%s-%s]",
6604 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6605 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6606
c1e36e3e
PA
6607 /* Tentatively re-enable range stepping; `resume' disables it if
6608 necessary (e.g., if we're stepping over a breakpoint or we
6609 have software watchpoints). */
6610 ecs->event_thread->control.may_range_step = 1;
6611
b2175913
MS
6612 /* When stepping backward, stop at beginning of line range
6613 (unless it's the function entry point, in which case
6614 keep going back to the call point). */
f2ffa92b 6615 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6616 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6617 && stop_pc != ecs->stop_func_start
6618 && execution_direction == EXEC_REVERSE)
bdc36728 6619 end_stepping_range (ecs);
b2175913
MS
6620 else
6621 keep_going (ecs);
6622
488f131b
JB
6623 return;
6624 }
c5aa993b 6625
488f131b 6626 /* We stepped out of the stepping range. */
c906108c 6627
488f131b 6628 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6629 loader dynamic symbol resolution code...
6630
6631 EXEC_FORWARD: we keep on single stepping until we exit the run
6632 time loader code and reach the callee's address.
6633
6634 EXEC_REVERSE: we've already executed the callee (backward), and
6635 the runtime loader code is handled just like any other
6636 undebuggable function call. Now we need only keep stepping
6637 backward through the trampoline code, and that's handled further
6638 down, so there is nothing for us to do here. */
6639
6640 if (execution_direction != EXEC_REVERSE
16c381f0 6641 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6642 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6643 {
4c8c40e6 6644 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6645 gdbarch_skip_solib_resolver (gdbarch,
6646 ecs->event_thread->suspend.stop_pc);
c906108c 6647
edbcda09 6648 infrun_log_debug ("stepped into dynsym resolve code");
527159b7 6649
488f131b
JB
6650 if (pc_after_resolver)
6651 {
6652 /* Set up a step-resume breakpoint at the address
6653 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6654 symtab_and_line sr_sal;
488f131b 6655 sr_sal.pc = pc_after_resolver;
6c95b8df 6656 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6657
a6d9a66e
UW
6658 insert_step_resume_breakpoint_at_sal (gdbarch,
6659 sr_sal, null_frame_id);
c5aa993b 6660 }
c906108c 6661
488f131b
JB
6662 keep_going (ecs);
6663 return;
6664 }
c906108c 6665
1d509aa6
MM
6666 /* Step through an indirect branch thunk. */
6667 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6668 && gdbarch_in_indirect_branch_thunk (gdbarch,
6669 ecs->event_thread->suspend.stop_pc))
1d509aa6 6670 {
edbcda09 6671 infrun_log_debug ("stepped into indirect branch thunk");
1d509aa6
MM
6672 keep_going (ecs);
6673 return;
6674 }
6675
16c381f0
JK
6676 if (ecs->event_thread->control.step_range_end != 1
6677 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6678 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6679 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6680 {
edbcda09 6681 infrun_log_debug ("stepped into signal trampoline");
42edda50 6682 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6683 a signal trampoline (either by a signal being delivered or by
6684 the signal handler returning). Just single-step until the
6685 inferior leaves the trampoline (either by calling the handler
6686 or returning). */
488f131b
JB
6687 keep_going (ecs);
6688 return;
6689 }
c906108c 6690
14132e89
MR
6691 /* If we're in the return path from a shared library trampoline,
6692 we want to proceed through the trampoline when stepping. */
6693 /* macro/2012-04-25: This needs to come before the subroutine
6694 call check below as on some targets return trampolines look
6695 like subroutine calls (MIPS16 return thunks). */
6696 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6697 ecs->event_thread->suspend.stop_pc,
6698 ecs->stop_func_name)
14132e89
MR
6699 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6700 {
6701 /* Determine where this trampoline returns. */
f2ffa92b
PA
6702 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6703 CORE_ADDR real_stop_pc
6704 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 6705
edbcda09 6706 infrun_log_debug ("stepped into solib return tramp");
14132e89
MR
6707
6708 /* Only proceed through if we know where it's going. */
6709 if (real_stop_pc)
6710 {
6711 /* And put the step-breakpoint there and go until there. */
51abb421 6712 symtab_and_line sr_sal;
14132e89
MR
6713 sr_sal.pc = real_stop_pc;
6714 sr_sal.section = find_pc_overlay (sr_sal.pc);
6715 sr_sal.pspace = get_frame_program_space (frame);
6716
6717 /* Do not specify what the fp should be when we stop since
6718 on some machines the prologue is where the new fp value
6719 is established. */
6720 insert_step_resume_breakpoint_at_sal (gdbarch,
6721 sr_sal, null_frame_id);
6722
6723 /* Restart without fiddling with the step ranges or
6724 other state. */
6725 keep_going (ecs);
6726 return;
6727 }
6728 }
6729
c17eaafe
DJ
6730 /* Check for subroutine calls. The check for the current frame
6731 equalling the step ID is not necessary - the check of the
6732 previous frame's ID is sufficient - but it is a common case and
6733 cheaper than checking the previous frame's ID.
14e60db5
DJ
6734
6735 NOTE: frame_id_eq will never report two invalid frame IDs as
6736 being equal, so to get into this block, both the current and
6737 previous frame must have valid frame IDs. */
005ca36a
JB
6738 /* The outer_frame_id check is a heuristic to detect stepping
6739 through startup code. If we step over an instruction which
6740 sets the stack pointer from an invalid value to a valid value,
6741 we may detect that as a subroutine call from the mythical
6742 "outermost" function. This could be fixed by marking
6743 outermost frames as !stack_p,code_p,special_p. Then the
6744 initial outermost frame, before sp was valid, would
ce6cca6d 6745 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6746 for more. */
edb3359d 6747 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6748 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6749 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6750 ecs->event_thread->control.step_stack_frame_id)
6751 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6752 outer_frame_id)
885eeb5b 6753 || (ecs->event_thread->control.step_start_function
f2ffa92b 6754 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6755 {
f2ffa92b 6756 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6757 CORE_ADDR real_stop_pc;
8fb3e588 6758
edbcda09 6759 infrun_log_debug ("stepped into subroutine");
527159b7 6760
b7a084be 6761 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6762 {
6763 /* I presume that step_over_calls is only 0 when we're
6764 supposed to be stepping at the assembly language level
6765 ("stepi"). Just stop. */
388a8562 6766 /* And this works the same backward as frontward. MVS */
bdc36728 6767 end_stepping_range (ecs);
95918acb
AC
6768 return;
6769 }
8fb3e588 6770
388a8562
MS
6771 /* Reverse stepping through solib trampolines. */
6772
6773 if (execution_direction == EXEC_REVERSE
16c381f0 6774 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6775 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6776 || (ecs->stop_func_start == 0
6777 && in_solib_dynsym_resolve_code (stop_pc))))
6778 {
6779 /* Any solib trampoline code can be handled in reverse
6780 by simply continuing to single-step. We have already
6781 executed the solib function (backwards), and a few
6782 steps will take us back through the trampoline to the
6783 caller. */
6784 keep_going (ecs);
6785 return;
6786 }
6787
16c381f0 6788 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6789 {
b2175913
MS
6790 /* We're doing a "next".
6791
6792 Normal (forward) execution: set a breakpoint at the
6793 callee's return address (the address at which the caller
6794 will resume).
6795
 6796 Reverse (backward) execution: set the step-resume
6797 breakpoint at the start of the function that we just
6798 stepped into (backwards), and continue to there. When we
6130d0b7 6799 get there, we'll need to single-step back to the caller. */
b2175913
MS
6800
6801 if (execution_direction == EXEC_REVERSE)
6802 {
acf9414f
JK
6803 /* If we're already at the start of the function, we've either
6804 just stepped backward into a single instruction function,
6805 or stepped back out of a signal handler to the first instruction
6806 of the function. Just keep going, which will single-step back
6807 to the caller. */
58c48e72 6808 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6809 {
acf9414f 6810 /* Normal function call return (static or dynamic). */
51abb421 6811 symtab_and_line sr_sal;
acf9414f
JK
6812 sr_sal.pc = ecs->stop_func_start;
6813 sr_sal.pspace = get_frame_program_space (frame);
6814 insert_step_resume_breakpoint_at_sal (gdbarch,
6815 sr_sal, null_frame_id);
6816 }
b2175913
MS
6817 }
6818 else
568d6575 6819 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6820
8567c30f
AC
6821 keep_going (ecs);
6822 return;
6823 }
a53c66de 6824
95918acb 6825 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6826 calling routine and the real function), locate the real
6827 function. That's what tells us (a) whether we want to step
6828 into it at all, and (b) what prologue we want to run to the
6829 end of, if we do step into it. */
568d6575 6830 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6831 if (real_stop_pc == 0)
568d6575 6832 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6833 if (real_stop_pc != 0)
6834 ecs->stop_func_start = real_stop_pc;
8fb3e588 6835
db5f024e 6836 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6837 {
51abb421 6838 symtab_and_line sr_sal;
1b2bfbb9 6839 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6840 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6841
a6d9a66e
UW
6842 insert_step_resume_breakpoint_at_sal (gdbarch,
6843 sr_sal, null_frame_id);
8fb3e588
AC
6844 keep_going (ecs);
6845 return;
1b2bfbb9
RC
6846 }
6847
95918acb 6848 /* If we have line number information for the function we are
1bfeeb0f
JL
6849 thinking of stepping into and the function isn't on the skip
6850 list, step into it.
95918acb 6851
8fb3e588
AC
6852 If there are several symtabs at that PC (e.g. with include
 6853 files), we just want to know whether *any* of them have line
6854 numbers. find_pc_line handles this. */
95918acb
AC
6855 {
6856 struct symtab_and_line tmp_sal;
8fb3e588 6857
95918acb 6858 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6859 if (tmp_sal.line != 0
85817405 6860 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6861 tmp_sal)
6862 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6863 {
b2175913 6864 if (execution_direction == EXEC_REVERSE)
568d6575 6865 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6866 else
568d6575 6867 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6868 return;
6869 }
6870 }
6871
6872 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6873 set, we stop the step so that the user has a chance to switch
6874 in assembly mode. */
16c381f0 6875 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6876 && step_stop_if_no_debug)
95918acb 6877 {
bdc36728 6878 end_stepping_range (ecs);
95918acb
AC
6879 return;
6880 }
6881
b2175913
MS
6882 if (execution_direction == EXEC_REVERSE)
6883 {
acf9414f
JK
6884 /* If we're already at the start of the function, we've either just
6885 stepped backward into a single instruction function without line
6886 number info, or stepped back out of a signal handler to the first
6887 instruction of the function without line number info. Just keep
6888 going, which will single-step back to the caller. */
6889 if (ecs->stop_func_start != stop_pc)
6890 {
6891 /* Set a breakpoint at callee's start address.
6892 From there we can step once and be back in the caller. */
51abb421 6893 symtab_and_line sr_sal;
acf9414f
JK
6894 sr_sal.pc = ecs->stop_func_start;
6895 sr_sal.pspace = get_frame_program_space (frame);
6896 insert_step_resume_breakpoint_at_sal (gdbarch,
6897 sr_sal, null_frame_id);
6898 }
b2175913
MS
6899 }
6900 else
6901 /* Set a breakpoint at callee's return address (the address
6902 at which the caller will resume). */
568d6575 6903 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6904
95918acb 6905 keep_going (ecs);
488f131b 6906 return;
488f131b 6907 }
c906108c 6908
fdd654f3
MS
6909 /* Reverse stepping through solib trampolines. */
6910
6911 if (execution_direction == EXEC_REVERSE
16c381f0 6912 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 6913 {
f2ffa92b
PA
6914 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6915
fdd654f3
MS
6916 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6917 || (ecs->stop_func_start == 0
6918 && in_solib_dynsym_resolve_code (stop_pc)))
6919 {
6920 /* Any solib trampoline code can be handled in reverse
6921 by simply continuing to single-step. We have already
6922 executed the solib function (backwards), and a few
6923 steps will take us back through the trampoline to the
6924 caller. */
6925 keep_going (ecs);
6926 return;
6927 }
6928 else if (in_solib_dynsym_resolve_code (stop_pc))
6929 {
6930 /* Stepped backward into the solib dynsym resolver.
6931 Set a breakpoint at its start and continue, then
6932 one more step will take us out. */
51abb421 6933 symtab_and_line sr_sal;
fdd654f3 6934 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6935 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6936 insert_step_resume_breakpoint_at_sal (gdbarch,
6937 sr_sal, null_frame_id);
6938 keep_going (ecs);
6939 return;
6940 }
6941 }
6942
8c95582d
AB
6943 /* This always returns the sal for the inner-most frame when we are in a
6944 stack of inlined frames, even if GDB actually believes that it is in a
6945 more outer frame. This is checked for below by calls to
6946 inline_skipped_frames. */
f2ffa92b 6947 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 6948
1b2bfbb9
RC
6949 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6950 the trampoline processing logic, however, there are some trampolines
6951 that have no names, so we should do trampoline handling first. */
16c381f0 6952 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6953 && ecs->stop_func_name == NULL
2afb61aa 6954 && stop_pc_sal.line == 0)
1b2bfbb9 6955 {
edbcda09 6956 infrun_log_debug ("stepped into undebuggable function");
527159b7 6957
1b2bfbb9 6958 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
6959 undebuggable function (where there is no debugging information
6960 and no line number corresponding to the address where the
1b2bfbb9
RC
6961 inferior stopped). Since we want to skip this kind of code,
6962 we keep going until the inferior returns from this
14e60db5
DJ
6963 function - unless the user has asked us not to (via
6964 set step-mode) or we no longer know how to get back
6965 to the call site. */
6966 if (step_stop_if_no_debug
c7ce8faa 6967 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6968 {
6969 /* If we have no line number and the step-stop-if-no-debug
6970 is set, we stop the step so that the user has a chance to
6971 switch in assembly mode. */
bdc36728 6972 end_stepping_range (ecs);
1b2bfbb9
RC
6973 return;
6974 }
6975 else
6976 {
6977 /* Set a breakpoint at callee's return address (the address
6978 at which the caller will resume). */
568d6575 6979 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6980 keep_going (ecs);
6981 return;
6982 }
6983 }
6984
16c381f0 6985 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6986 {
6987 /* It is stepi or nexti. We always want to stop stepping after
6988 one instruction. */
edbcda09 6989 infrun_log_debug ("stepi/nexti");
bdc36728 6990 end_stepping_range (ecs);
1b2bfbb9
RC
6991 return;
6992 }
6993
2afb61aa 6994 if (stop_pc_sal.line == 0)
488f131b
JB
6995 {
6996 /* We have no line number information. That means to stop
6997 stepping (does this always happen right after one instruction,
6998 when we do "s" in a function with no line numbers,
6999 or can this happen as a result of a return or longjmp?). */
edbcda09 7000 infrun_log_debug ("no line number info");
bdc36728 7001 end_stepping_range (ecs);
488f131b
JB
7002 return;
7003 }
c906108c 7004
edb3359d
DJ
7005 /* Look for "calls" to inlined functions, part one. If the inline
7006 frame machinery detected some skipped call sites, we have entered
7007 a new inline function. */
7008
7009 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7010 ecs->event_thread->control.step_frame_id)
00431a78 7011 && inline_skipped_frames (ecs->event_thread))
edb3359d 7012 {
edbcda09 7013 infrun_log_debug ("stepped into inlined function");
edb3359d 7014
51abb421 7015 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 7016
16c381f0 7017 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
7018 {
7019 /* For "step", we're going to stop. But if the call site
7020 for this inlined function is on the same source line as
7021 we were previously stepping, go down into the function
7022 first. Otherwise stop at the call site. */
7023
7024 if (call_sal.line == ecs->event_thread->current_line
7025 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7026 {
7027 step_into_inline_frame (ecs->event_thread);
7028 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7029 {
7030 keep_going (ecs);
7031 return;
7032 }
7033 }
edb3359d 7034
bdc36728 7035 end_stepping_range (ecs);
edb3359d
DJ
7036 return;
7037 }
7038 else
7039 {
7040 /* For "next", we should stop at the call site if it is on a
7041 different source line. Otherwise continue through the
7042 inlined function. */
7043 if (call_sal.line == ecs->event_thread->current_line
7044 && call_sal.symtab == ecs->event_thread->current_symtab)
7045 keep_going (ecs);
7046 else
bdc36728 7047 end_stepping_range (ecs);
edb3359d
DJ
7048 return;
7049 }
7050 }
7051
7052 /* Look for "calls" to inlined functions, part two. If we are still
7053 in the same real function we were stepping through, but we have
7054 to go further up to find the exact frame ID, we are stepping
7055 through a more inlined call beyond its call site. */
7056
7057 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7058 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7059 ecs->event_thread->control.step_frame_id)
edb3359d 7060 && stepped_in_from (get_current_frame (),
16c381f0 7061 ecs->event_thread->control.step_frame_id))
edb3359d 7062 {
edbcda09 7063 infrun_log_debug ("stepping through inlined function");
edb3359d 7064
4a4c04f1
BE
7065 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7066 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7067 keep_going (ecs);
7068 else
bdc36728 7069 end_stepping_range (ecs);
edb3359d
DJ
7070 return;
7071 }
7072
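 /* Check whether we stopped at the first instruction of a source
 line different from the one we were stepping through. */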
8c95582d 7073 bool refresh_step_info = true;
f2ffa92b 7074 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
7075 && (ecs->event_thread->current_line != stop_pc_sal.line
7076 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7077 {
8c95582d
AB
7078 if (stop_pc_sal.is_stmt)
7079 {
7080 /* We are at the start of a different line. So stop. Note that
7081 we don't stop if we step into the middle of a different line.
7082 That is said to make things like for (;;) statements work
7083 better. */
edbcda09 7084 infrun_log_debug ("stepped to a different line");
8c95582d
AB
7085 end_stepping_range (ecs);
7086 return;
7087 }
7088 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7089 ecs->event_thread->control.step_frame_id))
7090 {
7091 /* We are at the start of a different line, however, this line is
7092 not marked as a statement, and we have not changed frame. We
7093 ignore this line table entry, and continue stepping forward,
7094 looking for a better place to stop. */
7095 refresh_step_info = false;
edbcda09
SM
 7096 infrun_log_debug ("stepped to a different line, but "
 7097 "it's not the start of a statement");
8c95582d 7098 }
488f131b 7099 }
c906108c 7100
488f131b 7101 /* We aren't done stepping.
c906108c 7102
488f131b
JB
7103 Optimize by setting the stepping range to the line.
7104 (We might not be in the original line, but if we entered a
7105 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7106 things like for(;;) statements work better.)
7107
7108 If we entered a SAL that indicates a non-statement line table entry,
7109 then we update the stepping range, but we don't update the step info,
7110 which includes things like the line number we are stepping away from.
7111 This means we will stop when we find a line table entry that is marked
7112 as is-statement, even if it matches the non-statement one we just
7113 stepped into. */
c906108c 7114
16c381f0
JK
7115 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7116 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7117 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7118 if (refresh_step_info)
7119 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7120
edbcda09 7121 infrun_log_debug ("keep going");
488f131b 7122 keep_going (ecs);
104c1213
JM
7123}
7124
c447ac0b
PA
7125/* In all-stop mode, if we're currently stepping but have stopped in
7126 some other thread, we may need to switch back to the stepped
 7127 thread. Returns true if we set the inferior running, false if we left
7128 it stopped (and the event needs further processing). */
7129
7130static int
7131switch_back_to_stepped_thread (struct execution_control_state *ecs)
7132{
fbea99ea 7133 if (!target_is_non_stop_p ())
c447ac0b 7134 {
99619bea
PA
7135 struct thread_info *stepping_thread;
7136
7137 /* If any thread is blocked on some internal breakpoint, and we
7138 simply need to step over that breakpoint to get it going
7139 again, do that first. */
7140
7141 /* However, if we see an event for the stepping thread, then we
7142 know all other threads have been moved past their breakpoints
7143 already. Let the caller check whether the step is finished,
7144 etc., before deciding to move it past a breakpoint. */
7145 if (ecs->event_thread->control.step_range_end != 0)
7146 return 0;
7147
7148 /* Check if the current thread is blocked on an incomplete
7149 step-over, interrupted by a random signal. */
7150 if (ecs->event_thread->control.trap_expected
7151 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7152 {
edbcda09
SM
7153 infrun_log_debug ("need to finish step-over of [%s]",
7154 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea
PA
7155 keep_going (ecs);
7156 return 1;
7157 }
2adfaa28 7158
99619bea
PA
7159 /* Check if the current thread is blocked by a single-step
7160 breakpoint of another thread. */
7161 if (ecs->hit_singlestep_breakpoint)
7162 {
edbcda09
SM
7163 infrun_log_debug ("need to step [%s] over single-step breakpoint",
7164 target_pid_to_str (ecs->ptid).c_str ());
99619bea
PA
7165 keep_going (ecs);
7166 return 1;
7167 }
7168
4d9d9d04
PA
7169 /* If this thread needs yet another step-over (e.g., stepping
7170 through a delay slot), do it first before moving on to
7171 another thread. */
7172 if (thread_still_needs_step_over (ecs->event_thread))
7173 {
edbcda09
SM
7174 infrun_log_debug
7175 ("thread [%s] still needs step-over",
7176 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04
PA
7177 keep_going (ecs);
7178 return 1;
7179 }
70509625 7180
483805cf
PA
7181 /* If scheduler locking applies even if not stepping, there's no
7182 need to walk over threads. Above we've checked whether the
7183 current thread is stepping. If some other thread not the
7184 event thread is stepping, then it must be that scheduler
7185 locking is not in effect. */
856e7dd6 7186 if (schedlock_applies (ecs->event_thread))
483805cf
PA
7187 return 0;
7188
4d9d9d04
PA
7189 /* Otherwise, we no longer expect a trap in the current thread.
7190 Clear the trap_expected flag before switching back -- this is
7191 what keep_going does as well, if we call it. */
7192 ecs->event_thread->control.trap_expected = 0;
7193
7194 /* Likewise, clear the signal if it should not be passed. */
7195 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7196 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7197
7198 /* Do all pending step-overs before actually proceeding with
483805cf 7199 step/next/etc. */
4d9d9d04
PA
7200 if (start_step_over ())
7201 {
7202 prepare_to_wait (ecs);
7203 return 1;
7204 }
7205
7206 /* Look for the stepping/nexting thread. */
483805cf 7207 stepping_thread = NULL;
4d9d9d04 7208
08036331 7209 for (thread_info *tp : all_non_exited_threads ())
483805cf 7210 {
f3f8ece4
PA
7211 switch_to_thread_no_regs (tp);
7212
fbea99ea
PA
7213 /* Ignore threads of processes the caller is not
7214 resuming. */
483805cf 7215 if (!sched_multi
5b6d1e4f
PA
7216 && (tp->inf->process_target () != ecs->target
7217 || tp->inf->pid != ecs->ptid.pid ()))
483805cf
PA
7218 continue;
7219
7220 /* When stepping over a breakpoint, we lock all threads
7221 except the one that needs to move past the breakpoint.
7222 If a non-event thread has this set, the "incomplete
7223 step-over" check above should have caught it earlier. */
372316f1
PA
7224 if (tp->control.trap_expected)
7225 {
7226 internal_error (__FILE__, __LINE__,
7227 "[%s] has inconsistent state: "
7228 "trap_expected=%d\n",
a068643d 7229 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
7230 tp->control.trap_expected);
7231 }
483805cf
PA
7232
7233 /* Did we find the stepping thread? */
7234 if (tp->control.step_range_end)
7235 {
 7237 /* Yep. There should only be one though. */
7237 gdb_assert (stepping_thread == NULL);
7238
7239 /* The event thread is handled at the top, before we
7240 enter this loop. */
7241 gdb_assert (tp != ecs->event_thread);
7242
7243 /* If some thread other than the event thread is
7244 stepping, then scheduler locking can't be in effect,
7245 otherwise we wouldn't have resumed the current event
7246 thread in the first place. */
856e7dd6 7247 gdb_assert (!schedlock_applies (tp));
483805cf
PA
7248
7249 stepping_thread = tp;
7250 }
99619bea
PA
7251 }
7252
483805cf 7253 if (stepping_thread != NULL)
99619bea 7254 {
edbcda09 7255 infrun_log_debug ("switching back to stepped thread");
c447ac0b 7256
2ac7589c
PA
7257 if (keep_going_stepped_thread (stepping_thread))
7258 {
7259 prepare_to_wait (ecs);
7260 return 1;
7261 }
7262 }
f3f8ece4
PA
7263
7264 switch_to_thread (ecs->event_thread);
2ac7589c 7265 }
2adfaa28 7266
2ac7589c
PA
7267 return 0;
7268}
2adfaa28 7269
2ac7589c
PA
7270/* Set a previously stepped thread back to stepping. Returns true on
7271 success, false if the resume is not possible (e.g., the thread
7272 vanished). */
7273
7274static int
7275keep_going_stepped_thread (struct thread_info *tp)
7276{
7277 struct frame_info *frame;
2ac7589c
PA
7278 struct execution_control_state ecss;
7279 struct execution_control_state *ecs = &ecss;
2adfaa28 7280
2ac7589c
PA
7281 /* If the stepping thread exited, then don't try to switch back and
7282 resume it, which could fail in several different ways depending
7283 on the target. Instead, just keep going.
2adfaa28 7284
2ac7589c
PA
7285 We can find a stepping dead thread in the thread list in two
7286 cases:
2adfaa28 7287
2ac7589c
PA
7288 - The target supports thread exit events, and when the target
7289 tries to delete the thread from the thread list, inferior_ptid
7290 pointed at the exiting thread. In such case, calling
7291 delete_thread does not really remove the thread from the list;
7292 instead, the thread is left listed, with 'exited' state.
64ce06e4 7293
2ac7589c
PA
7294 - The target's debug interface does not support thread exit
7295 events, and so we have no idea whatsoever if the previously
7296 stepping thread is still alive. For that reason, we need to
7297 synchronously query the target now. */
2adfaa28 7298
00431a78 7299 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 7300 {
edbcda09
SM
7301 infrun_log_debug ("not resuming previously stepped thread, it has "
7302 "vanished");
2ac7589c 7303
00431a78 7304 delete_thread (tp);
2ac7589c 7305 return 0;
c447ac0b 7306 }
2ac7589c 7307
edbcda09 7308 infrun_log_debug ("resuming previously stepped thread");
2ac7589c
PA
7309
7310 reset_ecs (ecs, tp);
00431a78 7311 switch_to_thread (tp);
2ac7589c 7312
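 /* Now that TP is the current thread again, refresh its stop PC
 and re-fetch the current frame. */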
f2ffa92b 7313 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7314 frame = get_current_frame ();
2ac7589c
PA
7315
7316 /* If the PC of the thread we were trying to single-step has
7317 changed, then that thread has trapped or been signaled, but the
7318 event has not been reported to GDB yet. Re-poll the target
7319 looking for this particular thread's event (i.e. temporarily
7320 enable schedlock) by:
7321
7322 - setting a break at the current PC
7323 - resuming that particular thread, only (by setting trap
7324 expected)
7325
7326 This prevents us continuously moving the single-step breakpoint
7327 forward, one instruction at a time, overstepping. */
7328
f2ffa92b 7329 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7330 {
7331 ptid_t resume_ptid;
7332
edbcda09
SM
7333 infrun_log_debug ("expected thread advanced also (%s -> %s)",
7334 paddress (target_gdbarch (), tp->prev_pc),
7335 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7336
7337 /* Clear the info of the previous step-over, as it's no longer
7338 valid (if the thread was trying to step over a breakpoint, it
7339 has already succeeded). It's what keep_going would do too,
7340 if we called it. Do this before trying to insert the sss
7341 breakpoint, otherwise if we were previously trying to step
7342 over this exact address in another thread, the breakpoint is
7343 skipped. */
7344 clear_step_over_info ();
7345 tp->control.trap_expected = 0;
7346
7347 insert_single_step_breakpoint (get_frame_arch (frame),
7348 get_frame_address_space (frame),
f2ffa92b 7349 tp->suspend.stop_pc);
2ac7589c 7350
719546c4 7351 tp->resumed = true;
fbea99ea 7352 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7353 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7354 }
7355 else
7356 {
edbcda09 7357 infrun_log_debug ("expected thread still hasn't advanced");
2ac7589c
PA
7358
7359 keep_going_pass_signal (ecs);
7360 }
7361 return 1;
c447ac0b
PA
7362}
7363
8b061563
PA
7364/* Is thread TP in the middle of (software or hardware)
7365 single-stepping? (Note the result of this function must never be
7366 passed directly as target_resume's STEP parameter.) */
104c1213 7367
a289b8f6 7368static int
b3444185 7369currently_stepping (struct thread_info *tp)
a7212384 7370{
8358c15c
JK
7371 return ((tp->control.step_range_end
7372 && tp->control.step_resume_breakpoint == NULL)
7373 || tp->control.trap_expected
af48d08f 7374 || tp->stepped_breakpoint
8358c15c 7375 || bpstat_should_step ());
a7212384
UW
7376}
7377
b2175913
MS
7378/* Inferior has stepped into a subroutine call with source code that
7379 we should not step over. Do step to the first line of code in
7380 it. */
c2c6d25f
JM
7381
7382static void
568d6575
UW
7383handle_step_into_function (struct gdbarch *gdbarch,
7384 struct execution_control_state *ecs)
c2c6d25f 7385{
7e324e48
GB
7386 fill_in_stop_func (gdbarch, ecs);
7387
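 /* Unless the code is assembly, skip the prologue so that we stop
 at the first line of real code in the callee. */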
f2ffa92b
PA
7388 compunit_symtab *cust
7389 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7390 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7391 ecs->stop_func_start
7392 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7393
51abb421 7394 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7395 /* Use the step_resume_break to step until the end of the prologue,
7396 even if that involves jumps (as it seems to on the vax under
7397 4.2). */
7398 /* If the prologue ends in the middle of a source line, continue to
7399 the end of that source line (if it is still within the function).
7400 Otherwise, just go to end of prologue. */
2afb61aa
PA
7401 if (stop_func_sal.end
7402 && stop_func_sal.pc != ecs->stop_func_start
7403 && stop_func_sal.end < ecs->stop_func_end)
7404 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7405
2dbd5e30
KB
7406 /* Architectures which require breakpoint adjustment might not be able
7407 to place a breakpoint at the computed address. If so, the test
7408 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7409 ecs->stop_func_start to an address at which a breakpoint may be
7410 legitimately placed.
8fb3e588 7411
2dbd5e30
KB
7412 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7413 made, GDB will enter an infinite loop when stepping through
7414 optimized code consisting of VLIW instructions which contain
7415 subinstructions corresponding to different source lines. On
7416 FR-V, it's not permitted to place a breakpoint on any but the
7417 first subinstruction of a VLIW instruction. When a breakpoint is
7418 set, GDB will adjust the breakpoint address to the beginning of
7419 the VLIW instruction. Thus, we need to make the corresponding
7420 adjustment here when computing the stop address. */
8fb3e588 7421
568d6575 7422 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7423 {
7424 ecs->stop_func_start
568d6575 7425 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7426 ecs->stop_func_start);
2dbd5e30
KB
7427 }
7428
f2ffa92b 7429 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7430 {
7431 /* We are already there: stop now. */
bdc36728 7432 end_stepping_range (ecs);
c2c6d25f
JM
7433 return;
7434 }
7435 else
7436 {
7437 /* Put the step-breakpoint there and go until there. */
51abb421 7438 symtab_and_line sr_sal;
c2c6d25f
JM
7439 sr_sal.pc = ecs->stop_func_start;
7440 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7441 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7442
c2c6d25f 7443 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7444 some machines the prologue is where the new fp value is
7445 established. */
a6d9a66e 7446 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7447
7448 /* And make sure stepping stops right away then. */
16c381f0
JK
7449 ecs->event_thread->control.step_range_end
7450 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7451 }
7452 keep_going (ecs);
7453}
d4f3574e 7454
b2175913
MS
7455/* Inferior has stepped backward into a subroutine call with source
7456 code that we should not step over. Do step to the beginning of the
7457 last line of code in it. */
7458
7459static void
568d6575
UW
7460handle_step_into_function_backward (struct gdbarch *gdbarch,
7461 struct execution_control_state *ecs)
b2175913 7462{
43f3e411 7463 struct compunit_symtab *cust;
167e4384 7464 struct symtab_and_line stop_func_sal;
b2175913 7465
7e324e48
GB
7466 fill_in_stop_func (gdbarch, ecs);
7467
f2ffa92b 7468 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7469 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7470 ecs->stop_func_start
7471 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7472
f2ffa92b 7473 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7474
7475 /* OK, we're just going to keep stepping here. */
f2ffa92b 7476 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7477 {
7478 /* We're there already. Just stop stepping now. */
bdc36728 7479 end_stepping_range (ecs);
b2175913
MS
7480 }
7481 else
7482 {
7483 /* Else just reset the step range and keep going.
7484 No step-resume breakpoint, they don't work for
7485 epilogues, which can have multiple entry paths. */
16c381f0
JK
7486 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7487 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7488 keep_going (ecs);
7489 }
7490 return;
7491}
7492
d3169d93 7493/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
 7494 This is used both to step over functions and to skip over code. */
7495
7496static void
2c03e5be
PA
7497insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7498 struct symtab_and_line sr_sal,
7499 struct frame_id sr_id,
7500 enum bptype sr_type)
44cbf7b5 7501{
611c83ae
PA
7502 /* There should never be more than one step-resume or longjmp-resume
7503 breakpoint per thread, so we should never be setting a new
44cbf7b5 7504 step_resume_breakpoint when one is already active. */
8358c15c 7505 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7506 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 7507
edbcda09
SM
7508 infrun_log_debug ("inserting step-resume breakpoint at %s",
7509 paddress (gdbarch, sr_sal.pc));
d3169d93 7510
8358c15c 7511 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7512 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7513}
7514
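/* Insert a "step-resume breakpoint" of type bp_step_resume at SR_SAL
 with frame ID SR_ID; a thin wrapper around
 insert_step_resume_breakpoint_at_sal_1. */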
9da8c2a0 7515void
2c03e5be
PA
7516insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7517 struct symtab_and_line sr_sal,
7518 struct frame_id sr_id)
7519{
7520 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7521 sr_sal, sr_id,
7522 bp_step_resume);
44cbf7b5 7523}
7ce450bd 7524
2c03e5be
PA
7525/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7526 This is used to skip a potential signal handler.
7ce450bd 7527
14e60db5
DJ
7528 This is called with the interrupted function's frame. The signal
7529 handler, when it returns, will resume the interrupted function at
7530 RETURN_FRAME.pc. */
d303a6c7
AC
7531
7532static void
2c03e5be 7533insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7534{
f4c1edd8 7535 gdb_assert (return_frame != NULL);
d303a6c7 7536
51abb421
PA
7537 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7538
7539 symtab_and_line sr_sal;
568d6575 7540 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7541 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7542 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7543
2c03e5be
PA
7544 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7545 get_stack_frame_id (return_frame),
7546 bp_hp_step_resume);
d303a6c7
AC
7547}
7548
2c03e5be
PA
7549/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7550 is used to skip a function after stepping into it (for "next" or if
7551 the called function has no debugging information).
14e60db5
DJ
7552
7553 The current function has almost always been reached by single
7554 stepping a call or return instruction. NEXT_FRAME belongs to the
7555 current function, and the breakpoint will be set at the caller's
7556 resume address.
7557
7558 This is a separate function rather than reusing
2c03e5be 7559 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7560 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7561 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7562
7563static void
7564insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7565{
14e60db5
DJ
7566 /* We shouldn't have gotten here if we don't know where the call site
7567 is. */
c7ce8faa 7568 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7569
51abb421 7570 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7571
51abb421 7572 symtab_and_line sr_sal;
c7ce8faa
DJ
7573 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7574 frame_unwind_caller_pc (next_frame));
14e60db5 7575 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7576 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7577
a6d9a66e 7578 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7579 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7580}
7581
611c83ae
PA
7582/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7583 new breakpoint at the target of a jmp_buf. The handling of
7584 longjmp-resume uses the same mechanisms used for handling
7585 "step-resume" breakpoints. */
7586
7587static void
a6d9a66e 7588insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7589{
e81a37f7
TT
7590 /* There should never be more than one longjmp-resume breakpoint per
7591 thread, so we should never be setting a new
611c83ae 7592 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7593 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae 7594
edbcda09
SM
7595 infrun_log_debug ("inserting longjmp-resume breakpoint at %s",
7596 paddress (gdbarch, pc));
611c83ae 7597
e81a37f7 7598 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7599 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7600}
7601
186c406b
TT
7602/* Insert an exception resume breakpoint. TP is the thread throwing
7603 the exception. The block B is the block of the unwinder debug hook
7604 function. FRAME is the frame corresponding to the call to this
7605 function. SYM is the symbol of the function argument holding the
7606 target PC of the exception. */
7607
7608static void
7609insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7610 const struct block *b,
186c406b
TT
7611 struct frame_info *frame,
7612 struct symbol *sym)
7613{
a70b8144 7614 try
186c406b 7615 {
63e43d3a 7616 struct block_symbol vsym;
186c406b
TT
7617 struct value *value;
7618 CORE_ADDR handler;
7619 struct breakpoint *bp;
7620
987012b8 7621 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7622 b, VAR_DOMAIN);
63e43d3a 7623 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7624 /* If the value was optimized out, revert to the old behavior. */
7625 if (! value_optimized_out (value))
7626 {
7627 handler = value_as_address (value);
7628
edbcda09
SM
7629 infrun_log_debug ("exception resume at %lx",
7630 (unsigned long) handler);
186c406b
TT
7631
7632 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7633 handler,
7634 bp_exception_resume).release ();
c70a6932
JK
7635
7636 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7637 frame = NULL;
7638
5d5658a1 7639 bp->thread = tp->global_num;
186c406b
TT
7640 inferior_thread ()->control.exception_resume_breakpoint = bp;
7641 }
7642 }
230d2906 7643 catch (const gdb_exception_error &e)
492d29ea
PA
7644 {
7645 /* We want to ignore errors here. */
7646 }
186c406b
TT
7647}
7648
28106bc2
SDJ
7649/* A helper for check_exception_resume that sets an
7650 exception-breakpoint based on a SystemTap probe. */
7651
7652static void
7653insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7654 const struct bound_probe *probe,
28106bc2
SDJ
7655 struct frame_info *frame)
7656{
7657 struct value *arg_value;
7658 CORE_ADDR handler;
7659 struct breakpoint *bp;
7660
7661 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7662 if (!arg_value)
7663 return;
7664
7665 handler = value_as_address (arg_value);
7666
edbcda09
SM
7667 infrun_log_debug ("exception resume at %s",
7668 paddress (probe->objfile->arch (), handler));
28106bc2
SDJ
7669
7670 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7671 handler, bp_exception_resume).release ();
5d5658a1 7672 bp->thread = tp->global_num;
28106bc2
SDJ
7673 inferior_thread ()->control.exception_resume_breakpoint = bp;
7674}
7675
186c406b
TT
7676/* This is called when an exception has been intercepted. Check to
7677 see whether the exception's destination is of interest, and if so,
7678 set an exception resume breakpoint there. */
7679
7680static void
7681check_exception_resume (struct execution_control_state *ecs,
28106bc2 7682 struct frame_info *frame)
186c406b 7683{
729662a5 7684 struct bound_probe probe;
28106bc2
SDJ
7685 struct symbol *func;
7686
7687 /* First see if this exception unwinding breakpoint was set via a
7688 SystemTap probe point. If so, the probe has two arguments: the
7689 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7690 set a breakpoint there. */
6bac7473 7691 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7692 if (probe.prob)
28106bc2 7693 {
729662a5 7694 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7695 return;
7696 }
7697
7698 func = get_frame_function (frame);
7699 if (!func)
7700 return;
186c406b 7701
a70b8144 7702 try
186c406b 7703 {
3977b71f 7704 const struct block *b;
8157b174 7705 struct block_iterator iter;
186c406b
TT
7706 struct symbol *sym;
7707 int argno = 0;
7708
7709 /* The exception breakpoint is a thread-specific breakpoint on
7710 the unwinder's debug hook, declared as:
7711
7712 void _Unwind_DebugHook (void *cfa, void *handler);
7713
7714 The CFA argument indicates the frame to which control is
7715 about to be transferred. HANDLER is the destination PC.
7716
7717 We ignore the CFA and set a temporary breakpoint at HANDLER.
7718 This is not extremely efficient but it avoids issues in gdb
7719 with computing the DWARF CFA, and it also works even in weird
7720 cases such as throwing an exception from inside a signal
7721 handler. */
7722
7723 b = SYMBOL_BLOCK_VALUE (func);
7724 ALL_BLOCK_SYMBOLS (b, iter, sym)
7725 {
7726 if (!SYMBOL_IS_ARGUMENT (sym))
7727 continue;
7728
7729 if (argno == 0)
7730 ++argno;
7731 else
7732 {
7733 insert_exception_resume_breakpoint (ecs->event_thread,
7734 b, frame, sym);
7735 break;
7736 }
7737 }
7738 }
230d2906 7739 catch (const gdb_exception_error &e)
492d29ea
PA
7740 {
7741 }
186c406b
TT
7742}
7743
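/* Arrange for the caller to stop waiting for the inferior: clear
 ECS->wait_some_more and, in all-stop mode with a non-stop target
 present, stop all threads now that the stop is being presented to
 the user. */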
104c1213 7744static void
22bcd14b 7745stop_waiting (struct execution_control_state *ecs)
104c1213 7746{
edbcda09 7747 infrun_log_debug ("stop_waiting");
527159b7 7748
cd0fc7c3
SS
7749 /* Let callers know we don't want to wait for the inferior anymore. */
7750 ecs->wait_some_more = 0;
fbea99ea 7751
53cccef1 7752 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 7753 threads now that we're presenting the stop to the user. */
53cccef1 7754 if (!non_stop && exists_non_stop_target ())
fbea99ea 7755 stop_all_threads ();
cd0fc7c3
SS
7756}
7757
4d9d9d04
PA
7758/* Like keep_going, but passes the signal to the inferior, even if the
7759 signal is set to nopass. */
d4f3574e
SS
7760
7761static void
4d9d9d04 7762keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7763{
d7e15655 7764 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7765 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7766
d4f3574e 7767 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7768 ecs->event_thread->prev_pc
fc75c28b 7769 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 7770
4d9d9d04 7771 if (ecs->event_thread->control.trap_expected)
d4f3574e 7772 {
4d9d9d04
PA
7773 struct thread_info *tp = ecs->event_thread;
7774
edbcda09
SM
7775 infrun_log_debug ("%s has trap_expected set, "
7776 "resuming to collect trap",
7777 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7778
a9ba6bae
PA
7779 /* We haven't yet gotten our trap, and either: intercepted a
7780 non-signal event (e.g., a fork); or took a signal which we
7781 are supposed to pass through to the inferior. Simply
7782 continue. */
64ce06e4 7783 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7784 }
372316f1
PA
7785 else if (step_over_info_valid_p ())
7786 {
7787 /* Another thread is stepping over a breakpoint in-line. If
7788 this thread needs a step-over too, queue the request. In
7789 either case, this resume must be deferred for later. */
7790 struct thread_info *tp = ecs->event_thread;
7791
7792 if (ecs->hit_singlestep_breakpoint
7793 || thread_still_needs_step_over (tp))
7794 {
edbcda09
SM
7795 infrun_log_debug ("step-over already in progress: "
7796 "step-over for %s deferred",
7797 target_pid_to_str (tp->ptid).c_str ());
7bd43605 7798 global_thread_step_over_chain_enqueue (tp);
372316f1
PA
7799 }
7800 else
7801 {
edbcda09
SM
7802 infrun_log_debug ("step-over in progress: resume of %s deferred",
7803 target_pid_to_str (tp->ptid).c_str ());
372316f1 7804 }
372316f1 7805 }
d4f3574e
SS
7806 else
7807 {
31e77af2 7808 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7809 int remove_bp;
7810 int remove_wps;
8d297bbf 7811 step_over_what step_what;
31e77af2 7812
d4f3574e 7813 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7814 anyway (if we got a signal, the user asked it be passed to
7815 the child)
7816 -- or --
7817 We got our expected trap, but decided we should resume from
7818 it.
d4f3574e 7819
a9ba6bae 7820 We're going to run this baby now!
d4f3574e 7821
c36b740a
VP
7822 Note that insert_breakpoints won't try to re-insert
7823 already inserted breakpoints. Therefore, we don't
7824 care if breakpoints were already inserted, or not. */
a9ba6bae 7825
31e77af2
PA
7826 /* If we need to step over a breakpoint, and we're not using
7827 displaced stepping to do so, insert all breakpoints
7828 (watchpoints, etc.) but the one we're stepping over, step one
7829 instruction, and then re-insert the breakpoint when that step
7830 is finished. */
963f9c80 7831
6c4cfb24
PA
7832 step_what = thread_still_needs_step_over (ecs->event_thread);
7833
963f9c80 7834 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7835 || (step_what & STEP_OVER_BREAKPOINT));
7836 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7837
cb71640d
PA
7838 /* We can't use displaced stepping if we need to step past a
7839 watchpoint. The instruction copied to the scratch pad would
7840 still trigger the watchpoint. */
7841 if (remove_bp
3fc8eb30 7842 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7843 {
a01bda52 7844 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7845 regcache_read_pc (regcache), remove_wps,
7846 ecs->event_thread->global_num);
45e8c884 7847 }
963f9c80 7848 else if (remove_wps)
21edc42f 7849 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7850
7851 /* If we now need to do an in-line step-over, we need to stop
7852 all other threads. Note this must be done before
7853 insert_breakpoints below, because that removes the breakpoint
7854 we're about to step over, otherwise other threads could miss
7855 it. */
fbea99ea 7856 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7857 stop_all_threads ();
abbb1732 7858
31e77af2 7859 /* Stop stepping if inserting breakpoints fails. */
a70b8144 7860 try
31e77af2
PA
7861 {
7862 insert_breakpoints ();
7863 }
230d2906 7864 catch (const gdb_exception_error &e)
31e77af2
PA
7865 {
7866 exception_print (gdb_stderr, e);
22bcd14b 7867 stop_waiting (ecs);
bdf2a94a 7868 clear_step_over_info ();
31e77af2 7869 return;
d4f3574e
SS
7870 }
7871
963f9c80 7872 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7873
64ce06e4 7874 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7875 }
7876
488f131b 7877 prepare_to_wait (ecs);
d4f3574e
SS
7878}
7879
4d9d9d04
PA
7880/* Called when we should continue running the inferior, because the
7881 current event doesn't cause a user visible stop. This does the
7882 resuming part; waiting for the next event is done elsewhere. */
7883
7884static void
7885keep_going (struct execution_control_state *ecs)
7886{
7887 if (ecs->event_thread->control.trap_expected
7888 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7889 ecs->event_thread->control.trap_expected = 0;
7890
7891 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7892 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7893 keep_going_pass_signal (ecs);
7894}
7895
104c1213
JM
7896/* This function normally comes after a resume, before
7897 handle_inferior_event exits. It takes care of any last bits of
7898 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7899
104c1213
JM
7900static void
7901prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7902{
edbcda09 7903 infrun_log_debug ("prepare_to_wait");
104c1213 7904
104c1213 7905 ecs->wait_some_more = 1;
0b333c5e 7906
0e2dba2d
PA
7907 /* If the target can't async, emulate it by marking the infrun event
7908 handler such that as soon as we get back to the event-loop, we
7909 immediately end up in fetch_inferior_event again calling
7910 target_wait. */
7911 if (!target_can_async_p ())
0b333c5e 7912 mark_infrun_async_event_handler ();
c906108c 7913}
11cf8741 7914
fd664c91 7915/* We are done with the step range of a step/next/si/ni command.
b57bacec 7916 Called once for each n of a "step n" operation. */
fd664c91
PA
7917
7918static void
bdc36728 7919end_stepping_range (struct execution_control_state *ecs)
fd664c91 7920{
bdc36728 7921 ecs->event_thread->control.stop_step = 1;
bdc36728 7922 stop_waiting (ecs);
fd664c91
PA
7923}
7924
33d62d64
JK
7925/* Several print_*_reason functions to print why the inferior has stopped.
7926 We always print something when the inferior exits, or receives a signal.
7927 The rest of the cases are dealt with later on in normal_stop and
7928 print_it_typical. Ideally there should be a call to one of these
 7929 print_*_reason functions from handle_inferior_event each time
22bcd14b 7930 stop_waiting is called.
33d62d64 7931
fd664c91
PA
7932 Note that we don't call these directly, instead we delegate that to
7933 the interpreters, through observers. Interpreters then call these
7934 with whatever uiout is right. */
33d62d64 7935
fd664c91
PA
7936void
7937print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7938{
fd664c91 7939 /* For CLI-like interpreters, print nothing. */
33d62d64 7940
112e8700 7941 if (uiout->is_mi_like_p ())
fd664c91 7942 {
112e8700 7943 uiout->field_string ("reason",
fd664c91
PA
7944 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7945 }
7946}
33d62d64 7947
fd664c91
PA
7948void
7949print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7950{
33d62d64 7951 annotate_signalled ();
112e8700
SM
7952 if (uiout->is_mi_like_p ())
7953 uiout->field_string
7954 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7955 uiout->text ("\nProgram terminated with signal ");
33d62d64 7956 annotate_signal_name ();
112e8700 7957 uiout->field_string ("signal-name",
2ea28649 7958 gdb_signal_to_name (siggnal));
33d62d64 7959 annotate_signal_name_end ();
112e8700 7960 uiout->text (", ");
33d62d64 7961 annotate_signal_string ();
112e8700 7962 uiout->field_string ("signal-meaning",
2ea28649 7963 gdb_signal_to_string (siggnal));
33d62d64 7964 annotate_signal_string_end ();
112e8700
SM
7965 uiout->text (".\n");
7966 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7967}
7968
fd664c91
PA
7969void
7970print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7971{
fda326dd 7972 struct inferior *inf = current_inferior ();
a068643d 7973 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 7974
33d62d64
JK
7975 annotate_exited (exitstatus);
7976 if (exitstatus)
7977 {
112e8700
SM
7978 if (uiout->is_mi_like_p ())
7979 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
7980 std::string exit_code_str
7981 = string_printf ("0%o", (unsigned int) exitstatus);
7982 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
7983 plongest (inf->num), pidstr.c_str (),
7984 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
7985 }
7986 else
11cf8741 7987 {
112e8700
SM
7988 if (uiout->is_mi_like_p ())
7989 uiout->field_string
7990 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
7991 uiout->message ("[Inferior %s (%s) exited normally]\n",
7992 plongest (inf->num), pidstr.c_str ());
33d62d64 7993 }
33d62d64
JK
7994}
7995
012b3a21
WT
7996/* Some targets/architectures can do extra processing/display of
7997 segmentation faults. E.g., Intel MPX boundary faults.
7998 Call the architecture dependent function to handle the fault. */
7999
8000static void
8001handle_segmentation_fault (struct ui_out *uiout)
8002{
8003 struct regcache *regcache = get_current_regcache ();
ac7936df 8004 struct gdbarch *gdbarch = regcache->arch ();
012b3a21
WT
8005
8006 if (gdbarch_handle_segmentation_fault_p (gdbarch))
8007 gdbarch_handle_segmentation_fault (gdbarch, uiout);
8008}
8009
fd664c91
PA
8010void
8011print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 8012{
f303dbd6
PA
8013 struct thread_info *thr = inferior_thread ();
8014
33d62d64
JK
8015 annotate_signal ();
8016
112e8700 8017 if (uiout->is_mi_like_p ())
f303dbd6
PA
8018 ;
8019 else if (show_thread_that_caused_stop ())
33d62d64 8020 {
f303dbd6 8021 const char *name;
33d62d64 8022
112e8700 8023 uiout->text ("\nThread ");
33eca680 8024 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
8025
8026 name = thr->name != NULL ? thr->name : target_thread_name (thr);
8027 if (name != NULL)
8028 {
112e8700 8029 uiout->text (" \"");
33eca680 8030 uiout->field_string ("name", name);
112e8700 8031 uiout->text ("\"");
f303dbd6 8032 }
33d62d64 8033 }
f303dbd6 8034 else
112e8700 8035 uiout->text ("\nProgram");
f303dbd6 8036
112e8700
SM
8037 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8038 uiout->text (" stopped");
33d62d64
JK
8039 else
8040 {
112e8700 8041 uiout->text (" received signal ");
8b93c638 8042 annotate_signal_name ();
112e8700
SM
8043 if (uiout->is_mi_like_p ())
8044 uiout->field_string
8045 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8046 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8047 annotate_signal_name_end ();
112e8700 8048 uiout->text (", ");
8b93c638 8049 annotate_signal_string ();
112e8700 8050 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
8051
8052 if (siggnal == GDB_SIGNAL_SEGV)
8053 handle_segmentation_fault (uiout);
8054
8b93c638 8055 annotate_signal_string_end ();
33d62d64 8056 }
112e8700 8057 uiout->text (".\n");
33d62d64 8058}
252fbfc8 8059
fd664c91
PA
8060void
8061print_no_history_reason (struct ui_out *uiout)
33d62d64 8062{
112e8700 8063 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8064}
43ff13b4 8065
0c7e1a46
PA
8066/* Print current location without a level number, if we have changed
8067 functions or hit a breakpoint. Print source line if we have one.
8068 bpstat_print contains the logic deciding in detail what to print,
8069 based on the event(s) that just occurred. */
8070
243a9253
PA
8071static void
8072print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8073{
8074 int bpstat_ret;
f486487f 8075 enum print_what source_flag;
0c7e1a46
PA
8076 int do_frame_printing = 1;
8077 struct thread_info *tp = inferior_thread ();
8078
8079 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8080 switch (bpstat_ret)
8081 {
8082 case PRINT_UNKNOWN:
8083 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8084 should) carry around the function and does (or should) use
8085 that when doing a frame comparison. */
8086 if (tp->control.stop_step
8087 && frame_id_eq (tp->control.step_frame_id,
8088 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8089 && (tp->control.step_start_function
8090 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8091 {
8092 /* Finished step, just print source line. */
8093 source_flag = SRC_LINE;
8094 }
8095 else
8096 {
8097 /* Print location and source line. */
8098 source_flag = SRC_AND_LOC;
8099 }
8100 break;
8101 case PRINT_SRC_AND_LOC:
8102 /* Print location and source line. */
8103 source_flag = SRC_AND_LOC;
8104 break;
8105 case PRINT_SRC_ONLY:
8106 source_flag = SRC_LINE;
8107 break;
8108 case PRINT_NOTHING:
8109 /* Something bogus. */
8110 source_flag = SRC_LINE;
8111 do_frame_printing = 0;
8112 break;
8113 default:
8114 internal_error (__FILE__, __LINE__, _("Unknown value."));
8115 }
8116
8117 /* The behavior of this routine with respect to the source
8118 flag is:
8119 SRC_LINE: Print only source line
8120 LOCATION: Print only location
8121 SRC_AND_LOC: Print location and source line. */
8122 if (do_frame_printing)
8123 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8124}
8125
243a9253
PA
8126/* See infrun.h. */
8127
8128void
4c7d57e7 8129print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8130{
243a9253 8131 struct target_waitstatus last;
243a9253
PA
8132 struct thread_info *tp;
8133
5b6d1e4f 8134 get_last_target_status (nullptr, nullptr, &last);
243a9253 8135
67ad9399
TT
8136 {
8137 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8138
67ad9399 8139 print_stop_location (&last);
243a9253 8140
67ad9399 8141 /* Display the auto-display expressions. */
4c7d57e7
TT
8142 if (displays)
8143 do_displays ();
67ad9399 8144 }
243a9253
PA
8145
8146 tp = inferior_thread ();
8147 if (tp->thread_fsm != NULL
46e3ed7f 8148 && tp->thread_fsm->finished_p ())
243a9253
PA
8149 {
8150 struct return_value_info *rv;
8151
46e3ed7f 8152 rv = tp->thread_fsm->return_value ();
243a9253
PA
8153 if (rv != NULL)
8154 print_return_value (uiout, rv);
8155 }
0c7e1a46
PA
8156}
8157
388a7084
PA
8158/* See infrun.h. */
8159
8160void
8161maybe_remove_breakpoints (void)
8162{
8163 if (!breakpoints_should_be_inserted_now () && target_has_execution)
8164 {
8165 if (remove_breakpoints ())
8166 {
223ffa71 8167 target_terminal::ours_for_output ();
388a7084
PA
8168 printf_filtered (_("Cannot remove breakpoints because "
8169 "program is no longer writable.\nFurther "
8170 "execution is probably impossible.\n"));
8171 }
8172 }
8173}
8174
4c2f2a79
PA
8175/* The execution context that just caused a normal stop. */
8176
8177struct stop_context
8178{
2d844eaf
TT
8179 stop_context ();
8180 ~stop_context ();
8181
8182 DISABLE_COPY_AND_ASSIGN (stop_context);
8183
8184 bool changed () const;
8185
4c2f2a79
PA
8186 /* The stop ID. */
8187 ULONGEST stop_id;
c906108c 8188
4c2f2a79 8189 /* The event PTID. */
c906108c 8190
4c2f2a79
PA
8191 ptid_t ptid;
8192
 8193 /* If stopped for a thread event, this is the thread that caused the
8194 stop. */
8195 struct thread_info *thread;
8196
8197 /* The inferior that caused the stop. */
8198 int inf_num;
8199};
8200
2d844eaf 8201/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8202 takes a strong reference to the thread. */
8203
2d844eaf 8204stop_context::stop_context ()
4c2f2a79 8205{
2d844eaf
TT
8206 stop_id = get_stop_id ();
8207 ptid = inferior_ptid;
8208 inf_num = current_inferior ()->num;
4c2f2a79 8209
d7e15655 8210 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8211 {
8212 /* Take a strong reference so that the thread can't be deleted
8213 yet. */
2d844eaf
TT
8214 thread = inferior_thread ();
8215 thread->incref ();
4c2f2a79
PA
8216 }
8217 else
2d844eaf 8218 thread = NULL;
4c2f2a79
PA
8219}
8220
 8221/* Release a stop context; this drops the strong reference taken on the
 8222 thread, if any. */
8223
2d844eaf 8224stop_context::~stop_context ()
4c2f2a79 8225{
2d844eaf
TT
8226 if (thread != NULL)
8227 thread->decref ();
4c2f2a79
PA
8228}
8229
8230/* Return true if the current context no longer matches the saved stop
8231 context. */
8232
2d844eaf
TT
8233bool
8234stop_context::changed () const
8235{
8236 if (ptid != inferior_ptid)
8237 return true;
8238 if (inf_num != current_inferior ()->num)
8239 return true;
8240 if (thread != NULL && thread->state != THREAD_STOPPED)
8241 return true;
8242 if (get_stop_id () != stop_id)
8243 return true;
8244 return false;
4c2f2a79
PA
8245}
8246
8247/* See infrun.h. */
8248
8249int
96baa820 8250normal_stop (void)
c906108c 8251{
73b65bb0 8252 struct target_waitstatus last;
73b65bb0 8253
5b6d1e4f 8254 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8255
4c2f2a79
PA
8256 new_stop_id ();
8257
29f49a6a
PA
8258 /* If an exception is thrown from this point on, make sure to
8259 propagate GDB's knowledge of the executing state to the
8260 frontend/user running state. A QUIT is an easy exception to see
8261 here, so do this before any filtered output. */
731f534f 8262
5b6d1e4f 8263 ptid_t finish_ptid = null_ptid;
731f534f 8264
c35b1492 8265 if (!non_stop)
5b6d1e4f 8266 finish_ptid = minus_one_ptid;
e1316e60
PA
8267 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8268 || last.kind == TARGET_WAITKIND_EXITED)
8269 {
8270 /* On some targets, we may still have live threads in the
8271 inferior when we get a process exit event. E.g., for
8272 "checkpoint", when the current checkpoint/fork exits,
8273 linux-fork.c automatically switches to another fork from
8274 within target_mourn_inferior. */
731f534f 8275 if (inferior_ptid != null_ptid)
5b6d1e4f 8276 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8277 }
8278 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8279 finish_ptid = inferior_ptid;
8280
8281 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8282 if (finish_ptid != null_ptid)
8283 {
8284 maybe_finish_thread_state.emplace
8285 (user_visible_resume_target (finish_ptid), finish_ptid);
8286 }
29f49a6a 8287
b57bacec
PA
8288 /* As we're presenting a stop, and potentially removing breakpoints,
8289 update the thread list so we can tell whether there are threads
8290 running on the target. With target remote, for example, we can
8291 only learn about new threads when we explicitly update the thread
8292 list. Do this before notifying the interpreters about signal
8293 stops, end of stepping ranges, etc., so that the "new thread"
8294 output is emitted before e.g., "Program received signal FOO",
8295 instead of after. */
8296 update_thread_list ();
8297
8298 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8299 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8300
c906108c
SS
8301 /* As with the notification of thread events, we want to delay
8302 notifying the user that we've switched thread context until
8303 the inferior actually stops.
8304
73b65bb0
DJ
8305 There's no point in saying anything if the inferior has exited.
8306 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8307 "received a signal".
8308
8309 Also skip saying anything in non-stop mode. In that mode, as we
8310 don't want GDB to switch threads behind the user's back, to avoid
8311 races where the user is typing a command to apply to thread x,
8312 but GDB switches to thread y before the user finishes entering
8313 the command, fetch_inferior_event installs a cleanup to restore
8314 the current thread back to the thread the user had selected right
8315 after this event is handled, so we're not really switching, only
8316 informing of a stop. */
4f8d22e3 8317 if (!non_stop
731f534f 8318 && previous_inferior_ptid != inferior_ptid
73b65bb0
DJ
8319 && target_has_execution
8320 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8321 && last.kind != TARGET_WAITKIND_EXITED
8322 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8323 {
0e454242 8324 SWITCH_THRU_ALL_UIS ()
3b12939d 8325 {
223ffa71 8326 target_terminal::ours_for_output ();
3b12939d 8327 printf_filtered (_("[Switching to %s]\n"),
a068643d 8328 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8329 annotate_thread_changed ();
8330 }
39f77062 8331 previous_inferior_ptid = inferior_ptid;
c906108c 8332 }
c906108c 8333
0e5bf2a8
PA
8334 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8335 {
0e454242 8336 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8337 if (current_ui->prompt_state == PROMPT_BLOCKED)
8338 {
223ffa71 8339 target_terminal::ours_for_output ();
3b12939d
PA
8340 printf_filtered (_("No unwaited-for children left.\n"));
8341 }
0e5bf2a8
PA
8342 }
8343
b57bacec 8344 /* Note: this depends on the update_thread_list call above. */
388a7084 8345 maybe_remove_breakpoints ();
c906108c 8346
c906108c
SS
8347 /* If an auto-display called a function and that got a signal,
8348 delete that auto-display to avoid an infinite recursion. */
8349
8350 if (stopped_by_random_signal)
8351 disable_current_display ();
8352
0e454242 8353 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8354 {
8355 async_enable_stdin ();
8356 }
c906108c 8357
388a7084 8358 /* Let the user/frontend see the threads as stopped. */
731f534f 8359 maybe_finish_thread_state.reset ();
388a7084
PA
8360
8361 /* Select innermost stack frame - i.e., current frame is frame 0,
8362 and current location is based on that. Handle the case where the
8363 dummy call is returning after being stopped. E.g. the dummy call
8364 previously hit a breakpoint. (If the dummy call returns
8365 normally, we won't reach here.) Do this before the stop hook is
8366 run, so that it doesn't get to see the temporary dummy frame,
8367 which is not where we'll present the stop. */
8368 if (has_stack_frames ())
8369 {
8370 if (stop_stack_dummy == STOP_STACK_DUMMY)
8371 {
8372 /* Pop the empty frame that contains the stack dummy. This
8373 also restores inferior state prior to the call (struct
8374 infcall_suspend_state). */
8375 struct frame_info *frame = get_current_frame ();
8376
8377 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8378 frame_pop (frame);
8379 /* frame_pop calls reinit_frame_cache as the last thing it
8380 does which means there's now no selected frame. */
8381 }
8382
8383 select_frame (get_current_frame ());
8384
8385 /* Set the current source location. */
8386 set_current_sal_from_frame (get_current_frame ());
8387 }
dd7e2d2b
PA
8388
8389 /* Look up the hook_stop and run it (CLI internally handles problem
8390 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8391 if (stop_command != NULL)
8392 {
2d844eaf 8393 stop_context saved_context;
4c2f2a79 8394
a70b8144 8395 try
bf469271
PA
8396 {
8397 execute_cmd_pre_hook (stop_command);
8398 }
230d2906 8399 catch (const gdb_exception &ex)
bf469271
PA
8400 {
8401 exception_fprintf (gdb_stderr, ex,
8402 "Error while running hook_stop:\n");
8403 }
4c2f2a79
PA
8404
8405 /* If the stop hook resumes the target, then there's no point in
8406 trying to notify about the previous stop; its context is
8407 gone. Likewise if the command switches thread or inferior --
8408 the observers would print a stop for the wrong
8409 thread/inferior. */
2d844eaf
TT
8410 if (saved_context.changed ())
8411 return 1;
4c2f2a79 8412 }
dd7e2d2b 8413
388a7084
PA
8414 /* Notify observers about the stop. This is where the interpreters
8415 print the stop event. */
d7e15655 8416 if (inferior_ptid != null_ptid)
76727919 8417 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8418 stop_print_frame);
8419 else
76727919 8420 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8421
243a9253
PA
8422 annotate_stopped ();
8423
48844aa6
PA
8424 if (target_has_execution)
8425 {
8426 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8427 && last.kind != TARGET_WAITKIND_EXITED
8428 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8429 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8430 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8431 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8432 }
6c95b8df
PA
8433
8434 /* Try to get rid of automatically added inferiors that are no
8435 longer needed. Keeping those around slows down things linearly.
8436 Note that this never removes the current inferior. */
8437 prune_inferiors ();
4c2f2a79
PA
8438
8439 return 0;
c906108c 8440}
c906108c 8441\f
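/* Return the "stop" setting (see the "handle" command) for signal
 SIGNO. */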
c5aa993b 8442int
96baa820 8443signal_stop_state (int signo)
c906108c 8444{
d6b48e9c 8445 return signal_stop[signo];
c906108c
SS
8446}
8447
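/* Return the "print" setting for signal SIGNO. */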
c5aa993b 8448int
96baa820 8449signal_print_state (int signo)
c906108c
SS
8450{
8451 return signal_print[signo];
8452}
8453
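/* Return whether signal SIGNO is passed through to the program. */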
c5aa993b 8454int
96baa820 8455signal_pass_state (int signo)
c906108c
SS
8456{
8457 return signal_program[signo];
8458}
8459
2455069d
UW
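/* Recompute the cached signal_pass entry for signal SIGNO from the
 stop/print/program/catch settings. A SIGNO of -1 updates the cache
 for every signal. */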
8460static void
8461signal_cache_update (int signo)
8462{
8463 if (signo == -1)
8464 {
a493e3e2 8465 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8466 signal_cache_update (signo);
8467
8468 return;
8469 }
8470
8471 signal_pass[signo] = (signal_stop[signo] == 0
8472 && signal_print[signo] == 0
ab04a2af
TT
8473 && signal_program[signo] == 1
8474 && signal_catch[signo] == 0);
2455069d
UW
8475}
8476
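/* Set the "stop" setting for signal SIGNO to STATE, refresh the signal
 cache, and return the previous setting. */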
488f131b 8477int
7bda5e4a 8478signal_stop_update (int signo, int state)
d4f3574e
SS
8479{
8480 int ret = signal_stop[signo];
abbb1732 8481
d4f3574e 8482 signal_stop[signo] = state;
2455069d 8483 signal_cache_update (signo);
d4f3574e
SS
8484 return ret;
8485}
8486
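/* Set the "print" setting for signal SIGNO to STATE, refresh the signal
 cache, and return the previous setting. */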
488f131b 8487int
7bda5e4a 8488signal_print_update (int signo, int state)
d4f3574e
SS
8489{
8490 int ret = signal_print[signo];
abbb1732 8491
d4f3574e 8492 signal_print[signo] = state;
2455069d 8493 signal_cache_update (signo);
d4f3574e
SS
8494 return ret;
8495}
8496
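/* Set whether signal SIGNO is passed to the program (signal_program)
 to STATE, refresh the signal cache, and return the previous
 setting. */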
488f131b 8497int
7bda5e4a 8498signal_pass_update (int signo, int state)
d4f3574e
SS
8499{
8500 int ret = signal_program[signo];
abbb1732 8501
d4f3574e 8502 signal_program[signo] = state;
2455069d 8503 signal_cache_update (signo);
d4f3574e
SS
8504 return ret;
8505}
8506
ab04a2af
TT
8507/* Update the global 'signal_catch' from INFO and notify the
8508 target. */
8509
8510void
8511signal_catch_update (const unsigned int *info)
8512{
8513 int i;
8514
8515 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8516 signal_catch[i] = info[i] > 0;
8517 signal_cache_update (-1);
adc6a863 8518 target_pass_signals (signal_pass);
ab04a2af
TT
8519}
8520
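/* Print the header of the table shown by "info signals" and
 "handle". */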
c906108c 8521static void
96baa820 8522sig_print_header (void)
c906108c 8523{
3e43a32a
MS
8524 printf_filtered (_("Signal Stop\tPrint\tPass "
8525 "to program\tDescription\n"));
c906108c
SS
8526}
8527
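/* Print one row of that table: the name, current settings, and
 description of signal OURSIG. */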
8528static void
2ea28649 8529sig_print_info (enum gdb_signal oursig)
c906108c 8530{
2ea28649 8531 const char *name = gdb_signal_to_name (oursig);
c906108c 8532 int name_padding = 13 - strlen (name);
96baa820 8533
c906108c
SS
8534 if (name_padding <= 0)
8535 name_padding = 0;
8536
8537 printf_filtered ("%s", name);
488f131b 8538 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8539 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8540 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8541 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8542 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8543}
8544
8545/* Specify how various signals in the inferior should be handled. */
8546
8547static void
0b39b52e 8548handle_command (const char *args, int from_tty)
c906108c 8549{
c906108c 8550 int digits, wordlen;
b926417a 8551 int sigfirst, siglast;
2ea28649 8552 enum gdb_signal oursig;
c906108c 8553 int allsigs;
c906108c
SS
8554
8555 if (args == NULL)
8556 {
e2e0b3e5 8557 error_no_arg (_("signal to handle"));
c906108c
SS
8558 }
8559
1777feb0 8560 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8561
adc6a863
PA
8562 const size_t nsigs = GDB_SIGNAL_LAST;
8563 unsigned char sigs[nsigs] {};
c906108c 8564
1777feb0 8565 /* Break the command line up into args. */
c906108c 8566
773a1edc 8567 gdb_argv built_argv (args);
c906108c
SS
8568
 8569 /* Walk through the args, looking for signal numbers, signal names, and
8570 actions. Signal numbers and signal names may be interspersed with
8571 actions, with the actions being performed for all signals cumulatively
1777feb0 8572 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8573
773a1edc 8574 for (char *arg : built_argv)
c906108c 8575 {
773a1edc
TT
8576 wordlen = strlen (arg);
8577 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8578 {;
8579 }
8580 allsigs = 0;
8581 sigfirst = siglast = -1;
8582
773a1edc 8583 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8584 {
8585 /* Apply action to all signals except those used by the
1777feb0 8586 debugger. Silently skip those. */
c906108c
SS
8587 allsigs = 1;
8588 sigfirst = 0;
8589 siglast = nsigs - 1;
8590 }
773a1edc 8591 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8592 {
8593 SET_SIGS (nsigs, sigs, signal_stop);
8594 SET_SIGS (nsigs, sigs, signal_print);
8595 }
773a1edc 8596 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8597 {
8598 UNSET_SIGS (nsigs, sigs, signal_program);
8599 }
773a1edc 8600 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8601 {
8602 SET_SIGS (nsigs, sigs, signal_print);
8603 }
773a1edc 8604 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8605 {
8606 SET_SIGS (nsigs, sigs, signal_program);
8607 }
773a1edc 8608 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8609 {
8610 UNSET_SIGS (nsigs, sigs, signal_stop);
8611 }
773a1edc 8612 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8613 {
8614 SET_SIGS (nsigs, sigs, signal_program);
8615 }
773a1edc 8616 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8617 {
8618 UNSET_SIGS (nsigs, sigs, signal_print);
8619 UNSET_SIGS (nsigs, sigs, signal_stop);
8620 }
773a1edc 8621 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8622 {
8623 UNSET_SIGS (nsigs, sigs, signal_program);
8624 }
8625 else if (digits > 0)
8626 {
8627 /* It is numeric. The numeric signal refers to our own
8628 internal signal numbering from target.h, not to host/target
8629 signal number. This is a feature; users really should be
8630 using symbolic names anyway, and the common ones like
8631 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8632
8633 sigfirst = siglast = (int)
773a1edc
TT
8634 gdb_signal_from_command (atoi (arg));
8635 if (arg[digits] == '-')
c906108c
SS
8636 {
8637 siglast = (int)
773a1edc 8638 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8639 }
8640 if (sigfirst > siglast)
8641 {
1777feb0 8642 /* Bet he didn't figure we'd think of this case... */
b926417a 8643 std::swap (sigfirst, siglast);
c906108c
SS
8644 }
8645 }
8646 else
8647 {
773a1edc 8648 oursig = gdb_signal_from_name (arg);
a493e3e2 8649 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8650 {
8651 sigfirst = siglast = (int) oursig;
8652 }
8653 else
8654 {
8655 /* Not a number and not a recognized flag word => complain. */
773a1edc 8656 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8657 }
8658 }
8659
8660 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8661 which signals to apply actions to. */
c906108c 8662
b926417a 8663 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8664 {
2ea28649 8665 switch ((enum gdb_signal) signum)
c906108c 8666 {
a493e3e2
PA
8667 case GDB_SIGNAL_TRAP:
8668 case GDB_SIGNAL_INT:
c906108c
SS
8669 if (!allsigs && !sigs[signum])
8670 {
9e2f0ad4 8671 if (query (_("%s is used by the debugger.\n\
3e43a32a 8672Are you sure you want to change it? "),
2ea28649 8673 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8674 {
8675 sigs[signum] = 1;
8676 }
8677 else
c119e040 8678 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8679 }
8680 break;
a493e3e2
PA
8681 case GDB_SIGNAL_0:
8682 case GDB_SIGNAL_DEFAULT:
8683 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8684 /* Make sure that "all" doesn't print these. */
8685 break;
8686 default:
8687 sigs[signum] = 1;
8688 break;
8689 }
8690 }
c906108c
SS
8691 }
8692
b926417a 8693 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8694 if (sigs[signum])
8695 {
2455069d 8696 signal_cache_update (-1);
adc6a863
PA
8697 target_pass_signals (signal_pass);
8698 target_program_signals (signal_program);
c906108c 8699
3a031f65
PA
8700 if (from_tty)
8701 {
8702 /* Show the results. */
8703 sig_print_header ();
8704 for (; signum < nsigs; signum++)
8705 if (sigs[signum])
aead7601 8706 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8707 }
8708
8709 break;
8710 }
c906108c
SS
8711}
8712
de0bea00
MF
8713/* Complete the "handle" command. */
8714
eb3ff9a5 8715static void
de0bea00 8716handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8717 completion_tracker &tracker,
6f937416 8718 const char *text, const char *word)
de0bea00 8719{
de0bea00
MF
8720 static const char * const keywords[] =
8721 {
8722 "all",
8723 "stop",
8724 "ignore",
8725 "print",
8726 "pass",
8727 "nostop",
8728 "noignore",
8729 "noprint",
8730 "nopass",
8731 NULL,
8732 };
8733
eb3ff9a5
PA
8734 signal_completer (ignore, tracker, text, word);
8735 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8736}
8737
2ea28649
PA
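/* Convert a signal number given as a command argument to a gdb_signal;
 only 1-15 are accepted, anything else is an error. */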
8738enum gdb_signal
8739gdb_signal_from_command (int num)
ed01b82c
PA
8740{
8741 if (num >= 1 && num <= 15)
2ea28649 8742 return (enum gdb_signal) num;
ed01b82c
PA
8743 error (_("Only signals 1-15 are valid as numeric signals.\n\
8744Use \"info signals\" for a list of symbolic signals."));
8745}
8746
c906108c
SS
8747/* Print current contents of the tables set by the handle command.
8748 It is possible we should just be printing signals actually used
8749 by the current target (but for things to work right when switching
8750 targets, all signals should be in the signal tables). */
8751
8752static void
1d12d88f 8753info_signals_command (const char *signum_exp, int from_tty)
c906108c 8754{
2ea28649 8755 enum gdb_signal oursig;
abbb1732 8756
c906108c
SS
8757 sig_print_header ();
8758
8759 if (signum_exp)
8760 {
8761 /* First see if this is a symbol name. */
2ea28649 8762 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8763 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8764 {
8765 /* No, try numeric. */
8766 oursig =
2ea28649 8767 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8768 }
8769 sig_print_info (oursig);
8770 return;
8771 }
8772
8773 printf_filtered ("\n");
8774 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8775 for (oursig = GDB_SIGNAL_FIRST;
8776 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8777 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8778 {
8779 QUIT;
8780
a493e3e2
PA
8781 if (oursig != GDB_SIGNAL_UNKNOWN
8782 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8783 sig_print_info (oursig);
8784 }
8785
3e43a32a
MS
8786 printf_filtered (_("\nUse the \"handle\" command "
8787 "to change these tables.\n"));
c906108c 8788}
4aa995e1
PA
8789
8790/* The $_siginfo convenience variable is a bit special. We don't know
8791 for sure the type of the value until we actually have a chance to
7a9dd1b2 8792 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
 8793 also dependent on which thread you have selected. This is handled by:
8794
8795 1. making $_siginfo be an internalvar that creates a new value on
8796 access.
8797
8798 2. making the value of $_siginfo be an lval_computed value. */
8799
8800/* This function implements the lval_computed support for reading a
8801 $_siginfo value. */
8802
8803static void
8804siginfo_value_read (struct value *v)
8805{
8806 LONGEST transferred;
8807
a911d87a
PA
8808 /* If we can access registers, so can we access $_siginfo. Likewise
8809 vice versa. */
8810 validate_registers_access ();
c709acd1 8811
4aa995e1 8812 transferred =
8b88a78e 8813 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8814 NULL,
8815 value_contents_all_raw (v),
8816 value_offset (v),
8817 TYPE_LENGTH (value_type (v)));
8818
8819 if (transferred != TYPE_LENGTH (value_type (v)))
8820 error (_("Unable to read siginfo"));
8821}
8822
8823/* This function implements the lval_computed support for writing a
8824 $_siginfo value. */
8825
8826static void
8827siginfo_value_write (struct value *v, struct value *fromval)
8828{
8829 LONGEST transferred;
8830
a911d87a
PA
8831 /* If we can access registers, so can we access $_siginfo. Likewise
8832 vice versa. */
8833 validate_registers_access ();
c709acd1 8834
8b88a78e 8835 transferred = target_write (current_top_target (),
4aa995e1
PA
8836 TARGET_OBJECT_SIGNAL_INFO,
8837 NULL,
8838 value_contents_all_raw (fromval),
8839 value_offset (v),
8840 TYPE_LENGTH (value_type (fromval)));
8841
8842 if (transferred != TYPE_LENGTH (value_type (fromval)))
8843 error (_("Unable to write siginfo"));
8844}
8845
c8f2448a 8846static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8847 {
8848 siginfo_value_read,
8849 siginfo_value_write
8850 };
8851
8852/* Return a new value with the correct type for the siginfo object of
78267919
UW
8853 the current thread using architecture GDBARCH. Return a void value
8854 if there's no object available. */
4aa995e1 8855
2c0b251b 8856static struct value *
22d2b532
SDJ
8857siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8858 void *ignore)
4aa995e1 8859{
4aa995e1 8860 if (target_has_stack
d7e15655 8861 && inferior_ptid != null_ptid
78267919 8862 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8863 {
78267919 8864 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8865
78267919 8866 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8867 }
8868
78267919 8869 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8870}
8871
c906108c 8872\f
16c381f0
JK
8873/* infcall_suspend_state contains state about the program itself like its
8874 registers and any signal it received when it last stopped.
8875 This state must be restored regardless of how the inferior function call
8876 ends (either successfully, or after it hits a breakpoint or signal)
8877 if the program is to properly continue where it left off. */
8878
6bf78e29 8879class infcall_suspend_state
7a292a7a 8880{
6bf78e29
AB
8881public:
8882 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
8883 once the inferior function call has finished. */
8884 infcall_suspend_state (struct gdbarch *gdbarch,
8885 const struct thread_info *tp,
8886 struct regcache *regcache)
8887 : m_thread_suspend (tp->suspend),
8888 m_registers (new readonly_detached_regcache (*regcache))
8889 {
8890 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
8891
8892 if (gdbarch_get_siginfo_type_p (gdbarch))
8893 {
8894 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8895 size_t len = TYPE_LENGTH (type);
8896
8897 siginfo_data.reset ((gdb_byte *) xmalloc (len));
8898
8899 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8900 siginfo_data.get (), 0, len) != len)
8901 {
8902 /* Errors ignored. */
8903 siginfo_data.reset (nullptr);
8904 }
8905 }
8906
8907 if (siginfo_data)
8908 {
8909 m_siginfo_gdbarch = gdbarch;
8910 m_siginfo_data = std::move (siginfo_data);
8911 }
8912 }
8913
8914 /* Return a pointer to the stored register state. */
16c381f0 8915
6bf78e29
AB
8916 readonly_detached_regcache *registers () const
8917 {
8918 return m_registers.get ();
8919 }
8920
8921 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
8922
8923 void restore (struct gdbarch *gdbarch,
8924 struct thread_info *tp,
8925 struct regcache *regcache) const
8926 {
8927 tp->suspend = m_thread_suspend;
8928
8929 if (m_siginfo_gdbarch == gdbarch)
8930 {
8931 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8932
8933 /* Errors ignored. */
8934 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8935 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
8936 }
8937
8938 /* The inferior can be gone if the user types "print exit(0)"
8939 (and perhaps other times). */
8940 if (target_has_execution)
8941 /* NB: The register write goes through to the target. */
8942 regcache->restore (registers ());
8943 }
8944
8945private:
8946 /* How the current thread stopped before the inferior function call was
8947 executed. */
8948 struct thread_suspend_state m_thread_suspend;
8949
8950 /* The registers before the inferior function call was executed. */
8951 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 8952
35515841 8953 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 8954 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
8955
8956 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
8957 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
8958 content would be invalid. */
6bf78e29 8959 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
8960};
8961
cb524840
TT
8962infcall_suspend_state_up
8963save_infcall_suspend_state ()
b89667eb 8964{
b89667eb 8965 struct thread_info *tp = inferior_thread ();
1736ad11 8966 struct regcache *regcache = get_current_regcache ();
ac7936df 8967 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 8968
6bf78e29
AB
8969 infcall_suspend_state_up inf_state
8970 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 8971
6bf78e29
AB
8972 /* Having saved the current state, adjust the thread state, discarding
8973 any stop signal information. The stop signal is not useful when
8974 starting an inferior function call, and run_inferior_call will not use
8975 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 8976 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8977
b89667eb
DE
8978 return inf_state;
8979}
8980
8981/* Restore inferior session state to INF_STATE. */
8982
8983void
16c381f0 8984restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8985{
8986 struct thread_info *tp = inferior_thread ();
1736ad11 8987 struct regcache *regcache = get_current_regcache ();
ac7936df 8988 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 8989
6bf78e29 8990 inf_state->restore (gdbarch, tp, regcache);
16c381f0 8991 discard_infcall_suspend_state (inf_state);
b89667eb
DE
8992}
8993
b89667eb 8994void
16c381f0 8995discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 8996{
dd848631 8997 delete inf_state;
b89667eb
DE
8998}
8999
daf6667d 9000readonly_detached_regcache *
16c381f0 9001get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 9002{
6bf78e29 9003 return inf_state->registers ();
b89667eb
DE
9004}
9005
16c381f0
JK
9006/* infcall_control_state contains state regarding gdb's control of the
9007 inferior itself like stepping control. It also contains session state like
9008 the user's currently selected frame. */
b89667eb 9009
16c381f0 9010struct infcall_control_state
b89667eb 9011{
16c381f0
JK
9012 struct thread_control_state thread_control;
9013 struct inferior_control_state inferior_control;
d82142e2
JK
9014
9015 /* Other fields: */
ee841dd8
TT
9016 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
9017 int stopped_by_random_signal = 0;
7a292a7a 9018
b89667eb 9019 /* ID of the selected frame when the inferior function call was made. */
ee841dd8 9020 struct frame_id selected_frame_id {};
7a292a7a
SS
9021};
9022
c906108c 9023/* Save all of the information associated with the inferior<==>gdb
b89667eb 9024 connection. */
c906108c 9025
cb524840
TT
9026infcall_control_state_up
9027save_infcall_control_state ()
c906108c 9028{
cb524840 9029 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 9030 struct thread_info *tp = inferior_thread ();
d6b48e9c 9031 struct inferior *inf = current_inferior ();
7a292a7a 9032
16c381f0
JK
9033 inf_status->thread_control = tp->control;
9034 inf_status->inferior_control = inf->control;
d82142e2 9035
8358c15c 9036 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9037 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9038
16c381f0
JK
9039 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9040 chain. If caller's caller is walking the chain, they'll be happier if we
9041 hand them back the original chain when restore_infcall_control_state is
9042 called. */
9043 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
9044
9045 /* Other fields: */
9046 inf_status->stop_stack_dummy = stop_stack_dummy;
9047 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9048
206415a3 9049 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 9050
7a292a7a 9051 return inf_status;
c906108c
SS
9052}
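/* Editorial sketch, not part of infrun.c: exactly one of
   restore_infcall_control_state or discard_infcall_control_state (both
   below) is expected to consume the state returned above, e.g.:

     infcall_control_state_up ctrl = save_infcall_control_state ();
     ... attempt the inferior call ...
     if (error_occurred)          // hypothetical condition, for illustration
       restore_infcall_control_state (ctrl.release ());
     else
       discard_infcall_control_state (ctrl.release ());

   Either consumer deletes the state and clears whichever bpstat chain it
   ends up owning, balancing the bpstat_copy done above.  */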
9053
bf469271
PA
9054static void
9055restore_selected_frame (const frame_id &fid)
c906108c 9056{
bf469271 9057 frame_info *frame = frame_find_by_id (fid);
c906108c 9058
aa0cd9c1
AC
 9059 /* If FID does not identify any frame in the current stack, the
 9060 previously selected frame cannot be restored. */
101dcfbe 9061 if (frame == NULL)
c906108c 9062 {
8a3fe4f8 9063 warning (_("Unable to restore previously selected frame."));
bf469271 9064 return;
c906108c
SS
9065 }
9066
0f7d239c 9067 select_frame (frame);
c906108c
SS
9068}
9069
b89667eb
DE
9070/* Restore inferior session state to INF_STATUS. */
9071
c906108c 9072void
16c381f0 9073restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9074{
4e1c45ea 9075 struct thread_info *tp = inferior_thread ();
d6b48e9c 9076 struct inferior *inf = current_inferior ();
4e1c45ea 9077
8358c15c
JK
9078 if (tp->control.step_resume_breakpoint)
9079 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9080
5b79abe7
TT
9081 if (tp->control.exception_resume_breakpoint)
9082 tp->control.exception_resume_breakpoint->disposition
9083 = disp_del_at_next_stop;
9084
d82142e2 9085 /* Handle the bpstat_copy of the chain. */
16c381f0 9086 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9087
16c381f0
JK
9088 tp->control = inf_status->thread_control;
9089 inf->control = inf_status->inferior_control;
d82142e2
JK
9090
9091 /* Other fields: */
9092 stop_stack_dummy = inf_status->stop_stack_dummy;
9093 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9094
b89667eb 9095 if (target_has_stack)
c906108c 9096 {
bf469271 9097 /* The point of the try/catch is that if the stack is clobbered,
101dcfbe
AC
9098 walking the stack might encounter a garbage pointer and
9099 error() trying to dereference it. */
a70b8144 9100 try
bf469271
PA
9101 {
9102 restore_selected_frame (inf_status->selected_frame_id);
9103 }
230d2906 9104 catch (const gdb_exception_error &ex)
bf469271
PA
9105 {
9106 exception_fprintf (gdb_stderr, ex,
9107 "Unable to restore previously selected frame:\n");
9108 /* Error in restoring the selected frame. Select the
9109 innermost frame. */
9110 select_frame (get_current_frame ());
9111 }
c906108c 9112 }
c906108c 9113
ee841dd8 9114 delete inf_status;
7a292a7a 9115}
c906108c
SS
9116
9117void
16c381f0 9118discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9119{
8358c15c
JK
9120 if (inf_status->thread_control.step_resume_breakpoint)
9121 inf_status->thread_control.step_resume_breakpoint->disposition
9122 = disp_del_at_next_stop;
9123
5b79abe7
TT
9124 if (inf_status->thread_control.exception_resume_breakpoint)
9125 inf_status->thread_control.exception_resume_breakpoint->disposition
9126 = disp_del_at_next_stop;
9127
1777feb0 9128 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9129 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9130
ee841dd8 9131 delete inf_status;
7a292a7a 9132}
b89667eb 9133\f
7f89fd65 9134/* See infrun.h. */
0c557179
SDJ
9135
9136void
9137clear_exit_convenience_vars (void)
9138{
9139 clear_internalvar (lookup_internalvar ("_exitsignal"));
9140 clear_internalvar (lookup_internalvar ("_exitcode"));
9141}
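/* Editorial note, not part of infrun.c: $_exitcode and $_exitsignal are
   user-visible convenience variables; only the one matching how the
   inferior terminated gets set, e.g. (process id and values illustrative):

     (gdb) run
     [Inferior 1 (process 1234) exited with code 02]
     (gdb) print $_exitcode
     $1 = 2
     (gdb) print $_exitsignal
     $2 = void

   Clearing both avoids a stale value from a previous run being misread.  */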
c5aa993b 9142\f
488f131b 9143
b2175913
MS
9144/* User interface for reverse debugging:
9145 Set exec-direction / show exec-direction commands
 9146 (the set command errors out unless the target supports reverse execution). */
9147
170742de 9148enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
9149static const char exec_forward[] = "forward";
9150static const char exec_reverse[] = "reverse";
9151static const char *exec_direction = exec_forward;
40478521 9152static const char *const exec_direction_names[] = {
b2175913
MS
9153 exec_forward,
9154 exec_reverse,
9155 NULL
9156};
9157
9158static void
eb4c3f4a 9159set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
9160 struct cmd_list_element *cmd)
9161{
9162 if (target_can_execute_reverse)
9163 {
9164 if (!strcmp (exec_direction, exec_forward))
9165 execution_direction = EXEC_FORWARD;
9166 else if (!strcmp (exec_direction, exec_reverse))
9167 execution_direction = EXEC_REVERSE;
9168 }
8bbed405
MS
9169 else
9170 {
9171 exec_direction = exec_forward;
9172 error (_("Target does not support this operation."));
9173 }
b2175913
MS
9174}
9175
9176static void
9177show_exec_direction_func (struct ui_file *out, int from_tty,
9178 struct cmd_list_element *cmd, const char *value)
9179{
9180 switch (execution_direction) {
9181 case EXEC_FORWARD:
9182 fprintf_filtered (out, _("Forward.\n"));
9183 break;
9184 case EXEC_REVERSE:
9185 fprintf_filtered (out, _("Reverse.\n"));
9186 break;
b2175913 9187 default:
d8b34453
PA
9188 internal_error (__FILE__, __LINE__,
9189 _("bogus execution_direction value: %d"),
9190 (int) execution_direction);
b2175913
MS
9191 }
9192}
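/* Editorial note, not part of infrun.c: the exec-direction set/show
   commands backed by the two functions above are registered in
   _initialize_infrun further below.  Typical use with a record/replay
   target active:

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.

   On a target that cannot execute in reverse, the "set" form errors out
   and the mode is snapped back to "forward", as the code above enforces.  */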
9193
d4db2f36
PA
9194static void
9195show_schedule_multiple (struct ui_file *file, int from_tty,
9196 struct cmd_list_element *c, const char *value)
9197{
3e43a32a
MS
9198 fprintf_filtered (file, _("Resuming the execution of threads "
9199 "of all processes is %s.\n"), value);
d4db2f36 9200}
ad52ddc6 9201
22d2b532
SDJ
9202/* Implementation of `siginfo' variable. */
9203
9204static const struct internalvar_funcs siginfo_funcs =
9205{
9206 siginfo_make_value,
9207 NULL,
9208 NULL
9209};
9210
372316f1
PA
 9211/* Callback for infrun's target events source.  The handler is marked
 9212 when a thread has a pending status to process. */
9213
9214static void
9215infrun_async_inferior_event_handler (gdb_client_data data)
9216{
b1a35af2 9217 inferior_event_handler (INF_REG_EVENT);
372316f1
PA
9218}
9219
6c265988 9220void _initialize_infrun ();
c906108c 9221void
6c265988 9222_initialize_infrun ()
c906108c 9223{
de0bea00 9224 struct cmd_list_element *c;
c906108c 9225
372316f1
PA
9226 /* Register extra event sources in the event loop. */
9227 infrun_async_inferior_event_token
9228 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
9229
11db9430 9230 add_info ("signals", info_signals_command, _("\
1bedd215
AC
9231What debugger does when program gets various signals.\n\
9232Specify a signal as argument to print info on that signal only."));
c906108c
SS
9233 add_info_alias ("handle", "signals", 0);
9234
de0bea00 9235 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9236Specify how to handle signals.\n\
486c7739 9237Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9238Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9239If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
9240will be displayed instead.\n\
9241\n\
c906108c
SS
9242Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9243from 1-15 are allowed for compatibility with old versions of GDB.\n\
9244Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9245The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9246used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9247\n\
1bedd215 9248Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
9249\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9250Stop means reenter debugger if this signal happens (implies print).\n\
9251Print means print a message if this signal happens.\n\
9252Pass means let program see this signal; otherwise program doesn't know.\n\
9253Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
9254Pass and Stop may be combined.\n\
9255\n\
9256Multiple signals may be specified. Signal numbers and signal names\n\
9257may be interspersed with actions, with the actions being performed for\n\
9258all signals cumulatively specified."));
de0bea00 9259 set_cmd_completer (c, handle_completer);
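/* Editorial note, not part of infrun.c: an illustrative use of the
   "handle" command registered above, following the syntax described in
   its help text:

     (gdb) handle SIGUSR1 nostop noprint pass
     Signal        Stop      Print   Pass to program Description
     SIGUSR1       No        No      Yes             User defined signal 1

   The exact table layout comes from sig_print_header/sig_print_info and
   may differ between GDB versions.  */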
486c7739 9260
c906108c 9261 if (!dbx_commands)
1a966eab
AC
9262 stop_command = add_cmd ("stop", class_obscure,
9263 not_just_help_class_command, _("\
9264There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9265This allows you to set a list of commands to be run each time execution\n\
1a966eab 9266of the program stops."), &cmdlist);
c906108c 9267
ccce17b0 9268 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
9269Set inferior debugging."), _("\
9270Show inferior debugging."), _("\
9271When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
9272 NULL,
9273 show_debug_infrun,
9274 &setdebuglist, &showdebuglist);
527159b7 9275
3e43a32a
MS
9276 add_setshow_boolean_cmd ("displaced", class_maintenance,
9277 &debug_displaced, _("\
237fc4c9
PA
9278Set displaced stepping debugging."), _("\
9279Show displaced stepping debugging."), _("\
9280When non-zero, displaced stepping specific debugging is enabled."),
9281 NULL,
9282 show_debug_displaced,
9283 &setdebuglist, &showdebuglist);
9284
ad52ddc6
PA
9285 add_setshow_boolean_cmd ("non-stop", no_class,
9286 &non_stop_1, _("\
9287Set whether gdb controls the inferior in non-stop mode."), _("\
9288Show whether gdb controls the inferior in non-stop mode."), _("\
9289When debugging a multi-threaded program and this setting is\n\
9290off (the default, also called all-stop mode), when one thread stops\n\
9291(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9292all other threads in the program while you interact with the thread of\n\
9293interest. When you continue or step a thread, you can allow the other\n\
9294threads to run, or have them remain stopped, but while you inspect any\n\
9295thread's state, all threads stop.\n\
9296\n\
9297In non-stop mode, when one thread stops, other threads can continue\n\
9298to run freely. You'll be able to step each thread independently,\n\
9299leave it stopped or free to run as needed."),
9300 set_non_stop,
9301 show_non_stop,
9302 &setlist,
9303 &showlist);
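/* Editorial note, not part of infrun.c: "set non-stop" only takes effect
   if issued before the inferior starts running, e.g.:

     (gdb) set non-stop on
     (gdb) run &

   Running in the background with "run &" keeps the prompt usable while
   threads execute; "interrupt" (or "interrupt -a") then stops the current
   thread (or all threads) on demand.  */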
9304
adc6a863 9305 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
9306 {
9307 signal_stop[i] = 1;
9308 signal_print[i] = 1;
9309 signal_program[i] = 1;
ab04a2af 9310 signal_catch[i] = 0;
c906108c
SS
9311 }
9312
4d9d9d04
PA
9313 /* Signals caused by debugger's own actions should not be given to
9314 the program afterwards.
9315
9316 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9317 explicitly specifies that it should be delivered to the target
9318 program. Typically, that would occur when a user is debugging a
9319 target monitor on a simulator: the target monitor sets a
9320 breakpoint; the simulator encounters this breakpoint and halts
9321 the simulation handing control to GDB; GDB, noting that the stop
9322 address doesn't map to any known breakpoint, returns control back
9323 to the simulator; the simulator then delivers the hardware
9324 equivalent of a GDB_SIGNAL_TRAP to the program being
9325 debugged. */
a493e3e2
PA
9326 signal_program[GDB_SIGNAL_TRAP] = 0;
9327 signal_program[GDB_SIGNAL_INT] = 0;
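/* Editorial note, not part of infrun.c: these defaults can be overridden
   from the CLI, e.g. to forward SIGTRAP to a program that expects to
   receive it:

     (gdb) handle SIGTRAP nostop noprint pass

   GDB asks for confirmation first, since SIGTRAP is used by the debugger
   itself; the command then updates signal_stop, signal_print and
   signal_program for GDB_SIGNAL_TRAP.  */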
c906108c
SS
9328
9329 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9330 signal_stop[GDB_SIGNAL_ALRM] = 0;
9331 signal_print[GDB_SIGNAL_ALRM] = 0;
9332 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9333 signal_print[GDB_SIGNAL_VTALRM] = 0;
9334 signal_stop[GDB_SIGNAL_PROF] = 0;
9335 signal_print[GDB_SIGNAL_PROF] = 0;
9336 signal_stop[GDB_SIGNAL_CHLD] = 0;
9337 signal_print[GDB_SIGNAL_CHLD] = 0;
9338 signal_stop[GDB_SIGNAL_IO] = 0;
9339 signal_print[GDB_SIGNAL_IO] = 0;
9340 signal_stop[GDB_SIGNAL_POLL] = 0;
9341 signal_print[GDB_SIGNAL_POLL] = 0;
9342 signal_stop[GDB_SIGNAL_URG] = 0;
9343 signal_print[GDB_SIGNAL_URG] = 0;
9344 signal_stop[GDB_SIGNAL_WINCH] = 0;
9345 signal_print[GDB_SIGNAL_WINCH] = 0;
9346 signal_stop[GDB_SIGNAL_PRIO] = 0;
9347 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9348
cd0fc7c3
SS
9349 /* These signals are used internally by user-level thread
9350 implementations. (See signal(5) on Solaris.) Like the above
9351 signals, a healthy program receives and handles them as part of
9352 its normal operation. */
a493e3e2
PA
9353 signal_stop[GDB_SIGNAL_LWP] = 0;
9354 signal_print[GDB_SIGNAL_LWP] = 0;
9355 signal_stop[GDB_SIGNAL_WAITING] = 0;
9356 signal_print[GDB_SIGNAL_WAITING] = 0;
9357 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9358 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9359 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9360 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9361
2455069d
UW
9362 /* Update cached state. */
9363 signal_cache_update (-1);
9364
85c07804
AC
9365 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9366 &stop_on_solib_events, _("\
9367Set stopping for shared library events."), _("\
9368Show stopping for shared library events."), _("\
c906108c
SS
9369If nonzero, gdb will give control to the user when the dynamic linker\n\
9370notifies gdb of shared library events. The most common event of interest\n\
85c07804 9371to the user would be loading/unloading of a new library."),
f9e14852 9372 set_stop_on_solib_events,
920d2a44 9373 show_stop_on_solib_events,
85c07804 9374 &setlist, &showlist);
c906108c 9375
7ab04401
AC
9376 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9377 follow_fork_mode_kind_names,
9378 &follow_fork_mode_string, _("\
9379Set debugger response to a program call of fork or vfork."), _("\
9380Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9381A fork or vfork creates a new process. follow-fork-mode can be:\n\
9382 parent - the original process is debugged after a fork\n\
9383 child - the new process is debugged after a fork\n\
ea1dd7bc 9384The unfollowed process will continue to run.\n\
7ab04401
AC
9385By default, the debugger will follow the parent process."),
9386 NULL,
920d2a44 9387 show_follow_fork_mode_string,
7ab04401
AC
9388 &setlist, &showlist);
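/* Editorial note, not part of infrun.c: typical use of the setting
   registered above when the child side of a fork is of interest:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off

   The second command (registered further below) additionally keeps the
   parent under GDB's control instead of detaching it.  */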
9389
6c95b8df
PA
9390 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9391 follow_exec_mode_names,
9392 &follow_exec_mode_string, _("\
9393Set debugger response to a program call of exec."), _("\
9394Show debugger response to a program call of exec."), _("\
9395An exec call replaces the program image of a process.\n\
9396\n\
9397follow-exec-mode can be:\n\
9398\n\
cce7e648 9399 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9400to this new inferior. The program the process was running before\n\
9401the exec call can be restarted afterwards by restarting the original\n\
9402inferior.\n\
9403\n\
9404 same - the debugger keeps the process bound to the same inferior.\n\
9405The new executable image replaces the previous executable loaded in\n\
9406the inferior. Restarting the inferior after the exec call restarts\n\
9407the executable the process was running after the exec call.\n\
9408\n\
9409By default, the debugger will use the same inferior."),
9410 NULL,
9411 show_follow_exec_mode_string,
9412 &setlist, &showlist);
9413
7ab04401
AC
9414 add_setshow_enum_cmd ("scheduler-locking", class_run,
9415 scheduler_enums, &scheduler_mode, _("\
9416Set mode for locking scheduler during execution."), _("\
9417Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9418off == no locking (threads may preempt at any time)\n\
9419on == full locking (no thread except the current thread may run)\n\
9420 This applies to both normal execution and replay mode.\n\
9421step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9422 In this mode, other threads may run during other commands.\n\
9423 This applies to both normal execution and replay mode.\n\
9424replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9425 set_schedlock_func, /* traps on target vector */
920d2a44 9426 show_scheduler_mode,
7ab04401 9427 &setlist, &showlist);
5fbbeb29 9428
d4db2f36
PA
9429 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9430Set mode for resuming threads of all processes."), _("\
9431Show mode for resuming threads of all processes."), _("\
9432When on, execution commands (such as 'continue' or 'next') resume all\n\
9433threads of all processes. When off (which is the default), execution\n\
9434commands only resume the threads of the current process. The set of\n\
9435threads that are resumed is further refined by the scheduler-locking\n\
9436mode (see help set scheduler-locking)."),
9437 NULL,
9438 show_schedule_multiple,
9439 &setlist, &showlist);
9440
5bf193a2
AC
9441 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9442Set mode of the step operation."), _("\
9443Show mode of the step operation."), _("\
9444When set, doing a step over a function without debug line information\n\
9445will stop at the first instruction of that function. Otherwise, the\n\
9446function is skipped and the step command stops at a different source line."),
9447 NULL,
920d2a44 9448 show_step_stop_if_no_debug,
5bf193a2 9449 &setlist, &showlist);
ca6724c1 9450
72d0e2c5
YQ
9451 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9452 &can_use_displaced_stepping, _("\
237fc4c9
PA
9453Set debugger's willingness to use displaced stepping."), _("\
9454Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9455If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9456supported by the target architecture. If off, gdb will not use displaced\n\
9457stepping to step over breakpoints, even if such is supported by the target\n\
9458architecture. If auto (which is the default), gdb will use displaced stepping\n\
9459if the target architecture supports it and non-stop mode is active, but will not\n\
9460use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9461 NULL,
9462 show_can_use_displaced_stepping,
9463 &setlist, &showlist);
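/* Editorial note, not part of infrun.c: the setting registered above is
   an auto-boolean, so "on", "off" and "auto" are all accepted:

     (gdb) set displaced-stepping off
     (gdb) set displaced-stepping auto

   With "auto" (the default), whether displaced stepping is actually used
   additionally depends on non-stop mode and on architecture support, as
   the help text above explains.  */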
237fc4c9 9464
b2175913
MS
9465 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9466 &exec_direction, _("Set direction of execution.\n\
9467Options are 'forward' or 'reverse'."),
9468 _("Show direction of execution (forward/reverse)."),
9469 _("Tells gdb whether to execute forward or backward."),
9470 set_exec_direction_func, show_exec_direction_func,
9471 &setlist, &showlist);
9472
6c95b8df
PA
9473 /* Set/show detach-on-fork: user-settable mode. */
9474
9475 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9476Set whether gdb will detach the child of a fork."), _("\
9477Show whether gdb will detach the child of a fork."), _("\
9478Tells gdb whether to detach the child of a fork."),
9479 NULL, NULL, &setlist, &showlist);
9480
03583c20
UW
9481 /* Set/show disable address space randomization mode. */
9482
9483 add_setshow_boolean_cmd ("disable-randomization", class_support,
9484 &disable_randomization, _("\
9485Set disabling of debuggee's virtual address space randomization."), _("\
9486Show disabling of debuggee's virtual address space randomization."), _("\
9487When this mode is on (which is the default), randomization of the virtual\n\
9488address space is disabled. Standalone programs run with the randomization\n\
9489enabled by default on some platforms."),
9490 &set_disable_randomization,
9491 &show_disable_randomization,
9492 &setlist, &showlist);
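/* Editorial note, not part of infrun.c: an illustrative use of the
   setting registered above when the debuggee should keep address space
   layout randomization:

     (gdb) set disable-randomization off
     (gdb) run

   Support is target-dependent; GNU/Linux native debugging implements it
   via personality (ADDR_NO_RANDOMIZE), while other targets may warn that
   the request is unsupported.  */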
9493
ca6724c1 9494 /* ptid initializations */
ca6724c1
KB
9495 inferior_ptid = null_ptid;
9496 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9497
76727919
TT
9498 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
9499 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
9500 gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
9501 gdb::observers::inferior_exit.attach (infrun_inferior_exit);
4aa995e1
PA
9502
9503 /* Explicitly create without lookup, since that tries to create a
9504 value with a void typed value, and when we get here, gdbarch
9505 isn't initialized yet. At this point, we're quite sure there
9506 isn't another convenience variable of the same name. */
22d2b532 9507 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9508
9509 add_setshow_boolean_cmd ("observer", no_class,
9510 &observer_mode_1, _("\
9511Set whether gdb controls the inferior in observer mode."), _("\
9512Show whether gdb controls the inferior in observer mode."), _("\
9513In observer mode, GDB can get data from the inferior, but not\n\
9514affect its execution. Registers and memory may not be changed,\n\
9515breakpoints may not be set, and the program cannot be interrupted\n\
9516or signalled."),
9517 set_observer_mode,
9518 show_observer_mode,
9519 &setlist,
9520 &showlist);
c906108c 9521}