gdb: tweak format of infrun debug log
[deliverable/binutils-gdb.git] / gdb / infrun.c
/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbsupport/common-defs.h"
#include "gdbsupport/common-utils.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static int currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

#define infrun_log_debug(fmt, args...) \
  infrun_log_debug_1 (__LINE__, __func__, fmt, ##args)

static void ATTRIBUTE_PRINTF(3, 4)
infrun_log_debug_1 (int line, const char *func,
                    const char *fmt, ...)
{
  if (debug_infrun)
    {
      va_list args;
      va_start (args, fmt);
      std::string msg = string_vprintf (fmt, args);
      va_end (args);

      fprintf_unfiltered (gdb_stdout, "infrun: %s: %s\n", func, msg.c_str ());
    }
}

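/* Note: with "set debug infrun 1" in effect, infrun_log_debug calls
   such as the infrun_log_debug ("enable=%d", enable) in infrun_async
   below produce output of the form "infrun: FUNC: MSG", for example
   "infrun: infrun_async: enable=1".  The macro also captures the
   source line number, but this version of the helper does not print
   it.  */
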
/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_log_debug ("enable=%d", enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such a function.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_displaced = false;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
}

unsigned int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}


/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

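/* For example, SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_stop) marks
   every signal whose entry in SIGS is nonzero as one that stops the
   inferior, and the matching UNSET_SIGS call clears those same
   entries again.  Both macros walk the first NSIGS entries of SIGS
   and only touch the FLAGS slots whose SIGS entry is set.  */
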
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}
\f

/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          push_target (parent_inf->process_target ());
          thread_info *child_thr
            = add_thread_silent (child_inf->process_target (), child_ptid);

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;

              /* Now that the inferiors and program spaces are all
                 wired up, we can switch to the child thread (which
                 switches inferior and program space too).  */
              switch_to_thread (child_thr);
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* solib_create_inferior_hook relies on the current
                 thread.  */
              switch_to_thread (child_thr);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
            parent_inf->waiting_for_vfork_done = 0;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        push_target (target);
      }

      thread_info *child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      switch_to_thread (child_thr);
    }

  return target_follow_fork (follow_child, detach_fork);
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previous selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent_targ, parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      infrun_log_debug ("resuming vfork parent thread %s",
                        target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          scoped_restore_current_pspace_and_thread restore_thread;

          /* We're letting loose of the parent.  */
          thread_info *tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to no-thread while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore_current_thread restore_thread;
          switch_to_no_thread ();

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          infrun_log_debug ("resuming vfork parent process %d",
                            resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      inferior *org_inferior = current_inferior ();
      switch_to_inferior_no_thread (inf);
      push_target (org_inferior->process_target ());
      thread_info *thr = add_thread (inf->process_target (), ptid);
      switch_to_thread (thr);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

PA
1259/* The queue of threads that need to do a step-over operation to get
1260 past e.g., a breakpoint. What technique is used to step over the
1261 breakpoint/watchpoint does not matter -- all threads end up in the
1262 same queue, to maintain rough temporal order of execution, in order
1263 to avoid starvation, otherwise, we could e.g., find ourselves
1264 constantly stepping the same couple threads past their breakpoints
1265 over and over, if the single-step finish fast enough. */
1266struct thread_info *step_over_queue_head;
1267
6c4cfb24
PA
1268/* Bit flags indicating what the thread needs to step over. */
1269
8d297bbf 1270enum step_over_what_flag
6c4cfb24
PA
1271 {
1272 /* Step over a breakpoint. */
1273 STEP_OVER_BREAKPOINT = 1,
1274
1275 /* Step past a non-continuable watchpoint, in order to let the
1276 instruction execute so we can evaluate the watchpoint
1277 expression. */
1278 STEP_OVER_WATCHPOINT = 2
1279 };
8d297bbf 1280DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
6c4cfb24 1281
963f9c80 1282/* Info about an instruction that is being stepped over. */
31e77af2
PA
1283
1284struct step_over_info
1285{
963f9c80
PA
1286 /* If we're stepping past a breakpoint, this is the address space
1287 and address of the instruction the breakpoint is set at. We'll
1288 skip inserting all breakpoints here. Valid iff ASPACE is
1289 non-NULL. */
8b86c959 1290 const address_space *aspace;
31e77af2 1291 CORE_ADDR address;
963f9c80
PA
1292
1293 /* The instruction being stepped over triggers a nonsteppable
1294 watchpoint. If true, we'll skip inserting watchpoints. */
1295 int nonsteppable_watchpoint_p;
21edc42f
YQ
1296
1297 /* The thread's global number. */
1298 int thread;
31e77af2
PA
1299};
1300
1301/* The step-over info of the location that is being stepped over.
1302
1303 Note that with async/breakpoint always-inserted mode, a user might
1304 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1305 being stepped over. As setting a new breakpoint inserts all
1306 breakpoints, we need to make sure the breakpoint being stepped over
1307 isn't inserted then. We do that by only clearing the step-over
1308 info when the step-over is actually finished (or aborted).
1309
1310 Presently GDB can only step over one breakpoint at any given time.
1311 Given threads that can't run code in the same address space as the
1312 breakpoint's can't really miss the breakpoint, GDB could be taught
1313 to step-over at most one breakpoint per address space (so this info
1314 could move to the address space object if/when GDB is extended).
1315 The set of breakpoints being stepped over will normally be much
1316 smaller than the set of all breakpoints, so a flag in the
1317 breakpoint location structure would be wasteful. A separate list
1318 also saves complexity and run-time, as otherwise we'd have to go
1319 through all breakpoint locations clearing their flag whenever we
1320 start a new sequence. Similar considerations weigh against storing
1321 this info in the thread object. Plus, not all step overs actually
1322 have breakpoint locations -- e.g., stepping past a single-step
1323 breakpoint, or stepping to complete a non-continuable
1324 watchpoint. */
1325static struct step_over_info step_over_info;
1326
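/* In other words, while a step-over is in flight the rest of GDB can
   ask two questions about it: stepping_past_instruction_at (ASPACE,
   ADDR) below says whether ADDR is the instruction being stepped
   over (so inserting a breakpoint there should be skipped), and
   stepping_past_nonsteppable_watchpoint () says whether watchpoint
   insertion should be skipped; both simply read the single
   STEP_OVER_INFO instance above.  */
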
1327/* Record the address of the breakpoint/instruction we're currently
ce0db137
DE
1328 stepping over.
1329 N.B. We record the aspace and address now, instead of say just the thread,
1330 because when we need the info later the thread may be running. */
31e77af2
PA
1331
1332static void
8b86c959 1333set_step_over_info (const address_space *aspace, CORE_ADDR address,
21edc42f
YQ
1334 int nonsteppable_watchpoint_p,
1335 int thread)
31e77af2
PA
1336{
1337 step_over_info.aspace = aspace;
1338 step_over_info.address = address;
963f9c80 1339 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
21edc42f 1340 step_over_info.thread = thread;
31e77af2
PA
1341}
1342
1343/* Called when we're not longer stepping over a breakpoint / an
1344 instruction, so all breakpoints are free to be (re)inserted. */
1345
1346static void
1347clear_step_over_info (void)
1348{
edbcda09 1349 infrun_log_debug ("clearing step over info");
31e77af2
PA
1350 step_over_info.aspace = NULL;
1351 step_over_info.address = 0;
963f9c80 1352 step_over_info.nonsteppable_watchpoint_p = 0;
21edc42f 1353 step_over_info.thread = -1;
31e77af2
PA
1354}
1355
7f89fd65 1356/* See infrun.h. */
31e77af2
PA
1357
1358int
1359stepping_past_instruction_at (struct address_space *aspace,
1360 CORE_ADDR address)
1361{
1362 return (step_over_info.aspace != NULL
1363 && breakpoint_address_match (aspace, address,
1364 step_over_info.aspace,
1365 step_over_info.address));
1366}
1367
963f9c80
PA
1368/* See infrun.h. */
1369
21edc42f
YQ
1370int
1371thread_is_stepping_over_breakpoint (int thread)
1372{
1373 return (step_over_info.thread != -1
1374 && thread == step_over_info.thread);
1375}
1376
1377/* See infrun.h. */
1378
963f9c80
PA
1379int
1380stepping_past_nonsteppable_watchpoint (void)
1381{
1382 return step_over_info.nonsteppable_watchpoint_p;
1383}
1384
6cc83d2a
PA
1385/* Returns true if step-over info is valid. */
1386
1387static int
1388step_over_info_valid_p (void)
1389{
963f9c80
PA
1390 return (step_over_info.aspace != NULL
1391 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1392}
1393
c906108c 1394\f
237fc4c9
PA
1395/* Displaced stepping. */
1396
1397/* In non-stop debugging mode, we must take special care to manage
1398 breakpoints properly; in particular, the traditional strategy for
1399 stepping a thread past a breakpoint it has hit is unsuitable.
1400 'Displaced stepping' is a tactic for stepping one thread past a
1401 breakpoint it has hit while ensuring that other threads running
1402 concurrently will hit the breakpoint as they should.
1403
1404 The traditional way to step a thread T off a breakpoint in a
1405 multi-threaded program in all-stop mode is as follows:
1406
1407 a0) Initially, all threads are stopped, and breakpoints are not
1408 inserted.
1409 a1) We single-step T, leaving breakpoints uninserted.
1410 a2) We insert breakpoints, and resume all threads.
1411
1412 In non-stop debugging, however, this strategy is unsuitable: we
1413 don't want to have to stop all threads in the system in order to
1414 continue or step T past a breakpoint. Instead, we use displaced
1415 stepping:
1416
1417 n0) Initially, T is stopped, other threads are running, and
1418 breakpoints are inserted.
1419 n1) We copy the instruction "under" the breakpoint to a separate
1420 location, outside the main code stream, making any adjustments
1421 to the instruction, register, and memory state as directed by
1422 T's architecture.
1423 n2) We single-step T over the instruction at its new location.
1424 n3) We adjust the resulting register and memory state as directed
1425 by T's architecture. This includes resetting T's PC to point
1426 back into the main instruction stream.
1427 n4) We resume T.
1428
1429 This approach depends on the following gdbarch methods:
1430
1431 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1432 indicate where to copy the instruction, and how much space must
1433 be reserved there. We use these in step n1.
1434
1435 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1436 address, and makes any necessary adjustments to the instruction,
1437 register contents, and memory. We use this in step n1.
1438
1439 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1440 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1441 same effect the instruction would have had if we had executed it
1442 at its original address. We use this in step n3.
1443
237fc4c9
PA
1444 The gdbarch_displaced_step_copy_insn and
1445 gdbarch_displaced_step_fixup functions must be written so that
1446 copying an instruction with gdbarch_displaced_step_copy_insn,
1447 single-stepping across the copied instruction, and then applying
1448 gdbarch_displaced_insn_fixup should have the same effects on the
1449 thread's memory and registers as stepping the instruction in place
1450 would have. Exactly which responsibilities fall to the copy and
1451 which fall to the fixup is up to the author of those functions.
1452
1453 See the comments in gdbarch.sh for details.
1454
1455 Note that displaced stepping and software single-step cannot
1456 currently be used in combination, although with some care I think
1457 they could be made to. Software single-step works by placing
1458 breakpoints on all possible subsequent instructions; if the
1459 displaced instruction is a PC-relative jump, those breakpoints
1460 could fall in very strange places --- on pages that aren't
1461 executable, or at addresses that are not proper instruction
1462 boundaries. (We do generally let other threads run while we wait
1463 to hit the software single-step breakpoint, and they might
1464 encounter such a corrupted instruction.) One way to work around
1465 this would be to have gdbarch_displaced_step_copy_insn fully
1466 simulate the effect of PC-relative instructions (and return NULL)
1467 on architectures that use software single-stepping.
1468
1469 In non-stop mode, we can have independent and simultaneous step
1470 requests, so more than one thread may need to simultaneously step
1471 over a breakpoint. The current implementation assumes there is
1472 only one scratch space per process. In this case, we have to
1473 serialize access to the scratch space. If thread A wants to step
1474 over a breakpoint, but we are currently waiting for some other
1475 thread to complete a displaced step, we leave thread A stopped and
1476 place it in the displaced_step_request_queue. Whenever a displaced
1477 step finishes, we pick the next thread in the queue and start a new
1478 displaced step operation on it. See displaced_step_prepare and
1479 displaced_step_fixup for details. */
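/* Illustrative sketch only (not part of infrun.c): the n1-n4 sequence
   above, modeled with plain types instead of the real gdbarch hooks.
   The names "sketch_regs", "sketch_copy_insn" and "sketch_fixup" are
   hypothetical stand-ins for the thread's register state,
   gdbarch_displaced_step_copy_insn and gdbarch_displaced_step_fixup.
   For a simple, non-PC-relative instruction the fixup only has to move
   the PC from the scratch pad back into the mainline code stream.  */

#include <cstdint>
#include <cstring>

struct sketch_regs { std::uint64_t pc; };

/* n1: copy INSN_LEN instruction bytes from ORIGINAL to COPY.  Both are
   pointers into a pretend address space here; a real implementation
   reads and writes target memory and may rewrite the bytes.  */
static void
sketch_copy_insn (std::uint8_t *copy, const std::uint8_t *original,
		  std::size_t insn_len)
{
  std::memcpy (copy, original, insn_len);
}

/* n3: after single-stepping the copy, rebase the PC as if the
   instruction had executed at its original address.  */
static void
sketch_fixup (sketch_regs *regs, std::uint64_t original_pc,
	      std::uint64_t copy_pc)
{
  regs->pc = original_pc + (regs->pc - copy_pc);
}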
1480
cfba9872
SM
1481/* Default destructor for displaced_step_closure. */
1482
1483displaced_step_closure::~displaced_step_closure () = default;
1484
fc1cf338
PA
1485/* Get the displaced stepping state of inferior INF. */
1486
39a36629 1487static displaced_step_inferior_state *
00431a78 1488get_displaced_stepping_state (inferior *inf)
fc1cf338 1489{
d20172fc 1490 return &inf->displaced_step_state;
fc1cf338
PA
1491}
1492
372316f1
PA
1493/* Returns true if any inferior has a thread doing a displaced
1494 step. */
1495
39a36629
SM
1496static bool
1497displaced_step_in_progress_any_inferior ()
372316f1 1498{
d20172fc 1499 for (inferior *i : all_inferiors ())
39a36629 1500 {
d20172fc 1501 if (i->displaced_step_state.step_thread != nullptr)
39a36629
SM
1502 return true;
1503 }
372316f1 1504
39a36629 1505 return false;
372316f1
PA
1506}
1507
c0987663
YQ
1508/* Return true if THREAD is doing a displaced
1509 step. */
1510
1511static int
00431a78 1512displaced_step_in_progress_thread (thread_info *thread)
c0987663 1513{
00431a78 1514 gdb_assert (thread != NULL);
c0987663 1515
d20172fc 1516 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
c0987663
YQ
1517}
1518
8f572e5c
PA
1519/* Return true if inferior INF has a thread doing a displaced step. */
1520
1521static int
00431a78 1522displaced_step_in_progress (inferior *inf)
8f572e5c 1523{
d20172fc 1524 return get_displaced_stepping_state (inf)->step_thread != nullptr;
fc1cf338
PA
1525}
1526
a42244db
YQ
1527/* If the current inferior is in displaced stepping, and ADDR equals the
1528 starting address of the copy area, return the corresponding
1529 displaced_step_closure. Otherwise, return NULL. */
1530
1531struct displaced_step_closure*
1532get_displaced_step_closure_by_addr (CORE_ADDR addr)
1533{
d20172fc 1534 displaced_step_inferior_state *displaced
00431a78 1535 = get_displaced_stepping_state (current_inferior ());
a42244db
YQ
1536
1537 /* Is a displaced step in progress with its copy area at ADDR? */
d20172fc 1538 if (displaced->step_thread != nullptr
00431a78 1539 && displaced->step_copy == addr)
d8d83535 1540 return displaced->step_closure.get ();
a42244db
YQ
1541
1542 return NULL;
1543}
1544
fc1cf338
PA
1545static void
1546infrun_inferior_exit (struct inferior *inf)
1547{
d20172fc 1548 inf->displaced_step_state.reset ();
fc1cf338 1549}
237fc4c9 1550
fff08868
HZ
1551/* If ON, and the architecture supports it, GDB will use displaced
1552 stepping to step over breakpoints. If OFF, or if the architecture
1553 doesn't support it, GDB will instead use the traditional
1554 hold-and-step approach. If AUTO (which is the default), GDB will
1555 decide which technique to use to step over breakpoints depending on
9822cb57 1556 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1557
72d0e2c5 1558static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1559
237fc4c9
PA
1560static void
1561show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1562 struct cmd_list_element *c,
1563 const char *value)
1564{
72d0e2c5 1565 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1566 fprintf_filtered (file,
1567 _("Debugger's willingness to use displaced stepping "
1568 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1569 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1570 else
3e43a32a
MS
1571 fprintf_filtered (file,
1572 _("Debugger's willingness to use displaced stepping "
1573 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1574}
1575
9822cb57
SM
1576/* Return true if the gdbarch implements the required methods to use
1577 displaced stepping. */
1578
1579static bool
1580gdbarch_supports_displaced_stepping (gdbarch *arch)
1581{
1582 /* Only check for the presence of step_copy_insn. Other required methods
1583 are checked by the gdbarch validation. */
1584 return gdbarch_displaced_step_copy_insn_p (arch);
1585}
1586
fff08868 1587/* Return true if displaced stepping can/should be used to step
3fc8eb30 1588 over breakpoints of thread TP. */
fff08868 1589
9822cb57
SM
1590static bool
1591use_displaced_stepping (thread_info *tp)
237fc4c9 1592{
9822cb57
SM
1593 /* If the user disabled it explicitly, don't use displaced stepping. */
1594 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1595 return false;
1596
1597 /* If "auto", only use displaced stepping if the target operates in a non-stop
1598 way. */
1599 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1600 && !target_is_non_stop_p ())
1601 return false;
1602
1603 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1604
1605 /* If the architecture doesn't implement displaced stepping, don't use
1606 it. */
1607 if (!gdbarch_supports_displaced_stepping (gdbarch))
1608 return false;
1609
1610 /* If recording, don't use displaced stepping. */
1611 if (find_record_target () != nullptr)
1612 return false;
1613
d20172fc
SM
1614 displaced_step_inferior_state *displaced_state
1615 = get_displaced_stepping_state (tp->inf);
3fc8eb30 1616
9822cb57
SM
1617 /* If displaced stepping failed before for this inferior, don't bother trying
1618 again. */
1619 if (displaced_state->failed_before)
1620 return false;
1621
1622 return true;
237fc4c9
PA
1623}
1624
d8d83535
SM
1625/* Simple function wrapper around displaced_step_inferior_state::reset. */
1626
237fc4c9 1627static void
d8d83535 1628displaced_step_reset (displaced_step_inferior_state *displaced)
237fc4c9 1629{
d8d83535 1630 displaced->reset ();
237fc4c9
PA
1631}
1632
d8d83535
SM
1633/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1634 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1635
1636using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9
PA
1637
1638/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1639void
1640displaced_step_dump_bytes (struct ui_file *file,
1641 const gdb_byte *buf,
1642 size_t len)
1643{
1644 size_t i;
1645
1646 for (i = 0; i < len; i++)
1647 fprintf_unfiltered (file, "%02x ", buf[i]);
1648 fputs_unfiltered ("\n", file);
1649}
1650
1651/* Prepare to single-step, using displaced stepping.
1652
1653 Note that we cannot use displaced stepping when we have a signal to
1654 deliver. If we have a signal to deliver and an instruction to step
1655 over, then after the step, there will be no indication from the
1656 target whether the thread entered a signal handler or ignored the
1657 signal and stepped over the instruction successfully --- both cases
1658 result in a simple SIGTRAP. In the first case we mustn't do a
1659 fixup, and in the second case we must --- but we can't tell which.
1660 Comments in the code for 'random signals' in handle_inferior_event
1661 explain how we handle this case instead.
1662
1663 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1664 stepped now; 0 if displaced stepping this thread got queued; or -1
1665 if this instruction can't be displaced stepped. */
1666
237fc4c9 1667static int
00431a78 1668displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1669{
00431a78 1670 regcache *regcache = get_thread_regcache (tp);
ac7936df 1671 struct gdbarch *gdbarch = regcache->arch ();
8b86c959 1672 const address_space *aspace = regcache->aspace ();
237fc4c9
PA
1673 CORE_ADDR original, copy;
1674 ULONGEST len;
9e529e1d 1675 int status;
237fc4c9
PA
1676
1677 /* We should never reach this function if the architecture does not
1678 support displaced stepping. */
9822cb57 1679 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1680
c2829269
PA
1681 /* Nor if the thread isn't meant to step over a breakpoint. */
1682 gdb_assert (tp->control.trap_expected);
1683
c1e36e3e
PA
1684 /* Disable range stepping while executing in the scratch pad. We
1685 want a single-step even if executing the displaced instruction in
1686 the scratch buffer lands within the stepping range (e.g., a
1687 jump/branch). */
1688 tp->control.may_range_step = 0;
1689
fc1cf338
PA
1690 /* We have to displaced step one thread at a time, as we only have
1691 access to a single scratch space per inferior. */
237fc4c9 1692
d20172fc
SM
1693 displaced_step_inferior_state *displaced
1694 = get_displaced_stepping_state (tp->inf);
fc1cf338 1695
00431a78 1696 if (displaced->step_thread != nullptr)
237fc4c9
PA
1697 {
1698 /* Already waiting for a displaced step to finish. Defer this
1699 request and place it in the queue. */
237fc4c9
PA
1700
1701 if (debug_displaced)
1702 fprintf_unfiltered (gdb_stdlog,
c2829269 1703 "displaced: deferring step of %s\n",
a068643d 1704 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1705
c2829269 1706 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1707 return 0;
1708 }
1709 else
1710 {
1711 if (debug_displaced)
1712 fprintf_unfiltered (gdb_stdlog,
1713 "displaced: stepping %s now\n",
a068643d 1714 target_pid_to_str (tp->ptid).c_str ());
237fc4c9
PA
1715 }
1716
d8d83535 1717 displaced_step_reset (displaced);
237fc4c9 1718
00431a78
PA
1719 scoped_restore_current_thread restore_thread;
1720
1721 switch_to_thread (tp);
ad53cd71 1722
515630c5 1723 original = regcache_read_pc (regcache);
237fc4c9
PA
1724
1725 copy = gdbarch_displaced_step_location (gdbarch);
1726 len = gdbarch_max_insn_length (gdbarch);
1727
d35ae833
PA
1728 if (breakpoint_in_range_p (aspace, copy, len))
1729 {
1730 /* There's a breakpoint set in the scratch pad location range
1731 (which is usually around the entry point). We'd either
1732 install it before resuming, which would overwrite/corrupt the
1733 scratch pad, or if it was already inserted, this displaced
1734 step would overwrite it. The latter is OK in the sense that
1735 we already assume that no thread is going to execute the code
1736 in the scratch pad range (after initial startup) anyway, but
1737 the former is unacceptable. Simply punt and fall back to
1738 stepping over this breakpoint in-line. */
1739 if (debug_displaced)
1740 {
1741 fprintf_unfiltered (gdb_stdlog,
1742 "displaced: breakpoint set in scratch pad. "
1743 "Stepping over breakpoint in-line instead.\n");
1744 }
1745
d35ae833
PA
1746 return -1;
1747 }
1748
237fc4c9 1749 /* Save the original contents of the copy area. */
d20172fc
SM
1750 displaced->step_saved_copy.resize (len);
1751 status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
9e529e1d
JK
1752 if (status != 0)
1753 throw_error (MEMORY_ERROR,
1754 _("Error accessing memory address %s (%s) for "
1755 "displaced-stepping scratch space."),
1756 paddress (gdbarch, copy), safe_strerror (status));
237fc4c9
PA
1757 if (debug_displaced)
1758 {
5af949e3
UW
1759 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1760 paddress (gdbarch, copy));
fc1cf338 1761 displaced_step_dump_bytes (gdb_stdlog,
d20172fc 1762 displaced->step_saved_copy.data (),
fc1cf338 1763 len);
237fc4c9
PA
1764 };
1765
e8217e61
SM
1766 displaced->step_closure
1767 = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);
1768 if (displaced->step_closure == NULL)
7f03bd92
PA
1769 {
1770 /* The architecture doesn't know how or want to displaced step
1771 this instruction or instruction sequence. Fallback to
1772 stepping over the breakpoint in-line. */
7f03bd92
PA
1773 return -1;
1774 }
237fc4c9 1775
9f5a595d
UW
1776 /* Save the information we need to fix things up if the step
1777 succeeds. */
00431a78 1778 displaced->step_thread = tp;
fc1cf338 1779 displaced->step_gdbarch = gdbarch;
fc1cf338
PA
1780 displaced->step_original = original;
1781 displaced->step_copy = copy;
9f5a595d 1782
9799571e 1783 {
d8d83535 1784 displaced_step_reset_cleanup cleanup (displaced);
237fc4c9 1785
9799571e
TT
1786 /* Resume execution at the copy. */
1787 regcache_write_pc (regcache, copy);
237fc4c9 1788
9799571e
TT
1789 cleanup.release ();
1790 }
ad53cd71 1791
237fc4c9 1792 if (debug_displaced)
5af949e3
UW
1793 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1794 paddress (gdbarch, copy));
237fc4c9 1795
237fc4c9
PA
1796 return 1;
1797}
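/* Illustrative only: how the three return values documented for
   displaced_step_prepare_throw map onto caller actions.  The enum and
   helper below are hypothetical, not gdb API; the real handling lives
   in resume_1 and start_step_over elsewhere in this file.  */

enum class sketch_step_over_action
{
  resume_in_scratch_pad,   /* prepared > 0: PC now points at the copy.  */
  leave_queued,            /* prepared == 0: another thread owns the
                              scratch pad; this one was enqueued.  */
  step_over_inline         /* prepared < 0: fall back to in-line.  */
};

static sketch_step_over_action
sketch_classify_prepare_result (int prepared)
{
  if (prepared > 0)
    return sketch_step_over_action::resume_in_scratch_pad;
  else if (prepared == 0)
    return sketch_step_over_action::leave_queued;
  else
    return sketch_step_over_action::step_over_inline;
}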
1798
3fc8eb30
PA
1799/* Wrapper for displaced_step_prepare_throw that disables further
1800 attempts at displaced stepping if we get a memory error. */
1801
1802static int
00431a78 1803displaced_step_prepare (thread_info *thread)
3fc8eb30
PA
1804{
1805 int prepared = -1;
1806
a70b8144 1807 try
3fc8eb30 1808 {
00431a78 1809 prepared = displaced_step_prepare_throw (thread);
3fc8eb30 1810 }
230d2906 1811 catch (const gdb_exception_error &ex)
3fc8eb30
PA
1812 {
1813 struct displaced_step_inferior_state *displaced_state;
1814
16b41842
PA
1815 if (ex.error != MEMORY_ERROR
1816 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1817 throw;
3fc8eb30 1818
edbcda09
SM
1819 infrun_log_debug ("caught exception, disabling displaced stepping: %s",
1820 ex.what ());
3fc8eb30
PA
1821
1822 /* Be verbose if "set displaced-stepping" is "on", silent if
1823 "auto". */
1824 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1825 {
fd7dcb94 1826 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1827 ex.what ());
3fc8eb30
PA
1828 }
1829
1830 /* Disable further displaced stepping attempts. */
1831 displaced_state
00431a78 1832 = get_displaced_stepping_state (thread->inf);
3fc8eb30
PA
1833 displaced_state->failed_before = 1;
1834 }
3fc8eb30
PA
1835
1836 return prepared;
1837}
1838
237fc4c9 1839static void
3e43a32a
MS
1840write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1841 const gdb_byte *myaddr, int len)
237fc4c9 1842{
2989a365 1843 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
abbb1732 1844
237fc4c9
PA
1845 inferior_ptid = ptid;
1846 write_memory (memaddr, myaddr, len);
237fc4c9
PA
1847}
1848
e2d96639
YQ
1849/* Restore the contents of the copy area for thread PTID. */
1850
1851static void
1852displaced_step_restore (struct displaced_step_inferior_state *displaced,
1853 ptid_t ptid)
1854{
1855 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1856
1857 write_memory_ptid (ptid, displaced->step_copy,
d20172fc 1858 displaced->step_saved_copy.data (), len);
e2d96639
YQ
1859 if (debug_displaced)
1860 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
a068643d 1861 target_pid_to_str (ptid).c_str (),
e2d96639
YQ
1862 paddress (displaced->step_gdbarch,
1863 displaced->step_copy));
1864}
1865
372316f1
PA
1866/* If we displaced stepped an instruction successfully, adjust
1867 registers and memory to yield the same effect the instruction would
1868 have had if we had executed it at its original address, and return
1869 1. If the instruction didn't complete, relocate the PC and return
1870 -1. If the thread wasn't displaced stepping, return 0. */
1871
1872static int
00431a78 1873displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1874{
fc1cf338 1875 struct displaced_step_inferior_state *displaced
00431a78 1876 = get_displaced_stepping_state (event_thread->inf);
372316f1 1877 int ret;
fc1cf338 1878
00431a78
PA
1879 /* Was this event for the thread we displaced? */
1880 if (displaced->step_thread != event_thread)
372316f1 1881 return 0;
237fc4c9 1882
cb71640d
PA
1883 /* Fixup may need to read memory/registers. Switch to the thread
1884 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d
TBA
1885 the current thread, and displaced_step_restore performs ptid-dependent
1886 memory accesses using current_inferior() and current_top_target(). */
00431a78 1887 switch_to_thread (event_thread);
cb71640d 1888
d43b7a2d
TBA
1889 displaced_step_reset_cleanup cleanup (displaced);
1890
1891 displaced_step_restore (displaced, displaced->step_thread->ptid);
1892
237fc4c9 1893 /* Did the instruction complete successfully? */
cb71640d
PA
1894 if (signal == GDB_SIGNAL_TRAP
1895 && !(target_stopped_by_watchpoint ()
1896 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1897 || target_have_steppable_watchpoint)))
237fc4c9
PA
1898 {
1899 /* Fix up the resulting state. */
fc1cf338 1900 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
d8d83535 1901 displaced->step_closure.get (),
fc1cf338
PA
1902 displaced->step_original,
1903 displaced->step_copy,
00431a78 1904 get_thread_regcache (displaced->step_thread));
372316f1 1905 ret = 1;
237fc4c9
PA
1906 }
1907 else
1908 {
1909 /* Since the instruction didn't complete, all we can do is
1910 relocate the PC. */
00431a78 1911 struct regcache *regcache = get_thread_regcache (event_thread);
515630c5 1912 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 1913
fc1cf338 1914 pc = displaced->step_original + (pc - displaced->step_copy);
515630c5 1915 regcache_write_pc (regcache, pc);
372316f1 1916 ret = -1;
237fc4c9
PA
1917 }
1918
372316f1 1919 return ret;
c2829269 1920}
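/* Worked example for the "instruction didn't complete" branch above,
   with hypothetical addresses: say the original instruction is at
   0x400100 and the scratch-pad copy at 0x601000.  If a signal stopped
   the thread before the copied instruction executed, its PC is still
   0x601000, and the relocation gives 0x400100 + (0x601000 - 0x601000)
   = 0x400100, i.e. the thread ends up exactly where it would have been
   had it never been diverted to the scratch pad.  The helpers below
   are a standalone sketch, not gdb API.  */

#include <cassert>
#include <cstdint>

static std::uint64_t
sketch_relocate_pc (std::uint64_t pc, std::uint64_t original,
		    std::uint64_t copy)
{
  return original + (pc - copy);
}

static void
sketch_relocate_pc_example ()
{
  assert (sketch_relocate_pc (0x601000, 0x400100, 0x601000) == 0x400100);
}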
1c5cfe86 1921
4d9d9d04
PA
1922/* Data to be passed around while handling an event. This data is
1923 discarded between events. */
1924struct execution_control_state
1925{
5b6d1e4f 1926 process_stratum_target *target;
4d9d9d04
PA
1927 ptid_t ptid;
1928 /* The thread that got the event, if this was a thread event; NULL
1929 otherwise. */
1930 struct thread_info *event_thread;
1931
1932 struct target_waitstatus ws;
1933 int stop_func_filled_in;
1934 CORE_ADDR stop_func_start;
1935 CORE_ADDR stop_func_end;
1936 const char *stop_func_name;
1937 int wait_some_more;
1938
1939 /* True if the event thread hit the single-step breakpoint of
1940 another thread. Thus the event doesn't cause a stop, the thread
1941 needs to be single-stepped past the single-step breakpoint before
1942 we can switch back to the original stepping thread. */
1943 int hit_singlestep_breakpoint;
1944};
1945
1946/* Clear ECS and set it to point at TP. */
c2829269
PA
1947
1948static void
4d9d9d04
PA
1949reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1950{
1951 memset (ecs, 0, sizeof (*ecs));
1952 ecs->event_thread = tp;
1953 ecs->ptid = tp->ptid;
1954}
1955
1956static void keep_going_pass_signal (struct execution_control_state *ecs);
1957static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 1958static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1959static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1960
1961/* Are there any pending step-over requests? If so, run all we can
1962 now and return true. Otherwise, return false. */
1963
1964static int
c2829269
PA
1965start_step_over (void)
1966{
1967 struct thread_info *tp, *next;
1968
372316f1
PA
1969 /* Don't start a new step-over if we already have an in-line
1970 step-over operation ongoing. */
1971 if (step_over_info_valid_p ())
1972 return 0;
1973
c2829269 1974 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 1975 {
4d9d9d04
PA
1976 struct execution_control_state ecss;
1977 struct execution_control_state *ecs = &ecss;
8d297bbf 1978 step_over_what step_what;
372316f1 1979 int must_be_in_line;
c2829269 1980
c65d6b55
PA
1981 gdb_assert (!tp->stop_requested);
1982
c2829269 1983 next = thread_step_over_chain_next (tp);
237fc4c9 1984
c2829269
PA
1985 /* If this inferior already has a displaced step in progress,
1986 don't start a new one. */
00431a78 1987 if (displaced_step_in_progress (tp->inf))
c2829269
PA
1988 continue;
1989
372316f1
PA
1990 step_what = thread_still_needs_step_over (tp);
1991 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1992 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1993 && !use_displaced_stepping (tp)));
372316f1
PA
1994
1995 /* We currently stop all threads of all processes to step over
1996 in-line. If we need to start a new in-line step-over, let
1997 any pending displaced steps finish first. */
1998 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
1999 return 0;
2000
c2829269
PA
2001 thread_step_over_chain_remove (tp);
2002
2003 if (step_over_queue_head == NULL)
edbcda09 2004 infrun_log_debug ("step-over queue now empty");
c2829269 2005
372316f1
PA
2006 if (tp->control.trap_expected
2007 || tp->resumed
2008 || tp->executing)
ad53cd71 2009 {
4d9d9d04
PA
2010 internal_error (__FILE__, __LINE__,
2011 "[%s] has inconsistent state: "
372316f1 2012 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 2013 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 2014 tp->control.trap_expected,
372316f1 2015 tp->resumed,
4d9d9d04 2016 tp->executing);
ad53cd71 2017 }
1c5cfe86 2018
edbcda09
SM
2019 infrun_log_debug ("resuming [%s] for step-over",
2020 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
2021
2022 /* keep_going_pass_signal skips the step-over if the breakpoint
2023 is no longer inserted. In all-stop, we want to keep looking
2024 for a thread that needs a step-over instead of resuming TP,
2025 because we wouldn't be able to resume anything else until the
2026 target stops again. In non-stop, the resume always resumes
2027 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2028 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2029 continue;
8550d3b3 2030
00431a78 2031 switch_to_thread (tp);
4d9d9d04
PA
2032 reset_ecs (ecs, tp);
2033 keep_going_pass_signal (ecs);
1c5cfe86 2034
4d9d9d04
PA
2035 if (!ecs->wait_some_more)
2036 error (_("Command aborted."));
1c5cfe86 2037
372316f1
PA
2038 gdb_assert (tp->resumed);
2039
2040 /* If we started a new in-line step-over, we're done. */
2041 if (step_over_info_valid_p ())
2042 {
2043 gdb_assert (tp->control.trap_expected);
2044 return 1;
2045 }
2046
fbea99ea 2047 if (!target_is_non_stop_p ())
4d9d9d04
PA
2048 {
2049 /* On all-stop, shouldn't have resumed unless we needed a
2050 step over. */
2051 gdb_assert (tp->control.trap_expected
2052 || tp->step_after_step_resume_breakpoint);
2053
2054 /* With remote targets (at least), in all-stop, we can't
2055 issue any further remote commands until the program stops
2056 again. */
2057 return 1;
1c5cfe86 2058 }
c2829269 2059
4d9d9d04
PA
2060 /* Either the thread no longer needed a step-over, or a new
2061 displaced stepping sequence started. Even in the latter
2062 case, continue looking. Maybe we can also start another
2063 displaced step on a thread of other process. */
237fc4c9 2064 }
4d9d9d04
PA
2065
2066 return 0;
237fc4c9
PA
2067}
2068
5231c1fd
PA
2069/* Update global variables holding ptids to hold NEW_PTID if they were
2070 holding OLD_PTID. */
2071static void
2072infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2073{
d7e15655 2074 if (inferior_ptid == old_ptid)
5231c1fd 2075 inferior_ptid = new_ptid;
5231c1fd
PA
2076}
2077
237fc4c9 2078\f
c906108c 2079
53904c9e
AC
2080static const char schedlock_off[] = "off";
2081static const char schedlock_on[] = "on";
2082static const char schedlock_step[] = "step";
f2665db5 2083static const char schedlock_replay[] = "replay";
40478521 2084static const char *const scheduler_enums[] = {
ef346e04
AC
2085 schedlock_off,
2086 schedlock_on,
2087 schedlock_step,
f2665db5 2088 schedlock_replay,
ef346e04
AC
2089 NULL
2090};
f2665db5 2091static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2092static void
2093show_scheduler_mode (struct ui_file *file, int from_tty,
2094 struct cmd_list_element *c, const char *value)
2095{
3e43a32a
MS
2096 fprintf_filtered (file,
2097 _("Mode for locking scheduler "
2098 "during execution is \"%s\".\n"),
920d2a44
AC
2099 value);
2100}
c906108c
SS
2101
2102static void
eb4c3f4a 2103set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2104{
eefe576e
AC
2105 if (!target_can_lock_scheduler)
2106 {
2107 scheduler_mode = schedlock_off;
2108 error (_("Target '%s' cannot support this command."), target_shortname);
2109 }
c906108c
SS
2110}
2111
d4db2f36
PA
2112/* True if execution commands resume all threads of all processes by
2113 default; otherwise, resume only threads of the current inferior
2114 process. */
491144b5 2115bool sched_multi = false;
d4db2f36 2116
2facfe5c
DD
2117/* Try to set up for software single stepping over the specified location.
2118 Return 1 if target_resume() should use hardware single step.
2119
2120 GDBARCH is the current gdbarch.
2121 PC is the location to step over. */
2122
2123static int
2124maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2125{
2126 int hw_step = 1;
2127
f02253f1 2128 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2129 && gdbarch_software_single_step_p (gdbarch))
2130 hw_step = !insert_single_step_breakpoints (gdbarch);
2131
2facfe5c
DD
2132 return hw_step;
2133}
c906108c 2134
f3263aa4
PA
2135/* See infrun.h. */
2136
09cee04b
PA
2137ptid_t
2138user_visible_resume_ptid (int step)
2139{
f3263aa4 2140 ptid_t resume_ptid;
09cee04b 2141
09cee04b
PA
2142 if (non_stop)
2143 {
2144 /* With non-stop mode on, threads are always handled
2145 individually. */
2146 resume_ptid = inferior_ptid;
2147 }
2148 else if ((scheduler_mode == schedlock_on)
03d46957 2149 || (scheduler_mode == schedlock_step && step))
09cee04b 2150 {
f3263aa4
PA
2151 /* User-settable 'scheduler' mode requires solo thread
2152 resume. */
09cee04b
PA
2153 resume_ptid = inferior_ptid;
2154 }
f2665db5
MM
2155 else if ((scheduler_mode == schedlock_replay)
2156 && target_record_will_replay (minus_one_ptid, execution_direction))
2157 {
2158 /* User-settable 'scheduler' mode requires solo thread resume in replay
2159 mode. */
2160 resume_ptid = inferior_ptid;
2161 }
f3263aa4
PA
2162 else if (!sched_multi && target_supports_multi_process ())
2163 {
2164 /* Resume all threads of the current process (and none of other
2165 processes). */
e99b03dc 2166 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2167 }
2168 else
2169 {
2170 /* Resume all threads of all processes. */
2171 resume_ptid = RESUME_ALL;
2172 }
09cee04b
PA
2173
2174 return resume_ptid;
2175}
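/* Rough decision sketch for the function above, using a plain struct in
   place of gdb's globals (non_stop, scheduler_mode, sched_multi) and of
   target_record_will_replay / target_supports_multi_process.  All names
   below are hypothetical; they only mirror the branches taken by
   user_visible_resume_ptid.  */

struct sketch_resume_config
{
  bool non_stop;          /* "set non-stop on".  */
  bool schedlock_solo;    /* schedlock on, or step/replay and active.  */
  bool sched_multi;       /* "set schedule-multiple on".  */
  bool multi_process_ok;  /* target_supports_multi_process ().  */
};

enum class sketch_resume_scope
{
  current_thread_only,
  current_process,
  all_processes
};

static sketch_resume_scope
sketch_resume_scope_for (const sketch_resume_config &cfg)
{
  if (cfg.non_stop || cfg.schedlock_solo)
    return sketch_resume_scope::current_thread_only;
  if (!cfg.sched_multi && cfg.multi_process_ok)
    return sketch_resume_scope::current_process;
  return sketch_resume_scope::all_processes;
}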
2176
5b6d1e4f
PA
2177/* See infrun.h. */
2178
2179process_stratum_target *
2180user_visible_resume_target (ptid_t resume_ptid)
2181{
2182 return (resume_ptid == minus_one_ptid && sched_multi
2183 ? NULL
2184 : current_inferior ()->process_target ());
2185}
2186
fbea99ea
PA
2187/* Return a ptid representing the set of threads that we will resume,
2188 from the perspective of the target, assuming run control handling
2189 does not require leaving some threads stopped (e.g., stepping past
2190 a breakpoint). USER_STEP indicates whether we're about to start the
2191 target for a stepping command. */
2192
2193static ptid_t
2194internal_resume_ptid (int user_step)
2195{
2196 /* In non-stop, we always control threads individually. Note that
2197 the target may always work in non-stop mode even with "set
2198 non-stop off", in which case user_visible_resume_ptid could
2199 return a wildcard ptid. */
2200 if (target_is_non_stop_p ())
2201 return inferior_ptid;
2202 else
2203 return user_visible_resume_ptid (user_step);
2204}
2205
64ce06e4
PA
2206/* Wrapper for target_resume, that handles infrun-specific
2207 bookkeeping. */
2208
2209static void
2210do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2211{
2212 struct thread_info *tp = inferior_thread ();
2213
c65d6b55
PA
2214 gdb_assert (!tp->stop_requested);
2215
64ce06e4 2216 /* Install inferior's terminal modes. */
223ffa71 2217 target_terminal::inferior ();
64ce06e4
PA
2218
2219 /* Avoid confusing the next resume, if the next stop/resume
2220 happens to apply to another thread. */
2221 tp->suspend.stop_signal = GDB_SIGNAL_0;
2222
8f572e5c
PA
2223 /* Advise target which signals may be handled silently.
2224
2225 If we have removed breakpoints because we are stepping over one
2226 in-line (in any thread), we need to receive all signals to avoid
2227 accidentally skipping a breakpoint during execution of a signal
2228 handler.
2229
2230 Likewise if we're displaced stepping, otherwise a trap for a
2231 breakpoint in a signal handler might be confused with the
2232 displaced step finishing. We don't make the displaced_step_fixup
2233 step distinguish the cases instead, because:
2234
2235 - a backtrace while stopped in the signal handler would show the
2236 scratch pad as frame older than the signal handler, instead of
2237 the real mainline code.
2238
2239 - when the thread is later resumed, the signal handler would
2240 return to the scratch pad area, which would no longer be
2241 valid. */
2242 if (step_over_info_valid_p ()
00431a78 2243 || displaced_step_in_progress (tp->inf))
adc6a863 2244 target_pass_signals ({});
64ce06e4 2245 else
adc6a863 2246 target_pass_signals (signal_pass);
64ce06e4
PA
2247
2248 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2249
2250 target_commit_resume ();
5b6d1e4f
PA
2251
2252 if (target_can_async_p ())
2253 target_async (1);
64ce06e4
PA
2254}
2255
d930703d 2256/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2257 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2258 call 'resume', which handles exceptions. */
c906108c 2259
71d378ae
PA
2260static void
2261resume_1 (enum gdb_signal sig)
c906108c 2262{
515630c5 2263 struct regcache *regcache = get_current_regcache ();
ac7936df 2264 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2265 struct thread_info *tp = inferior_thread ();
8b86c959 2266 const address_space *aspace = regcache->aspace ();
b0f16a3e 2267 ptid_t resume_ptid;
856e7dd6
PA
2268 /* This represents the user's step vs continue request. When
2269 deciding whether "set scheduler-locking step" applies, it's the
2270 user's intention that counts. */
2271 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2272 /* This represents what we'll actually request the target to do.
2273 This can decay from a step to a continue, if e.g., we need to
2274 implement single-stepping with breakpoints (software
2275 single-step). */
6b403daa 2276 int step;
c7e8a53c 2277
c65d6b55 2278 gdb_assert (!tp->stop_requested);
c2829269
PA
2279 gdb_assert (!thread_is_in_step_over_chain (tp));
2280
372316f1
PA
2281 if (tp->suspend.waitstatus_pending_p)
2282 {
edbcda09
SM
2283 infrun_log_debug
2284 ("thread %s has pending wait "
2285 "status %s (currently_stepping=%d).",
2286 target_pid_to_str (tp->ptid).c_str (),
2287 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2288 currently_stepping (tp));
372316f1 2289
5b6d1e4f 2290 tp->inf->process_target ()->threads_executing = true;
719546c4 2291 tp->resumed = true;
372316f1
PA
2292
2293 /* FIXME: What should we do if we are supposed to resume this
2294 thread with a signal? Maybe we should maintain a queue of
2295 pending signals to deliver. */
2296 if (sig != GDB_SIGNAL_0)
2297 {
fd7dcb94 2298 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2299 gdb_signal_to_name (sig),
2300 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2301 }
2302
2303 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2304
2305 if (target_can_async_p ())
9516f85a
AB
2306 {
2307 target_async (1);
2308 /* Tell the event loop we have an event to process. */
2309 mark_async_event_handler (infrun_async_inferior_event_token);
2310 }
372316f1
PA
2311 return;
2312 }
2313
2314 tp->stepped_breakpoint = 0;
2315
6b403daa
PA
2316 /* Depends on stepped_breakpoint. */
2317 step = currently_stepping (tp);
2318
74609e71
YQ
2319 if (current_inferior ()->waiting_for_vfork_done)
2320 {
48f9886d
PA
2321 /* Don't try to single-step a vfork parent that is waiting for
2322 the child to get out of the shared memory region (by exec'ing
2323 or exiting). This is particularly important on software
2324 single-step archs, as the child process would trip on the
2325 software single step breakpoint inserted for the parent
2326 process. Since the parent will not actually execute any
2327 instruction until the child is out of the shared region (such
2328 are vfork's semantics), it is safe to simply continue it.
2329 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2330 the parent, and tell it to `keep_going', which automatically
2331 re-sets it stepping. */
edbcda09 2332 infrun_log_debug ("resume: clear step");
a09dd441 2333 step = 0;
74609e71
YQ
2334 }
2335
7ca9b62a
TBA
2336 CORE_ADDR pc = regcache_read_pc (regcache);
2337
edbcda09
SM
2338 infrun_log_debug ("step=%d, signal=%s, trap_expected=%d, "
2339 "current thread [%s] at %s",
2340 step, gdb_signal_to_symbol_string (sig),
2341 tp->control.trap_expected,
2342 target_pid_to_str (inferior_ptid).c_str (),
2343 paddress (gdbarch, pc));
c906108c 2344
c2c6d25f
JM
2345 /* Normally, by the time we reach `resume', the breakpoints are either
2346 removed or inserted, as appropriate. The exception is if we're sitting
2347 at a permanent breakpoint; we need to step over it, but permanent
2348 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2349 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2350 {
af48d08f
PA
2351 if (sig != GDB_SIGNAL_0)
2352 {
2353 /* We have a signal to pass to the inferior. The resume
2354 may, or may not take us to the signal handler. If this
2355 is a step, we'll need to stop in the signal handler, if
2356 there's one, (if the target supports stepping into
2357 handlers), or in the next mainline instruction, if
2358 there's no handler. If this is a continue, we need to be
2359 sure to run the handler with all breakpoints inserted.
2360 In all cases, set a breakpoint at the current address
2361 (where the handler returns to), and once that breakpoint
2362 is hit, resume skipping the permanent breakpoint. If
2363 that breakpoint isn't hit, then we've stepped into the
2364 signal handler (or hit some other event). We'll delete
2365 the step-resume breakpoint then. */
2366
edbcda09
SM
2367 infrun_log_debug ("resume: skipping permanent breakpoint, "
2368 "deliver signal first");
af48d08f
PA
2369
2370 clear_step_over_info ();
2371 tp->control.trap_expected = 0;
2372
2373 if (tp->control.step_resume_breakpoint == NULL)
2374 {
2375 /* Set a "high-priority" step-resume, as we don't want
2376 user breakpoints at PC to trigger (again) when this
2377 hits. */
2378 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2379 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2380
2381 tp->step_after_step_resume_breakpoint = step;
2382 }
2383
2384 insert_breakpoints ();
2385 }
2386 else
2387 {
2388 /* There's no signal to pass, we can go ahead and skip the
2389 permanent breakpoint manually. */
edbcda09 2390 infrun_log_debug ("skipping permanent breakpoint");
af48d08f
PA
2391 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2392 /* Update pc to reflect the new address from which we will
2393 execute instructions. */
2394 pc = regcache_read_pc (regcache);
2395
2396 if (step)
2397 {
2398 /* We've already advanced the PC, so the stepping part
2399 is done. Now we need to arrange for a trap to be
2400 reported to handle_inferior_event. Set a breakpoint
2401 at the current PC, and run to it. Don't update
2402 prev_pc, because if we end in
44a1ee51
PA
2403 switch_back_to_stepped_thread, we want the "expected
2404 thread advanced also" branch to be taken. IOW, we
2405 don't want this thread to step further from PC
af48d08f 2406 (overstep). */
1ac806b8 2407 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2408 insert_single_step_breakpoint (gdbarch, aspace, pc);
2409 insert_breakpoints ();
2410
fbea99ea 2411 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2412 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
719546c4 2413 tp->resumed = true;
af48d08f
PA
2414 return;
2415 }
2416 }
6d350bb5 2417 }
c2c6d25f 2418
c1e36e3e
PA
2419 /* If we have a breakpoint to step over, make sure to do a single
2420 step only. Same if we have software watchpoints. */
2421 if (tp->control.trap_expected || bpstat_should_step ())
2422 tp->control.may_range_step = 0;
2423
7da6a5b9
LM
2424 /* If displaced stepping is enabled, step over breakpoints by executing a
2425 copy of the instruction at a different address.
237fc4c9
PA
2426
2427 We can't use displaced stepping when we have a signal to deliver;
2428 the comments for displaced_step_prepare explain why. The
2429 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2430 signals' explain what we do instead.
2431
2432 We can't use displaced stepping when we are waiting for vfork_done
2433 event, displaced stepping breaks the vfork child similarly as single
2434 step software breakpoint. */
3fc8eb30
PA
2435 if (tp->control.trap_expected
2436 && use_displaced_stepping (tp)
cb71640d 2437 && !step_over_info_valid_p ()
a493e3e2 2438 && sig == GDB_SIGNAL_0
74609e71 2439 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2440 {
00431a78 2441 int prepared = displaced_step_prepare (tp);
fc1cf338 2442
3fc8eb30 2443 if (prepared == 0)
d56b7306 2444 {
edbcda09 2445 infrun_log_debug ("Got placed in step-over queue");
4d9d9d04
PA
2446
2447 tp->control.trap_expected = 0;
d56b7306
VP
2448 return;
2449 }
3fc8eb30
PA
2450 else if (prepared < 0)
2451 {
2452 /* Fallback to stepping over the breakpoint in-line. */
2453
2454 if (target_is_non_stop_p ())
2455 stop_all_threads ();
2456
a01bda52 2457 set_step_over_info (regcache->aspace (),
21edc42f 2458 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2459
2460 step = maybe_software_singlestep (gdbarch, pc);
2461
2462 insert_breakpoints ();
2463 }
2464 else if (prepared > 0)
2465 {
2466 struct displaced_step_inferior_state *displaced;
99e40580 2467
3fc8eb30
PA
2468 /* Update pc to reflect the new address from which we will
2469 execute instructions due to displaced stepping. */
00431a78 2470 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2471
00431a78 2472 displaced = get_displaced_stepping_state (tp->inf);
d8d83535
SM
2473 step = gdbarch_displaced_step_hw_singlestep
2474 (gdbarch, displaced->step_closure.get ());
3fc8eb30 2475 }
237fc4c9
PA
2476 }
2477
2facfe5c 2478 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2479 else if (step)
2facfe5c 2480 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2481
30852783
UW
2482 /* Currently, our software single-step implementation leads to different
2483 results than hardware single-stepping in one situation: when stepping
2484 into delivering a signal which has an associated signal handler,
2485 hardware single-step will stop at the first instruction of the handler,
2486 while software single-step will simply skip execution of the handler.
2487
2488 For now, this difference in behavior is accepted since there is no
2489 easy way to actually implement single-stepping into a signal handler
2490 without kernel support.
2491
2492 However, there is one scenario where this difference leads to follow-on
2493 problems: if we're stepping off a breakpoint by removing all breakpoints
2494 and then single-stepping. In this case, the software single-step
2495 behavior means that even if there is a *breakpoint* in the signal
2496 handler, GDB still would not stop.
2497
2498 Fortunately, we can at least fix this particular issue. We detect
2499 here the case where we are about to deliver a signal while software
2500 single-stepping with breakpoints removed. In this situation, we
2501 revert the decisions to remove all breakpoints and insert single-
2502 step breakpoints, and instead we install a step-resume breakpoint
2503 at the current address, deliver the signal without stepping, and
2504 once we arrive back at the step-resume breakpoint, actually step
2505 over the breakpoint we originally wanted to step over. */
34b7e8a6 2506 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2507 && sig != GDB_SIGNAL_0
2508 && step_over_info_valid_p ())
30852783
UW
2509 {
2510 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2511 immediately after a handler returns, we might already have
30852783
UW
2512 a step-resume breakpoint set on the earlier handler. We cannot
2513 set another step-resume breakpoint; just continue on until the
2514 original breakpoint is hit. */
2515 if (tp->control.step_resume_breakpoint == NULL)
2516 {
2c03e5be 2517 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2518 tp->step_after_step_resume_breakpoint = 1;
2519 }
2520
34b7e8a6 2521 delete_single_step_breakpoints (tp);
30852783 2522
31e77af2 2523 clear_step_over_info ();
30852783 2524 tp->control.trap_expected = 0;
31e77af2
PA
2525
2526 insert_breakpoints ();
30852783
UW
2527 }
2528
b0f16a3e
SM
2529 /* If STEP is set, it's a request to use hardware stepping
2530 facilities. But in that case, we should never
2531 use singlestep breakpoint. */
34b7e8a6 2532 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2533
fbea99ea 2534 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2535 if (tp->control.trap_expected)
b0f16a3e
SM
2536 {
2537 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2538 hit, either by single-stepping the thread with the breakpoint
2539 removed, or by displaced stepping, with the breakpoint inserted.
2540 In the former case, we need to single-step only this thread,
2541 and keep others stopped, as they can miss this breakpoint if
2542 allowed to run. That's not really a problem for displaced
2543 stepping, but, we still keep other threads stopped, in case
2544 another thread is also stopped for a breakpoint waiting for
2545 its turn in the displaced stepping queue. */
b0f16a3e
SM
2546 resume_ptid = inferior_ptid;
2547 }
fbea99ea
PA
2548 else
2549 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2550
7f5ef605
PA
2551 if (execution_direction != EXEC_REVERSE
2552 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2553 {
372316f1
PA
2554 /* There are two cases where we currently need to step a
2555 breakpoint instruction when we have a signal to deliver:
2556
2557 - See handle_signal_stop where we handle random signals that
2558 could take us out of the stepping range. Normally, in
2559 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2560 signal handler with a breakpoint at PC, but there are cases
2561 where we should _always_ single-step, even if we have a
2562 step-resume breakpoint, like when a software watchpoint is
2563 set. Assuming single-stepping and delivering a signal at the
2564 same time would take us to the signal handler, then we could
2565 have removed the breakpoint at PC to step over it. However,
2566 some hardware step targets (like e.g., Mac OS) can't step
2567 into signal handlers, and for those, we need to leave the
2568 breakpoint at PC inserted, as otherwise if the handler
2569 recurses and executes PC again, it'll miss the breakpoint.
2570 So we leave the breakpoint inserted anyway, but we need to
2571 record that we tried to step a breakpoint instruction, so
372316f1
PA
2572 that adjust_pc_after_break doesn't end up confused.
2573
2574 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2575 in one thread after another thread that was stepping had been
2576 momentarily paused for a step-over. When we re-resume the
2577 stepping thread, it may be resumed from that address with a
2578 breakpoint that hasn't trapped yet. Seen with
2579 gdb.threads/non-stop-fair-events.exp, on targets that don't
2580 do displaced stepping. */
2581
edbcda09
SM
2582 infrun_log_debug ("resume: [%s] stepped breakpoint",
2583 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2584
2585 tp->stepped_breakpoint = 1;
2586
b0f16a3e
SM
2587 /* Most targets can step a breakpoint instruction, thus
2588 executing it normally. But if this one cannot, just
2589 continue and we will hit it anyway. */
7f5ef605 2590 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2591 step = 0;
2592 }
ef5cf84e 2593
b0f16a3e 2594 if (debug_displaced
cb71640d 2595 && tp->control.trap_expected
3fc8eb30 2596 && use_displaced_stepping (tp)
cb71640d 2597 && !step_over_info_valid_p ())
b0f16a3e 2598 {
00431a78 2599 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2600 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2601 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2602 gdb_byte buf[4];
2603
2604 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2605 paddress (resume_gdbarch, actual_pc));
2606 read_memory (actual_pc, buf, sizeof (buf));
2607 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2608 }
237fc4c9 2609
b0f16a3e
SM
2610 if (tp->control.may_range_step)
2611 {
2612 /* If we're resuming a thread with the PC out of the step
2613 range, then we're doing some nested/finer run control
2614 operation, like stepping the thread out of the dynamic
2615 linker or the displaced stepping scratch pad. We
2616 shouldn't have allowed a range step then. */
2617 gdb_assert (pc_in_thread_step_range (pc, tp));
2618 }
c1e36e3e 2619
64ce06e4 2620 do_target_resume (resume_ptid, step, sig);
719546c4 2621 tp->resumed = true;
c906108c 2622}
71d378ae
PA
2623
2624/* Resume the inferior. SIG is the signal to give the inferior
2625 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2626 rolls back state on error. */
2627
aff4e175 2628static void
71d378ae
PA
2629resume (gdb_signal sig)
2630{
a70b8144 2631 try
71d378ae
PA
2632 {
2633 resume_1 (sig);
2634 }
230d2906 2635 catch (const gdb_exception &ex)
71d378ae
PA
2636 {
2637 /* If resuming is being aborted for any reason, delete any
2638 single-step breakpoint resume_1 may have created, to avoid
2639 confusing the following resumption, and to avoid leaving
2640 single-step breakpoints perturbing other threads, in case
2641 we're running in non-stop mode. */
2642 if (inferior_ptid != null_ptid)
2643 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2644 throw;
71d378ae 2645 }
71d378ae
PA
2646}
2647
c906108c 2648\f
237fc4c9 2649/* Proceeding. */
c906108c 2650
4c2f2a79
PA
2651/* See infrun.h. */
2652
2653/* Counter that tracks the number of user-visible stops. This can be used
2654 to tell whether a command has proceeded the inferior past the
2655 current location. This allows e.g., inferior function calls in
2656 breakpoint commands to not interrupt the command list. When the
2657 call finishes successfully, the inferior is standing at the same
2658 breakpoint as if nothing happened (and so we don't call
2659 normal_stop). */
2660static ULONGEST current_stop_id;
2661
2662/* See infrun.h. */
2663
2664ULONGEST
2665get_stop_id (void)
2666{
2667 return current_stop_id;
2668}
2669
2670/* Called when we report a user-visible stop. */
2671
2672static void
2673new_stop_id (void)
2674{
2675 current_stop_id++;
2676}
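/* Typical use of the counter above (illustrative; the helper name is
   hypothetical): snapshot the id before doing something that may
   resume the inferior, then compare afterwards to learn whether a
   user-visible stop happened in between.  */

static bool
sketch_stop_happened_since (ULONGEST stop_id_before)
{
  return get_stop_id () != stop_id_before;
}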
2677
c906108c
SS
2678/* Clear out all variables saying what to do when the inferior is continued.
2679 First do this, then set the ones you want, then call `proceed'. */
2680
a7212384
UW
2681static void
2682clear_proceed_status_thread (struct thread_info *tp)
c906108c 2683{
edbcda09 2684 infrun_log_debug ("%s", target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2685
372316f1
PA
2686 /* If we're starting a new sequence, then the previous finished
2687 single-step is no longer relevant. */
2688 if (tp->suspend.waitstatus_pending_p)
2689 {
2690 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2691 {
edbcda09
SM
2692 infrun_log_debug ("pending event of %s was a finished step. "
2693 "Discarding.",
2694 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2695
2696 tp->suspend.waitstatus_pending_p = 0;
2697 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2698 }
edbcda09 2699 else
372316f1 2700 {
edbcda09
SM
2701 infrun_log_debug
2702 ("thread %s has pending wait status %s (currently_stepping=%d).",
2703 target_pid_to_str (tp->ptid).c_str (),
2704 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2705 currently_stepping (tp));
372316f1
PA
2706 }
2707 }
2708
70509625
PA
2709 /* If this signal should not be seen by the program, give it zero.
2710 Used for debugging signals. */
2711 if (!signal_pass_state (tp->suspend.stop_signal))
2712 tp->suspend.stop_signal = GDB_SIGNAL_0;
2713
46e3ed7f 2714 delete tp->thread_fsm;
243a9253
PA
2715 tp->thread_fsm = NULL;
2716
16c381f0
JK
2717 tp->control.trap_expected = 0;
2718 tp->control.step_range_start = 0;
2719 tp->control.step_range_end = 0;
c1e36e3e 2720 tp->control.may_range_step = 0;
16c381f0
JK
2721 tp->control.step_frame_id = null_frame_id;
2722 tp->control.step_stack_frame_id = null_frame_id;
2723 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2724 tp->control.step_start_function = NULL;
a7212384 2725 tp->stop_requested = 0;
4e1c45ea 2726
16c381f0 2727 tp->control.stop_step = 0;
32400beb 2728
16c381f0 2729 tp->control.proceed_to_finish = 0;
414c69f7 2730
856e7dd6 2731 tp->control.stepping_command = 0;
17b2616c 2732
a7212384 2733 /* Discard any remaining commands or status from previous stop. */
16c381f0 2734 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2735}
32400beb 2736
a7212384 2737void
70509625 2738clear_proceed_status (int step)
a7212384 2739{
f2665db5
MM
2740 /* With scheduler-locking replay, stop replaying other threads if we're
2741 not replaying the user-visible resume ptid.
2742
2743 This is a convenience feature to not require the user to explicitly
2744 stop replaying the other threads. We're assuming that the user's
2745 intent is to resume tracing the recorded process. */
2746 if (!non_stop && scheduler_mode == schedlock_replay
2747 && target_record_is_replaying (minus_one_ptid)
2748 && !target_record_will_replay (user_visible_resume_ptid (step),
2749 execution_direction))
2750 target_record_stop_replaying ();
2751
08036331 2752 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2753 {
08036331 2754 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2755 process_stratum_target *resume_target
2756 = user_visible_resume_target (resume_ptid);
70509625
PA
2757
2758 /* In all-stop mode, delete the per-thread status of all threads
2759 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2760 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2761 clear_proceed_status_thread (tp);
6c95b8df
PA
2762 }
2763
d7e15655 2764 if (inferior_ptid != null_ptid)
a7212384
UW
2765 {
2766 struct inferior *inferior;
2767
2768 if (non_stop)
2769 {
6c95b8df
PA
2770 /* If in non-stop mode, only delete the per-thread status of
2771 the current thread. */
a7212384
UW
2772 clear_proceed_status_thread (inferior_thread ());
2773 }
6c95b8df 2774
d6b48e9c 2775 inferior = current_inferior ();
16c381f0 2776 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2777 }
2778
76727919 2779 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2780}
2781
99619bea
PA
2782/* Returns true if TP is still stopped at a breakpoint that needs
2783 stepping-over in order to make progress. If the breakpoint is gone
2784 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2785
2786static int
6c4cfb24 2787thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2788{
2789 if (tp->stepping_over_breakpoint)
2790 {
00431a78 2791 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2792
a01bda52 2793 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2794 regcache_read_pc (regcache))
2795 == ordinary_breakpoint_here)
99619bea
PA
2796 return 1;
2797
2798 tp->stepping_over_breakpoint = 0;
2799 }
2800
2801 return 0;
2802}
2803
6c4cfb24
PA
2804/* Check whether thread TP still needs to start a step-over in order
2805 to make progress when resumed. Returns a bitwise OR of enum
2806 step_over_what bits, indicating what needs to be stepped over. */
2807
8d297bbf 2808static step_over_what
6c4cfb24
PA
2809thread_still_needs_step_over (struct thread_info *tp)
2810{
8d297bbf 2811 step_over_what what = 0;
6c4cfb24
PA
2812
2813 if (thread_still_needs_step_over_bp (tp))
2814 what |= STEP_OVER_BREAKPOINT;
2815
2816 if (tp->stepping_over_watchpoint
2817 && !target_have_steppable_watchpoint)
2818 what |= STEP_OVER_WATCHPOINT;
2819
2820 return what;
2821}
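/* Illustrative helper (hypothetical; it mirrors the "must_be_in_line"
   computation in start_step_over earlier in this file): given the bit
   mask returned above and whether displaced stepping is usable for the
   thread, decide whether the step-over has to be done in-line, i.e.
   with all other threads stopped.  */

static bool
sketch_step_over_must_be_inline (step_over_what what,
				 bool can_displaced_step)
{
  return ((what & STEP_OVER_WATCHPOINT)
	  || ((what & STEP_OVER_BREAKPOINT) && !can_displaced_step));
}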
2822
483805cf
PA
2823/* Returns true if scheduler locking applies when resuming thread TP;
2824 in "step" mode this depends on whether TP is doing a stepping command. */
2825
2826static int
856e7dd6 2827schedlock_applies (struct thread_info *tp)
483805cf
PA
2828{
2829 return (scheduler_mode == schedlock_on
2830 || (scheduler_mode == schedlock_step
f2665db5
MM
2831 && tp->control.stepping_command)
2832 || (scheduler_mode == schedlock_replay
2833 && target_record_will_replay (minus_one_ptid,
2834 execution_direction)));
483805cf
PA
2835}
2836
5b6d1e4f
PA
2837/* Calls target_commit_resume on all targets. */
2838
2839static void
2840commit_resume_all_targets ()
2841{
2842 scoped_restore_current_thread restore_thread;
2843
2844 /* Map between process_target and a representative inferior. This
2845 is to avoid committing a resume in the same target more than
2846 once. Resumptions must be idempotent, so this is an
2847 optimization. */
2848 std::unordered_map<process_stratum_target *, inferior *> conn_inf;
2849
2850 for (inferior *inf : all_non_exited_inferiors ())
2851 if (inf->has_execution ())
2852 conn_inf[inf->process_target ()] = inf;
2853
2854 for (const auto &ci : conn_inf)
2855 {
2856 inferior *inf = ci.second;
2857 switch_to_inferior_no_thread (inf);
2858 target_commit_resume ();
2859 }
2860}
2861
2f4fcf00
PA
2862/* Check that all the targets we're about to resume are in non-stop
2863 mode. Ideally, we'd only care whether all targets support
2864 target-async, but we're not there yet. E.g., stop_all_threads
2865 doesn't know how to handle all-stop targets. Also, the remote
2866 protocol in all-stop mode is synchronous, irrespective of
2867 target-async, which means that things like a breakpoint re-set
2868 triggered by one target would try to read memory from all targets
2869 and fail. */
2870
2871static void
2872check_multi_target_resumption (process_stratum_target *resume_target)
2873{
2874 if (!non_stop && resume_target == nullptr)
2875 {
2876 scoped_restore_current_thread restore_thread;
2877
2878 /* This is used to track whether we're resuming more than one
2879 target. */
2880 process_stratum_target *first_connection = nullptr;
2881
2882 /* The first inferior we see with a target that does not work in
2883 always-non-stop mode. */
2884 inferior *first_not_non_stop = nullptr;
2885
2886 for (inferior *inf : all_non_exited_inferiors (resume_target))
2887 {
2888 switch_to_inferior_no_thread (inf);
2889
2890 if (!target_has_execution)
2891 continue;
2892
2893 process_stratum_target *proc_target
2894 = current_inferior ()->process_target();
2895
2896 if (!target_is_non_stop_p ())
2897 first_not_non_stop = inf;
2898
2899 if (first_connection == nullptr)
2900 first_connection = proc_target;
2901 else if (first_connection != proc_target
2902 && first_not_non_stop != nullptr)
2903 {
2904 switch_to_inferior_no_thread (first_not_non_stop);
2905
2906 proc_target = current_inferior ()->process_target();
2907
2908 error (_("Connection %d (%s) does not support "
2909 "multi-target resumption."),
2910 proc_target->connection_number,
2911 make_target_connection_string (proc_target).c_str ());
2912 }
2913 }
2914 }
2915}
2916
c906108c
SS
2917/* Basic routine for continuing the program in various fashions.
2918
2919 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2920 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2921 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2922
2923 You should call clear_proceed_status before calling proceed. */
2924
2925void
64ce06e4 2926proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2927{
e58b0e63
PA
2928 struct regcache *regcache;
2929 struct gdbarch *gdbarch;
e58b0e63 2930 CORE_ADDR pc;
4d9d9d04
PA
2931 struct execution_control_state ecss;
2932 struct execution_control_state *ecs = &ecss;
4d9d9d04 2933 int started;
c906108c 2934
e58b0e63
PA
2935 /* If we're stopped at a fork/vfork, follow the branch set by the
2936 "set follow-fork-mode" command; otherwise, we'll just proceed
2937 resuming the current thread. */
2938 if (!follow_fork ())
2939 {
2940 /* The target for some reason decided not to resume. */
2941 normal_stop ();
f148b27e 2942 if (target_can_async_p ())
b1a35af2 2943 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
2944 return;
2945 }
2946
842951eb
PA
2947 /* We'll update this if & when we switch to a new thread. */
2948 previous_inferior_ptid = inferior_ptid;
2949
e58b0e63 2950 regcache = get_current_regcache ();
ac7936df 2951 gdbarch = regcache->arch ();
8b86c959
YQ
2952 const address_space *aspace = regcache->aspace ();
2953
fc75c28b
TBA
2954 pc = regcache_read_pc_protected (regcache);
2955
08036331 2956 thread_info *cur_thr = inferior_thread ();
e58b0e63 2957
99619bea 2958 /* Fill in with reasonable starting values. */
08036331 2959 init_thread_stepping_state (cur_thr);
99619bea 2960
08036331 2961 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2962
5b6d1e4f
PA
2963 ptid_t resume_ptid
2964 = user_visible_resume_ptid (cur_thr->control.stepping_command);
2965 process_stratum_target *resume_target
2966 = user_visible_resume_target (resume_ptid);
2967
2f4fcf00
PA
2968 check_multi_target_resumption (resume_target);
2969
2acceee2 2970 if (addr == (CORE_ADDR) -1)
c906108c 2971 {
08036331 2972 if (pc == cur_thr->suspend.stop_pc
af48d08f 2973 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2974 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2975 /* There is a breakpoint at the address we will resume at,
2976 step one instruction before inserting breakpoints so that
2977 we do not stop right away (and report a second hit at this
b2175913
MS
2978 breakpoint).
2979
2980 Note, we don't do this in reverse, because we won't
2981 actually be executing the breakpoint insn anyway.
2982 We'll be (un-)executing the previous instruction. */
08036331 2983 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
2984 else if (gdbarch_single_step_through_delay_p (gdbarch)
2985 && gdbarch_single_step_through_delay (gdbarch,
2986 get_current_frame ()))
3352ef37
AC
2987 /* We stepped onto an instruction that needs to be stepped
2988 again before re-inserting the breakpoint, do so. */
08036331 2989 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
2990 }
2991 else
2992 {
515630c5 2993 regcache_write_pc (regcache, addr);
c906108c
SS
2994 }
2995
70509625 2996 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 2997 cur_thr->suspend.stop_signal = siggnal;
70509625 2998
4d9d9d04
PA
2999 /* If an exception is thrown from this point on, make sure to
3000 propagate GDB's knowledge of the executing state to the
3001 frontend/user running state. */
5b6d1e4f 3002 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
3003
3004 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3005 threads (e.g., we might need to set threads stepping over
3006 breakpoints first), from the user/frontend's point of view, all
3007 threads in RESUME_PTID are now running. Unless we're calling an
3008 inferior function, as in that case we pretend the inferior
3009 doesn't run at all. */
08036331 3010 if (!cur_thr->control.in_infcall)
719546c4 3011 set_running (resume_target, resume_ptid, true);
17b2616c 3012
edbcda09
SM
3013 infrun_log_debug ("addr=%s, signal=%s", paddress (gdbarch, addr),
3014 gdb_signal_to_symbol_string (siggnal));
527159b7 3015
4d9d9d04
PA
3016 annotate_starting ();
3017
3018 /* Make sure that output from GDB appears before output from the
3019 inferior. */
3020 gdb_flush (gdb_stdout);
3021
d930703d
PA
3022 /* Since we've marked the inferior running, give it the terminal. A
3023 QUIT/Ctrl-C from here on is forwarded to the target (which can
3024 still detect attempts to unblock a stuck connection with repeated
3025 Ctrl-C from within target_pass_ctrlc). */
3026 target_terminal::inferior ();
3027
4d9d9d04
PA
3028 /* In a multi-threaded task we may select another thread and
3029 then continue or step.
3030
3031 But if a thread that we're resuming had stopped at a breakpoint,
3032 it will immediately cause another breakpoint stop without any
3033 execution (i.e. it will report a breakpoint hit incorrectly). So
3034 we must step over it first.
3035
3036 Look for threads other than the current (TP) that reported a
3037 breakpoint hit and haven't been resumed yet since. */
3038
3039 /* If scheduler locking applies, we can avoid iterating over all
3040 threads. */
08036331 3041 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3042 {
5b6d1e4f
PA
3043 for (thread_info *tp : all_non_exited_threads (resume_target,
3044 resume_ptid))
08036331 3045 {
f3f8ece4
PA
3046 switch_to_thread_no_regs (tp);
3047
4d9d9d04
PA
3048 /* Ignore the current thread here. It's handled
3049 afterwards. */
08036331 3050 if (tp == cur_thr)
4d9d9d04 3051 continue;
c906108c 3052
4d9d9d04
PA
3053 if (!thread_still_needs_step_over (tp))
3054 continue;
3055
3056 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3057
edbcda09
SM
3058 infrun_log_debug ("need to step-over [%s] first",
3059 target_pid_to_str (tp->ptid).c_str ());
99619bea 3060
4d9d9d04 3061 thread_step_over_chain_enqueue (tp);
2adfaa28 3062 }
f3f8ece4
PA
3063
3064 switch_to_thread (cur_thr);
30852783
UW
3065 }
3066
4d9d9d04
PA
3067 /* Enqueue the current thread last, so that we move all other
3068 threads over their breakpoints first. */
08036331
PA
3069 if (cur_thr->stepping_over_breakpoint)
3070 thread_step_over_chain_enqueue (cur_thr);
30852783 3071
4d9d9d04
PA
3072 /* If the thread isn't started, we'll still need to set its prev_pc,
3073 so that switch_back_to_stepped_thread knows the thread hasn't
3074 advanced. Must do this before resuming any thread, as in
3075 all-stop/remote, once we resume we can't send any other packet
3076 until the target stops again. */
fc75c28b 3077 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3078
a9bc57b9
TT
3079 {
3080 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 3081
a9bc57b9 3082 started = start_step_over ();
c906108c 3083
a9bc57b9
TT
3084 if (step_over_info_valid_p ())
3085 {
3086 /* Either this thread started a new in-line step over, or some
3087 other thread was already doing one. In either case, don't
3088 resume anything else until the step-over is finished. */
3089 }
3090 else if (started && !target_is_non_stop_p ())
3091 {
3092 /* A new displaced stepping sequence was started. In all-stop,
3093 we can't talk to the target anymore until it next stops. */
3094 }
3095 else if (!non_stop && target_is_non_stop_p ())
3096 {
3097 /* In all-stop, but the target is always in non-stop mode.
3098 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3099 for (thread_info *tp : all_non_exited_threads (resume_target,
3100 resume_ptid))
3101 {
3102 switch_to_thread_no_regs (tp);
3103
f9fac3c8
SM
3104 if (!tp->inf->has_execution ())
3105 {
edbcda09
SM
3106 infrun_log_debug ("[%s] target has no execution",
3107 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3108 continue;
3109 }
f3f8ece4 3110
f9fac3c8
SM
3111 if (tp->resumed)
3112 {
edbcda09
SM
3113 infrun_log_debug ("[%s] resumed",
3114 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3115 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3116 continue;
3117 }
fbea99ea 3118
f9fac3c8
SM
3119 if (thread_is_in_step_over_chain (tp))
3120 {
edbcda09
SM
3121 infrun_log_debug ("[%s] needs step-over",
3122 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3123 continue;
3124 }
fbea99ea 3125
edbcda09
SM
3126 infrun_log_debug ("resuming %s",
3127 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3128
f9fac3c8
SM
3129 reset_ecs (ecs, tp);
3130 switch_to_thread (tp);
3131 keep_going_pass_signal (ecs);
3132 if (!ecs->wait_some_more)
3133 error (_("Command aborted."));
3134 }
a9bc57b9 3135 }
08036331 3136 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3137 {
3138 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3139 reset_ecs (ecs, cur_thr);
3140 switch_to_thread (cur_thr);
a9bc57b9
TT
3141 keep_going_pass_signal (ecs);
3142 if (!ecs->wait_some_more)
3143 error (_("Command aborted."));
3144 }
3145 }
c906108c 3146
5b6d1e4f 3147 commit_resume_all_targets ();
85ad3aaf 3148
731f534f 3149 finish_state.release ();
c906108c 3150
873657b9
PA
3151 /* If we've switched threads above, switch back to the previously
3152 current thread. We don't want the user to see a different
3153 selected thread. */
3154 switch_to_thread (cur_thr);
3155
0b333c5e
PA
3156 /* Tell the event loop to wait for it to stop. If the target
3157 supports asynchronous execution, it'll do this from within
3158 target_resume. */
362646f5 3159 if (!target_can_async_p ())
0b333c5e 3160 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3161}
c906108c
SS
3162\f
3163
3164/* Start remote-debugging of a machine over a serial link. */
96baa820 3165
c906108c 3166void
8621d6a9 3167start_remote (int from_tty)
c906108c 3168{
5b6d1e4f
PA
3169 inferior *inf = current_inferior ();
3170 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3171
1777feb0 3172 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3173 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3174 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3175 nothing is returned (instead of just blocking). Because of this,
3176 targets expecting an immediate response need to, internally, set
3177 things up so that the target_wait() is forced to eventually
1777feb0 3178 timeout. */
6426a772
JM
3179 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3180 indicate to its caller what the state of the target is after
3181 the initial open has been performed. Here we're assuming that
3182 the target has stopped. It should be possible to eventually have
3183 target_open() return to the caller an indication that the target
3184 is currently running and GDB state should be set to the same as
1777feb0 3185 for an async run. */
5b6d1e4f 3186 wait_for_inferior (inf);
8621d6a9
DJ
3187
3188 /* Now that the inferior has stopped, do any bookkeeping like
3189 loading shared libraries. We want to do this before normal_stop,
3190 so that the displayed frame is up to date. */
8b88a78e 3191 post_create_inferior (current_top_target (), from_tty);
8621d6a9 3192
6426a772 3193 normal_stop ();
c906108c
SS
3194}
3195
3196/* Initialize static vars when a new inferior begins. */
3197
3198void
96baa820 3199init_wait_for_inferior (void)
c906108c
SS
3200{
3201 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3202
c906108c
SS
3203 breakpoint_init_inferior (inf_starting);
3204
70509625 3205 clear_proceed_status (0);
9f976b41 3206
ab1ddbcf 3207 nullify_last_target_wait_ptid ();
237fc4c9 3208
842951eb 3209 previous_inferior_ptid = inferior_ptid;
c906108c 3210}
237fc4c9 3211
c906108c 3212\f
488f131b 3213
ec9499be 3214static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3215
568d6575
UW
3216static void handle_step_into_function (struct gdbarch *gdbarch,
3217 struct execution_control_state *ecs);
3218static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3219 struct execution_control_state *ecs);
4f5d7f63 3220static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3221static void check_exception_resume (struct execution_control_state *,
28106bc2 3222 struct frame_info *);
611c83ae 3223
bdc36728 3224static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3225static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3226static void keep_going (struct execution_control_state *ecs);
94c57d6a 3227static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3228static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3229
252fbfc8
PA
3230/* This function is attached as a "thread_stop_requested" observer.
3231 Cleanup local state that assumed the PTID was to be resumed, and
3232 report the stop to the frontend. */
3233
2c0b251b 3234static void
252fbfc8
PA
3235infrun_thread_stop_requested (ptid_t ptid)
3236{
5b6d1e4f
PA
3237 process_stratum_target *curr_target = current_inferior ()->process_target ();
3238
c65d6b55
PA
3239 /* PTID was requested to stop. If the thread was already stopped,
3240 but the user/frontend doesn't know about that yet (e.g., the
3241 thread had been temporarily paused for some step-over), set up
3242 for reporting the stop now. */
5b6d1e4f 3243 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3244 {
3245 if (tp->state != THREAD_RUNNING)
3246 continue;
3247 if (tp->executing)
3248 continue;
c65d6b55 3249
08036331
PA
3250 /* Remove matching threads from the step-over queue, so
3251 start_step_over doesn't try to resume them
3252 automatically. */
3253 if (thread_is_in_step_over_chain (tp))
3254 thread_step_over_chain_remove (tp);
c65d6b55 3255
08036331
PA
3256 /* If the thread is stopped, but the user/frontend doesn't
3257 know about that yet, queue a pending event, as if the
3258 thread had just stopped now. Unless the thread already had
3259 a pending event. */
3260 if (!tp->suspend.waitstatus_pending_p)
3261 {
3262 tp->suspend.waitstatus_pending_p = 1;
3263 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3264 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3265 }
c65d6b55 3266
08036331
PA
3267 /* Clear the inline-frame state, since we're re-processing the
3268 stop. */
5b6d1e4f 3269 clear_inline_frame_state (tp);
c65d6b55 3270
08036331
PA
3271 /* If this thread was paused because some other thread was
3272 doing an inline-step over, let that finish first. Once
3273 that happens, we'll restart all threads and consume pending
3274 stop events then. */
3275 if (step_over_info_valid_p ())
3276 continue;
3277
3278 /* Otherwise we can process the (new) pending event now. Set
3279 it so this pending event is considered by
3280 do_target_wait. */
719546c4 3281 tp->resumed = true;
08036331 3282 }
252fbfc8
PA
3283}
3284
a07daef3
PA
3285static void
3286infrun_thread_thread_exit (struct thread_info *tp, int silent)
3287{
5b6d1e4f
PA
3288 if (target_last_proc_target == tp->inf->process_target ()
3289 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3290 nullify_last_target_wait_ptid ();
3291}
3292
0cbcdb96
PA
3293/* Delete the step resume, single-step and longjmp/exception resume
3294 breakpoints of TP. */
4e1c45ea 3295
0cbcdb96
PA
3296static void
3297delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3298{
0cbcdb96
PA
3299 delete_step_resume_breakpoint (tp);
3300 delete_exception_resume_breakpoint (tp);
34b7e8a6 3301 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3302}
3303
0cbcdb96
PA
3304/* If the target still has execution, call FUNC for each thread that
3305 just stopped. In all-stop, that's all the non-exited threads; in
3306 non-stop, that's the current thread, only. */
3307
3308typedef void (*for_each_just_stopped_thread_callback_func)
3309 (struct thread_info *tp);
4e1c45ea
PA
3310
3311static void
0cbcdb96 3312for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3313{
d7e15655 3314 if (!target_has_execution || inferior_ptid == null_ptid)
4e1c45ea
PA
3315 return;
3316
fbea99ea 3317 if (target_is_non_stop_p ())
4e1c45ea 3318 {
0cbcdb96
PA
3319 /* If in non-stop mode, only the current thread stopped. */
3320 func (inferior_thread ());
4e1c45ea
PA
3321 }
3322 else
0cbcdb96 3323 {
0cbcdb96 3324 /* In all-stop mode, all threads have stopped. */
08036331
PA
3325 for (thread_info *tp : all_non_exited_threads ())
3326 func (tp);
0cbcdb96
PA
3327 }
3328}
3329
3330/* Delete the step resume and longjmp/exception resume breakpoints of
3331 the threads that just stopped. */
3332
3333static void
3334delete_just_stopped_threads_infrun_breakpoints (void)
3335{
3336 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3337}
3338
3339/* Delete the single-step breakpoints of the threads that just
3340 stopped. */
7c16b83e 3341
34b7e8a6
PA
3342static void
3343delete_just_stopped_threads_single_step_breakpoints (void)
3344{
3345 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3346}
3347
221e1a37 3348/* See infrun.h. */
223698f8 3349
221e1a37 3350void
223698f8
DE
3351print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3352 const struct target_waitstatus *ws)
3353{
23fdd69e 3354 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3355 string_file stb;
223698f8
DE
3356
3357 /* The text is split over several lines because it was getting too long.
3358 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3359 output as a unit; we want only one timestamp printed if debug_timestamp
3360 is set. */
3361
d7e74731 3362 stb.printf ("infrun: target_wait (%d.%ld.%ld",
e99b03dc 3363 waiton_ptid.pid (),
e38504b3 3364 waiton_ptid.lwp (),
cc6bcb54 3365 waiton_ptid.tid ());
e99b03dc 3366 if (waiton_ptid.pid () != -1)
a068643d 3367 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731
PA
3368 stb.printf (", status) =\n");
3369 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
e99b03dc 3370 result_ptid.pid (),
e38504b3 3371 result_ptid.lwp (),
cc6bcb54 3372 result_ptid.tid (),
a068643d 3373 target_pid_to_str (result_ptid).c_str ());
23fdd69e 3374 stb.printf ("infrun: %s\n", status_string.c_str ());
223698f8
DE
3375
3376 /* This uses %s in part to handle %'s in the text, but also to avoid
3377 a gcc error: the format attribute requires a string literal. */
d7e74731 3378 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3379}
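/* Going by the format strings above, a wait on minus_one_ptid that
   returned a SIGTRAP stop would be logged roughly as follows (the PID
   numbers and the exact status string are made up for illustration):

     infrun: target_wait (-1.0.0, status) =
     infrun: 12345.12345.0 [Thread 12345.12345],
     infrun: status->kind = stopped, signal = GDB_SIGNAL_TRAP  */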
3380
372316f1
PA
3381/* Select a thread at random, out of those which are resumed and have
3382 had events. */
3383
3384static struct thread_info *
5b6d1e4f 3385random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3386{
372316f1 3387 int num_events = 0;
08036331 3388
5b6d1e4f 3389 auto has_event = [&] (thread_info *tp)
08036331 3390 {
5b6d1e4f
PA
3391 return (tp->ptid.matches (waiton_ptid)
3392 && tp->resumed
08036331
PA
3393 && tp->suspend.waitstatus_pending_p);
3394 };
372316f1
PA
3395
3396 /* First see how many events we have. Count only resumed threads
3397 that have an event pending. */
5b6d1e4f 3398 for (thread_info *tp : inf->non_exited_threads ())
08036331 3399 if (has_event (tp))
372316f1
PA
3400 num_events++;
3401
3402 if (num_events == 0)
3403 return NULL;
3404
3405 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3406 int random_selector = (int) ((num_events * (double) rand ())
3407 / (RAND_MAX + 1.0));
372316f1 3408
edbcda09
SM
3409 if (num_events > 1)
3410 infrun_log_debug ("Found %d events, selecting #%d",
3411 num_events, random_selector);
372316f1
PA
3412
3413 /* Select the Nth thread that has had an event. */
5b6d1e4f 3414 for (thread_info *tp : inf->non_exited_threads ())
08036331 3415 if (has_event (tp))
372316f1 3416 if (random_selector-- == 0)
08036331 3417 return tp;
372316f1 3418
08036331 3419 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3420}
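/* For example, with three resumed threads carrying pending events,
   num_events is 3 and the expression above maps rand () onto
   {0, 1, 2}; the second pass then returns the thread at that index
   among the ones with events.  The selection is only as uniform as
   rand () itself; it is just meant to avoid always favoring the same
   thread.  */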
3421
3422/* Wrapper for target_wait that first checks whether threads have
3423 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3424 more events. INF is the inferior we're using to call target_wait
3425 on. */
372316f1
PA
3426
3427static ptid_t
5b6d1e4f
PA
3428do_target_wait_1 (inferior *inf, ptid_t ptid,
3429 target_waitstatus *status, int options)
372316f1
PA
3430{
3431 ptid_t event_ptid;
3432 struct thread_info *tp;
3433
24ed6739
AB
3434 /* We know that we are looking for an event in the target of inferior
3435 INF, but we don't know which thread the event might come from. As
3436 such we want to make sure that INFERIOR_PTID is reset so that none of
3437 the wait code relies on it - doing so is always a mistake. */
3438 switch_to_inferior_no_thread (inf);
3439
372316f1
PA
3440 /* First check if there is a resumed thread with a wait status
3441 pending. */
d7e15655 3442 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3443 {
5b6d1e4f 3444 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3445 }
3446 else
3447 {
edbcda09
SM
3448 infrun_log_debug ("Waiting for specific thread %s.",
3449 target_pid_to_str (ptid).c_str ());
372316f1
PA
3450
3451 /* We have a specific thread to check. */
5b6d1e4f 3452 tp = find_thread_ptid (inf, ptid);
372316f1
PA
3453 gdb_assert (tp != NULL);
3454 if (!tp->suspend.waitstatus_pending_p)
3455 tp = NULL;
3456 }
3457
3458 if (tp != NULL
3459 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3460 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3461 {
00431a78 3462 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3463 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3464 CORE_ADDR pc;
3465 int discard = 0;
3466
3467 pc = regcache_read_pc (regcache);
3468
3469 if (pc != tp->suspend.stop_pc)
3470 {
edbcda09
SM
3471 infrun_log_debug ("PC of %s changed. was=%s, now=%s",
3472 target_pid_to_str (tp->ptid).c_str (),
3473 paddress (gdbarch, tp->suspend.stop_pc),
3474 paddress (gdbarch, pc));
372316f1
PA
3475 discard = 1;
3476 }
a01bda52 3477 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1 3478 {
edbcda09
SM
3479 infrun_log_debug ("previous breakpoint of %s, at %s gone",
3480 target_pid_to_str (tp->ptid).c_str (),
3481 paddress (gdbarch, pc));
372316f1
PA
3482
3483 discard = 1;
3484 }
3485
3486 if (discard)
3487 {
edbcda09
SM
3488 infrun_log_debug ("pending event of %s cancelled.",
3489 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3490
3491 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3492 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3493 }
3494 }
3495
3496 if (tp != NULL)
3497 {
edbcda09
SM
3498 infrun_log_debug ("Using pending wait status %s for %s.",
3499 target_waitstatus_to_string
3500 (&tp->suspend.waitstatus).c_str (),
3501 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3502
3503 /* Now that we've selected our final event LWP, un-adjust its PC
3504 if it was a software breakpoint (and the target doesn't
3505 always adjust the PC itself). */
3506 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3507 && !target_supports_stopped_by_sw_breakpoint ())
3508 {
3509 struct regcache *regcache;
3510 struct gdbarch *gdbarch;
3511 int decr_pc;
3512
00431a78 3513 regcache = get_thread_regcache (tp);
ac7936df 3514 gdbarch = regcache->arch ();
372316f1
PA
3515
3516 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3517 if (decr_pc != 0)
3518 {
3519 CORE_ADDR pc;
3520
3521 pc = regcache_read_pc (regcache);
3522 regcache_write_pc (regcache, pc + decr_pc);
3523 }
3524 }
3525
3526 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3527 *status = tp->suspend.waitstatus;
3528 tp->suspend.waitstatus_pending_p = 0;
3529
3530 /* Wake up the event loop again, until all pending events are
3531 processed. */
3532 if (target_is_async_p ())
3533 mark_async_event_handler (infrun_async_inferior_event_token);
3534 return tp->ptid;
3535 }
3536
3537 /* But if we don't find one, we'll have to wait. */
3538
3539 if (deprecated_target_wait_hook)
3540 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3541 else
3542 event_ptid = target_wait (ptid, status, options);
3543
3544 return event_ptid;
3545}
3546
5b6d1e4f
PA
3547/* Wrapper for target_wait that first checks whether threads have
3548 pending statuses to report before actually asking the target for
cad90433 3549 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
3550
3551static bool
3552do_target_wait (ptid_t wait_ptid, execution_control_state *ecs, int options)
3553{
3554 int num_inferiors = 0;
3555 int random_selector;
3556
cad90433
SM
3557 /* For fairness, we pick the first inferior/target to poll at random
3558 out of all inferiors that may report events, and then continue
3559 polling the rest of the inferior list starting from that one in a
3560 circular fashion until the whole list is polled once. */
5b6d1e4f
PA
3561
3562 auto inferior_matches = [&wait_ptid] (inferior *inf)
3563 {
3564 return (inf->process_target () != NULL
5b6d1e4f
PA
3565 && ptid_t (inf->pid).matches (wait_ptid));
3566 };
3567
cad90433 3568 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
3569 for (inferior *inf : all_inferiors ())
3570 if (inferior_matches (inf))
3571 num_inferiors++;
3572
3573 if (num_inferiors == 0)
3574 {
3575 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3576 return false;
3577 }
3578
cad90433 3579 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
3580 random_selector = (int)
3581 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3582
edbcda09
SM
3583 if (num_inferiors > 1)
3584 infrun_log_debug ("Found %d inferiors, starting at #%d",
3585 num_inferiors, random_selector);
5b6d1e4f 3586
cad90433 3587 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
3588
3589 inferior *selected = nullptr;
3590
3591 for (inferior *inf : all_inferiors ())
3592 if (inferior_matches (inf))
3593 if (random_selector-- == 0)
3594 {
3595 selected = inf;
3596 break;
3597 }
3598
cad90433 3599 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
3600 targets, starting from the selected one. */
3601
3602 auto do_wait = [&] (inferior *inf)
3603 {
5b6d1e4f
PA
3604 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
3605 ecs->target = inf->process_target ();
3606 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3607 };
3608
cad90433
SM
3609 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3610 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
3611 reported the stop to the user, polling for events. */
3612 scoped_restore_current_thread restore_thread;
3613
3614 int inf_num = selected->num;
3615 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3616 if (inferior_matches (inf))
3617 if (do_wait (inf))
3618 return true;
3619
3620 for (inferior *inf = inferior_list;
3621 inf != NULL && inf->num < inf_num;
3622 inf = inf->next)
3623 if (inferior_matches (inf))
3624 if (do_wait (inf))
3625 return true;
3626
3627 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3628 return false;
3629}
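/* As an illustration of the circular scan above: with matching
   inferiors numbered 1, 2 and 3 and inferior 2 picked as SELECTED,
   the first loop polls 2 and then 3, and the second loop polls 1
   (every matching inferior with num < 2), so each matching inferior
   is polled at most once per call, and the scan stops as soon as one
   of them reports an event.  */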
3630
24291992
PA
3631/* Prepare and stabilize the inferior for detaching it. E.g.,
3632 detaching while a thread is displaced stepping is a recipe for
3633 crashing it, as nothing would readjust the PC out of the scratch
3634 pad. */
3635
3636void
3637prepare_for_detach (void)
3638{
3639 struct inferior *inf = current_inferior ();
f2907e49 3640 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3641
00431a78 3642 displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);
24291992
PA
3643
3644 /* Is any thread of this process displaced stepping? If not,
3645 there's nothing else to do. */
d20172fc 3646 if (displaced->step_thread == nullptr)
24291992
PA
3647 return;
3648
edbcda09 3649 infrun_log_debug ("displaced-stepping in-process while detaching");
24291992 3650
9bcb1f16 3651 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3652
00431a78 3653 while (displaced->step_thread != nullptr)
24291992 3654 {
24291992
PA
3655 struct execution_control_state ecss;
3656 struct execution_control_state *ecs;
3657
3658 ecs = &ecss;
3659 memset (ecs, 0, sizeof (*ecs));
3660
3661 overlay_cache_invalid = 1;
f15cb84a
YQ
3662 /* Flush target cache before starting to handle each event.
3663 Target was running and cache could be stale. This is just a
3664 heuristic. Running threads may modify target memory, but we
3665 don't get any event. */
3666 target_dcache_invalidate ();
24291992 3667
5b6d1e4f 3668 do_target_wait (pid_ptid, ecs, 0);
24291992
PA
3669
3670 if (debug_infrun)
3671 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3672
3673 /* If an error happens while handling the event, propagate GDB's
3674 knowledge of the executing state to the frontend/user running
3675 state. */
5b6d1e4f
PA
3676 scoped_finish_thread_state finish_state (inf->process_target (),
3677 minus_one_ptid);
24291992
PA
3678
3679 /* Now figure out what to do with the result of the wait. */
3680 handle_inferior_event (ecs);
3681
3682 /* No error, don't finish the state yet. */
731f534f 3683 finish_state.release ();
24291992
PA
3684
3685 /* Breakpoints and watchpoints are not installed on the target
3686 at this point, and signals are passed directly to the
3687 inferior, so this must mean the process is gone. */
3688 if (!ecs->wait_some_more)
3689 {
9bcb1f16 3690 restore_detaching.release ();
24291992
PA
3691 error (_("Program exited while detaching"));
3692 }
3693 }
3694
9bcb1f16 3695 restore_detaching.release ();
24291992
PA
3696}
3697
cd0fc7c3 3698/* Wait for control to return from inferior to debugger.
ae123ec6 3699
cd0fc7c3
SS
3700 If inferior gets a signal, we may decide to start it up again
3701 instead of returning. That is why there is a loop in this function.
3702 When this function actually returns it means the inferior
3703 should be left stopped and GDB should read more commands. */
3704
5b6d1e4f
PA
3705static void
3706wait_for_inferior (inferior *inf)
cd0fc7c3 3707{
edbcda09 3708 infrun_log_debug ("wait_for_inferior ()");
527159b7 3709
4c41382a 3710 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3711
e6f5c25b
PA
3712 /* If an error happens while handling the event, propagate GDB's
3713 knowledge of the executing state to the frontend/user running
3714 state. */
5b6d1e4f
PA
3715 scoped_finish_thread_state finish_state
3716 (inf->process_target (), minus_one_ptid);
e6f5c25b 3717
c906108c
SS
3718 while (1)
3719 {
ae25568b
PA
3720 struct execution_control_state ecss;
3721 struct execution_control_state *ecs = &ecss;
29f49a6a 3722
ae25568b
PA
3723 memset (ecs, 0, sizeof (*ecs));
3724
ec9499be 3725 overlay_cache_invalid = 1;
ec9499be 3726
f15cb84a
YQ
3727 /* Flush target cache before starting to handle each event.
3728 Target was running and cache could be stale. This is just a
3729 heuristic. Running threads may modify target memory, but we
3730 don't get any event. */
3731 target_dcache_invalidate ();
3732
5b6d1e4f
PA
3733 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3734 ecs->target = inf->process_target ();
c906108c 3735
f00150c9 3736 if (debug_infrun)
5b6d1e4f 3737 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 3738
cd0fc7c3
SS
3739 /* Now figure out what to do with the result of the wait. */
3740 handle_inferior_event (ecs);
c906108c 3741
cd0fc7c3
SS
3742 if (!ecs->wait_some_more)
3743 break;
3744 }
4e1c45ea 3745
e6f5c25b 3746 /* No error, don't finish the state yet. */
731f534f 3747 finish_state.release ();
cd0fc7c3 3748}
c906108c 3749
d3d4baed
PA
3750/* Cleanup that reinstalls the readline callback handler, if the
3751 target is running in the background. If while handling the target
3752 event something triggered a secondary prompt, like e.g., a
3753 pagination prompt, we'll have removed the callback handler (see
3754 gdb_readline_wrapper_line). Need to do this as we go back to the
3755 event loop, ready to process further input. Note this has no
3756 effect if the handler hasn't actually been removed, because calling
3757 rl_callback_handler_install resets the line buffer, thus losing
3758 input. */
3759
3760static void
d238133d 3761reinstall_readline_callback_handler_cleanup ()
d3d4baed 3762{
3b12939d
PA
3763 struct ui *ui = current_ui;
3764
3765 if (!ui->async)
6c400b59
PA
3766 {
3767 /* We're not going back to the top level event loop yet. Don't
3768 install the readline callback, as it'd prep the terminal,
3769 readline-style (raw, noecho) (e.g., --batch). We'll install
3770 it the next time the prompt is displayed, when we're ready
3771 for input. */
3772 return;
3773 }
3774
3b12939d 3775 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3776 gdb_rl_callback_handler_reinstall ();
3777}
3778
243a9253
PA
3779/* Clean up the FSMs of threads that are now stopped. In non-stop,
3780 that's just the event thread. In all-stop, that's all threads. */
3781
3782static void
3783clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3784{
08036331
PA
3785 if (ecs->event_thread != NULL
3786 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3787 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3788
3789 if (!non_stop)
3790 {
08036331 3791 for (thread_info *thr : all_non_exited_threads ())
243a9253
PA
3792 {
3793 if (thr->thread_fsm == NULL)
3794 continue;
3795 if (thr == ecs->event_thread)
3796 continue;
3797
00431a78 3798 switch_to_thread (thr);
46e3ed7f 3799 thr->thread_fsm->clean_up (thr);
243a9253
PA
3800 }
3801
3802 if (ecs->event_thread != NULL)
00431a78 3803 switch_to_thread (ecs->event_thread);
243a9253
PA
3804 }
3805}
3806
3b12939d
PA
3807/* Helper for all_uis_check_sync_execution_done that works on the
3808 current UI. */
3809
3810static void
3811check_curr_ui_sync_execution_done (void)
3812{
3813 struct ui *ui = current_ui;
3814
3815 if (ui->prompt_state == PROMPT_NEEDED
3816 && ui->async
3817 && !gdb_in_secondary_prompt_p (ui))
3818 {
223ffa71 3819 target_terminal::ours ();
76727919 3820 gdb::observers::sync_execution_done.notify ();
3eb7562a 3821 ui_register_input_event_handler (ui);
3b12939d
PA
3822 }
3823}
3824
3825/* See infrun.h. */
3826
3827void
3828all_uis_check_sync_execution_done (void)
3829{
0e454242 3830 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3831 {
3832 check_curr_ui_sync_execution_done ();
3833 }
3834}
3835
a8836c93
PA
3836/* See infrun.h. */
3837
3838void
3839all_uis_on_sync_execution_starting (void)
3840{
0e454242 3841 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3842 {
3843 if (current_ui->prompt_state == PROMPT_NEEDED)
3844 async_disable_stdin ();
3845 }
3846}
3847
1777feb0 3848/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3849 event loop whenever a change of state is detected on the file
1777feb0
MS
3850 descriptor corresponding to the target. It can be called more than
3851 once to complete a single execution command. In such cases we need
3852 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3853 that this function is called for a single execution command, then
3854 report to the user that the inferior has stopped, and do the
1777feb0 3855 necessary cleanups. */
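/* Presumably (the wiring is outside this excerpt) the event loop calls
   this via the handler registered for infrun_async_inferior_event_token
   whenever the target's event file descriptor becomes readable; each
   call below pulls at most one event with do_target_wait and hands it
   to handle_inferior_event.  */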
43ff13b4
JM
3856
3857void
b1a35af2 3858fetch_inferior_event ()
43ff13b4 3859{
0d1e5fa7 3860 struct execution_control_state ecss;
a474d7c2 3861 struct execution_control_state *ecs = &ecss;
0f641c01 3862 int cmd_done = 0;
43ff13b4 3863
0d1e5fa7
PA
3864 memset (ecs, 0, sizeof (*ecs));
3865
c61db772
PA
3866 /* Events are always processed with the main UI as current UI. This
3867 way, warnings, debug output, etc. are always consistently sent to
3868 the main console. */
4b6749b9 3869 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3870
d3d4baed 3871 /* End up with readline processing input, if necessary. */
d238133d
TT
3872 {
3873 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3874
3875 /* We're handling a live event, so make sure we're doing live
3876 debugging. If we're looking at traceframes while the target is
3877 running, we're going to need to get back to that mode after
3878 handling the event. */
3879 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3880 if (non_stop)
3881 {
3882 maybe_restore_traceframe.emplace ();
3883 set_current_traceframe (-1);
3884 }
43ff13b4 3885
873657b9
PA
3886 /* The user/frontend should not notice a thread switch due to
3887 internal events. Make sure we revert to the user selected
3888 thread and frame after handling the event and running any
3889 breakpoint commands. */
3890 scoped_restore_current_thread restore_thread;
d238133d
TT
3891
3892 overlay_cache_invalid = 1;
3893 /* Flush target cache before starting to handle each event. Target
3894 was running and cache could be stale. This is just a heuristic.
3895 Running threads may modify target memory, but we don't get any
3896 event. */
3897 target_dcache_invalidate ();
3898
3899 scoped_restore save_exec_dir
3900 = make_scoped_restore (&execution_direction,
3901 target_execution_direction ());
3902
5b6d1e4f
PA
3903 if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
3904 return;
3905
3906 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3907
3908 /* Switch to the target that generated the event, so we can do
3909 target calls. Any inferior bound to the target will do, so we
3910 just switch to the first we find. */
3911 for (inferior *inf : all_inferiors (ecs->target))
3912 {
3913 switch_to_inferior_no_thread (inf);
3914 break;
3915 }
d238133d
TT
3916
3917 if (debug_infrun)
5b6d1e4f 3918 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
3919
3920 /* If an error happens while handling the event, propagate GDB's
3921 knowledge of the executing state to the frontend/user running
3922 state. */
3923 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 3924 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 3925
979a0d13 3926 /* These get executed before scoped_restore_current_thread above, so
d238133d
TT
3927 they still apply to the thread which has thrown the exception. */
3928 auto defer_bpstat_clear
3929 = make_scope_exit (bpstat_clear_actions);
3930 auto defer_delete_threads
3931 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
3932
3933 /* Now figure out what to do with the result of the wait. */
3934 handle_inferior_event (ecs);
3935
3936 if (!ecs->wait_some_more)
3937 {
5b6d1e4f 3938 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
d238133d
TT
3939 int should_stop = 1;
3940 struct thread_info *thr = ecs->event_thread;
d6b48e9c 3941
d238133d 3942 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3943
d238133d
TT
3944 if (thr != NULL)
3945 {
3946 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 3947
d238133d 3948 if (thread_fsm != NULL)
46e3ed7f 3949 should_stop = thread_fsm->should_stop (thr);
d238133d 3950 }
243a9253 3951
d238133d
TT
3952 if (!should_stop)
3953 {
3954 keep_going (ecs);
3955 }
3956 else
3957 {
46e3ed7f 3958 bool should_notify_stop = true;
d238133d 3959 int proceeded = 0;
1840d81a 3960
d238133d 3961 clean_up_just_stopped_threads_fsms (ecs);
243a9253 3962
d238133d 3963 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 3964 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 3965
d238133d
TT
3966 if (should_notify_stop)
3967 {
3968 /* We may not find an inferior if this was a process exit. */
3969 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3970 proceeded = normal_stop ();
3971 }
243a9253 3972
d238133d
TT
3973 if (!proceeded)
3974 {
b1a35af2 3975 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
3976 cmd_done = 1;
3977 }
873657b9
PA
3978
3979 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
3980 previously selected thread is gone. We have two
3981 choices - switch to no thread selected, or restore the
3982 previously selected thread (now exited). We chose the
3984 latter, just because that's what GDB used to do. After
3984 this, "info threads" says "The current thread <Thread
3985 ID 2> has terminated." instead of "No thread
3986 selected.". */
3987 if (!non_stop
3988 && cmd_done
3989 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3990 restore_thread.dont_restore ();
d238133d
TT
3991 }
3992 }
4f8d22e3 3993
d238133d
TT
3994 defer_delete_threads.release ();
3995 defer_bpstat_clear.release ();
29f49a6a 3996
d238133d
TT
3997 /* No error, don't finish the thread states yet. */
3998 finish_state.release ();
731f534f 3999
d238133d
TT
4000 /* This scope is used to ensure that readline callbacks are
4001 reinstalled here. */
4002 }
4f8d22e3 4003
3b12939d
PA
4004 /* If a UI was in sync execution mode, and now isn't, restore its
4005 prompt (a synchronous execution command has finished, and we're
4006 ready for input). */
4007 all_uis_check_sync_execution_done ();
0f641c01
PA
4008
4009 if (cmd_done
0f641c01 4010 && exec_done_display_p
00431a78
PA
4011 && (inferior_ptid == null_ptid
4012 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 4013 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
4014}
4015
29734269
SM
4016/* See infrun.h. */
4017
edb3359d 4018void
29734269
SM
4019set_step_info (thread_info *tp, struct frame_info *frame,
4020 struct symtab_and_line sal)
edb3359d 4021{
29734269
SM
4022 /* This can be removed once this function no longer implicitly relies on the
4023 inferior_ptid value. */
4024 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4025
16c381f0
JK
4026 tp->control.step_frame_id = get_frame_id (frame);
4027 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4028
4029 tp->current_symtab = sal.symtab;
4030 tp->current_line = sal.line;
4031}
4032
0d1e5fa7
PA
4033/* Clear context switchable stepping state. */
4034
4035void
4e1c45ea 4036init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4037{
7f5ef605 4038 tss->stepped_breakpoint = 0;
0d1e5fa7 4039 tss->stepping_over_breakpoint = 0;
963f9c80 4040 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4041 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4042}
4043
ab1ddbcf 4044/* See infrun.h. */
c32c64b7 4045
6efcd9a8 4046void
5b6d1e4f
PA
4047set_last_target_status (process_stratum_target *target, ptid_t ptid,
4048 target_waitstatus status)
c32c64b7 4049{
5b6d1e4f 4050 target_last_proc_target = target;
c32c64b7
DE
4051 target_last_wait_ptid = ptid;
4052 target_last_waitstatus = status;
4053}
4054
ab1ddbcf 4055/* See infrun.h. */
e02bc4cc
DS
4056
4057void
5b6d1e4f
PA
4058get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4059 target_waitstatus *status)
e02bc4cc 4060{
5b6d1e4f
PA
4061 if (target != nullptr)
4062 *target = target_last_proc_target;
ab1ddbcf
PA
4063 if (ptid != nullptr)
4064 *ptid = target_last_wait_ptid;
4065 if (status != nullptr)
4066 *status = target_last_waitstatus;
e02bc4cc
DS
4067}
4068
ab1ddbcf
PA
4069/* See infrun.h. */
4070
ac264b3b
MS
4071void
4072nullify_last_target_wait_ptid (void)
4073{
5b6d1e4f 4074 target_last_proc_target = nullptr;
ac264b3b 4075 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4076 target_last_waitstatus = {};
ac264b3b
MS
4077}
4078
dcf4fbde 4079/* Switch thread contexts. */
dd80620e
MS
4080
4081static void
00431a78 4082context_switch (execution_control_state *ecs)
dd80620e 4083{
edbcda09 4084 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4085 && (inferior_ptid == null_ptid
4086 || ecs->event_thread != inferior_thread ()))
fd48f117 4087 {
edbcda09
SM
4088 infrun_log_debug ("Switching context from %s to %s",
4089 target_pid_to_str (inferior_ptid).c_str (),
4090 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4091 }
4092
00431a78 4093 switch_to_thread (ecs->event_thread);
dd80620e
MS
4094}
4095
d8dd4d5f
PA
4096/* If the target can't tell whether we've hit breakpoints
4097 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4098 check whether that could have been caused by a breakpoint. If so,
4099 adjust the PC, per gdbarch_decr_pc_after_break. */
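/* A concrete forward-execution example, assuming an architecture whose
   gdbarch_decr_pc_after_break is 1 (x86-style software breakpoints): a
   breakpoint planted at 0x08000000 traps with the reported PC already
   past the breakpoint instruction, at 0x08000001.  If the target cannot
   itself report "stopped by SW breakpoint", the code below notices a
   breakpoint inserted at PC - 1 and, unless the SIGTRAP was really a
   completed hardware single-step, rewinds the PC to 0x08000000 so the
   stop is attributed to that breakpoint.  */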
4100
4fa8626c 4101static void
d8dd4d5f
PA
4102adjust_pc_after_break (struct thread_info *thread,
4103 struct target_waitstatus *ws)
4fa8626c 4104{
24a73cce
UW
4105 struct regcache *regcache;
4106 struct gdbarch *gdbarch;
118e6252 4107 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4108
4fa8626c
DJ
4109 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4110 we aren't, just return.
9709f61c
DJ
4111
4112 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4113 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4114 implemented by software breakpoints should be handled through the normal
4115 breakpoint layer.
8fb3e588 4116
4fa8626c
DJ
4117 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4118 different signals (SIGILL or SIGEMT for instance), but it is less
4119 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4120 gdbarch_decr_pc_after_break. I don't know any specific target that
4121 generates these signals at breakpoints (the code has been in GDB since at
4122 least 1992) so I can not guess how to handle them here.
8fb3e588 4123
e6cf7916
UW
4124 In earlier versions of GDB, a target with
4125 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4126 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4127 target with both of these set in GDB history, and it seems unlikely to be
4128 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4129
d8dd4d5f 4130 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4131 return;
4132
d8dd4d5f 4133 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4134 return;
4135
4058b839
PA
4136 /* In reverse execution, when a breakpoint is hit, the instruction
4137 under it has already been de-executed. The reported PC always
4138 points at the breakpoint address, so adjusting it further would
4139 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4140 architecture:
4141
4142 B1 0x08000000 : INSN1
4143 B2 0x08000001 : INSN2
4144 0x08000002 : INSN3
4145 PC -> 0x08000003 : INSN4
4146
4147 Say you're stopped at 0x08000003 as above. Reverse continuing
4148 from that point should hit B2 as below. Reading the PC when the
4149 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4150 been de-executed already.
4151
4152 B1 0x08000000 : INSN1
4153 B2 PC -> 0x08000001 : INSN2
4154 0x08000002 : INSN3
4155 0x08000003 : INSN4
4156
4157 We can't apply the same logic as for forward execution, because
4158 we would wrongly adjust the PC to 0x08000000, since there's a
4159 breakpoint at PC - 1. We'd then report a hit on B1, although
4160 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4161 behaviour. */
4162 if (execution_direction == EXEC_REVERSE)
4163 return;
4164
1cf4d951
PA
4165 /* If the target can tell whether the thread hit a SW breakpoint,
4166 trust it. Targets that can tell also adjust the PC
4167 themselves. */
4168 if (target_supports_stopped_by_sw_breakpoint ())
4169 return;
4170
4171 /* Note that relying on whether a breakpoint is planted in memory to
4172 determine this can fail. E.g,. the breakpoint could have been
4173 removed since. Or the thread could have been told to step an
4174 instruction the size of a breakpoint instruction, and only
4175 _after_ was a breakpoint inserted at its address. */
4176
24a73cce
UW
4177 /* If this target does not decrement the PC after breakpoints, then
4178 we have nothing to do. */
00431a78 4179 regcache = get_thread_regcache (thread);
ac7936df 4180 gdbarch = regcache->arch ();
118e6252 4181
527a273a 4182 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4183 if (decr_pc == 0)
24a73cce
UW
4184 return;
4185
8b86c959 4186 const address_space *aspace = regcache->aspace ();
6c95b8df 4187
8aad930b
AC
4188 /* Find the location where (if we've hit a breakpoint) the
4189 breakpoint would be. */
118e6252 4190 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4191
1cf4d951
PA
4192 /* If the target can't tell whether a software breakpoint triggered,
4193 fallback to figuring it out based on breakpoints we think were
4194 inserted in the target, and on whether the thread was stepped or
4195 continued. */
4196
1c5cfe86
PA
4197 /* Check whether there actually is a software breakpoint inserted at
4198 that location.
4199
4200 If in non-stop mode, a race condition is possible where we've
4201 removed a breakpoint, but stop events for that breakpoint were
4202 already queued and arrive later. To suppress those spurious
4203 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4204 and retire them after a number of stop events are reported. Note
4205 this is a heuristic and can thus get confused. The real fix is
4206 to get the "stopped by SW BP and needs adjustment" info out of
4207 the target/kernel (and thus never reach here; see above). */
6c95b8df 4208 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4209 || (target_is_non_stop_p ()
4210 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4211 {
07036511 4212 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4213
8213266a 4214 if (record_full_is_used ())
07036511
TT
4215 restore_operation_disable.emplace
4216 (record_full_gdb_operation_disable_set ());
96429cc8 4217
1c0fdd0e
UW
4218 /* When using hardware single-step, a SIGTRAP is reported for both
4219 a completed single-step and a software breakpoint. Need to
4220 differentiate between the two, as the latter needs adjusting
4221 but the former does not.
4222
4223 The SIGTRAP can be due to a completed hardware single-step only if
4224 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4225 - this thread is currently being stepped
4226
4227 If any of these events did not occur, we must have stopped due
4228 to hitting a software breakpoint, and have to back up to the
4229 breakpoint address.
4230
4231 As a special case, we could have hardware single-stepped a
4232 software breakpoint. In this case (prev_pc == breakpoint_pc),
4233 we also need to back up to the breakpoint address. */
4234
d8dd4d5f
PA
4235 if (thread_has_single_step_breakpoints_set (thread)
4236 || !currently_stepping (thread)
4237 || (thread->stepped_breakpoint
4238 && thread->prev_pc == breakpoint_pc))
515630c5 4239 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4240 }
4fa8626c
DJ
4241}
4242
edb3359d
DJ
4243static int
4244stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4245{
4246 for (frame = get_prev_frame (frame);
4247 frame != NULL;
4248 frame = get_prev_frame (frame))
4249 {
4250 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4251 return 1;
4252 if (get_frame_type (frame) != INLINE_FRAME)
4253 break;
4254 }
4255
4256 return 0;
4257}
4258
4a4c04f1
BE
4259/* Look for an inline frame that is marked for skip.
4260 If PREV_FRAME is TRUE start at the previous frame,
4261 otherwise start at the current frame. Stop at the
4262 first non-inline frame, or at the frame where the
4263 step started. */
4264
4265static bool
4266inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4267{
4268 struct frame_info *frame = get_current_frame ();
4269
4270 if (prev_frame)
4271 frame = get_prev_frame (frame);
4272
4273 for (; frame != NULL; frame = get_prev_frame (frame))
4274 {
4275 const char *fn = NULL;
4276 symtab_and_line sal;
4277 struct symbol *sym;
4278
4279 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4280 break;
4281 if (get_frame_type (frame) != INLINE_FRAME)
4282 break;
4283
4284 sal = find_frame_sal (frame);
4285 sym = get_frame_function (frame);
4286
4287 if (sym != NULL)
4288 fn = sym->print_name ();
4289
4290 if (sal.line != 0
4291 && function_name_is_marked_for_skip (fn, sal))
4292 return true;
4293 }
4294
4295 return false;
4296}
4297
c65d6b55
PA
4298/* If the event thread has the stop requested flag set, pretend it
4299 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4300 target_stop). */
4301
4302static bool
4303handle_stop_requested (struct execution_control_state *ecs)
4304{
4305 if (ecs->event_thread->stop_requested)
4306 {
4307 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4308 ecs->ws.value.sig = GDB_SIGNAL_0;
4309 handle_signal_stop (ecs);
4310 return true;
4311 }
4312 return false;
4313}
4314
a96d9b2e
SDJ
4315/* Auxiliary function that handles syscall entry/return events.
4316 It returns 1 if the inferior should keep going (and GDB
4317 should ignore the event), or 0 if the event deserves to be
4318 processed. */
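/* A sketch of how a caller might consume the return value (the actual
   call site is outside this excerpt, so treat the shape below as an
   assumption):

     if (handle_syscall_event (ecs) == 0)
       process_event_stop_test (ecs);

   i.e. when a syscall catchpoint caused a stop (return value 0), run
   the usual stop tests via process_event_stop_test, declared above;
   otherwise the event has already been handled by keep_going.  */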
ca2163eb 4319
a96d9b2e 4320static int
ca2163eb 4321handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4322{
ca2163eb 4323 struct regcache *regcache;
ca2163eb
PA
4324 int syscall_number;
4325
00431a78 4326 context_switch (ecs);
ca2163eb 4327
00431a78 4328 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4329 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4330 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4331
a96d9b2e
SDJ
4332 if (catch_syscall_enabled () > 0
4333 && catching_syscall_number (syscall_number) > 0)
4334 {
edbcda09 4335 infrun_log_debug ("syscall number=%d", syscall_number);
a96d9b2e 4336
16c381f0 4337 ecs->event_thread->control.stop_bpstat
a01bda52 4338 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4339 ecs->event_thread->suspend.stop_pc,
4340 ecs->event_thread, &ecs->ws);
ab04a2af 4341
c65d6b55
PA
4342 if (handle_stop_requested (ecs))
4343 return 0;
4344
ce12b012 4345 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4346 {
4347 /* Catchpoint hit. */
ca2163eb
PA
4348 return 0;
4349 }
a96d9b2e 4350 }
ca2163eb 4351
c65d6b55
PA
4352 if (handle_stop_requested (ecs))
4353 return 0;
4354
ca2163eb 4355 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4356 keep_going (ecs);
4357 return 1;
a96d9b2e
SDJ
4358}
4359
7e324e48
GB
4360/* Lazily fill in the execution_control_state's stop_func_* fields. */
4361
4362static void
4363fill_in_stop_func (struct gdbarch *gdbarch,
4364 struct execution_control_state *ecs)
4365{
4366 if (!ecs->stop_func_filled_in)
4367 {
98a617f8
KB
4368 const block *block;
4369
7e324e48
GB
4370 /* Don't care about return value; stop_func_start and stop_func_name
4371 will both be 0 if it doesn't work. */
98a617f8
KB
4372 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4373 &ecs->stop_func_name,
4374 &ecs->stop_func_start,
4375 &ecs->stop_func_end,
4376 &block);
4377
4378 /* The call to find_pc_partial_function, above, will set
4379 stop_func_start and stop_func_end to the start and end
4380 of the range containing the stop pc. If this range
4381 contains the entry pc for the block (which is always the
4382 case for contiguous blocks), advance stop_func_start past
4383 the function's start offset and entrypoint. Note that
4384 stop_func_start is NOT advanced when in a range of a
4385 non-contiguous block that does not contain the entry pc. */
4386 if (block != nullptr
4387 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4388 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4389 {
4390 ecs->stop_func_start
4391 += gdbarch_deprecated_function_start_offset (gdbarch);
4392
4393 if (gdbarch_skip_entrypoint_p (gdbarch))
4394 ecs->stop_func_start
4395 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4396 }
591a12a1 4397
7e324e48
GB
4398 ecs->stop_func_filled_in = 1;
4399 }
4400}
4401
4f5d7f63 4402
00431a78 4403/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4404
4405static enum stop_kind
00431a78 4406get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4407{
5b6d1e4f 4408 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4409
4410 gdb_assert (inf != NULL);
4411 return inf->control.stop_soon;
4412}
4413
5b6d1e4f
PA
4414/* Poll for one event out of the current target. Store the resulting
4415 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4416
4417static ptid_t
5b6d1e4f 4418poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4419{
4420 ptid_t event_ptid;
372316f1
PA
4421
4422 overlay_cache_invalid = 1;
4423
4424 /* Flush target cache before starting to handle each event.
4425 Target was running and cache could be stale. This is just a
4426 heuristic. Running threads may modify target memory, but we
4427 don't get any event. */
4428 target_dcache_invalidate ();
4429
4430 if (deprecated_target_wait_hook)
5b6d1e4f 4431 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4432 else
5b6d1e4f 4433 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4434
4435 if (debug_infrun)
5b6d1e4f 4436 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4437
4438 return event_ptid;
4439}
4440
5b6d1e4f
PA
4441/* An event reported by wait_one. */
4442
4443struct wait_one_event
4444{
4445 /* The target the event came out of. */
4446 process_stratum_target *target;
4447
4448 /* The PTID the event was for. */
4449 ptid_t ptid;
4450
4451 /* The waitstatus. */
4452 target_waitstatus ws;
4453};
4454
4455/* Wait for one event out of any target. */
4456
4457static wait_one_event
4458wait_one ()
4459{
4460 while (1)
4461 {
4462 for (inferior *inf : all_inferiors ())
4463 {
4464 process_stratum_target *target = inf->process_target ();
4465 if (target == NULL
4466 || !target->is_async_p ()
4467 || !target->threads_executing)
4468 continue;
4469
4470 switch_to_inferior_no_thread (inf);
4471
4472 wait_one_event event;
4473 event.target = target;
4474 event.ptid = poll_one_curr_target (&event.ws);
4475
4476 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4477 {
4478 /* If nothing is resumed, remove the target from the
4479 event loop. */
4480 target_async (0);
4481 }
4482 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4483 return event;
4484 }
4485
4486 /* Block waiting for some event. */
4487
4488 fd_set readfds;
4489 int nfds = 0;
4490
4491 FD_ZERO (&readfds);
4492
4493 for (inferior *inf : all_inferiors ())
4494 {
4495 process_stratum_target *target = inf->process_target ();
4496 if (target == NULL
4497 || !target->is_async_p ()
4498 || !target->threads_executing)
4499 continue;
4500
4501 int fd = target->async_wait_fd ();
4502 FD_SET (fd, &readfds);
4503 if (nfds <= fd)
4504 nfds = fd + 1;
4505 }
4506
4507 if (nfds == 0)
4508 {
4509 /* No waitable targets left. All must be stopped. */
4510 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4511 }
4512
4513 QUIT;
4514
4515 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4516 if (numfds < 0)
4517 {
4518 if (errno == EINTR)
4519 continue;
4520 else
4521 perror_with_name ("interruptible_select");
4522 }
4523 }
4524}
4525
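/* A minimal standalone sketch of the poll-then-block pattern that
   wait_one implements above, written against plain POSIX select ().
   The event_source type and its poll_fn callback are illustrative
   stand-ins, not GDB API.  */

#include <sys/select.h>
#include <cerrno>
#include <vector>

struct event_source
{
  int fd;                           /* Readable when an event may be pending.  */
  bool (*poll_fn) (event_source &); /* Non-blocking poll; true if an event was handled.  */
};

/* Poll every source once without blocking; if none had an event,
   block in select () until some descriptor is readable, then retry.  */

static void
wait_for_one_event (std::vector<event_source> &sources)
{
  while (true)
    {
      for (event_source &src : sources)
        if (src.poll_fn (src))
          return;

      fd_set readfds;
      FD_ZERO (&readfds);
      int nfds = 0;
      for (event_source &src : sources)
        {
          FD_SET (src.fd, &readfds);
          if (nfds <= src.fd)
            nfds = src.fd + 1;
        }

      if (nfds == 0)
        return;   /* No waitable sources left.  */

      if (select (nfds, &readfds, nullptr, nullptr, nullptr) < 0
          && errno != EINTR)
        return;   /* Hard error; give up.  */
      /* EINTR or a readable descriptor: loop and poll again.  */
    }
}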
372316f1
PA
4526/* Save the thread's event and stop reason to process it later. */
4527
4528static void
5b6d1e4f 4529save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4530{
edbcda09
SM
4531 infrun_log_debug ("saving status %s for %d.%ld.%ld",
4532 target_waitstatus_to_string (ws).c_str (),
4533 tp->ptid.pid (),
4534 tp->ptid.lwp (),
4535 tp->ptid.tid ());
372316f1
PA
4536
4537 /* Record for later. */
4538 tp->suspend.waitstatus = *ws;
4539 tp->suspend.waitstatus_pending_p = 1;
4540
00431a78 4541 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4542 const address_space *aspace = regcache->aspace ();
372316f1
PA
4543
4544 if (ws->kind == TARGET_WAITKIND_STOPPED
4545 && ws->value.sig == GDB_SIGNAL_TRAP)
4546 {
4547 CORE_ADDR pc = regcache_read_pc (regcache);
4548
4549 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4550
18493a00
PA
4551 scoped_restore_current_thread restore_thread;
4552 switch_to_thread (tp);
4553
4554 if (target_stopped_by_watchpoint ())
372316f1
PA
4555 {
4556 tp->suspend.stop_reason
4557 = TARGET_STOPPED_BY_WATCHPOINT;
4558 }
4559 else if (target_supports_stopped_by_sw_breakpoint ()
18493a00 4560 && target_stopped_by_sw_breakpoint ())
372316f1
PA
4561 {
4562 tp->suspend.stop_reason
4563 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4564 }
4565 else if (target_supports_stopped_by_hw_breakpoint ()
18493a00 4566 && target_stopped_by_hw_breakpoint ())
372316f1
PA
4567 {
4568 tp->suspend.stop_reason
4569 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4570 }
4571 else if (!target_supports_stopped_by_hw_breakpoint ()
4572 && hardware_breakpoint_inserted_here_p (aspace,
4573 pc))
4574 {
4575 tp->suspend.stop_reason
4576 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4577 }
4578 else if (!target_supports_stopped_by_sw_breakpoint ()
4579 && software_breakpoint_inserted_here_p (aspace,
4580 pc))
4581 {
4582 tp->suspend.stop_reason
4583 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4584 }
4585 else if (!thread_has_single_step_breakpoints_set (tp)
4586 && currently_stepping (tp))
4587 {
4588 tp->suspend.stop_reason
4589 = TARGET_STOPPED_BY_SINGLE_STEP;
4590 }
4591 }
4592}
4593
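/* A standalone sketch of the classification order save_waitstatus
   uses above: trust the stop reason the target reports (watchpoint,
   then software breakpoint, then hardware breakpoint), and only fall
   back to the breakpoint tables or the stepping state when the target
   cannot report the reason itself.  The stop_query fields are
   illustrative stand-ins for the target queries, not GDB API.  */

enum class stop_reason
{
  none,
  watchpoint,
  sw_breakpoint,
  hw_breakpoint,
  single_step
};

struct stop_query
{
  bool stopped_by_watchpoint;
  bool target_reports_sw_bp, stopped_by_sw_bp, sw_bp_inserted_at_pc;
  bool target_reports_hw_bp, stopped_by_hw_bp, hw_bp_inserted_at_pc;
  bool single_step_bps_set, was_stepping;
};

static stop_reason
classify_trap (const stop_query &q)
{
  if (q.stopped_by_watchpoint)
    return stop_reason::watchpoint;
  if (q.target_reports_sw_bp && q.stopped_by_sw_bp)
    return stop_reason::sw_breakpoint;
  if (q.target_reports_hw_bp && q.stopped_by_hw_bp)
    return stop_reason::hw_breakpoint;
  if (!q.target_reports_hw_bp && q.hw_bp_inserted_at_pc)
    return stop_reason::hw_breakpoint;
  if (!q.target_reports_sw_bp && q.sw_bp_inserted_at_pc)
    return stop_reason::sw_breakpoint;
  if (!q.single_step_bps_set && q.was_stepping)
    return stop_reason::single_step;
  return stop_reason::none;
}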
293b3ebc
TBA
4594/* Mark the non-executing threads accordingly. In all-stop, all
4595 threads of all processes are stopped when we get any event
4596 reported. In non-stop mode, only the event thread stops. */
4597
4598static void
4599mark_non_executing_threads (process_stratum_target *target,
4600 ptid_t event_ptid,
4601 struct target_waitstatus ws)
4602{
4603 ptid_t mark_ptid;
4604
4605 if (!target_is_non_stop_p ())
4606 mark_ptid = minus_one_ptid;
4607 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4608 || ws.kind == TARGET_WAITKIND_EXITED)
4609 {
4610 /* If we're handling a process exit in non-stop mode, even
4611 though threads haven't been deleted yet, one would think
4612 that there is nothing to do, as threads of the dead process
4613       will soon be deleted, and threads of any other process were
4614 left running. However, on some targets, threads survive a
4615 process exit event. E.g., for the "checkpoint" command,
4616 when the current checkpoint/fork exits, linux-fork.c
4617 automatically switches to another fork from within
4618 target_mourn_inferior, by associating the same
4619 inferior/thread to another fork. We haven't mourned yet at
4620 this point, but we must mark any threads left in the
4621 process as not-executing so that finish_thread_state marks
4622       them stopped (from the user's perspective) if/when we present
4623 the stop to the user. */
4624 mark_ptid = ptid_t (event_ptid.pid ());
4625 }
4626 else
4627 mark_ptid = event_ptid;
4628
4629 set_executing (target, mark_ptid, false);
4630
4631 /* Likewise the resumed flag. */
4632 set_resumed (target, mark_ptid, false);
4633}
4634
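/* A standalone sketch of how mark_non_executing_threads above picks
   which threads to mark stopped: everything in all-stop mode, the
   whole exiting process for a process-exit event in non-stop mode,
   and otherwise only the event thread.  The enum below is an
   illustrative stand-in, not a GDB type.  */

enum class mark_scope { all_threads, whole_process, event_thread_only };

static mark_scope
choose_mark_scope (bool non_stop, bool process_exit_event)
{
  if (!non_stop)
    return mark_scope::all_threads;
  if (process_exit_event)
    return mark_scope::whole_process;
  return mark_scope::event_thread_only;
}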
6efcd9a8 4635/* See infrun.h. */
372316f1 4636
6efcd9a8 4637void
372316f1
PA
4638stop_all_threads (void)
4639{
4640 /* We may need multiple passes to discover all threads. */
4641 int pass;
4642 int iterations = 0;
372316f1 4643
53cccef1 4644 gdb_assert (exists_non_stop_target ());
372316f1 4645
edbcda09 4646 infrun_log_debug ("stop_all_threads");
372316f1 4647
00431a78 4648 scoped_restore_current_thread restore_thread;
372316f1 4649
6ad82919
TBA
4650 /* Enable thread events of all targets. */
4651 for (auto *target : all_non_exited_process_targets ())
4652 {
4653 switch_to_target_no_thread (target);
4654 target_thread_events (true);
4655 }
4656
4657 SCOPE_EXIT
4658 {
4659 /* Disable thread events of all targets. */
4660 for (auto *target : all_non_exited_process_targets ())
4661 {
4662 switch_to_target_no_thread (target);
4663 target_thread_events (false);
4664 }
4665
edbcda09
SM
4666
4667 infrun_log_debug ("stop_all_threads done");
6ad82919 4668 };
65706a29 4669
372316f1
PA
4670 /* Request threads to stop, and then wait for the stops. Because
4671 threads we already know about can spawn more threads while we're
4672 trying to stop them, and we only learn about new threads when we
4673 update the thread list, do this in a loop, and keep iterating
4674 until two passes find no threads that need to be stopped. */
4675 for (pass = 0; pass < 2; pass++, iterations++)
4676 {
edbcda09
SM
4677 infrun_log_debug ("stop_all_threads, pass=%d, iterations=%d",
4678 pass, iterations);
372316f1
PA
4679 while (1)
4680 {
29d6859f 4681 int waits_needed = 0;
372316f1 4682
a05575d3
TBA
4683 for (auto *target : all_non_exited_process_targets ())
4684 {
4685 switch_to_target_no_thread (target);
4686 update_thread_list ();
4687 }
372316f1
PA
4688
4689 /* Go through all threads looking for threads that we need
4690 to tell the target to stop. */
08036331 4691 for (thread_info *t : all_non_exited_threads ())
372316f1 4692 {
53cccef1
TBA
4693 /* For a single-target setting with an all-stop target,
4694 we would not even arrive here. For a multi-target
4695 setting, until GDB is able to handle a mixture of
4696 all-stop and non-stop targets, simply skip all-stop
4697 targets' threads. This should be fine due to the
4698 protection of 'check_multi_target_resumption'. */
4699
4700 switch_to_thread_no_regs (t);
4701 if (!target_is_non_stop_p ())
4702 continue;
4703
372316f1
PA
4704 if (t->executing)
4705 {
4706 /* If already stopping, don't request a stop again.
4707 We just haven't seen the notification yet. */
4708 if (!t->stop_requested)
4709 {
edbcda09
SM
4710 infrun_log_debug (" %s executing, need stop",
4711 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4712 target_stop (t->ptid);
4713 t->stop_requested = 1;
4714 }
4715 else
4716 {
edbcda09
SM
4717 infrun_log_debug (" %s executing, already stopping",
4718 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4719 }
4720
4721 if (t->stop_requested)
29d6859f 4722 waits_needed++;
372316f1
PA
4723 }
4724 else
4725 {
edbcda09
SM
4726 infrun_log_debug (" %s not executing",
4727 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4728
4729		/* The thread may not be executing, but still be
4730 resumed with a pending status to process. */
719546c4 4731 t->resumed = false;
372316f1
PA
4732 }
4733 }
4734
29d6859f 4735 if (waits_needed == 0)
372316f1
PA
4736 break;
4737
4738 /* If we find new threads on the second iteration, restart
4739 over. We want to see two iterations in a row with all
4740 threads stopped. */
4741 if (pass > 0)
4742 pass = -1;
4743
29d6859f 4744 for (int i = 0; i < waits_needed; i++)
c29705b7 4745 {
29d6859f 4746 wait_one_event event = wait_one ();
a05575d3 4747
edbcda09
SM
4748 infrun_log_debug ("%s %s\n",
4749 target_waitstatus_to_string (&event.ws).c_str (),
4750 target_pid_to_str (event.ptid).c_str ());
a05575d3 4751
29d6859f 4752 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
a05575d3 4753 {
29d6859f
LM
4754 /* All resumed threads exited. */
4755 break;
a05575d3 4756 }
29d6859f
LM
4757 else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4758 || event.ws.kind == TARGET_WAITKIND_EXITED
4759 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
6efcd9a8 4760 {
29d6859f 4761 /* One thread/process exited/signalled. */
6efcd9a8 4762
29d6859f 4763 thread_info *t = nullptr;
372316f1 4764
29d6859f
LM
4765 /* The target may have reported just a pid. If so, try
4766 the first non-exited thread. */
4767 if (event.ptid.is_pid ())
372316f1 4768 {
29d6859f
LM
4769 int pid = event.ptid.pid ();
4770 inferior *inf = find_inferior_pid (event.target, pid);
4771 for (thread_info *tp : inf->non_exited_threads ())
372316f1 4772 {
29d6859f
LM
4773 t = tp;
4774 break;
372316f1 4775 }
29d6859f
LM
4776
4777 /* If there is no available thread, the event would
4778 have to be appended to a per-inferior event list,
4779 which does not exist (and if it did, we'd have
4780		 to adjust the run control commands to be able to
4781 resume such an inferior). We assert here instead
4782 of going into an infinite loop. */
4783 gdb_assert (t != nullptr);
4784
edbcda09
SM
4785 infrun_log_debug ("using %s\n",
4786 target_pid_to_str (t->ptid).c_str ());
29d6859f
LM
4787 }
4788 else
4789 {
4790 t = find_thread_ptid (event.target, event.ptid);
4791 /* Check if this is the first time we see this thread.
4792 Don't bother adding if it individually exited. */
4793 if (t == nullptr
4794 && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
4795 t = add_thread (event.target, event.ptid);
4796 }
4797
4798 if (t != nullptr)
4799 {
4800 /* Set the threads as non-executing to avoid
4801 another stop attempt on them. */
4802 switch_to_thread_no_regs (t);
4803 mark_non_executing_threads (event.target, event.ptid,
4804 event.ws);
4805 save_waitstatus (t, &event.ws);
4806 t->stop_requested = false;
372316f1
PA
4807 }
4808 }
4809 else
4810 {
29d6859f
LM
4811 thread_info *t = find_thread_ptid (event.target, event.ptid);
4812 if (t == NULL)
4813 t = add_thread (event.target, event.ptid);
372316f1 4814
29d6859f
LM
4815 t->stop_requested = 0;
4816 t->executing = 0;
4817 t->resumed = false;
4818 t->control.may_range_step = 0;
4819
4820 /* This may be the first time we see the inferior report
4821 a stop. */
4822 inferior *inf = find_inferior_ptid (event.target, event.ptid);
4823 if (inf->needs_setup)
372316f1 4824 {
29d6859f
LM
4825 switch_to_thread_no_regs (t);
4826 setup_inferior (0);
372316f1
PA
4827 }
4828
29d6859f
LM
4829 if (event.ws.kind == TARGET_WAITKIND_STOPPED
4830 && event.ws.value.sig == GDB_SIGNAL_0)
372316f1 4831 {
29d6859f
LM
4832 /* We caught the event that we intended to catch, so
4833 there's no event pending. */
4834 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4835 t->suspend.waitstatus_pending_p = 0;
4836
4837 if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
4838 {
4839 /* Add it back to the step-over queue. */
edbcda09
SM
4840 infrun_log_debug ("displaced-step of %s "
4841 "canceled: adding back to the "
4842 "step-over queue\n",
4843 target_pid_to_str (t->ptid).c_str ());
4844
29d6859f
LM
4845 t->control.trap_expected = 0;
4846 thread_step_over_chain_enqueue (t);
4847 }
372316f1 4848 }
29d6859f
LM
4849 else
4850 {
4851 enum gdb_signal sig;
4852 struct regcache *regcache;
372316f1 4853
29d6859f
LM
4854 if (debug_infrun)
4855 {
4856 std::string statstr = target_waitstatus_to_string (&event.ws);
372316f1 4857
edbcda09
SM
4858 infrun_log_debug ("target_wait %s, saving "
4859 "status for %d.%ld.%ld\n",
4860 statstr.c_str (),
4861 t->ptid.pid (),
4862 t->ptid.lwp (),
4863 t->ptid.tid ());
29d6859f
LM
4864 }
4865
4866 /* Record for later. */
4867 save_waitstatus (t, &event.ws);
4868
4869 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
4870 ? event.ws.value.sig : GDB_SIGNAL_0);
4871
4872 if (displaced_step_fixup (t, sig) < 0)
4873 {
4874 /* Add it back to the step-over queue. */
4875 t->control.trap_expected = 0;
4876 thread_step_over_chain_enqueue (t);
4877 }
4878
4879 regcache = get_thread_regcache (t);
4880 t->suspend.stop_pc = regcache_read_pc (regcache);
4881
edbcda09
SM
4882 infrun_log_debug ("saved stop_pc=%s for %s "
4883 "(currently_stepping=%d)\n",
4884 paddress (target_gdbarch (),
4885 t->suspend.stop_pc),
4886 target_pid_to_str (t->ptid).c_str (),
4887 currently_stepping (t));
372316f1
PA
4888 }
4889 }
4890 }
4891 }
4892 }
372316f1
PA
4893}
4894
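/* A standalone sketch of the convergence loop stop_all_threads uses
   above: keep requesting stops and waiting for them, and only finish
   once two consecutive passes find nothing left to stop, since new
   threads may appear while earlier ones are being stopped.  The
   request_stops and wait_for_stops callbacks are illustrative
   stand-ins, not GDB API.  */

static void
stop_everything (int (*request_stops) (), void (*wait_for_stops) (int))
{
  for (int pass = 0; pass < 2; pass++)
    {
      while (true)
        {
          int waits_needed = request_stops ();
          if (waits_needed == 0)
            break;              /* This pass found nothing to stop.  */

          /* New work was found, so a later pass must reconfirm that
             everything has stopped; restart the pass count.  */
          if (pass > 0)
            pass = -1;

          wait_for_stops (waits_needed);
        }
    }
}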
f4836ba9
PA
4895/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4896
4897static int
4898handle_no_resumed (struct execution_control_state *ecs)
4899{
3b12939d 4900 if (target_can_async_p ())
f4836ba9 4901 {
3b12939d 4902 int any_sync = 0;
f4836ba9 4903
2dab0c7b 4904 for (ui *ui : all_uis ())
3b12939d
PA
4905 {
4906 if (ui->prompt_state == PROMPT_BLOCKED)
4907 {
4908 any_sync = 1;
4909 break;
4910 }
4911 }
4912 if (!any_sync)
4913 {
4914	  /* There were no unwaited-for children left in the target, but
4915 we're not synchronously waiting for events either. Just
4916 ignore. */
4917
edbcda09 4918 infrun_log_debug ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
3b12939d
PA
4919 prepare_to_wait (ecs);
4920 return 1;
4921 }
f4836ba9
PA
4922 }
4923
4924 /* Otherwise, if we were running a synchronous execution command, we
4925 may need to cancel it and give the user back the terminal.
4926
4927 In non-stop mode, the target can't tell whether we've already
4928 consumed previous stop events, so it can end up sending us a
4929 no-resumed event like so:
4930
4931 #0 - thread 1 is left stopped
4932
4933 #1 - thread 2 is resumed and hits breakpoint
4934 -> TARGET_WAITKIND_STOPPED
4935
4936 #2 - thread 3 is resumed and exits
4937 this is the last resumed thread, so
4938 -> TARGET_WAITKIND_NO_RESUMED
4939
4940 #3 - gdb processes stop for thread 2 and decides to re-resume
4941 it.
4942
4943 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4944 thread 2 is now resumed, so the event should be ignored.
4945
4946 IOW, if the stop for thread 2 doesn't end a foreground command,
4947 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4948 event. But it could be that the event meant that thread 2 itself
4949 (or whatever other thread was the last resumed thread) exited.
4950
4951 To address this we refresh the thread list and check whether we
4952 have resumed threads _now_. In the example above, this removes
4953 thread 3 from the thread list. If thread 2 was re-resumed, we
4954 ignore this event. If we find no thread resumed, then we cancel
2ec0f7ff
PA
4955 the synchronous command and show "no unwaited-for " to the
4956 user. */
f4836ba9 4957
aecd6cb8 4958 inferior *curr_inf = current_inferior ();
2ec0f7ff 4959
aecd6cb8
PA
4960 scoped_restore_current_thread restore_thread;
4961
4962 for (auto *target : all_non_exited_process_targets ())
4963 {
4964 switch_to_target_no_thread (target);
4965 update_thread_list ();
4966 }
4967
4968 /* If:
4969
4970 - the current target has no thread executing, and
4971 - the current inferior is native, and
4972 - the current inferior is the one which has the terminal, and
4973 - we did nothing,
4974
4975 then a Ctrl-C from this point on would remain stuck in the
4976 kernel, until a thread resumes and dequeues it. That would
4977 result in the GDB CLI not reacting to Ctrl-C, not able to
4978 interrupt the program. To address this, if the current inferior
4979 no longer has any thread executing, we give the terminal to some
4980 other inferior that has at least one thread executing. */
4981 bool swap_terminal = true;
4982
4983 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
4984 whether to report it to the user. */
4985 bool ignore_event = false;
2ec0f7ff
PA
4986
4987 for (thread_info *thread : all_non_exited_threads ())
f4836ba9 4988 {
aecd6cb8
PA
4989 if (swap_terminal && thread->executing)
4990 {
4991 if (thread->inf != curr_inf)
4992 {
4993 target_terminal::ours ();
4994
4995 switch_to_thread (thread);
4996 target_terminal::inferior ();
4997 }
4998 swap_terminal = false;
4999 }
5000
5001 if (!ignore_event
5002 && (thread->executing
5003 || thread->suspend.waitstatus_pending_p))
f4836ba9 5004 {
2ec0f7ff
PA
5005 /* Either there were no unwaited-for children left in the
5006 target at some point, but there are now, or some target
5007 other than the eventing one has unwaited-for children
5008 left. Just ignore. */
edbcda09
SM
5009 infrun_log_debug ("TARGET_WAITKIND_NO_RESUMED "
5010 "(ignoring: found resumed)\n");
aecd6cb8
PA
5011
5012 ignore_event = true;
f4836ba9 5013 }
aecd6cb8
PA
5014
5015 if (ignore_event && !swap_terminal)
5016 break;
5017 }
5018
5019 if (ignore_event)
5020 {
5021 switch_to_inferior_no_thread (curr_inf);
5022 prepare_to_wait (ecs);
5023 return 1;
f4836ba9
PA
5024 }
5025
5026 /* Go ahead and report the event. */
5027 return 0;
5028}
5029
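/* A standalone sketch of the re-check handle_no_resumed performs
   above: a "no resumed threads" notification may be stale, so refresh
   the thread lists first and only believe the event if no thread is
   executing and none has a stop already buffered.  The thread_state
   type and refresh callback are illustrative stand-ins, not GDB
   API.  */

#include <vector>

struct thread_state
{
  bool executing;
  bool has_pending_stop;
};

static bool
no_resumed_event_is_real
  (std::vector<thread_state> &threads,
   void (*refresh_thread_lists) (std::vector<thread_state> &))
{
  refresh_thread_lists (threads);  /* May add or remove entries.  */

  for (const thread_state &t : threads)
    if (t.executing || t.has_pending_stop)
      return false;             /* Something is still resumed; ignore.  */

  return true;                  /* Really nothing left; report it.  */
}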
05ba8510
PA
5030/* Given an execution control state that has been freshly filled in by
5031 an event from the inferior, figure out what it means and take
5032 appropriate action.
5033
5034 The alternatives are:
5035
22bcd14b 5036 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5037 debugger.
5038
5039 2) keep_going and return; to wait for the next event (set
5040 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5041 once). */
c906108c 5042
ec9499be 5043static void
595915c1 5044handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5045{
595915c1
TT
5046 /* Make sure that all temporary struct value objects that were
5047 created during the handling of the event get deleted at the
5048 end. */
5049 scoped_value_mark free_values;
5050
d6b48e9c
PA
5051 enum stop_kind stop_soon;
5052
edbcda09 5053 infrun_log_debug ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
c29705b7 5054
28736962
PA
5055 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5056 {
5057 /* We had an event in the inferior, but we are not interested in
5058 handling it at this level. The lower layers have already
5059 done what needs to be done, if anything.
5060
5061 One of the possible circumstances for this is when the
5062 inferior produces output for the console. The inferior has
5063 not stopped, and we are ignoring the event. Another possible
5064 circumstance is any event which the lower level knows will be
5065 reported multiple times without an intervening resume. */
28736962
PA
5066 prepare_to_wait (ecs);
5067 return;
5068 }
5069
65706a29
PA
5070 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5071 {
65706a29
PA
5072 prepare_to_wait (ecs);
5073 return;
5074 }
5075
0e5bf2a8 5076 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5077 && handle_no_resumed (ecs))
5078 return;
0e5bf2a8 5079
5b6d1e4f
PA
5080 /* Cache the last target/ptid/waitstatus. */
5081 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5082
ca005067 5083 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5084 stop_stack_dummy = STOP_NONE;
ca005067 5085
0e5bf2a8
PA
5086 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5087 {
5088 /* No unwaited-for children left. IOW, all resumed children
5089 have exited. */
0e5bf2a8 5090 stop_print_frame = 0;
22bcd14b 5091 stop_waiting (ecs);
0e5bf2a8
PA
5092 return;
5093 }
5094
8c90c137 5095 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5096 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5097 {
5b6d1e4f 5098 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5099 /* If it's a new thread, add it to the thread database. */
5100 if (ecs->event_thread == NULL)
5b6d1e4f 5101 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5102
5103 /* Disable range stepping. If the next step request could use a
5104	 range, this will end up re-enabled then. */
5105 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5106 }
88ed393a
JK
5107
5108 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5109 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5110
5111 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5112 reinit_frame_cache ();
5113
28736962
PA
5114 breakpoint_retire_moribund ();
5115
2b009048
DJ
5116 /* First, distinguish signals caused by the debugger from signals
5117 that have to do with the program's own actions. Note that
5118 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5119 on the operating system version. Here we detect when a SIGILL or
5120 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5121 something similar for SIGSEGV, since a SIGSEGV will be generated
5122 when we're trying to execute a breakpoint instruction on a
5123 non-executable stack. This happens for call dummy breakpoints
5124 for architectures like SPARC that place call dummies on the
5125 stack. */
2b009048 5126 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
5127 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5128 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5129 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5130 {
00431a78 5131 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5132
a01bda52 5133 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5134 regcache_read_pc (regcache)))
5135 {
edbcda09 5136 infrun_log_debug ("Treating signal as SIGTRAP");
a493e3e2 5137 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5138 }
2b009048
DJ
5139 }
5140
293b3ebc 5141 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 5142
488f131b
JB
5143 switch (ecs->ws.kind)
5144 {
5145 case TARGET_WAITKIND_LOADED:
00431a78 5146 context_switch (ecs);
b0f4b84b
DJ
5147 /* Ignore gracefully during startup of the inferior, as it might
5148 be the shell which has just loaded some objects, otherwise
5149 add the symbols for the newly loaded objects. Also ignore at
5150 the beginning of an attach or remote session; we will query
5151 the full list of libraries once the connection is
5152 established. */
4f5d7f63 5153
00431a78 5154 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 5155 if (stop_soon == NO_STOP_QUIETLY)
488f131b 5156 {
edcc5120
TT
5157 struct regcache *regcache;
5158
00431a78 5159 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
5160
5161 handle_solib_event ();
5162
5163 ecs->event_thread->control.stop_bpstat
a01bda52 5164 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
5165 ecs->event_thread->suspend.stop_pc,
5166 ecs->event_thread, &ecs->ws);
ab04a2af 5167
c65d6b55
PA
5168 if (handle_stop_requested (ecs))
5169 return;
5170
ce12b012 5171 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
5172 {
5173 /* A catchpoint triggered. */
94c57d6a
PA
5174 process_event_stop_test (ecs);
5175 return;
edcc5120 5176 }
488f131b 5177
b0f4b84b
DJ
5178 /* If requested, stop when the dynamic linker notifies
5179 gdb of events. This allows the user to get control
5180 and place breakpoints in initializer routines for
5181 dynamically loaded objects (among other things). */
a493e3e2 5182 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
5183 if (stop_on_solib_events)
5184 {
55409f9d
DJ
5185 /* Make sure we print "Stopped due to solib-event" in
5186 normal_stop. */
5187 stop_print_frame = 1;
5188
22bcd14b 5189 stop_waiting (ecs);
b0f4b84b
DJ
5190 return;
5191 }
488f131b 5192 }
b0f4b84b
DJ
5193
5194 /* If we are skipping through a shell, or through shared library
5195 loading that we aren't interested in, resume the program. If
5c09a2c5 5196 we're running the program normally, also resume. */
b0f4b84b
DJ
5197 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5198 {
74960c60
VP
5199 /* Loading of shared libraries might have changed breakpoint
5200 addresses. Make sure new breakpoints are inserted. */
a25a5a45 5201 if (stop_soon == NO_STOP_QUIETLY)
74960c60 5202 insert_breakpoints ();
64ce06e4 5203 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
5204 prepare_to_wait (ecs);
5205 return;
5206 }
5207
5c09a2c5
PA
5208 /* But stop if we're attaching or setting up a remote
5209 connection. */
5210 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5211 || stop_soon == STOP_QUIETLY_REMOTE)
5212 {
edbcda09 5213 infrun_log_debug ("quietly stopped");
22bcd14b 5214 stop_waiting (ecs);
5c09a2c5
PA
5215 return;
5216 }
5217
5218 internal_error (__FILE__, __LINE__,
5219 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 5220
488f131b 5221 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5222 if (handle_stop_requested (ecs))
5223 return;
00431a78 5224 context_switch (ecs);
64ce06e4 5225 resume (GDB_SIGNAL_0);
488f131b
JB
5226 prepare_to_wait (ecs);
5227 return;
c5aa993b 5228
65706a29 5229 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5230 if (handle_stop_requested (ecs))
5231 return;
00431a78 5232 context_switch (ecs);
65706a29
PA
5233 if (!switch_back_to_stepped_thread (ecs))
5234 keep_going (ecs);
5235 return;
5236
488f131b 5237 case TARGET_WAITKIND_EXITED:
940c3c06 5238 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
5239 {
5240 /* Depending on the system, ecs->ptid may point to a thread or
5241 to a process. On some targets, target_mourn_inferior may
5242 need to have access to the just-exited thread. That is the
5243 case of GNU/Linux's "checkpoint" support, for example.
5244 Call the switch_to_xxx routine as appropriate. */
5245 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5246 if (thr != nullptr)
5247 switch_to_thread (thr);
5248 else
5249 {
5250 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5251 switch_to_inferior_no_thread (inf);
5252 }
5253 }
6c95b8df 5254 handle_vfork_child_exec_or_exit (0);
223ffa71 5255 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5256
0c557179
SDJ
5257      /* Clear any previous state of convenience variables. */
5258 clear_exit_convenience_vars ();
5259
940c3c06
PA
5260 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5261 {
5262 /* Record the exit code in the convenience variable $_exitcode, so
5263 that the user can inspect this again later. */
5264 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5265 (LONGEST) ecs->ws.value.integer);
5266
5267 /* Also record this in the inferior itself. */
5268 current_inferior ()->has_exit_code = 1;
5269 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5270
98eb56a4
PA
5271 /* Support the --return-child-result option. */
5272 return_child_result_value = ecs->ws.value.integer;
5273
76727919 5274 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5275 }
5276 else
0c557179 5277 {
00431a78 5278 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5279
5280 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5281 {
5282 /* Set the value of the internal variable $_exitsignal,
5283 which holds the signal uncaught by the inferior. */
5284 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5285 gdbarch_gdb_signal_to_target (gdbarch,
5286 ecs->ws.value.sig));
5287 }
5288 else
5289 {
5290 /* We don't have access to the target's method used for
5291 converting between signal numbers (GDB's internal
5292 representation <-> target's representation).
5293 Therefore, we cannot do a good job at displaying this
5294 information to the user. It's better to just warn
5295 her about it (if infrun debugging is enabled), and
5296 give up. */
edbcda09
SM
5297 infrun_log_debug ("Cannot fill $_exitsignal with the correct "
5298 "signal number.");
0c557179
SDJ
5299 }
5300
76727919 5301 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5302 }
8cf64490 5303
488f131b 5304 gdb_flush (gdb_stdout);
bc1e6c81 5305 target_mourn_inferior (inferior_ptid);
488f131b 5306 stop_print_frame = 0;
22bcd14b 5307 stop_waiting (ecs);
488f131b 5308 return;
c5aa993b 5309
488f131b 5310 case TARGET_WAITKIND_FORKED:
deb3b17b 5311 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5312 /* Check whether the inferior is displaced stepping. */
5313 {
00431a78 5314 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5315 struct gdbarch *gdbarch = regcache->arch ();
e2d96639
YQ
5316
5317 /* If checking displaced stepping is supported, and thread
5318 ecs->ptid is displaced stepping. */
00431a78 5319 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639
YQ
5320 {
5321 struct inferior *parent_inf
5b6d1e4f 5322 = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639
YQ
5323 struct regcache *child_regcache;
5324 CORE_ADDR parent_pc;
5325
d8d83535
SM
5326 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
5327 {
5328 struct displaced_step_inferior_state *displaced
5329 = get_displaced_stepping_state (parent_inf);
5330
5331 /* Restore scratch pad for child process. */
5332 displaced_step_restore (displaced, ecs->ws.value.related_pid);
5333 }
5334
e2d96639
YQ
5335 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5336 indicating that the displaced stepping of syscall instruction
5337 has been done. Perform cleanup for parent process here. Note
5338 that this operation also cleans up the child process for vfork,
5339 because their pages are shared. */
00431a78 5340 displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5341 /* Start a new step-over in another thread if there's one
5342 that needs it. */
5343 start_step_over ();
e2d96639 5344
e2d96639
YQ
5345 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5346 the child's PC is also within the scratchpad. Set the child's PC
5347 to the parent's PC value, which has already been fixed up.
5348 FIXME: we use the parent's aspace here, although we're touching
5349 the child, because the child hasn't been added to the inferior
5350 list yet at this point. */
5351
5352 child_regcache
5b6d1e4f
PA
5353 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5354 ecs->ws.value.related_pid,
e2d96639
YQ
5355 gdbarch,
5356 parent_inf->aspace);
5357 /* Read PC value of parent process. */
5358 parent_pc = regcache_read_pc (regcache);
5359
5360 if (debug_displaced)
5361 fprintf_unfiltered (gdb_stdlog,
5362 "displaced: write child pc from %s to %s\n",
5363 paddress (gdbarch,
5364 regcache_read_pc (child_regcache)),
5365 paddress (gdbarch, parent_pc));
5366
5367 regcache_write_pc (child_regcache, parent_pc);
5368 }
5369 }
5370
00431a78 5371 context_switch (ecs);
5a2901d9 5372
b242c3c2
PA
5373 /* Immediately detach breakpoints from the child before there's
5374 any chance of letting the user delete breakpoints from the
5375 breakpoint lists. If we don't do this early, it's easy to
5376 leave left over traps in the child, vis: "break foo; catch
5377 fork; c; <fork>; del; c; <child calls foo>". We only follow
5378 the fork on the last `continue', and by that time the
5379 breakpoint at "foo" is long gone from the breakpoint table.
5380 If we vforked, then we don't need to unpatch here, since both
5381 parent and child are sharing the same memory pages; we'll
5382 need to unpatch at follow/detach time instead to be certain
5383 that new breakpoints added between catchpoint hit time and
5384 vfork follow are detached. */
5385 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5386 {
b242c3c2
PA
5387 /* This won't actually modify the breakpoint list, but will
5388 physically remove the breakpoints from the child. */
d80ee84f 5389 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5390 }
5391
34b7e8a6 5392 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5393
e58b0e63
PA
5394 /* In case the event is caught by a catchpoint, remember that
5395 the event is to be followed at the next resume of the thread,
5396 and not immediately. */
5397 ecs->event_thread->pending_follow = ecs->ws;
5398
f2ffa92b
PA
5399 ecs->event_thread->suspend.stop_pc
5400 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5401
16c381f0 5402 ecs->event_thread->control.stop_bpstat
a01bda52 5403 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5404 ecs->event_thread->suspend.stop_pc,
5405 ecs->event_thread, &ecs->ws);
675bf4cb 5406
c65d6b55
PA
5407 if (handle_stop_requested (ecs))
5408 return;
5409
ce12b012
PA
5410 /* If no catchpoint triggered for this, then keep going. Note
5411 that we're interested in knowing the bpstat actually causes a
5412 stop, not just if it may explain the signal. Software
5413 watchpoints, for example, always appear in the bpstat. */
5414 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5415 {
5ab2fbf1 5416 bool follow_child
3e43a32a 5417 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5418
a493e3e2 5419 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5420
5b6d1e4f
PA
5421 process_stratum_target *targ
5422 = ecs->event_thread->inf->process_target ();
5423
5ab2fbf1 5424 bool should_resume = follow_fork ();
e58b0e63 5425
5b6d1e4f
PA
5426 /* Note that one of these may be an invalid pointer,
5427 depending on detach_fork. */
00431a78 5428 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5429 thread_info *child
5430 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5431
a2077e25
PA
5432 /* At this point, the parent is marked running, and the
5433 child is marked stopped. */
5434
5435 /* If not resuming the parent, mark it stopped. */
5436 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5437 parent->set_running (false);
a2077e25
PA
5438
5439 /* If resuming the child, mark it running. */
5440 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5441 child->set_running (true);
a2077e25 5442
6c95b8df 5443 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5444 if (!detach_fork && (non_stop
5445 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5446 {
5447 if (follow_child)
5448 switch_to_thread (parent);
5449 else
5450 switch_to_thread (child);
5451
5452 ecs->event_thread = inferior_thread ();
5453 ecs->ptid = inferior_ptid;
5454 keep_going (ecs);
5455 }
5456
5457 if (follow_child)
5458 switch_to_thread (child);
5459 else
5460 switch_to_thread (parent);
5461
e58b0e63
PA
5462 ecs->event_thread = inferior_thread ();
5463 ecs->ptid = inferior_ptid;
5464
5465 if (should_resume)
5466 keep_going (ecs);
5467 else
22bcd14b 5468 stop_waiting (ecs);
04e68871
DJ
5469 return;
5470 }
94c57d6a
PA
5471 process_event_stop_test (ecs);
5472 return;
488f131b 5473
6c95b8df
PA
5474 case TARGET_WAITKIND_VFORK_DONE:
5475 /* Done with the shared memory region. Re-insert breakpoints in
5476 the parent, and keep going. */
5477
00431a78 5478 context_switch (ecs);
6c95b8df
PA
5479
5480 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5481 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5482
5483 if (handle_stop_requested (ecs))
5484 return;
5485
6c95b8df
PA
5486 /* This also takes care of reinserting breakpoints in the
5487 previously locked inferior. */
5488 keep_going (ecs);
5489 return;
5490
488f131b 5491 case TARGET_WAITKIND_EXECD:
488f131b 5492
cbd2b4e3
PA
5493 /* Note we can't read registers yet (the stop_pc), because we
5494 don't yet know the inferior's post-exec architecture.
5495 'stop_pc' is explicitly read below instead. */
00431a78 5496 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5497
6c95b8df
PA
5498 /* Do whatever is necessary to the parent branch of the vfork. */
5499 handle_vfork_child_exec_or_exit (1);
5500
795e548f
PA
5501 /* This causes the eventpoints and symbol table to be reset.
5502 Must do this now, before trying to determine whether to
5503 stop. */
71b43ef8 5504 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5505
17d8546e
DB
5506 /* In follow_exec we may have deleted the original thread and
5507 created a new one. Make sure that the event thread is the
5508 execd thread for that case (this is a nop otherwise). */
5509 ecs->event_thread = inferior_thread ();
5510
f2ffa92b
PA
5511 ecs->event_thread->suspend.stop_pc
5512 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5513
16c381f0 5514 ecs->event_thread->control.stop_bpstat
a01bda52 5515 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5516 ecs->event_thread->suspend.stop_pc,
5517 ecs->event_thread, &ecs->ws);
795e548f 5518
71b43ef8
PA
5519 /* Note that this may be referenced from inside
5520 bpstat_stop_status above, through inferior_has_execd. */
5521 xfree (ecs->ws.value.execd_pathname);
5522 ecs->ws.value.execd_pathname = NULL;
5523
c65d6b55
PA
5524 if (handle_stop_requested (ecs))
5525 return;
5526
04e68871 5527 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5528 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5529 {
a493e3e2 5530 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5531 keep_going (ecs);
5532 return;
5533 }
94c57d6a
PA
5534 process_event_stop_test (ecs);
5535 return;
488f131b 5536
b4dc5ffa
MK
5537 /* Be careful not to try to gather much state about a thread
5538 that's in a syscall. It's frequently a losing proposition. */
488f131b 5539 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5540 /* Getting the current syscall number. */
94c57d6a
PA
5541 if (handle_syscall_event (ecs) == 0)
5542 process_event_stop_test (ecs);
5543 return;
c906108c 5544
488f131b
JB
5545 /* Before examining the threads further, step this thread to
5546 get it entirely out of the syscall. (We get notice of the
5547 event when the thread is just on the verge of exiting a
5548 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5549 into user code.) */
488f131b 5550 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5551 if (handle_syscall_event (ecs) == 0)
5552 process_event_stop_test (ecs);
5553 return;
c906108c 5554
488f131b 5555 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5556 handle_signal_stop (ecs);
5557 return;
c906108c 5558
b2175913
MS
5559 case TARGET_WAITKIND_NO_HISTORY:
5560 /* Reverse execution: target ran out of history info. */
eab402df 5561
d1988021 5562 /* Switch to the stopped thread. */
00431a78 5563 context_switch (ecs);
edbcda09 5564 infrun_log_debug ("stopped");
d1988021 5565
34b7e8a6 5566 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5567 ecs->event_thread->suspend.stop_pc
5568 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5569
5570 if (handle_stop_requested (ecs))
5571 return;
5572
76727919 5573 gdb::observers::no_history.notify ();
22bcd14b 5574 stop_waiting (ecs);
b2175913 5575 return;
488f131b 5576 }
4f5d7f63
PA
5577}
5578
372316f1
PA
5579/* Restart threads back to what they were trying to do back when we
5580 paused them for an in-line step-over. The EVENT_THREAD thread is
5581 ignored. */
4d9d9d04
PA
5582
5583static void
372316f1
PA
5584restart_threads (struct thread_info *event_thread)
5585{
372316f1
PA
5586 /* In case the instruction just stepped spawned a new thread. */
5587 update_thread_list ();
5588
08036331 5589 for (thread_info *tp : all_non_exited_threads ())
372316f1 5590 {
f3f8ece4
PA
5591 switch_to_thread_no_regs (tp);
5592
372316f1
PA
5593 if (tp == event_thread)
5594 {
edbcda09
SM
5595 infrun_log_debug ("restart threads: [%s] is event thread",
5596 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5597 continue;
5598 }
5599
5600 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5601 {
edbcda09
SM
5602 infrun_log_debug ("restart threads: [%s] not meant to be running",
5603 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5604 continue;
5605 }
5606
5607 if (tp->resumed)
5608 {
edbcda09
SM
5609 infrun_log_debug ("restart threads: [%s] resumed",
5610 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5611 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5612 continue;
5613 }
5614
5615 if (thread_is_in_step_over_chain (tp))
5616 {
edbcda09
SM
5617 infrun_log_debug ("restart threads: [%s] needs step-over",
5618 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5619 gdb_assert (!tp->resumed);
5620 continue;
5621 }
5622
5623
5624 if (tp->suspend.waitstatus_pending_p)
5625 {
edbcda09
SM
5626 infrun_log_debug ("restart threads: [%s] has pending status",
5627 target_pid_to_str (tp->ptid).c_str ());
719546c4 5628 tp->resumed = true;
372316f1
PA
5629 continue;
5630 }
5631
c65d6b55
PA
5632 gdb_assert (!tp->stop_requested);
5633
372316f1
PA
5634 /* If some thread needs to start a step-over at this point, it
5635 should still be in the step-over queue, and thus skipped
5636 above. */
5637 if (thread_still_needs_step_over (tp))
5638 {
5639 internal_error (__FILE__, __LINE__,
5640 "thread [%s] needs a step-over, but not in "
5641 "step-over queue\n",
a068643d 5642 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5643 }
5644
5645 if (currently_stepping (tp))
5646 {
edbcda09
SM
5647 infrun_log_debug ("restart threads: [%s] was stepping",
5648 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5649 keep_going_stepped_thread (tp);
5650 }
5651 else
5652 {
5653 struct execution_control_state ecss;
5654 struct execution_control_state *ecs = &ecss;
5655
edbcda09
SM
5656 infrun_log_debug ("restart threads: [%s] continuing",
5657 target_pid_to_str (tp->ptid).c_str ());
372316f1 5658 reset_ecs (ecs, tp);
00431a78 5659 switch_to_thread (tp);
372316f1
PA
5660 keep_going_pass_signal (ecs);
5661 }
5662 }
5663}
5664
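/* A standalone sketch of the skip logic restart_threads applies
   above: when resuming the other threads after an in-line step-over,
   leave alone the event thread, anything not meant to be running, and
   anything already resumed or queued for its own step-over, and
   merely flag threads that already have a buffered event instead of
   re-resuming them.  The thread_desc type and resume callback are
   illustrative stand-ins, not GDB API.  */

#include <vector>

struct thread_desc
{
  bool is_event_thread;
  bool meant_to_run;
  bool resumed;
  bool needs_step_over;
  bool has_pending_status;
};

static void
restart_after_step_over (std::vector<thread_desc> &threads,
                         void (*resume) (thread_desc &))
{
  for (thread_desc &t : threads)
    {
      if (t.is_event_thread || !t.meant_to_run || t.resumed
          || t.needs_step_over)
        continue;

      if (t.has_pending_status)
        {
          /* Its buffered event will be processed later; just mark it
             resumed so it is not resumed again.  */
          t.resumed = true;
          continue;
        }

      resume (t);
    }
}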
5665/* Callback for iterate_over_threads. Find a resumed thread that has
5666 a pending waitstatus. */
5667
5668static int
5669resumed_thread_with_pending_status (struct thread_info *tp,
5670 void *arg)
5671{
5672 return (tp->resumed
5673 && tp->suspend.waitstatus_pending_p);
5674}
5675
5676/* Called when we get an event that may finish an in-line or
5677 out-of-line (displaced stepping) step-over started previously.
5678 Return true if the event is processed and we should go back to the
5679 event loop; false if the caller should continue processing the
5680 event. */
5681
5682static int
4d9d9d04
PA
5683finish_step_over (struct execution_control_state *ecs)
5684{
372316f1
PA
5685 int had_step_over_info;
5686
00431a78 5687 displaced_step_fixup (ecs->event_thread,
4d9d9d04
PA
5688 ecs->event_thread->suspend.stop_signal);
5689
372316f1
PA
5690 had_step_over_info = step_over_info_valid_p ();
5691
5692 if (had_step_over_info)
4d9d9d04
PA
5693 {
5694 /* If we're stepping over a breakpoint with all threads locked,
5695 then only the thread that was stepped should be reporting
5696 back an event. */
5697 gdb_assert (ecs->event_thread->control.trap_expected);
5698
c65d6b55 5699 clear_step_over_info ();
4d9d9d04
PA
5700 }
5701
fbea99ea 5702 if (!target_is_non_stop_p ())
372316f1 5703 return 0;
4d9d9d04
PA
5704
5705 /* Start a new step-over in another thread if there's one that
5706 needs it. */
5707 start_step_over ();
372316f1
PA
5708
5709 /* If we were stepping over a breakpoint before, and haven't started
5710 a new in-line step-over sequence, then restart all other threads
5711 (except the event thread). We can't do this in all-stop, as then
5712 e.g., we wouldn't be able to issue any other remote packet until
5713 these other threads stop. */
5714 if (had_step_over_info && !step_over_info_valid_p ())
5715 {
5716 struct thread_info *pending;
5717
5718 /* If we only have threads with pending statuses, the restart
5719 below won't restart any thread and so nothing re-inserts the
5720 breakpoint we just stepped over. But we need it inserted
5721 when we later process the pending events, otherwise if
5722 another thread has a pending event for this breakpoint too,
5723 we'd discard its event (because the breakpoint that
5724 originally caused the event was no longer inserted). */
00431a78 5725 context_switch (ecs);
372316f1
PA
5726 insert_breakpoints ();
5727
5728 restart_threads (ecs->event_thread);
5729
5730 /* If we have events pending, go through handle_inferior_event
5731 again, picking up a pending event at random. This avoids
5732 thread starvation. */
5733
5734 /* But not if we just stepped over a watchpoint in order to let
5735 the instruction execute so we can evaluate its expression.
5736 The set of watchpoints that triggered is recorded in the
5737 breakpoint objects themselves (see bp->watchpoint_triggered).
5738 If we processed another event first, that other event could
5739 clobber this info. */
5740 if (ecs->event_thread->stepping_over_watchpoint)
5741 return 0;
5742
5743 pending = iterate_over_threads (resumed_thread_with_pending_status,
5744 NULL);
5745 if (pending != NULL)
5746 {
5747 struct thread_info *tp = ecs->event_thread;
5748 struct regcache *regcache;
5749
edbcda09
SM
5750 infrun_log_debug ("found resumed threads with "
5751 "pending events, saving status");
372316f1
PA
5752
5753 gdb_assert (pending != tp);
5754
5755 /* Record the event thread's event for later. */
5756 save_waitstatus (tp, &ecs->ws);
5757 /* This was cleared early, by handle_inferior_event. Set it
5758 so this pending event is considered by
5759 do_target_wait. */
719546c4 5760 tp->resumed = true;
372316f1
PA
5761
5762 gdb_assert (!tp->executing);
5763
00431a78 5764 regcache = get_thread_regcache (tp);
372316f1
PA
5765 tp->suspend.stop_pc = regcache_read_pc (regcache);
5766
edbcda09
SM
5767 infrun_log_debug ("saved stop_pc=%s for %s "
5768 "(currently_stepping=%d)\n",
5769 paddress (target_gdbarch (),
5770 tp->suspend.stop_pc),
5771 target_pid_to_str (tp->ptid).c_str (),
5772 currently_stepping (tp));
372316f1
PA
5773
5774 /* This in-line step-over finished; clear this so we won't
5775 start a new one. This is what handle_signal_stop would
5776 do, if we returned false. */
5777 tp->stepping_over_breakpoint = 0;
5778
5779 /* Wake up the event loop again. */
5780 mark_async_event_handler (infrun_async_inferior_event_token);
5781
5782 prepare_to_wait (ecs);
5783 return 1;
5784 }
5785 }
5786
5787 return 0;
4d9d9d04
PA
5788}
5789
4f5d7f63
PA
5790/* Come here when the program has stopped with a signal. */
5791
5792static void
5793handle_signal_stop (struct execution_control_state *ecs)
5794{
5795 struct frame_info *frame;
5796 struct gdbarch *gdbarch;
5797 int stopped_by_watchpoint;
5798 enum stop_kind stop_soon;
5799 int random_signal;
c906108c 5800
f0407826
DE
5801 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5802
c65d6b55
PA
5803 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5804
f0407826
DE
5805 /* Do we need to clean up the state of a thread that has
5806 completed a displaced single-step? (Doing so usually affects
5807 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5808 if (finish_step_over (ecs))
5809 return;
f0407826
DE
5810
5811 /* If we either finished a single-step or hit a breakpoint, but
5812 the user wanted this thread to be stopped, pretend we got a
5813 SIG0 (generic unsignaled stop). */
5814 if (ecs->event_thread->stop_requested
5815 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5816 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5817
f2ffa92b
PA
5818 ecs->event_thread->suspend.stop_pc
5819 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5820
527159b7 5821 if (debug_infrun)
237fc4c9 5822 {
00431a78 5823 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5824 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5825
f3f8ece4 5826 switch_to_thread (ecs->event_thread);
5af949e3 5827
edbcda09
SM
5828 infrun_log_debug ("stop_pc=%s",
5829 paddress (reg_gdbarch,
5830 ecs->event_thread->suspend.stop_pc));
d92524f1 5831 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5832 {
5833 CORE_ADDR addr;
abbb1732 5834
edbcda09 5835 infrun_log_debug ("stopped by watchpoint");
237fc4c9 5836
8b88a78e 5837 if (target_stopped_data_address (current_top_target (), &addr))
edbcda09
SM
5838 infrun_log_debug ("stopped data address=%s",
5839 paddress (reg_gdbarch, addr));
237fc4c9 5840 else
edbcda09 5841 infrun_log_debug ("(no data address available)");
237fc4c9
PA
5842 }
5843 }
527159b7 5844
36fa8042
PA
5845  /* This originates from start_remote(), start_inferior() and
5846     shared library hook functions. */
00431a78 5847 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5848 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5849 {
00431a78 5850 context_switch (ecs);
edbcda09 5851 infrun_log_debug ("quietly stopped");
36fa8042 5852 stop_print_frame = 1;
22bcd14b 5853 stop_waiting (ecs);
36fa8042
PA
5854 return;
5855 }
5856
36fa8042
PA
5857 /* This originates from attach_command(). We need to overwrite
5858 the stop_signal here, because some kernels don't ignore a
5859 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5860 See more comments in inferior.h. On the other hand, if we
5861 get a non-SIGSTOP, report it to the user - assume the backend
5862 will handle the SIGSTOP if it should show up later.
5863
5864 Also consider that the attach is complete when we see a
5865 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5866 target extended-remote report it instead of a SIGSTOP
5867 (e.g. gdbserver). We already rely on SIGTRAP being our
5868 signal, so this is no exception.
5869
5870 Also consider that the attach is complete when we see a
5871 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5872 the target to stop all threads of the inferior, in case the
5873 low level attach operation doesn't stop them implicitly. If
5874 they weren't stopped implicitly, then the stub will report a
5875 GDB_SIGNAL_0, meaning: stopped for no particular reason
5876 other than GDB's request. */
5877 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5878 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5879 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5880 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5881 {
5882 stop_print_frame = 1;
22bcd14b 5883 stop_waiting (ecs);
36fa8042
PA
5884 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5885 return;
5886 }
5887
488f131b 5888 /* See if something interesting happened to the non-current thread. If
b40c7d58 5889 so, then switch to that thread. */
d7e15655 5890 if (ecs->ptid != inferior_ptid)
488f131b 5891 {
edbcda09 5892 infrun_log_debug ("context switch");
527159b7 5893
00431a78 5894 context_switch (ecs);
c5aa993b 5895
9a4105ab 5896 if (deprecated_context_hook)
00431a78 5897 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5898 }
c906108c 5899
568d6575
UW
5900 /* At this point, get hold of the now-current thread's frame. */
5901 frame = get_current_frame ();
5902 gdbarch = get_frame_arch (frame);
5903
2adfaa28 5904 /* Pull the single step breakpoints out of the target. */
af48d08f 5905 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5906 {
af48d08f 5907 struct regcache *regcache;
af48d08f 5908 CORE_ADDR pc;
2adfaa28 5909
00431a78 5910 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5911 const address_space *aspace = regcache->aspace ();
5912
af48d08f 5913 pc = regcache_read_pc (regcache);
34b7e8a6 5914
af48d08f
PA
5915 /* However, before doing so, if this single-step breakpoint was
5916 actually for another thread, set this thread up for moving
5917 past it. */
5918 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5919 aspace, pc))
5920 {
5921 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 5922 {
edbcda09
SM
5923 infrun_log_debug ("[%s] hit another thread's single-step "
5924 "breakpoint",
5925 target_pid_to_str (ecs->ptid).c_str ());
af48d08f
PA
5926 ecs->hit_singlestep_breakpoint = 1;
5927 }
5928 }
5929 else
5930 {
edbcda09
SM
5931 infrun_log_debug ("[%s] hit its single-step breakpoint",
5932 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5933 }
488f131b 5934 }
af48d08f 5935 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5936
963f9c80
PA
5937 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5938 && ecs->event_thread->control.trap_expected
5939 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5940 stopped_by_watchpoint = 0;
5941 else
5942 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5943
5944 /* If necessary, step over this watchpoint. We'll be back to display
5945 it in a moment. */
5946 if (stopped_by_watchpoint
d92524f1 5947 && (target_have_steppable_watchpoint
568d6575 5948 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5949 {
488f131b
JB
5950 /* At this point, we are stopped at an instruction which has
5951 attempted to write to a piece of memory under control of
5952 a watchpoint. The instruction hasn't actually executed
5953 yet. If we were to evaluate the watchpoint expression
5954 now, we would get the old value, and therefore no change
5955 would seem to have occurred.
5956
5957 In order to make watchpoints work `right', we really need
5958 to complete the memory write, and then evaluate the
d983da9c
DJ
5959 watchpoint expression. We do this by single-stepping the
5960 target.
5961
7f89fd65 5962 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5963 it. For example, the PA can (with some kernel cooperation)
5964 single step over a watchpoint without disabling the watchpoint.
5965
5966 It is far more common to need to disable a watchpoint to step
5967 the inferior over it. If we have non-steppable watchpoints,
5968 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5969 disable all watchpoints.
5970
5971 Any breakpoint at PC must also be stepped over -- if there's
5972 one, it will have already triggered before the watchpoint
5973 triggered, and we either already reported it to the user, or
5974 it didn't cause a stop and we called keep_going. In either
5975 case, if there was a breakpoint at PC, we must be trying to
5976 step past it. */
5977 ecs->event_thread->stepping_over_watchpoint = 1;
5978 keep_going (ecs);
488f131b
JB
5979 return;
5980 }
5981
4e1c45ea 5982 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5983 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5984 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5985 ecs->event_thread->control.stop_step = 0;
488f131b 5986 stop_print_frame = 1;
488f131b 5987 stopped_by_random_signal = 0;
ddfe970e 5988 bpstat stop_chain = NULL;
488f131b 5989
edb3359d
DJ
5990 /* Hide inlined functions starting here, unless we just performed stepi or
5991 nexti. After stepi and nexti, always show the innermost frame (not any
5992 inline function call sites). */
16c381f0 5993 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 5994 {
00431a78
PA
5995 const address_space *aspace
5996 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
5997
5998 /* skip_inline_frames is expensive, so we avoid it if we can
5999 determine that the address is one where functions cannot have
6000 been inlined. This improves performance with inferiors that
6001 load a lot of shared libraries, because the solib event
6002 breakpoint is defined as the address of a function (i.e. not
6003 inline). Note that we have to check the previous PC as well
6004 as the current one to catch cases when we have just
6005 single-stepped off a breakpoint prior to reinstating it.
6006 Note that we're assuming that the code we single-step to is
6007 not inline, but that's not definitive: there's nothing
6008 preventing the event breakpoint function from containing
6009 inlined code, and the single-step ending up there. If the
6010 user had set a breakpoint on that inlined code, the missing
6011 skip_inline_frames call would break things. Fortunately
6012 that's an extremely unlikely scenario. */
f2ffa92b
PA
6013 if (!pc_at_non_inline_function (aspace,
6014 ecs->event_thread->suspend.stop_pc,
6015 &ecs->ws)
a210c238
MR
6016 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6017 && ecs->event_thread->control.trap_expected
6018 && pc_at_non_inline_function (aspace,
6019 ecs->event_thread->prev_pc,
09ac7c10 6020 &ecs->ws)))
1c5a993e 6021 {
f2ffa92b
PA
6022 stop_chain = build_bpstat_chain (aspace,
6023 ecs->event_thread->suspend.stop_pc,
6024 &ecs->ws);
00431a78 6025 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
6026
6027 /* Re-fetch current thread's frame in case that invalidated
6028 the frame cache. */
6029 frame = get_current_frame ();
6030 gdbarch = get_frame_arch (frame);
6031 }
0574c78f 6032 }
edb3359d 6033
a493e3e2 6034 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 6035 && ecs->event_thread->control.trap_expected
568d6575 6036 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6037 && currently_stepping (ecs->event_thread))
3352ef37 6038 {
b50d7442 6039 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6040 also on an instruction that needs to be stepped multiple
1777feb0 6041	 times before it has fully executed. E.g., architectures
3352ef37
AC
6042 with a delay slot. It needs to be stepped twice, once for
6043 the instruction and once for the delay slot. */
6044 int step_through_delay
568d6575 6045 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6046
edbcda09
SM
6047 if (step_through_delay)
6048 infrun_log_debug ("step through delay");
6049
16c381f0
JK
6050 if (ecs->event_thread->control.step_range_end == 0
6051 && step_through_delay)
3352ef37
AC
6052 {
6053 /* The user issued a continue when stopped at a breakpoint.
6054 Set up for another trap and get out of here. */
4e1c45ea 6055 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6056 keep_going (ecs);
6057 return;
6058 }
6059 else if (step_through_delay)
6060 {
6061 /* The user issued a step when stopped at a breakpoint.
6062 Maybe we should stop, maybe we should not - the delay
6063 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6064 case, don't decide that here, just set
6065 ecs->stepping_over_breakpoint, making sure we
6066 single-step again before breakpoints are re-inserted. */
4e1c45ea 6067 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6068 }
6069 }
6070
ab04a2af
TT
6071 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6072 handles this event. */
6073 ecs->event_thread->control.stop_bpstat
a01bda52 6074 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
6075 ecs->event_thread->suspend.stop_pc,
6076 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6077
ab04a2af
TT
6078 /* Following in case break condition called a
6079 function. */
6080 stop_print_frame = 1;
73dd234f 6081
ab04a2af
TT
6082 /* This is where we handle "moribund" watchpoints. Unlike
6083 software breakpoints traps, hardware watchpoint traps are
6084 always distinguishable from random traps. If no high-level
6085 watchpoint is associated with the reported stop data address
6086 anymore, then the bpstat does not explain the signal ---
6087 simply make sure to ignore it if `stopped_by_watchpoint' is
6088 set. */
6089
edbcda09 6090 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6091 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6092 GDB_SIGNAL_TRAP)
ab04a2af 6093 && stopped_by_watchpoint)
edbcda09
SM
6094 {
6095 infrun_log_debug ("no user watchpoint explains watchpoint SIGTRAP, "
6096 "ignoring");
6097 }
73dd234f 6098
bac7d97b 6099 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6100 at one stage in the past included checks for an inferior
6101 function call's call dummy's return breakpoint. The original
6102 comment, that went with the test, read:
03cebad2 6103
ab04a2af
TT
6104 ``End of a stack dummy. Some systems (e.g. Sony news) give
6105 another signal besides SIGTRAP, so check here as well as
6106 above.''
73dd234f 6107
ab04a2af
TT
6108 If someone ever tries to get call dummys on a
6109 non-executable stack to work (where the target would stop
6110 with something like a SIGSEGV), then those tests might need
6111 to be re-instated. Given, however, that the tests were only
6112 enabled when momentary breakpoints were not being used, I
6113 suspect that it won't be the case.
488f131b 6114
ab04a2af
TT
6115 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6116 be necessary for call dummies on a non-executable stack on
6117 SPARC. */
488f131b 6118
bac7d97b 6119 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6120 random_signal
6121 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6122 ecs->event_thread->suspend.stop_signal);
bac7d97b 6123
1cf4d951
PA
6124 /* Maybe this was a trap for a software breakpoint that has since
6125 been removed. */
6126 if (random_signal && target_stopped_by_sw_breakpoint ())
6127 {
5133a315
LM
6128 if (gdbarch_program_breakpoint_here_p (gdbarch,
6129 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6130 {
6131 struct regcache *regcache;
6132 int decr_pc;
6133
6134 /* Re-adjust PC to what the program would see if GDB was not
6135 debugging it. */
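	  /* Illustrative note (typical x86 behavior, stated as an
	     assumption): the software breakpoint is the one-byte "int3"
	     instruction and the reported PC points just past it, so
	     decr_pc is usually 1 and the write below moves the PC back
	     onto the instruction that was breakpointed.  */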
00431a78 6136 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6137 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6138 if (decr_pc != 0)
6139 {
07036511
TT
6140 gdb::optional<scoped_restore_tmpl<int>>
6141 restore_operation_disable;
1cf4d951
PA
6142
6143 if (record_full_is_used ())
07036511
TT
6144 restore_operation_disable.emplace
6145 (record_full_gdb_operation_disable_set ());
1cf4d951 6146
f2ffa92b
PA
6147 regcache_write_pc (regcache,
6148 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6149 }
6150 }
6151 else
6152 {
6153 /* A delayed software breakpoint event. Ignore the trap. */
edbcda09 6154 infrun_log_debug ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
6155 random_signal = 0;
6156 }
6157 }
6158
6159 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6160 has since been removed. */
6161 if (random_signal && target_stopped_by_hw_breakpoint ())
6162 {
6163 /* A delayed hardware breakpoint event. Ignore the trap. */
edbcda09
SM
6164 infrun_log_debug ("delayed hardware breakpoint/watchpoint "
6165 "trap, ignoring");
1cf4d951
PA
6166 random_signal = 0;
6167 }
6168
bac7d97b
PA
6169 /* If not, perhaps stepping/nexting can. */
6170 if (random_signal)
6171 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6172 && currently_stepping (ecs->event_thread));
ab04a2af 6173
2adfaa28
PA
6174 /* Perhaps the thread hit a single-step breakpoint of _another_
6175 thread. Single-step breakpoints are transparent to the
6176 breakpoints module. */
6177 if (random_signal)
6178 random_signal = !ecs->hit_singlestep_breakpoint;
6179
bac7d97b
PA
6180 /* No? Perhaps we got a moribund watchpoint. */
6181 if (random_signal)
6182 random_signal = !stopped_by_watchpoint;
ab04a2af 6183
c65d6b55
PA
6184 /* Always stop if the user explicitly requested this thread to
6185 remain stopped. */
6186 if (ecs->event_thread->stop_requested)
6187 {
6188 random_signal = 1;
edbcda09 6189 infrun_log_debug ("user-requested stop");
c65d6b55
PA
6190 }
6191
488f131b
JB
6192 /* For the program's own signals, act according to
6193 the signal handling tables. */
6194
ce12b012 6195 if (random_signal)
488f131b
JB
6196 {
6197 /* Signal not for debugging purposes. */
5b6d1e4f 6198 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
c9737c08 6199 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6200
edbcda09
SM
6201 infrun_log_debug ("random signal (%s)",
6202 gdb_signal_to_symbol_string (stop_signal));
527159b7 6203
488f131b
JB
6204 stopped_by_random_signal = 1;
6205
252fbfc8
PA
6206 /* Always stop on signals if we're either just gaining control
6207 of the program, or the user explicitly requested this thread
6208 to remain stopped. */
d6b48e9c 6209 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6210 || ecs->event_thread->stop_requested
24291992 6211 || (!inf->detaching
16c381f0 6212 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 6213 {
22bcd14b 6214 stop_waiting (ecs);
488f131b
JB
6215 return;
6216 }
b57bacec
PA
6217
6218 /* Notify observers the signal has "handle print" set. Note we
6219 returned early above if stopping; normal_stop handles the
6220 printing in that case. */
6221 if (signal_print[ecs->event_thread->suspend.stop_signal])
6222 {
6223 /* The signal table tells us to print about this signal. */
223ffa71 6224 target_terminal::ours_for_output ();
76727919 6225 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6226 target_terminal::inferior ();
b57bacec 6227 }
488f131b
JB
6228
6229 /* Clear the signal if it should not be passed. */
16c381f0 6230 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6231 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6232
f2ffa92b 6233 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6234 && ecs->event_thread->control.trap_expected
8358c15c 6235 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6236 {
6237 /* We were just starting a new sequence, attempting to
6238 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6239 Instead this signal arrives. This signal will take us out
68f53502
AC
6240 of the stepping range so GDB needs to remember to, when
6241 the signal handler returns, resume stepping off that
6242 breakpoint. */
6243 /* To simplify things, "continue" is forced to use the same
6244 code paths as single-step - set a breakpoint at the
6245 signal return address and then, once hit, step off that
6246 breakpoint. */
edbcda09 6247 infrun_log_debug ("signal arrived while stepping over breakpoint");
d3169d93 6248
2c03e5be 6249 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6250 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6251 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6252 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6253
6254 /* If we were nexting/stepping some other thread, switch to
6255 it, so that we don't continue it, losing control. */
6256 if (!switch_back_to_stepped_thread (ecs))
6257 keep_going (ecs);
9d799f85 6258 return;
68f53502 6259 }
9d799f85 6260
e5f8a7cc 6261 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6262 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6263 ecs->event_thread)
e5f8a7cc 6264 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6265 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6266 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6267 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6268 {
6269 /* The inferior is about to take a signal that will take it
6270 out of the single step range. Set a breakpoint at the
6271 current PC (which is presumably where the signal handler
6272 will eventually return) and then allow the inferior to
6273 run free.
6274
6275 Note that this is only needed for a signal delivered
6276 while in the single-step range. Nested signals aren't a
6277 problem as they eventually all return. */
edbcda09 6278 infrun_log_debug ("signal may take us out of single-step range");
237fc4c9 6279
372316f1 6280 clear_step_over_info ();
2c03e5be 6281 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6282 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6283 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6284 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6285 keep_going (ecs);
6286 return;
d303a6c7 6287 }
9d799f85 6288
85102364 6289 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6290 when either there's a nested signal, or when there's a
6291 pending signal enabled just as the signal handler returns
6292 (leaving the inferior at the step-resume-breakpoint without
6293 actually executing it). Either way continue until the
6294 breakpoint is really hit. */
c447ac0b
PA
6295
6296 if (!switch_back_to_stepped_thread (ecs))
6297 {
edbcda09 6298 infrun_log_debug ("random signal, keep going");
c447ac0b
PA
6299
6300 keep_going (ecs);
6301 }
6302 return;
488f131b 6303 }
94c57d6a
PA
6304
6305 process_event_stop_test (ecs);
6306}
6307
6308/* Come here when we've got some debug event / signal we can explain
6309 (IOW, not a random signal), and test whether it should cause a
6310 stop, or whether we should resume the inferior (transparently).
6311 E.g., could be a breakpoint whose condition evaluates false; we
6312 could be still stepping within the line; etc. */
6313
6314static void
6315process_event_stop_test (struct execution_control_state *ecs)
6316{
6317 struct symtab_and_line stop_pc_sal;
6318 struct frame_info *frame;
6319 struct gdbarch *gdbarch;
cdaa5b73
PA
6320 CORE_ADDR jmp_buf_pc;
6321 struct bpstat_what what;
94c57d6a 6322
cdaa5b73 6323 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6324
cdaa5b73
PA
6325 frame = get_current_frame ();
6326 gdbarch = get_frame_arch (frame);
fcf3daef 6327
cdaa5b73 6328 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6329
cdaa5b73
PA
6330 if (what.call_dummy)
6331 {
6332 stop_stack_dummy = what.call_dummy;
6333 }
186c406b 6334
243a9253
PA
6335 /* A few breakpoint types have callbacks associated (e.g.,
6336 bp_jit_event). Run them now. */
6337 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6338
cdaa5b73
PA
6339 /* If we hit an internal event that triggers symbol changes, the
6340 current frame will be invalidated within bpstat_what (e.g., if we
6341 hit an internal solib event). Re-fetch it. */
6342 frame = get_current_frame ();
6343 gdbarch = get_frame_arch (frame);
e2e4d78b 6344
cdaa5b73
PA
6345 switch (what.main_action)
6346 {
6347 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6348 /* If we hit the breakpoint at longjmp while stepping, we
6349 install a momentary breakpoint at the target of the
6350 jmp_buf. */
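      /* Illustrative user-level sketch (hypothetical program, not part of
	 GDB): while stepping over the longjmp call below, a momentary
	 breakpoint planted at the PC recorded in the jmp_buf lets the step
	 finish at the setjmp return site instead of getting lost.

	     #include <setjmp.h>
	     static jmp_buf env;
	     static void bail (void) { longjmp (env, 1); }
	     int main (void)
	     {
	       if (setjmp (env) == 0)
		 bail ();      // "next" over this longjmp ...
	       return 0;       // ... ends up here, setjmp returning 1.
	     }
      */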
186c406b 6351
edbcda09 6352 infrun_log_debug ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 6353
cdaa5b73 6354 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6355
cdaa5b73
PA
6356 if (what.is_longjmp)
6357 {
6358 struct value *arg_value;
6359
6360 /* If we set the longjmp breakpoint via a SystemTap probe,
6361 then use it to extract the arguments. The destination PC
6362 is the third argument to the probe. */
6363 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6364 if (arg_value)
8fa0c4f8
AA
6365 {
6366 jmp_buf_pc = value_as_address (arg_value);
6367 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6368 }
cdaa5b73
PA
6369 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6370 || !gdbarch_get_longjmp_target (gdbarch,
6371 frame, &jmp_buf_pc))
e2e4d78b 6372 {
edbcda09
SM
6373 infrun_log_debug ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6374 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
6375 keep_going (ecs);
6376 return;
e2e4d78b 6377 }
e2e4d78b 6378
cdaa5b73
PA
6379 /* Insert a breakpoint at resume address. */
6380 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6381 }
6382 else
6383 check_exception_resume (ecs, frame);
6384 keep_going (ecs);
6385 return;
e81a37f7 6386
cdaa5b73
PA
6387 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6388 {
6389 struct frame_info *init_frame;
e81a37f7 6390
cdaa5b73 6391 /* There are several cases to consider.
c906108c 6392
cdaa5b73
PA
6393 1. The initiating frame no longer exists. In this case we
6394 must stop, because the exception or longjmp has gone too
6395 far.
2c03e5be 6396
cdaa5b73
PA
6397 2. The initiating frame exists, and is the same as the
6398 current frame. We stop, because the exception or longjmp
6399 has been caught.
2c03e5be 6400
cdaa5b73
PA
6401 3. The initiating frame exists and is different from the
6402 current frame. This means the exception or longjmp has
6403 been caught beneath the initiating frame, so keep going.
c906108c 6404
cdaa5b73
PA
6405 4. longjmp breakpoint has been placed just to protect
6406 against stale dummy frames and user is not interested in
6407 stopping around longjmps. */
c5aa993b 6408
edbcda09 6409 infrun_log_debug ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 6410
cdaa5b73
PA
6411 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6412 != NULL);
6413 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6414
cdaa5b73
PA
6415 if (what.is_longjmp)
6416 {
b67a2c6f 6417 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6418
cdaa5b73 6419 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6420 {
cdaa5b73
PA
6421 /* Case 4. */
6422 keep_going (ecs);
6423 return;
e5ef252a 6424 }
cdaa5b73 6425 }
c5aa993b 6426
cdaa5b73 6427 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6428
cdaa5b73
PA
6429 if (init_frame)
6430 {
6431 struct frame_id current_id
6432 = get_frame_id (get_current_frame ());
6433 if (frame_id_eq (current_id,
6434 ecs->event_thread->initiating_frame))
6435 {
6436 /* Case 2. Fall through. */
6437 }
6438 else
6439 {
6440 /* Case 3. */
6441 keep_going (ecs);
6442 return;
6443 }
68f53502 6444 }
488f131b 6445
cdaa5b73
PA
6446 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6447 exists. */
6448 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6449
bdc36728 6450 end_stepping_range (ecs);
cdaa5b73
PA
6451 }
6452 return;
e5ef252a 6453
cdaa5b73 6454 case BPSTAT_WHAT_SINGLE:
edbcda09 6455 infrun_log_debug ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
6456 ecs->event_thread->stepping_over_breakpoint = 1;
6457 /* Still need to check other stuff, at least the case where we
6458 are stepping and step out of the right range. */
6459 break;
e5ef252a 6460
cdaa5b73 6461 case BPSTAT_WHAT_STEP_RESUME:
edbcda09 6462 infrun_log_debug ("BPSTAT_WHAT_STEP_RESUME");
e5ef252a 6463
cdaa5b73
PA
6464 delete_step_resume_breakpoint (ecs->event_thread);
6465 if (ecs->event_thread->control.proceed_to_finish
6466 && execution_direction == EXEC_REVERSE)
6467 {
6468 struct thread_info *tp = ecs->event_thread;
6469
6470 /* We are finishing a function in reverse, and just hit the
6471 step-resume breakpoint at the start address of the
6472 function, and we're almost there -- just need to back up
6473 by one more single-step, which should take us back to the
6474 function call. */
6475 tp->control.step_range_start = tp->control.step_range_end = 1;
6476 keep_going (ecs);
e5ef252a 6477 return;
cdaa5b73
PA
6478 }
6479 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6480 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6481 && execution_direction == EXEC_REVERSE)
6482 {
6483 /* We are stepping over a function call in reverse, and just
6484 hit the step-resume breakpoint at the start address of
6485 the function. Go back to single-stepping, which should
6486 take us back to the function call. */
6487 ecs->event_thread->stepping_over_breakpoint = 1;
6488 keep_going (ecs);
6489 return;
6490 }
6491 break;
e5ef252a 6492
cdaa5b73 6493 case BPSTAT_WHAT_STOP_NOISY:
edbcda09 6494 infrun_log_debug ("BPSTAT_WHAT_STOP_NOISY");
cdaa5b73 6495 stop_print_frame = 1;
e5ef252a 6496
99619bea
PA
6497 /* Assume the thread stopped for a breakpoint. We'll still check
6498 whether a/the breakpoint is there when the thread is next
6499 resumed. */
6500 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6501
22bcd14b 6502 stop_waiting (ecs);
cdaa5b73 6503 return;
e5ef252a 6504
cdaa5b73 6505 case BPSTAT_WHAT_STOP_SILENT:
edbcda09 6506 infrun_log_debug ("BPSTAT_WHAT_STOP_SILENT");
cdaa5b73 6507 stop_print_frame = 0;
e5ef252a 6508
99619bea
PA
6509 /* Assume the thread stopped for a breakpoint. We'll still check
6510 whether a/the breakpoint is there when the thread is next
6511 resumed. */
6512 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6513 stop_waiting (ecs);
cdaa5b73
PA
6514 return;
6515
6516 case BPSTAT_WHAT_HP_STEP_RESUME:
edbcda09 6517 infrun_log_debug ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
6518
6519 delete_step_resume_breakpoint (ecs->event_thread);
6520 if (ecs->event_thread->step_after_step_resume_breakpoint)
6521 {
6522 /* Back when the step-resume breakpoint was inserted, we
6523 were trying to single-step off a breakpoint. Go back to
6524 doing that. */
6525 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6526 ecs->event_thread->stepping_over_breakpoint = 1;
6527 keep_going (ecs);
6528 return;
e5ef252a 6529 }
cdaa5b73
PA
6530 break;
6531
6532 case BPSTAT_WHAT_KEEP_CHECKING:
6533 break;
e5ef252a 6534 }
c906108c 6535
af48d08f
PA
6536 /* If we stepped a permanent breakpoint and we had a high priority
6537 step-resume breakpoint for the address we stepped, but we didn't
6538 hit it, then we must have stepped into the signal handler. The
6539 step-resume was only necessary to catch the case of _not_
6540 stepping into the handler, so delete it, and fall through to
6541 checking whether the step finished. */
6542 if (ecs->event_thread->stepped_breakpoint)
6543 {
6544 struct breakpoint *sr_bp
6545 = ecs->event_thread->control.step_resume_breakpoint;
6546
8d707a12
PA
6547 if (sr_bp != NULL
6548 && sr_bp->loc->permanent
af48d08f
PA
6549 && sr_bp->type == bp_hp_step_resume
6550 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6551 {
edbcda09 6552 infrun_log_debug ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
6553 delete_step_resume_breakpoint (ecs->event_thread);
6554 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6555 }
6556 }
6557
cdaa5b73
PA
6558 /* We come here if we hit a breakpoint but should not stop for it.
6559 Possibly we also were stepping and should stop for that. So fall
6560 through and test for stepping. But, if not stepping, do not
6561 stop. */
c906108c 6562
a7212384
UW
6563 /* In all-stop mode, if we're currently stepping but have stopped in
6564 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6565 if (switch_back_to_stepped_thread (ecs))
6566 return;
776f04fa 6567
8358c15c 6568 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6569 {
edbcda09 6570 infrun_log_debug ("step-resume breakpoint is inserted");
527159b7 6571
488f131b
JB
6572 /* Having a step-resume breakpoint overrides anything
6573 else having to do with stepping commands until
6574 that breakpoint is reached. */
488f131b
JB
6575 keep_going (ecs);
6576 return;
6577 }
c5aa993b 6578
16c381f0 6579 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6580 {
edbcda09 6581 infrun_log_debug ("no stepping, continue");
488f131b 6582 /* Likewise if we aren't even stepping. */
488f131b
JB
6583 keep_going (ecs);
6584 return;
6585 }
c5aa993b 6586
4b7703ad
JB
6587 /* Re-fetch current thread's frame in case the code above caused
6588 the frame cache to be re-initialized, making our FRAME variable
6589 a dangling pointer. */
6590 frame = get_current_frame ();
628fe4e4 6591 gdbarch = get_frame_arch (frame);
7e324e48 6592 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6593
488f131b 6594 /* If stepping through a line, keep going if still within it.
c906108c 6595
488f131b
JB
6596 Note that step_range_end is the address of the first instruction
6597 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6598 within it!
6599
6600 Note also that during reverse execution, we may be stepping
6601 through a function epilogue and therefore must detect when
6602 the current-frame changes in the middle of a line. */
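  /* Illustrative example (made-up addresses, an assumption): if the
     current source line covers the instructions at 0x1000 through
     0x100c, then step_range_start is 0x1000 and step_range_end is
     0x1010 -- the first address past the line -- so a stop PC of
     0x100c still counts as inside the range checked below.  */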
6603
f2ffa92b
PA
6604 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6605 ecs->event_thread)
31410e84 6606 && (execution_direction != EXEC_REVERSE
388a8562 6607 || frame_id_eq (get_frame_id (frame),
16c381f0 6608 ecs->event_thread->control.step_frame_id)))
488f131b 6609 {
edbcda09
SM
6610 infrun_log_debug
6611 ("stepping inside range [%s-%s]",
6612 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6613 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6614
c1e36e3e
PA
6615 /* Tentatively re-enable range stepping; `resume' disables it if
6616 necessary (e.g., if we're stepping over a breakpoint or we
6617 have software watchpoints). */
6618 ecs->event_thread->control.may_range_step = 1;
6619
b2175913
MS
6620 /* When stepping backward, stop at beginning of line range
6621 (unless it's the function entry point, in which case
6622 keep going back to the call point). */
f2ffa92b 6623 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6624 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6625 && stop_pc != ecs->stop_func_start
6626 && execution_direction == EXEC_REVERSE)
bdc36728 6627 end_stepping_range (ecs);
b2175913
MS
6628 else
6629 keep_going (ecs);
6630
488f131b
JB
6631 return;
6632 }
c5aa993b 6633
488f131b 6634 /* We stepped out of the stepping range. */
c906108c 6635
488f131b 6636 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6637 loader dynamic symbol resolution code...
6638
6639 EXEC_FORWARD: we keep on single stepping until we exit the run
6640 time loader code and reach the callee's address.
6641
6642 EXEC_REVERSE: we've already executed the callee (backward), and
6643 the runtime loader code is handled just like any other
6644 undebuggable function call. Now we need only keep stepping
6645 backward through the trampoline code, and that's handled further
6646 down, so there is nothing for us to do here. */
6647
6648 if (execution_direction != EXEC_REVERSE
16c381f0 6649 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6650 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6651 {
4c8c40e6 6652 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6653 gdbarch_skip_solib_resolver (gdbarch,
6654 ecs->event_thread->suspend.stop_pc);
c906108c 6655
edbcda09 6656 infrun_log_debug ("stepped into dynsym resolve code");
527159b7 6657
488f131b
JB
6658 if (pc_after_resolver)
6659 {
6660 /* Set up a step-resume breakpoint at the address
6661 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6662 symtab_and_line sr_sal;
488f131b 6663 sr_sal.pc = pc_after_resolver;
6c95b8df 6664 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6665
a6d9a66e
UW
6666 insert_step_resume_breakpoint_at_sal (gdbarch,
6667 sr_sal, null_frame_id);
c5aa993b 6668 }
c906108c 6669
488f131b
JB
6670 keep_going (ecs);
6671 return;
6672 }
c906108c 6673
1d509aa6
MM
6674 /* Step through an indirect branch thunk. */
6675 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6676 && gdbarch_in_indirect_branch_thunk (gdbarch,
6677 ecs->event_thread->suspend.stop_pc))
1d509aa6 6678 {
edbcda09 6679 infrun_log_debug ("stepped into indirect branch thunk");
1d509aa6
MM
6680 keep_going (ecs);
6681 return;
6682 }
6683
16c381f0
JK
6684 if (ecs->event_thread->control.step_range_end != 1
6685 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6686 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6687 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6688 {
edbcda09 6689 infrun_log_debug ("stepped into signal trampoline");
42edda50 6690 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6691 a signal trampoline (either by a signal being delivered or by
6692 the signal handler returning). Just single-step until the
6693 inferior leaves the trampoline (either by calling the handler
6694 or returning). */
488f131b
JB
6695 keep_going (ecs);
6696 return;
6697 }
c906108c 6698
14132e89
MR
6699 /* If we're in the return path from a shared library trampoline,
6700 we want to proceed through the trampoline when stepping. */
6701 /* macro/2012-04-25: This needs to come before the subroutine
6702 call check below as on some targets return trampolines look
6703 like subroutine calls (MIPS16 return thunks). */
6704 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6705 ecs->event_thread->suspend.stop_pc,
6706 ecs->stop_func_name)
14132e89
MR
6707 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6708 {
6709 /* Determine where this trampoline returns. */
f2ffa92b
PA
6710 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6711 CORE_ADDR real_stop_pc
6712 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 6713
edbcda09 6714 infrun_log_debug ("stepped into solib return tramp");
14132e89
MR
6715
6716 /* Only proceed through if we know where it's going. */
6717 if (real_stop_pc)
6718 {
6719 /* And put the step-breakpoint there and go until there. */
51abb421 6720 symtab_and_line sr_sal;
14132e89
MR
6721 sr_sal.pc = real_stop_pc;
6722 sr_sal.section = find_pc_overlay (sr_sal.pc);
6723 sr_sal.pspace = get_frame_program_space (frame);
6724
6725 /* Do not specify what the fp should be when we stop since
6726 on some machines the prologue is where the new fp value
6727 is established. */
6728 insert_step_resume_breakpoint_at_sal (gdbarch,
6729 sr_sal, null_frame_id);
6730
6731 /* Restart without fiddling with the step ranges or
6732 other state. */
6733 keep_going (ecs);
6734 return;
6735 }
6736 }
6737
c17eaafe
DJ
6738 /* Check for subroutine calls. The check for the current frame
6739 equalling the step ID is not necessary - the check of the
6740 previous frame's ID is sufficient - but it is a common case and
6741 cheaper than checking the previous frame's ID.
14e60db5
DJ
6742
6743 NOTE: frame_id_eq will never report two invalid frame IDs as
6744 being equal, so to get into this block, both the current and
6745 previous frame must have valid frame IDs. */
005ca36a
JB
6746 /* The outer_frame_id check is a heuristic to detect stepping
6747 through startup code. If we step over an instruction which
6748 sets the stack pointer from an invalid value to a valid value,
6749 we may detect that as a subroutine call from the mythical
6750 "outermost" function. This could be fixed by marking
6751 outermost frames as !stack_p,code_p,special_p. Then the
6752 initial outermost frame, before sp was valid, would
ce6cca6d 6753 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6754 for more. */
edb3359d 6755 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6756 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6757 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6758 ecs->event_thread->control.step_stack_frame_id)
6759 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6760 outer_frame_id)
885eeb5b 6761 || (ecs->event_thread->control.step_start_function
f2ffa92b 6762 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6763 {
f2ffa92b 6764 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6765 CORE_ADDR real_stop_pc;
8fb3e588 6766
edbcda09 6767 infrun_log_debug ("stepped into subroutine");
527159b7 6768
b7a084be 6769 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6770 {
6771 /* I presume that step_over_calls is only 0 when we're
6772 supposed to be stepping at the assembly language level
6773 ("stepi"). Just stop. */
388a8562 6774 /* And this works the same backward as frontward. MVS */
bdc36728 6775 end_stepping_range (ecs);
95918acb
AC
6776 return;
6777 }
8fb3e588 6778
388a8562
MS
6779 /* Reverse stepping through solib trampolines. */
6780
6781 if (execution_direction == EXEC_REVERSE
16c381f0 6782 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6783 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6784 || (ecs->stop_func_start == 0
6785 && in_solib_dynsym_resolve_code (stop_pc))))
6786 {
6787 /* Any solib trampoline code can be handled in reverse
6788 by simply continuing to single-step. We have already
6789 executed the solib function (backwards), and a few
6790 steps will take us back through the trampoline to the
6791 caller. */
6792 keep_going (ecs);
6793 return;
6794 }
6795
16c381f0 6796 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6797 {
b2175913
MS
6798 /* We're doing a "next".
6799
6800 Normal (forward) execution: set a breakpoint at the
6801 callee's return address (the address at which the caller
6802 will resume).
6803
6804 Reverse (backward) execution. set the step-resume
6805 breakpoint at the start of the function that we just
6806 stepped into (backwards), and continue to there. When we
6130d0b7 6807 get there, we'll need to single-step back to the caller. */
b2175913
MS
6808
6809 if (execution_direction == EXEC_REVERSE)
6810 {
acf9414f
JK
6811 /* If we're already at the start of the function, we've either
6812 just stepped backward into a single instruction function,
6813 or stepped back out of a signal handler to the first instruction
6814 of the function. Just keep going, which will single-step back
6815 to the caller. */
58c48e72 6816 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6817 {
acf9414f 6818 /* Normal function call return (static or dynamic). */
51abb421 6819 symtab_and_line sr_sal;
acf9414f
JK
6820 sr_sal.pc = ecs->stop_func_start;
6821 sr_sal.pspace = get_frame_program_space (frame);
6822 insert_step_resume_breakpoint_at_sal (gdbarch,
6823 sr_sal, null_frame_id);
6824 }
b2175913
MS
6825 }
6826 else
568d6575 6827 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6828
8567c30f
AC
6829 keep_going (ecs);
6830 return;
6831 }
a53c66de 6832
95918acb 6833 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6834 calling routine and the real function), locate the real
6835 function. That's what tells us (a) whether we want to step
6836 into it at all, and (b) what prologue we want to run to the
6837 end of, if we do step into it. */
568d6575 6838 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6839 if (real_stop_pc == 0)
568d6575 6840 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6841 if (real_stop_pc != 0)
6842 ecs->stop_func_start = real_stop_pc;
8fb3e588 6843
db5f024e 6844 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6845 {
51abb421 6846 symtab_and_line sr_sal;
1b2bfbb9 6847 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6848 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6849
a6d9a66e
UW
6850 insert_step_resume_breakpoint_at_sal (gdbarch,
6851 sr_sal, null_frame_id);
8fb3e588
AC
6852 keep_going (ecs);
6853 return;
1b2bfbb9
RC
6854 }
6855
95918acb 6856 /* If we have line number information for the function we are
1bfeeb0f
JL
6857 thinking of stepping into and the function isn't on the skip
6858 list, step into it.
95918acb 6859
8fb3e588
AC
6860 If there are several symtabs at that PC (e.g. with include
6861 files), just want to know whether *any* of them have line
6862 numbers. find_pc_line handles this. */
95918acb
AC
6863 {
6864 struct symtab_and_line tmp_sal;
8fb3e588 6865
95918acb 6866 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6867 if (tmp_sal.line != 0
85817405 6868 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6869 tmp_sal)
6870 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6871 {
b2175913 6872 if (execution_direction == EXEC_REVERSE)
568d6575 6873 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6874 else
568d6575 6875 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6876 return;
6877 }
6878 }
6879
6880 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6881 set, we stop the step so that the user has a chance to switch
6882 in assembly mode. */
16c381f0 6883 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6884 && step_stop_if_no_debug)
95918acb 6885 {
bdc36728 6886 end_stepping_range (ecs);
95918acb
AC
6887 return;
6888 }
6889
b2175913
MS
6890 if (execution_direction == EXEC_REVERSE)
6891 {
acf9414f
JK
6892 /* If we're already at the start of the function, we've either just
6893 stepped backward into a single instruction function without line
6894 number info, or stepped back out of a signal handler to the first
6895 instruction of the function without line number info. Just keep
6896 going, which will single-step back to the caller. */
6897 if (ecs->stop_func_start != stop_pc)
6898 {
6899 /* Set a breakpoint at callee's start address.
6900 From there we can step once and be back in the caller. */
51abb421 6901 symtab_and_line sr_sal;
acf9414f
JK
6902 sr_sal.pc = ecs->stop_func_start;
6903 sr_sal.pspace = get_frame_program_space (frame);
6904 insert_step_resume_breakpoint_at_sal (gdbarch,
6905 sr_sal, null_frame_id);
6906 }
b2175913
MS
6907 }
6908 else
6909 /* Set a breakpoint at callee's return address (the address
6910 at which the caller will resume). */
568d6575 6911 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6912
95918acb 6913 keep_going (ecs);
488f131b 6914 return;
488f131b 6915 }
c906108c 6916
fdd654f3
MS
6917 /* Reverse stepping through solib trampolines. */
6918
6919 if (execution_direction == EXEC_REVERSE
16c381f0 6920 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 6921 {
f2ffa92b
PA
6922 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6923
fdd654f3
MS
6924 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6925 || (ecs->stop_func_start == 0
6926 && in_solib_dynsym_resolve_code (stop_pc)))
6927 {
6928 /* Any solib trampoline code can be handled in reverse
6929 by simply continuing to single-step. We have already
6930 executed the solib function (backwards), and a few
6931 steps will take us back through the trampoline to the
6932 caller. */
6933 keep_going (ecs);
6934 return;
6935 }
6936 else if (in_solib_dynsym_resolve_code (stop_pc))
6937 {
6938 /* Stepped backward into the solib dynsym resolver.
6939 Set a breakpoint at its start and continue, then
6940 one more step will take us out. */
51abb421 6941 symtab_and_line sr_sal;
fdd654f3 6942 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6943 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6944 insert_step_resume_breakpoint_at_sal (gdbarch,
6945 sr_sal, null_frame_id);
6946 keep_going (ecs);
6947 return;
6948 }
6949 }
6950
8c95582d
AB
6951 /* This always returns the sal for the inner-most frame when we are in a
6952 stack of inlined frames, even if GDB actually believes that it is in a
6953 more outer frame. This is checked for below by calls to
6954 inline_skipped_frames. */
f2ffa92b 6955 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 6956
1b2bfbb9
RC
6957 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6958 the trampoline processing logic, however, there are some trampolines
6959 that have no names, so we should do trampoline handling first. */
16c381f0 6960 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6961 && ecs->stop_func_name == NULL
2afb61aa 6962 && stop_pc_sal.line == 0)
1b2bfbb9 6963 {
edbcda09 6964 infrun_log_debug ("stepped into undebuggable function");
527159b7 6965
1b2bfbb9 6966 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
6967 undebuggable function (where there is no debugging information
6968 and no line number corresponding to the address where the
1b2bfbb9
RC
6969 inferior stopped). Since we want to skip this kind of code,
6970 we keep going until the inferior returns from this
14e60db5
DJ
6971 function - unless the user has asked us not to (via
6972 set step-mode) or we no longer know how to get back
6973 to the call site. */
6974 if (step_stop_if_no_debug
c7ce8faa 6975 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6976 {
6977 /* If we have no line number and the step-stop-if-no-debug
6978 is set, we stop the step so that the user has a chance to
6979 switch in assembly mode. */
bdc36728 6980 end_stepping_range (ecs);
1b2bfbb9
RC
6981 return;
6982 }
6983 else
6984 {
6985 /* Set a breakpoint at callee's return address (the address
6986 at which the caller will resume). */
568d6575 6987 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6988 keep_going (ecs);
6989 return;
6990 }
6991 }
6992
16c381f0 6993 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6994 {
6995 /* It is stepi or nexti. We always want to stop stepping after
6996 one instruction. */
edbcda09 6997 infrun_log_debug ("stepi/nexti");
bdc36728 6998 end_stepping_range (ecs);
1b2bfbb9
RC
6999 return;
7000 }
7001
2afb61aa 7002 if (stop_pc_sal.line == 0)
488f131b
JB
7003 {
7004 /* We have no line number information. That means to stop
7005 stepping (does this always happen right after one instruction,
7006 when we do "s" in a function with no line numbers,
7007 or can this happen as a result of a return or longjmp?). */
edbcda09 7008 infrun_log_debug ("no line number info");
bdc36728 7009 end_stepping_range (ecs);
488f131b
JB
7010 return;
7011 }
c906108c 7012
edb3359d
DJ
7013 /* Look for "calls" to inlined functions, part one. If the inline
7014 frame machinery detected some skipped call sites, we have entered
7015 a new inline function. */
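  /* Illustrative sketch (hypothetical source, not from GDB): given

	 static inline int sq (int x) { return x * x; }
	 int caller (int v) { return sq (v) + 1; }

     with sq inlined into caller, stepping from caller into the inlined
     body creates no new real frame, so the inline frame machinery is
     what records that a call site was skipped.  */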
7016
7017 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7018 ecs->event_thread->control.step_frame_id)
00431a78 7019 && inline_skipped_frames (ecs->event_thread))
edb3359d 7020 {
edbcda09 7021 infrun_log_debug ("stepped into inlined function");
edb3359d 7022
51abb421 7023 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 7024
16c381f0 7025 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
7026 {
7027 /* For "step", we're going to stop. But if the call site
7028 for this inlined function is on the same source line as
7029 we were previously stepping, go down into the function
7030 first. Otherwise stop at the call site. */
7031
7032 if (call_sal.line == ecs->event_thread->current_line
7033 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7034 {
7035 step_into_inline_frame (ecs->event_thread);
7036 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7037 {
7038 keep_going (ecs);
7039 return;
7040 }
7041 }
edb3359d 7042
bdc36728 7043 end_stepping_range (ecs);
edb3359d
DJ
7044 return;
7045 }
7046 else
7047 {
7048 /* For "next", we should stop at the call site if it is on a
7049 different source line. Otherwise continue through the
7050 inlined function. */
7051 if (call_sal.line == ecs->event_thread->current_line
7052 && call_sal.symtab == ecs->event_thread->current_symtab)
7053 keep_going (ecs);
7054 else
bdc36728 7055 end_stepping_range (ecs);
edb3359d
DJ
7056 return;
7057 }
7058 }
7059
7060 /* Look for "calls" to inlined functions, part two. If we are still
7061 in the same real function we were stepping through, but we have
7062 to go further up to find the exact frame ID, we are stepping
7063 through a more inlined call beyond its call site. */
7064
7065 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7066 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7067 ecs->event_thread->control.step_frame_id)
edb3359d 7068 && stepped_in_from (get_current_frame (),
16c381f0 7069 ecs->event_thread->control.step_frame_id))
edb3359d 7070 {
edbcda09 7071 infrun_log_debug ("stepping through inlined function");
edb3359d 7072
4a4c04f1
BE
7073 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7074 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7075 keep_going (ecs);
7076 else
bdc36728 7077 end_stepping_range (ecs);
edb3359d
DJ
7078 return;
7079 }
7080
8c95582d 7081 bool refresh_step_info = true;
f2ffa92b 7082 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
7083 && (ecs->event_thread->current_line != stop_pc_sal.line
7084 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7085 {
8c95582d
AB
7086 if (stop_pc_sal.is_stmt)
7087 {
7088 /* We are at the start of a different line. So stop. Note that
7089 we don't stop if we step into the middle of a different line.
7090 That is said to make things like for (;;) statements work
7091 better. */
edbcda09 7092 infrun_log_debug ("stepped to a different line");
8c95582d
AB
7093 end_stepping_range (ecs);
7094 return;
7095 }
7096 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7097 ecs->event_thread->control.step_frame_id))
7098 {
7099 /* We are at the start of a different line, however, this line is
7100 not marked as a statement, and we have not changed frame. We
7101 ignore this line table entry, and continue stepping forward,
7102 looking for a better place to stop. */
7103 refresh_step_info = false;
edbcda09
SM
7104 infrun_log_debug ("stepped to a different line, but "
7105 "it's not the start of a statement");
8c95582d 7106 }
488f131b 7107 }
c906108c 7108
488f131b 7109 /* We aren't done stepping.
c906108c 7110
488f131b
JB
7111 Optimize by setting the stepping range to the line.
7112 (We might not be in the original line, but if we entered a
7113 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7114 things like for(;;) statements work better.)
7115
7116 If we entered a SAL that indicates a non-statement line table entry,
7117 then we update the stepping range, but we don't update the step info,
7118 which includes things like the line number we are stepping away from.
7119 This means we will stop when we find a line table entry that is marked
7120 as is-statement, even if it matches the non-statement one we just
7121 stepped into. */
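  /* Illustrative sketch (made-up addresses and an assumption about what
     a compiler might emit): a line table such as

	 address   line  is_stmt
	 0x1000    10    yes    <- the line we were stepping through
	 0x1008    11    no     <- new line, not a statement boundary
	 0x1010    11    yes    <- a real statement boundary

     means a stop at 0x1008 only installs the new range below (with
     refresh_step_info left false), while reaching 0x1010 ends the
     step.  */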
c906108c 7122
16c381f0
JK
7123 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7124 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7125 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7126 if (refresh_step_info)
7127 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7128
edbcda09 7129 infrun_log_debug ("keep going");
488f131b 7130 keep_going (ecs);
104c1213
JM
7131}
7132
c447ac0b
PA
7133/* In all-stop mode, if we're currently stepping but have stopped in
7134 some other thread, we may need to switch back to the stepped
7135 thread. Returns true we set the inferior running, false if we left
7136 it stopped (and the event needs further processing). */
7137
7138static int
7139switch_back_to_stepped_thread (struct execution_control_state *ecs)
7140{
fbea99ea 7141 if (!target_is_non_stop_p ())
c447ac0b 7142 {
99619bea
PA
7143 struct thread_info *stepping_thread;
7144
7145 /* If any thread is blocked on some internal breakpoint, and we
7146 simply need to step over that breakpoint to get it going
7147 again, do that first. */
7148
7149 /* However, if we see an event for the stepping thread, then we
7150 know all other threads have been moved past their breakpoints
7151 already. Let the caller check whether the step is finished,
7152 etc., before deciding to move it past a breakpoint. */
7153 if (ecs->event_thread->control.step_range_end != 0)
7154 return 0;
7155
7156 /* Check if the current thread is blocked on an incomplete
7157 step-over, interrupted by a random signal. */
7158 if (ecs->event_thread->control.trap_expected
7159 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7160 {
edbcda09
SM
7161 infrun_log_debug ("need to finish step-over of [%s]",
7162 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea
PA
7163 keep_going (ecs);
7164 return 1;
7165 }
2adfaa28 7166
99619bea
PA
7167 /* Check if the current thread is blocked by a single-step
7168 breakpoint of another thread. */
7169 if (ecs->hit_singlestep_breakpoint)
7170 {
edbcda09
SM
7171 infrun_log_debug ("need to step [%s] over single-step breakpoint",
7172 target_pid_to_str (ecs->ptid).c_str ());
99619bea
PA
7173 keep_going (ecs);
7174 return 1;
7175 }
7176
4d9d9d04
PA
7177 /* If this thread needs yet another step-over (e.g., stepping
7178 through a delay slot), do it first before moving on to
7179 another thread. */
7180 if (thread_still_needs_step_over (ecs->event_thread))
7181 {
edbcda09
SM
7182 infrun_log_debug
7183 ("thread [%s] still needs step-over",
7184 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04
PA
7185 keep_going (ecs);
7186 return 1;
7187 }
70509625 7188
483805cf
PA
7189 /* If scheduler locking applies even if not stepping, there's no
7190 need to walk over threads. Above we've checked whether the
7191 current thread is stepping. If some other thread not the
7192 event thread is stepping, then it must be that scheduler
7193 locking is not in effect. */
856e7dd6 7194 if (schedlock_applies (ecs->event_thread))
483805cf
PA
7195 return 0;
7196
4d9d9d04
PA
7197 /* Otherwise, we no longer expect a trap in the current thread.
7198 Clear the trap_expected flag before switching back -- this is
7199 what keep_going does as well, if we call it. */
7200 ecs->event_thread->control.trap_expected = 0;
7201
7202 /* Likewise, clear the signal if it should not be passed. */
7203 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7204 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7205
7206 /* Do all pending step-overs before actually proceeding with
483805cf 7207 step/next/etc. */
4d9d9d04
PA
7208 if (start_step_over ())
7209 {
7210 prepare_to_wait (ecs);
7211 return 1;
7212 }
7213
7214 /* Look for the stepping/nexting thread. */
483805cf 7215 stepping_thread = NULL;
4d9d9d04 7216
08036331 7217 for (thread_info *tp : all_non_exited_threads ())
483805cf 7218 {
f3f8ece4
PA
7219 switch_to_thread_no_regs (tp);
7220
fbea99ea
PA
7221 /* Ignore threads of processes the caller is not
7222 resuming. */
483805cf 7223 if (!sched_multi
5b6d1e4f
PA
7224 && (tp->inf->process_target () != ecs->target
7225 || tp->inf->pid != ecs->ptid.pid ()))
483805cf
PA
7226 continue;
7227
7228 /* When stepping over a breakpoint, we lock all threads
7229 except the one that needs to move past the breakpoint.
7230 If a non-event thread has this set, the "incomplete
7231 step-over" check above should have caught it earlier. */
372316f1
PA
7232 if (tp->control.trap_expected)
7233 {
7234 internal_error (__FILE__, __LINE__,
7235 "[%s] has inconsistent state: "
7236 "trap_expected=%d\n",
a068643d 7237 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
7238 tp->control.trap_expected);
7239 }
483805cf
PA
7240
7241 /* Did we find the stepping thread? */
7242 if (tp->control.step_range_end)
7243 {
7244 /* Yep. There should only be one though. */
7245 gdb_assert (stepping_thread == NULL);
7246
7247 /* The event thread is handled at the top, before we
7248 enter this loop. */
7249 gdb_assert (tp != ecs->event_thread);
7250
7251 /* If some thread other than the event thread is
7252 stepping, then scheduler locking can't be in effect,
7253 otherwise we wouldn't have resumed the current event
7254 thread in the first place. */
856e7dd6 7255 gdb_assert (!schedlock_applies (tp));
483805cf
PA
7256
7257 stepping_thread = tp;
7258 }
99619bea
PA
7259 }
7260
483805cf 7261 if (stepping_thread != NULL)
99619bea 7262 {
edbcda09 7263 infrun_log_debug ("switching back to stepped thread");
c447ac0b 7264
2ac7589c
PA
7265 if (keep_going_stepped_thread (stepping_thread))
7266 {
7267 prepare_to_wait (ecs);
7268 return 1;
7269 }
7270 }
f3f8ece4
PA
7271
7272 switch_to_thread (ecs->event_thread);
2ac7589c 7273 }
2adfaa28 7274
2ac7589c
PA
7275 return 0;
7276}
2adfaa28 7277
2ac7589c
PA
7278/* Set a previously stepped thread back to stepping. Returns true on
7279 success, false if the resume is not possible (e.g., the thread
7280 vanished). */
7281
7282static int
7283keep_going_stepped_thread (struct thread_info *tp)
7284{
7285 struct frame_info *frame;
2ac7589c
PA
7286 struct execution_control_state ecss;
7287 struct execution_control_state *ecs = &ecss;
2adfaa28 7288
2ac7589c
PA
7289 /* If the stepping thread exited, then don't try to switch back and
7290 resume it, which could fail in several different ways depending
7291 on the target. Instead, just keep going.
2adfaa28 7292
2ac7589c
PA
7293 We can find a stepping dead thread in the thread list in two
7294 cases:
2adfaa28 7295
2ac7589c
PA
7296 - The target supports thread exit events, and when the target
7297 tries to delete the thread from the thread list, inferior_ptid
7298 pointed at the exiting thread. In such case, calling
7299 delete_thread does not really remove the thread from the list;
7300 instead, the thread is left listed, with 'exited' state.
64ce06e4 7301
2ac7589c
PA
7302 - The target's debug interface does not support thread exit
7303 events, and so we have no idea whatsoever if the previously
7304 stepping thread is still alive. For that reason, we need to
7305 synchronously query the target now. */
2adfaa28 7306
00431a78 7307 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 7308 {
edbcda09
SM
7309 infrun_log_debug ("not resuming previously stepped thread, it has "
7310 "vanished");
2ac7589c 7311
00431a78 7312 delete_thread (tp);
2ac7589c 7313 return 0;
c447ac0b 7314 }
2ac7589c 7315
edbcda09 7316 infrun_log_debug ("resuming previously stepped thread");
2ac7589c
PA
7317
7318 reset_ecs (ecs, tp);
00431a78 7319 switch_to_thread (tp);
2ac7589c 7320
f2ffa92b 7321 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7322 frame = get_current_frame ();
2ac7589c
PA
7323
7324 /* If the PC of the thread we were trying to single-step has
7325 changed, then that thread has trapped or been signaled, but the
7326 event has not been reported to GDB yet. Re-poll the target
7327 looking for this particular thread's event (i.e. temporarily
7328 enable schedlock) by:
7329
7330 - setting a break at the current PC
7331 - resuming that particular thread, only (by setting trap
7332 expected)
7333
7334 This prevents us continuously moving the single-step breakpoint
7335 forward, one instruction at a time, overstepping. */
7336
f2ffa92b 7337 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7338 {
7339 ptid_t resume_ptid;
7340
edbcda09
SM
7341 infrun_log_debug ("expected thread advanced also (%s -> %s)",
7342 paddress (target_gdbarch (), tp->prev_pc),
7343 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7344
7345 /* Clear the info of the previous step-over, as it's no longer
7346 valid (if the thread was trying to step over a breakpoint, it
7347 has already succeeded). It's what keep_going would do too,
7348 if we called it. Do this before trying to insert the sss
7349 breakpoint, otherwise if we were previously trying to step
7350 over this exact address in another thread, the breakpoint is
7351 skipped. */
7352 clear_step_over_info ();
7353 tp->control.trap_expected = 0;
7354
7355 insert_single_step_breakpoint (get_frame_arch (frame),
7356 get_frame_address_space (frame),
f2ffa92b 7357 tp->suspend.stop_pc);
2ac7589c 7358
719546c4 7359 tp->resumed = true;
fbea99ea 7360 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7361 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7362 }
7363 else
7364 {
edbcda09 7365 infrun_log_debug ("expected thread still hasn't advanced");
2ac7589c
PA
7366
7367 keep_going_pass_signal (ecs);
7368 }
7369 return 1;
c447ac0b
PA
7370}
7371
8b061563
PA
7372/* Is thread TP in the middle of (software or hardware)
7373 single-stepping? (Note the result of this function must never be
7374 passed directly as target_resume's STEP parameter.) */
104c1213 7375
a289b8f6 7376static int
b3444185 7377currently_stepping (struct thread_info *tp)
a7212384 7378{
8358c15c
JK
7379 return ((tp->control.step_range_end
7380 && tp->control.step_resume_breakpoint == NULL)
7381 || tp->control.trap_expected
af48d08f 7382 || tp->stepped_breakpoint
8358c15c 7383 || bpstat_should_step ());
a7212384
UW
7384}
7385
b2175913
MS
7386/* Inferior has stepped into a subroutine call with source code that
7387 we should not step over. Do step to the first line of code in
7388 it. */
c2c6d25f
JM
7389
7390static void
568d6575
UW
7391handle_step_into_function (struct gdbarch *gdbarch,
7392 struct execution_control_state *ecs)
c2c6d25f 7393{
7e324e48
GB
7394 fill_in_stop_func (gdbarch, ecs);
7395
f2ffa92b
PA
7396 compunit_symtab *cust
7397 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7398 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7399 ecs->stop_func_start
7400 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7401
51abb421 7402 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7403 /* Use the step_resume_break to step until the end of the prologue,
7404 even if that involves jumps (as it seems to on the vax under
7405 4.2). */
7406 /* If the prologue ends in the middle of a source line, continue to
7407 the end of that source line (if it is still within the function).
7408 Otherwise, just go to end of prologue. */
2afb61aa
PA
7409 if (stop_func_sal.end
7410 && stop_func_sal.pc != ecs->stop_func_start
7411 && stop_func_sal.end < ecs->stop_func_end)
7412 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7413
2dbd5e30
KB
7414 /* Architectures which require breakpoint adjustment might not be able
7415 to place a breakpoint at the computed address. If so, the test
7416 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7417 ecs->stop_func_start to an address at which a breakpoint may be
7418 legitimately placed.
8fb3e588 7419
2dbd5e30
KB
7420 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7421 made, GDB will enter an infinite loop when stepping through
7422 optimized code consisting of VLIW instructions which contain
7423 subinstructions corresponding to different source lines. On
7424 FR-V, it's not permitted to place a breakpoint on any but the
7425 first subinstruction of a VLIW instruction. When a breakpoint is
7426 set, GDB will adjust the breakpoint address to the beginning of
7427 the VLIW instruction. Thus, we need to make the corresponding
7428 adjustment here when computing the stop address. */
8fb3e588 7429
568d6575 7430 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7431 {
7432 ecs->stop_func_start
568d6575 7433 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7434 ecs->stop_func_start);
2dbd5e30
KB
7435 }
7436
f2ffa92b 7437 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7438 {
7439 /* We are already there: stop now. */
bdc36728 7440 end_stepping_range (ecs);
c2c6d25f
JM
7441 return;
7442 }
7443 else
7444 {
7445 /* Put the step-breakpoint there and go until there. */
51abb421 7446 symtab_and_line sr_sal;
c2c6d25f
JM
7447 sr_sal.pc = ecs->stop_func_start;
7448 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7449 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7450
c2c6d25f 7451 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7452 some machines the prologue is where the new fp value is
7453 established. */
a6d9a66e 7454 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7455
7456 /* And make sure stepping stops right away then. */
16c381f0
JK
7457 ecs->event_thread->control.step_range_end
7458 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7459 }
7460 keep_going (ecs);
7461}
d4f3574e 7462
b2175913
MS
7463/* Inferior has stepped backward into a subroutine call with source
7464 code that we should not step over. Step to the beginning of the
7465 last line of code in it. */
7466
7467static void
568d6575
UW
7468handle_step_into_function_backward (struct gdbarch *gdbarch,
7469 struct execution_control_state *ecs)
b2175913 7470{
43f3e411 7471 struct compunit_symtab *cust;
167e4384 7472 struct symtab_and_line stop_func_sal;
b2175913 7473
7e324e48
GB
7474 fill_in_stop_func (gdbarch, ecs);
7475
f2ffa92b 7476 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7477 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7478 ecs->stop_func_start
7479 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7480
f2ffa92b 7481 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7482
7483 /* OK, we're just going to keep stepping here. */
f2ffa92b 7484 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7485 {
7486 /* We're there already. Just stop stepping now. */
bdc36728 7487 end_stepping_range (ecs);
b2175913
MS
7488 }
7489 else
7490 {
7491 /* Else just reset the step range and keep going.
7492 No step-resume breakpoint, they don't work for
7493 epilogues, which can have multiple entry paths. */
16c381f0
JK
7494 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7495 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7496 keep_going (ecs);
7497 }
7498 return;
7499}
7500
d3169d93 7501/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7502 This is used both to skip over functions and to skip over code. */
7503
7504static void
2c03e5be
PA
7505insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7506 struct symtab_and_line sr_sal,
7507 struct frame_id sr_id,
7508 enum bptype sr_type)
44cbf7b5 7509{
611c83ae
PA
7510 /* There should never be more than one step-resume or longjmp-resume
7511 breakpoint per thread, so we should never be setting a new
44cbf7b5 7512 step_resume_breakpoint when one is already active. */
8358c15c 7513 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7514 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 7515
edbcda09
SM
7516 infrun_log_debug ("inserting step-resume breakpoint at %s",
7517 paddress (gdbarch, sr_sal.pc));
d3169d93 7518
8358c15c 7519 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7520 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7521}
7522
9da8c2a0 7523void
2c03e5be
PA
7524insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7525 struct symtab_and_line sr_sal,
7526 struct frame_id sr_id)
7527{
7528 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7529 sr_sal, sr_id,
7530 bp_step_resume);
44cbf7b5 7531}
7ce450bd 7532
2c03e5be
PA
7533/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7534 This is used to skip a potential signal handler.
7ce450bd 7535
14e60db5
DJ
7536 This is called with the interrupted function's frame. The signal
7537 handler, when it returns, will resume the interrupted function at
7538 RETURN_FRAME.pc. */
d303a6c7
AC
7539
7540static void
2c03e5be 7541insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7542{
f4c1edd8 7543 gdb_assert (return_frame != NULL);
d303a6c7 7544
51abb421
PA
7545 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7546
7547 symtab_and_line sr_sal;
568d6575 7548 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7549 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7550 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7551
2c03e5be
PA
7552 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7553 get_stack_frame_id (return_frame),
7554 bp_hp_step_resume);
d303a6c7
AC
7555}
7556
2c03e5be
PA
7557/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7558 is used to skip a function after stepping into it (for "next" or if
7559 the called function has no debugging information).
14e60db5
DJ
7560
7561 The current function has almost always been reached by single
7562 stepping a call or return instruction. NEXT_FRAME belongs to the
7563 current function, and the breakpoint will be set at the caller's
7564 resume address.
7565
7566 This is a separate function rather than reusing
2c03e5be 7567 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7568 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7569 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7570
7571static void
7572insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7573{
14e60db5
DJ
7574 /* We shouldn't have gotten here if we don't know where the call site
7575 is. */
c7ce8faa 7576 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7577
51abb421 7578 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7579
51abb421 7580 symtab_and_line sr_sal;
c7ce8faa
DJ
7581 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7582 frame_unwind_caller_pc (next_frame));
14e60db5 7583 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7584 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7585
a6d9a66e 7586 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7587 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7588}
7589
611c83ae
PA
7590/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7591 new breakpoint at the target of a jmp_buf. The handling of
7592 longjmp-resume uses the same mechanisms used for handling
7593 "step-resume" breakpoints. */
7594
7595static void
a6d9a66e 7596insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7597{
e81a37f7
TT
7598 /* There should never be more than one longjmp-resume breakpoint per
7599 thread, so we should never be setting a new
611c83ae 7600 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7601 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae 7602
edbcda09
SM
7603 infrun_log_debug ("inserting longjmp-resume breakpoint at %s",
7604 paddress (gdbarch, pc));
611c83ae 7605
e81a37f7 7606 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7607 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7608}
7609
186c406b
TT
7610/* Insert an exception resume breakpoint. TP is the thread throwing
7611 the exception. The block B is the block of the unwinder debug hook
7612 function. FRAME is the frame corresponding to the call to this
7613 function. SYM is the symbol of the function argument holding the
7614 target PC of the exception. */
7615
7616static void
7617insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7618 const struct block *b,
186c406b
TT
7619 struct frame_info *frame,
7620 struct symbol *sym)
7621{
a70b8144 7622 try
186c406b 7623 {
63e43d3a 7624 struct block_symbol vsym;
186c406b
TT
7625 struct value *value;
7626 CORE_ADDR handler;
7627 struct breakpoint *bp;
7628
987012b8 7629 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7630 b, VAR_DOMAIN);
63e43d3a 7631 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7632 /* If the value was optimized out, revert to the old behavior. */
7633 if (! value_optimized_out (value))
7634 {
7635 handler = value_as_address (value);
7636
edbcda09
SM
7637 infrun_log_debug ("exception resume at %lx",
7638 (unsigned long) handler);
186c406b
TT
7639
7640 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7641 handler,
7642 bp_exception_resume).release ();
c70a6932
JK
7643
7644 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7645 frame = NULL;
7646
5d5658a1 7647 bp->thread = tp->global_num;
186c406b
TT
7648 inferior_thread ()->control.exception_resume_breakpoint = bp;
7649 }
7650 }
230d2906 7651 catch (const gdb_exception_error &e)
492d29ea
PA
7652 {
7653 /* We want to ignore errors here. */
7654 }
186c406b
TT
7655}
7656
28106bc2
SDJ
7657/* A helper for check_exception_resume that sets an
7658 exception-breakpoint based on a SystemTap probe. */
7659
7660static void
7661insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7662 const struct bound_probe *probe,
28106bc2
SDJ
7663 struct frame_info *frame)
7664{
7665 struct value *arg_value;
7666 CORE_ADDR handler;
7667 struct breakpoint *bp;
7668
7669 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7670 if (!arg_value)
7671 return;
7672
7673 handler = value_as_address (arg_value);
7674
edbcda09
SM
7675 infrun_log_debug ("exception resume at %s",
7676 paddress (probe->objfile->arch (), handler));
28106bc2
SDJ
7677
7678 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7679 handler, bp_exception_resume).release ();
5d5658a1 7680 bp->thread = tp->global_num;
28106bc2
SDJ
7681 inferior_thread ()->control.exception_resume_breakpoint = bp;
7682}
7683
186c406b
TT
7684/* This is called when an exception has been intercepted. Check to
7685 see whether the exception's destination is of interest, and if so,
7686 set an exception resume breakpoint there. */
7687
7688static void
7689check_exception_resume (struct execution_control_state *ecs,
28106bc2 7690 struct frame_info *frame)
186c406b 7691{
729662a5 7692 struct bound_probe probe;
28106bc2
SDJ
7693 struct symbol *func;
7694
7695 /* First see if this exception unwinding breakpoint was set via a
7696 SystemTap probe point. If so, the probe has two arguments: the
7697 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7698 set a breakpoint there. */
6bac7473 7699 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7700 if (probe.prob)
28106bc2 7701 {
729662a5 7702 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7703 return;
7704 }
7705
7706 func = get_frame_function (frame);
7707 if (!func)
7708 return;
186c406b 7709
a70b8144 7710 try
186c406b 7711 {
3977b71f 7712 const struct block *b;
8157b174 7713 struct block_iterator iter;
186c406b
TT
7714 struct symbol *sym;
7715 int argno = 0;
7716
7717 /* The exception breakpoint is a thread-specific breakpoint on
7718 the unwinder's debug hook, declared as:
7719
7720 void _Unwind_DebugHook (void *cfa, void *handler);
7721
7722 The CFA argument indicates the frame to which control is
7723 about to be transferred. HANDLER is the destination PC.
7724
7725 We ignore the CFA and set a temporary breakpoint at HANDLER.
7726 This is not extremely efficient but it avoids issues in gdb
7727 with computing the DWARF CFA, and it also works even in weird
7728 cases such as throwing an exception from inside a signal
7729 handler. */
7730
7731 b = SYMBOL_BLOCK_VALUE (func);
7732 ALL_BLOCK_SYMBOLS (b, iter, sym)
7733 {
7734 if (!SYMBOL_IS_ARGUMENT (sym))
7735 continue;
7736
7737 if (argno == 0)
7738 ++argno;
7739 else
7740 {
7741 insert_exception_resume_breakpoint (ecs->event_thread,
7742 b, frame, sym);
7743 break;
7744 }
7745 }
7746 }
230d2906 7747 catch (const gdb_exception_error &e)
492d29ea
PA
7748 {
7749 }
186c406b
TT
7750}
7751
104c1213 7752static void
22bcd14b 7753stop_waiting (struct execution_control_state *ecs)
104c1213 7754{
edbcda09 7755 infrun_log_debug ("stop_waiting");
527159b7 7756
cd0fc7c3
SS
7757 /* Let callers know we don't want to wait for the inferior anymore. */
7758 ecs->wait_some_more = 0;
fbea99ea 7759
53cccef1 7760 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 7761 threads now that we're presenting the stop to the user. */
53cccef1 7762 if (!non_stop && exists_non_stop_target ())
fbea99ea 7763 stop_all_threads ();
cd0fc7c3
SS
7764}
7765
4d9d9d04
PA
7766/* Like keep_going, but passes the signal to the inferior, even if the
7767 signal is set to nopass. */
d4f3574e
SS
7768
7769static void
4d9d9d04 7770keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7771{
d7e15655 7772 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7773 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7774
d4f3574e 7775 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7776 ecs->event_thread->prev_pc
fc75c28b 7777 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 7778
4d9d9d04 7779 if (ecs->event_thread->control.trap_expected)
d4f3574e 7780 {
4d9d9d04
PA
7781 struct thread_info *tp = ecs->event_thread;
7782
edbcda09
SM
7783 infrun_log_debug ("%s has trap_expected set, "
7784 "resuming to collect trap",
7785 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7786
a9ba6bae
PA
7787 /* We haven't yet gotten our trap, and either: intercepted a
7788 non-signal event (e.g., a fork); or took a signal which we
7789 are supposed to pass through to the inferior. Simply
7790 continue. */
64ce06e4 7791 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7792 }
372316f1
PA
7793 else if (step_over_info_valid_p ())
7794 {
7795 /* Another thread is stepping over a breakpoint in-line. If
7796 this thread needs a step-over too, queue the request. In
7797 either case, this resume must be deferred for later. */
7798 struct thread_info *tp = ecs->event_thread;
7799
7800 if (ecs->hit_singlestep_breakpoint
7801 || thread_still_needs_step_over (tp))
7802 {
edbcda09
SM
7803 infrun_log_debug ("step-over already in progress: "
7804 "step-over for %s deferred",
7805 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
7806 thread_step_over_chain_enqueue (tp);
7807 }
7808 else
7809 {
edbcda09
SM
7810 infrun_log_debug ("step-over in progress: resume of %s deferred",
7811 target_pid_to_str (tp->ptid).c_str ());
372316f1 7812 }
372316f1 7813 }
d4f3574e
SS
7814 else
7815 {
31e77af2 7816 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7817 int remove_bp;
7818 int remove_wps;
8d297bbf 7819 step_over_what step_what;
31e77af2 7820
d4f3574e 7821 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7822 anyway (if we got a signal, the user asked it be passed to
7823 the child)
7824 -- or --
7825 We got our expected trap, but decided we should resume from
7826 it.
d4f3574e 7827
a9ba6bae 7828 We're going to run this baby now!
d4f3574e 7829
c36b740a
VP
7830 Note that insert_breakpoints won't try to re-insert
7831 already inserted breakpoints. Therefore, we don't
7832 care if breakpoints were already inserted, or not. */
a9ba6bae 7833
31e77af2
PA
7834 /* If we need to step over a breakpoint, and we're not using
7835 displaced stepping to do so, insert all breakpoints
7836 (watchpoints, etc.) but the one we're stepping over, step one
7837 instruction, and then re-insert the breakpoint when that step
7838 is finished. */
963f9c80 7839
6c4cfb24
PA
7840 step_what = thread_still_needs_step_over (ecs->event_thread);
7841
963f9c80 7842 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7843 || (step_what & STEP_OVER_BREAKPOINT));
7844 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7845
cb71640d
PA
7846 /* We can't use displaced stepping if we need to step past a
7847 watchpoint. The instruction copied to the scratch pad would
7848 still trigger the watchpoint. */
7849 if (remove_bp
3fc8eb30 7850 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7851 {
a01bda52 7852 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7853 regcache_read_pc (regcache), remove_wps,
7854 ecs->event_thread->global_num);
45e8c884 7855 }
963f9c80 7856 else if (remove_wps)
21edc42f 7857 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7858
7859 /* If we now need to do an in-line step-over, we need to stop
7860 all other threads. Note this must be done before
7861 insert_breakpoints below, because that removes the breakpoint
7862 we're about to step over, otherwise other threads could miss
7863 it. */
fbea99ea 7864 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7865 stop_all_threads ();
abbb1732 7866
31e77af2 7867 /* Stop stepping if inserting breakpoints fails. */
a70b8144 7868 try
31e77af2
PA
7869 {
7870 insert_breakpoints ();
7871 }
230d2906 7872 catch (const gdb_exception_error &e)
31e77af2
PA
7873 {
7874 exception_print (gdb_stderr, e);
22bcd14b 7875 stop_waiting (ecs);
bdf2a94a 7876 clear_step_over_info ();
31e77af2 7877 return;
d4f3574e
SS
7878 }
7879
963f9c80 7880 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7881
64ce06e4 7882 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7883 }
7884
488f131b 7885 prepare_to_wait (ecs);
d4f3574e
SS
7886}
7887
4d9d9d04
PA
7888/* Called when we should continue running the inferior, because the
7889 current event doesn't cause a user visible stop. This does the
7890 resuming part; waiting for the next event is done elsewhere. */
7891
7892static void
7893keep_going (struct execution_control_state *ecs)
7894{
7895 if (ecs->event_thread->control.trap_expected
7896 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7897 ecs->event_thread->control.trap_expected = 0;
7898
7899 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7900 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7901 keep_going_pass_signal (ecs);
7902}
7903
104c1213
JM
7904/* This function normally comes after a resume, before
7905 handle_inferior_event exits. It takes care of any last bits of
7906 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7907
104c1213
JM
7908static void
7909prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7910{
edbcda09 7911 infrun_log_debug ("prepare_to_wait");
104c1213 7912
104c1213 7913 ecs->wait_some_more = 1;
0b333c5e 7914
0e2dba2d
PA
7915 /* If the target can't async, emulate it by marking the infrun event
7916 handler such that as soon as we get back to the event-loop, we
7917 immediately end up in fetch_inferior_event again calling
7918 target_wait. */
7919 if (!target_can_async_p ())
0b333c5e 7920 mark_infrun_async_event_handler ();
c906108c 7921}
11cf8741 7922
fd664c91 7923/* We are done with the step range of a step/next/si/ni command.
b57bacec 7924 Called once for each n of a "step n" operation. */
fd664c91
PA
7925
7926static void
bdc36728 7927end_stepping_range (struct execution_control_state *ecs)
fd664c91 7928{
bdc36728 7929 ecs->event_thread->control.stop_step = 1;
bdc36728 7930 stop_waiting (ecs);
fd664c91
PA
7931}
7932
33d62d64
JK
7933/* Several print_*_reason functions to print why the inferior has stopped.
7934 We always print something when the inferior exits, or receives a signal.
7935 The rest of the cases are dealt with later on in normal_stop and
7936 print_it_typical. Ideally there should be a call to one of these
7937 print_*_reason functions from handle_inferior_event each time
22bcd14b 7938 stop_waiting is called.
33d62d64 7939
fd664c91
PA
7940 Note that we don't call these directly, instead we delegate that to
7941 the interpreters, through observers. Interpreters then call these
7942 with whatever uiout is right. */
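/* For example (illustrative, not taken from this file): with the MI
   interpreter, a "step" that finishes its stepping range ends up
   emitting a record along the lines of

       *stopped,reason="end-stepping-range",...

   where the "reason" field comes from print_end_stepping_range_reason
   below.  */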
33d62d64 7943
fd664c91
PA
7944void
7945print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7946{
fd664c91 7947 /* For CLI-like interpreters, print nothing. */
33d62d64 7948
112e8700 7949 if (uiout->is_mi_like_p ())
fd664c91 7950 {
112e8700 7951 uiout->field_string ("reason",
fd664c91
PA
7952 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7953 }
7954}
33d62d64 7955
fd664c91
PA
7956void
7957print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7958{
33d62d64 7959 annotate_signalled ();
112e8700
SM
7960 if (uiout->is_mi_like_p ())
7961 uiout->field_string
7962 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7963 uiout->text ("\nProgram terminated with signal ");
33d62d64 7964 annotate_signal_name ();
112e8700 7965 uiout->field_string ("signal-name",
2ea28649 7966 gdb_signal_to_name (siggnal));
33d62d64 7967 annotate_signal_name_end ();
112e8700 7968 uiout->text (", ");
33d62d64 7969 annotate_signal_string ();
112e8700 7970 uiout->field_string ("signal-meaning",
2ea28649 7971 gdb_signal_to_string (siggnal));
33d62d64 7972 annotate_signal_string_end ();
112e8700
SM
7973 uiout->text (".\n");
7974 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7975}
7976
fd664c91
PA
7977void
7978print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7979{
fda326dd 7980 struct inferior *inf = current_inferior ();
a068643d 7981 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 7982
33d62d64
JK
7983 annotate_exited (exitstatus);
7984 if (exitstatus)
7985 {
112e8700
SM
7986 if (uiout->is_mi_like_p ())
7987 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
7988 std::string exit_code_str
7989 = string_printf ("0%o", (unsigned int) exitstatus);
7990 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
7991 plongest (inf->num), pidstr.c_str (),
7992 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
7993 }
7994 else
11cf8741 7995 {
112e8700
SM
7996 if (uiout->is_mi_like_p ())
7997 uiout->field_string
7998 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
7999 uiout->message ("[Inferior %s (%s) exited normally]\n",
8000 plongest (inf->num), pidstr.c_str ());
33d62d64 8001 }
33d62d64
JK
8002}
8003
012b3a21
WT
8004/* Some targets/architectures can do extra processing/display of
8005 segmentation faults. E.g., Intel MPX boundary faults.
8006 Call the architecture dependent function to handle the fault. */
8007
8008static void
8009handle_segmentation_fault (struct ui_out *uiout)
8010{
8011 struct regcache *regcache = get_current_regcache ();
ac7936df 8012 struct gdbarch *gdbarch = regcache->arch ();
012b3a21
WT
8013
8014 if (gdbarch_handle_segmentation_fault_p (gdbarch))
8015 gdbarch_handle_segmentation_fault (gdbarch, uiout);
8016}
8017
fd664c91
PA
8018void
8019print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 8020{
f303dbd6
PA
8021 struct thread_info *thr = inferior_thread ();
8022
33d62d64
JK
8023 annotate_signal ();
8024
112e8700 8025 if (uiout->is_mi_like_p ())
f303dbd6
PA
8026 ;
8027 else if (show_thread_that_caused_stop ())
33d62d64 8028 {
f303dbd6 8029 const char *name;
33d62d64 8030
112e8700 8031 uiout->text ("\nThread ");
33eca680 8032 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
8033
8034 name = thr->name != NULL ? thr->name : target_thread_name (thr);
8035 if (name != NULL)
8036 {
112e8700 8037 uiout->text (" \"");
33eca680 8038 uiout->field_string ("name", name);
112e8700 8039 uiout->text ("\"");
f303dbd6 8040 }
33d62d64 8041 }
f303dbd6 8042 else
112e8700 8043 uiout->text ("\nProgram");
f303dbd6 8044
112e8700
SM
8045 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8046 uiout->text (" stopped");
33d62d64
JK
8047 else
8048 {
112e8700 8049 uiout->text (" received signal ");
8b93c638 8050 annotate_signal_name ();
112e8700
SM
8051 if (uiout->is_mi_like_p ())
8052 uiout->field_string
8053 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8054 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8055 annotate_signal_name_end ();
112e8700 8056 uiout->text (", ");
8b93c638 8057 annotate_signal_string ();
112e8700 8058 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
8059
8060 if (siggnal == GDB_SIGNAL_SEGV)
8061 handle_segmentation_fault (uiout);
8062
8b93c638 8063 annotate_signal_string_end ();
33d62d64 8064 }
112e8700 8065 uiout->text (".\n");
33d62d64 8066}
252fbfc8 8067
fd664c91
PA
8068void
8069print_no_history_reason (struct ui_out *uiout)
33d62d64 8070{
112e8700 8071 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8072}
43ff13b4 8073
0c7e1a46
PA
8074/* Print current location without a level number, if we have changed
8075 functions or hit a breakpoint. Print source line if we have one.
8076 bpstat_print contains the logic deciding in detail what to print,
8077 based on the event(s) that just occurred. */
8078
243a9253
PA
8079static void
8080print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8081{
8082 int bpstat_ret;
f486487f 8083 enum print_what source_flag;
0c7e1a46
PA
8084 int do_frame_printing = 1;
8085 struct thread_info *tp = inferior_thread ();
8086
8087 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8088 switch (bpstat_ret)
8089 {
8090 case PRINT_UNKNOWN:
8091 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8092 should) carry around the function and does (or should) use
8093 that when doing a frame comparison. */
8094 if (tp->control.stop_step
8095 && frame_id_eq (tp->control.step_frame_id,
8096 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8097 && (tp->control.step_start_function
8098 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8099 {
8100 /* Finished step, just print source line. */
8101 source_flag = SRC_LINE;
8102 }
8103 else
8104 {
8105 /* Print location and source line. */
8106 source_flag = SRC_AND_LOC;
8107 }
8108 break;
8109 case PRINT_SRC_AND_LOC:
8110 /* Print location and source line. */
8111 source_flag = SRC_AND_LOC;
8112 break;
8113 case PRINT_SRC_ONLY:
8114 source_flag = SRC_LINE;
8115 break;
8116 case PRINT_NOTHING:
8117 /* Something bogus. */
8118 source_flag = SRC_LINE;
8119 do_frame_printing = 0;
8120 break;
8121 default:
8122 internal_error (__FILE__, __LINE__, _("Unknown value."));
8123 }
8124
8125 /* The behavior of this routine with respect to the source
8126 flag is:
8127 SRC_LINE: Print only source line
8128 LOCATION: Print only location
8129 SRC_AND_LOC: Print location and source line. */
8130 if (do_frame_printing)
8131 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8132}
8133
243a9253
PA
8134/* See infrun.h. */
8135
8136void
4c7d57e7 8137print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8138{
243a9253 8139 struct target_waitstatus last;
243a9253
PA
8140 struct thread_info *tp;
8141
5b6d1e4f 8142 get_last_target_status (nullptr, nullptr, &last);
243a9253 8143
67ad9399
TT
8144 {
8145 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8146
67ad9399 8147 print_stop_location (&last);
243a9253 8148
67ad9399 8149 /* Display the auto-display expressions. */
4c7d57e7
TT
8150 if (displays)
8151 do_displays ();
67ad9399 8152 }
243a9253
PA
8153
8154 tp = inferior_thread ();
8155 if (tp->thread_fsm != NULL
46e3ed7f 8156 && tp->thread_fsm->finished_p ())
243a9253
PA
8157 {
8158 struct return_value_info *rv;
8159
46e3ed7f 8160 rv = tp->thread_fsm->return_value ();
243a9253
PA
8161 if (rv != NULL)
8162 print_return_value (uiout, rv);
8163 }
0c7e1a46
PA
8164}
8165
388a7084
PA
8166/* See infrun.h. */
8167
8168void
8169maybe_remove_breakpoints (void)
8170{
8171 if (!breakpoints_should_be_inserted_now () && target_has_execution)
8172 {
8173 if (remove_breakpoints ())
8174 {
223ffa71 8175 target_terminal::ours_for_output ();
388a7084
PA
8176 printf_filtered (_("Cannot remove breakpoints because "
8177 "program is no longer writable.\nFurther "
8178 "execution is probably impossible.\n"));
8179 }
8180 }
8181}
8182
4c2f2a79
PA
8183/* The execution context that just caused a normal stop. */
8184
8185struct stop_context
8186{
2d844eaf
TT
8187 stop_context ();
8188 ~stop_context ();
8189
8190 DISABLE_COPY_AND_ASSIGN (stop_context);
8191
8192 bool changed () const;
8193
4c2f2a79
PA
8194 /* The stop ID. */
8195 ULONGEST stop_id;
c906108c 8196
4c2f2a79 8197 /* The event PTID. */
c906108c 8198
4c2f2a79
PA
8199 ptid_t ptid;
8200
8201 /* If stopped for a thread event, this is the thread that caused the
8202 stop. */
8203 struct thread_info *thread;
8204
8205 /* The inferior that caused the stop. */
8206 int inf_num;
8207};
8208
2d844eaf 8209/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8210 takes a strong reference to the thread. */
8211
2d844eaf 8212stop_context::stop_context ()
4c2f2a79 8213{
2d844eaf
TT
8214 stop_id = get_stop_id ();
8215 ptid = inferior_ptid;
8216 inf_num = current_inferior ()->num;
4c2f2a79 8217
d7e15655 8218 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8219 {
8220 /* Take a strong reference so that the thread can't be deleted
8221 yet. */
2d844eaf
TT
8222 thread = inferior_thread ();
8223 thread->incref ();
4c2f2a79
PA
8224 }
8225 else
2d844eaf 8226 thread = NULL;
4c2f2a79
PA
8227}
8228
8229/* Release a stop context captured by the constructor above.
8230 Releases the strong reference to the thread as well. */
8231
2d844eaf 8232stop_context::~stop_context ()
4c2f2a79 8233{
2d844eaf
TT
8234 if (thread != NULL)
8235 thread->decref ();
4c2f2a79
PA
8236}
8237
8238/* Return true if the current context no longer matches the saved stop
8239 context. */
8240
2d844eaf
TT
8241bool
8242stop_context::changed () const
8243{
8244 if (ptid != inferior_ptid)
8245 return true;
8246 if (inf_num != current_inferior ()->num)
8247 return true;
8248 if (thread != NULL && thread->state != THREAD_STOPPED)
8249 return true;
8250 if (get_stop_id () != stop_id)
8251 return true;
8252 return false;
4c2f2a79
PA
8253}
8254
8255/* See infrun.h. */
8256
8257int
96baa820 8258normal_stop (void)
c906108c 8259{
73b65bb0 8260 struct target_waitstatus last;
73b65bb0 8261
5b6d1e4f 8262 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8263
4c2f2a79
PA
8264 new_stop_id ();
8265
29f49a6a
PA
8266 /* If an exception is thrown from this point on, make sure to
8267 propagate GDB's knowledge of the executing state to the
8268 frontend/user running state. A QUIT is an easy exception to see
8269 here, so do this before any filtered output. */
731f534f 8270
5b6d1e4f 8271 ptid_t finish_ptid = null_ptid;
731f534f 8272
c35b1492 8273 if (!non_stop)
5b6d1e4f 8274 finish_ptid = minus_one_ptid;
e1316e60
PA
8275 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8276 || last.kind == TARGET_WAITKIND_EXITED)
8277 {
8278 /* On some targets, we may still have live threads in the
8279 inferior when we get a process exit event. E.g., for
8280 "checkpoint", when the current checkpoint/fork exits,
8281 linux-fork.c automatically switches to another fork from
8282 within target_mourn_inferior. */
731f534f 8283 if (inferior_ptid != null_ptid)
5b6d1e4f 8284 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8285 }
8286 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8287 finish_ptid = inferior_ptid;
8288
8289 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8290 if (finish_ptid != null_ptid)
8291 {
8292 maybe_finish_thread_state.emplace
8293 (user_visible_resume_target (finish_ptid), finish_ptid);
8294 }
29f49a6a 8295
b57bacec
PA
8296 /* As we're presenting a stop, and potentially removing breakpoints,
8297 update the thread list so we can tell whether there are threads
8298 running on the target. With target remote, for example, we can
8299 only learn about new threads when we explicitly update the thread
8300 list. Do this before notifying the interpreters about signal
8301 stops, end of stepping ranges, etc., so that the "new thread"
8302 output is emitted before e.g., "Program received signal FOO",
8303 instead of after. */
8304 update_thread_list ();
8305
8306 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8307 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8308
c906108c
SS
8309 /* As with the notification of thread events, we want to delay
8310 notifying the user that we've switched thread context until
8311 the inferior actually stops.
8312
73b65bb0
DJ
8313 There's no point in saying anything if the inferior has exited.
8314 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8315 "received a signal".
8316
8317 Also skip saying anything in non-stop mode. In that mode, as we
8318 don't want GDB to switch threads behind the user's back, to avoid
8319 races where the user is typing a command to apply to thread x,
8320 but GDB switches to thread y before the user finishes entering
8321 the command, fetch_inferior_event installs a cleanup to restore
8322 the current thread back to the thread the user had selected right
8323 after this event is handled, so we're not really switching, only
8324 informing of a stop. */
4f8d22e3 8325 if (!non_stop
731f534f 8326 && previous_inferior_ptid != inferior_ptid
73b65bb0
DJ
8327 && target_has_execution
8328 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8329 && last.kind != TARGET_WAITKIND_EXITED
8330 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8331 {
0e454242 8332 SWITCH_THRU_ALL_UIS ()
3b12939d 8333 {
223ffa71 8334 target_terminal::ours_for_output ();
3b12939d 8335 printf_filtered (_("[Switching to %s]\n"),
a068643d 8336 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8337 annotate_thread_changed ();
8338 }
39f77062 8339 previous_inferior_ptid = inferior_ptid;
c906108c 8340 }
c906108c 8341
0e5bf2a8
PA
8342 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8343 {
0e454242 8344 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8345 if (current_ui->prompt_state == PROMPT_BLOCKED)
8346 {
223ffa71 8347 target_terminal::ours_for_output ();
3b12939d
PA
8348 printf_filtered (_("No unwaited-for children left.\n"));
8349 }
0e5bf2a8
PA
8350 }
8351
b57bacec 8352 /* Note: this depends on the update_thread_list call above. */
388a7084 8353 maybe_remove_breakpoints ();
c906108c 8354
c906108c
SS
8355 /* If an auto-display called a function and that got a signal,
8356 delete that auto-display to avoid an infinite recursion. */
8357
8358 if (stopped_by_random_signal)
8359 disable_current_display ();
8360
0e454242 8361 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8362 {
8363 async_enable_stdin ();
8364 }
c906108c 8365
388a7084 8366 /* Let the user/frontend see the threads as stopped. */
731f534f 8367 maybe_finish_thread_state.reset ();
388a7084
PA
8368
8369 /* Select innermost stack frame - i.e., current frame is frame 0,
8370 and current location is based on that. Handle the case where the
8371 dummy call is returning after being stopped. E.g. the dummy call
8372 previously hit a breakpoint. (If the dummy call returns
8373 normally, we won't reach here.) Do this before the stop hook is
8374 run, so that it doesn't get to see the temporary dummy frame,
8375 which is not where we'll present the stop. */
8376 if (has_stack_frames ())
8377 {
8378 if (stop_stack_dummy == STOP_STACK_DUMMY)
8379 {
8380 /* Pop the empty frame that contains the stack dummy. This
8381 also restores inferior state prior to the call (struct
8382 infcall_suspend_state). */
8383 struct frame_info *frame = get_current_frame ();
8384
8385 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8386 frame_pop (frame);
8387 /* frame_pop calls reinit_frame_cache as the last thing it
8388 does which means there's now no selected frame. */
8389 }
8390
8391 select_frame (get_current_frame ());
8392
8393 /* Set the current source location. */
8394 set_current_sal_from_frame (get_current_frame ());
8395 }
dd7e2d2b
PA
8396
8397 /* Look up the hook_stop and run it (CLI internally handles problem
8398 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8399 if (stop_command != NULL)
8400 {
2d844eaf 8401 stop_context saved_context;
4c2f2a79 8402
a70b8144 8403 try
bf469271
PA
8404 {
8405 execute_cmd_pre_hook (stop_command);
8406 }
230d2906 8407 catch (const gdb_exception &ex)
bf469271
PA
8408 {
8409 exception_fprintf (gdb_stderr, ex,
8410 "Error while running hook_stop:\n");
8411 }
4c2f2a79
PA
8412
8413 /* If the stop hook resumes the target, then there's no point in
8414 trying to notify about the previous stop; its context is
8415 gone. Likewise if the command switches thread or inferior --
8416 the observers would print a stop for the wrong
8417 thread/inferior. */
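      /* For instance (illustrative), a user-defined stop hook such as

	   (gdb) define hook-stop
	   > continue
	   > end

	 resumes the target from within the hook; the saved context then
	 no longer matches and the stale stop is not presented.  */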
2d844eaf
TT
8418 if (saved_context.changed ())
8419 return 1;
4c2f2a79 8420 }
dd7e2d2b 8421
388a7084
PA
8422 /* Notify observers about the stop. This is where the interpreters
8423 print the stop event. */
d7e15655 8424 if (inferior_ptid != null_ptid)
76727919 8425 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8426 stop_print_frame);
8427 else
76727919 8428 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8429
243a9253
PA
8430 annotate_stopped ();
8431
48844aa6
PA
8432 if (target_has_execution)
8433 {
8434 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8435 && last.kind != TARGET_WAITKIND_EXITED
8436 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8437 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8438 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8439 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8440 }
6c95b8df
PA
8441
8442 /* Try to get rid of automatically added inferiors that are no
8443 longer needed. Keeping those around slows things down linearly.
8444 Note that this never removes the current inferior. */
8445 prune_inferiors ();
4c2f2a79
PA
8446
8447 return 0;
c906108c 8448}
c906108c 8449\f
c5aa993b 8450int
96baa820 8451signal_stop_state (int signo)
c906108c 8452{
d6b48e9c 8453 return signal_stop[signo];
c906108c
SS
8454}
8455
c5aa993b 8456int
96baa820 8457signal_print_state (int signo)
c906108c
SS
8458{
8459 return signal_print[signo];
8460}
8461
c5aa993b 8462int
96baa820 8463signal_pass_state (int signo)
c906108c
SS
8464{
8465 return signal_program[signo];
8466}
8467
2455069d
UW
8468static void
8469signal_cache_update (int signo)
8470{
8471 if (signo == -1)
8472 {
a493e3e2 8473 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8474 signal_cache_update (signo);
8475
8476 return;
8477 }
8478
8479 signal_pass[signo] = (signal_stop[signo] == 0
8480 && signal_print[signo] == 0
ab04a2af
TT
8481 && signal_program[signo] == 1
8482 && signal_catch[signo] == 0);
2455069d
UW
8483}
8484
488f131b 8485int
7bda5e4a 8486signal_stop_update (int signo, int state)
d4f3574e
SS
8487{
8488 int ret = signal_stop[signo];
abbb1732 8489
d4f3574e 8490 signal_stop[signo] = state;
2455069d 8491 signal_cache_update (signo);
d4f3574e
SS
8492 return ret;
8493}
8494
488f131b 8495int
7bda5e4a 8496signal_print_update (int signo, int state)
d4f3574e
SS
8497{
8498 int ret = signal_print[signo];
abbb1732 8499
d4f3574e 8500 signal_print[signo] = state;
2455069d 8501 signal_cache_update (signo);
d4f3574e
SS
8502 return ret;
8503}
8504
488f131b 8505int
7bda5e4a 8506signal_pass_update (int signo, int state)
d4f3574e
SS
8507{
8508 int ret = signal_program[signo];
abbb1732 8509
d4f3574e 8510 signal_program[signo] = state;
2455069d 8511 signal_cache_update (signo);
d4f3574e
SS
8512 return ret;
8513}
8514
ab04a2af
TT
8515/* Update the global 'signal_catch' from INFO and notify the
8516 target. */
8517
8518void
8519signal_catch_update (const unsigned int *info)
8520{
8521 int i;
8522
8523 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8524 signal_catch[i] = info[i] > 0;
8525 signal_cache_update (-1);
adc6a863 8526 target_pass_signals (signal_pass);
ab04a2af
TT
8527}
8528
c906108c 8529static void
96baa820 8530sig_print_header (void)
c906108c 8531{
3e43a32a
MS
8532 printf_filtered (_("Signal Stop\tPrint\tPass "
8533 "to program\tDescription\n"));
c906108c
SS
8534}
8535
8536static void
2ea28649 8537sig_print_info (enum gdb_signal oursig)
c906108c 8538{
2ea28649 8539 const char *name = gdb_signal_to_name (oursig);
c906108c 8540 int name_padding = 13 - strlen (name);
96baa820 8541
c906108c
SS
8542 if (name_padding <= 0)
8543 name_padding = 0;
8544
8545 printf_filtered ("%s", name);
488f131b 8546 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8547 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8548 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8549 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8550 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8551}
8552
8553/* Specify how various signals in the inferior should be handled. */
8554
8555static void
0b39b52e 8556handle_command (const char *args, int from_tty)
c906108c 8557{
c906108c 8558 int digits, wordlen;
b926417a 8559 int sigfirst, siglast;
2ea28649 8560 enum gdb_signal oursig;
c906108c 8561 int allsigs;
c906108c
SS
8562
8563 if (args == NULL)
8564 {
e2e0b3e5 8565 error_no_arg (_("signal to handle"));
c906108c
SS
8566 }
8567
1777feb0 8568 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8569
adc6a863
PA
8570 const size_t nsigs = GDB_SIGNAL_LAST;
8571 unsigned char sigs[nsigs] {};
c906108c 8572
1777feb0 8573 /* Break the command line up into args. */
c906108c 8574
773a1edc 8575 gdb_argv built_argv (args);
c906108c
SS
8576
8577 /* Walk through the args, looking for signal numbers, signal names, and
8578 actions. Signal numbers and signal names may be interspersed with
8579 actions, with the actions being performed for all signals cumulatively
1777feb0 8580 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
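  /* Typical invocations (illustrative; see the GDB manual for the full
     syntax):

	 (gdb) handle SIGUSR1 nostop noprint pass
	 (gdb) handle 10-12 stop print

     Action keywords may be abbreviated, as the prefix checks below
     allow.  */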
c906108c 8581
773a1edc 8582 for (char *arg : built_argv)
c906108c 8583 {
773a1edc
TT
8584 wordlen = strlen (arg);
8585 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8586 {;
8587 }
8588 allsigs = 0;
8589 sigfirst = siglast = -1;
8590
773a1edc 8591 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8592 {
8593 /* Apply action to all signals except those used by the
1777feb0 8594 debugger. Silently skip those. */
c906108c
SS
8595 allsigs = 1;
8596 sigfirst = 0;
8597 siglast = nsigs - 1;
8598 }
773a1edc 8599 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8600 {
8601 SET_SIGS (nsigs, sigs, signal_stop);
8602 SET_SIGS (nsigs, sigs, signal_print);
8603 }
773a1edc 8604 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8605 {
8606 UNSET_SIGS (nsigs, sigs, signal_program);
8607 }
773a1edc 8608 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8609 {
8610 SET_SIGS (nsigs, sigs, signal_print);
8611 }
773a1edc 8612 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8613 {
8614 SET_SIGS (nsigs, sigs, signal_program);
8615 }
773a1edc 8616 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8617 {
8618 UNSET_SIGS (nsigs, sigs, signal_stop);
8619 }
773a1edc 8620 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8621 {
8622 SET_SIGS (nsigs, sigs, signal_program);
8623 }
773a1edc 8624 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8625 {
8626 UNSET_SIGS (nsigs, sigs, signal_print);
8627 UNSET_SIGS (nsigs, sigs, signal_stop);
8628 }
773a1edc 8629 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8630 {
8631 UNSET_SIGS (nsigs, sigs, signal_program);
8632 }
8633 else if (digits > 0)
8634 {
8635 /* It is numeric. The numeric signal refers to our own
8636 internal signal numbering from target.h, not to host/target
8637 signal number. This is a feature; users really should be
8638 using symbolic names anyway, and the common ones like
8639 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8640
8641 sigfirst = siglast = (int)
773a1edc
TT
8642 gdb_signal_from_command (atoi (arg));
8643 if (arg[digits] == '-')
c906108c
SS
8644 {
8645 siglast = (int)
773a1edc 8646 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8647 }
8648 if (sigfirst > siglast)
8649 {
1777feb0 8650 /* Bet he didn't figure we'd think of this case... */
b926417a 8651 std::swap (sigfirst, siglast);
c906108c
SS
8652 }
8653 }
8654 else
8655 {
773a1edc 8656 oursig = gdb_signal_from_name (arg);
a493e3e2 8657 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8658 {
8659 sigfirst = siglast = (int) oursig;
8660 }
8661 else
8662 {
8663 /* Not a number and not a recognized flag word => complain. */
773a1edc 8664 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8665 }
8666 }
8667
8668 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8669 which signals to apply actions to. */
c906108c 8670
b926417a 8671 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8672 {
2ea28649 8673 switch ((enum gdb_signal) signum)
c906108c 8674 {
a493e3e2
PA
8675 case GDB_SIGNAL_TRAP:
8676 case GDB_SIGNAL_INT:
c906108c
SS
8677 if (!allsigs && !sigs[signum])
8678 {
9e2f0ad4 8679 if (query (_("%s is used by the debugger.\n\
3e43a32a 8680Are you sure you want to change it? "),
2ea28649 8681 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8682 {
8683 sigs[signum] = 1;
8684 }
8685 else
c119e040 8686 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8687 }
8688 break;
a493e3e2
PA
8689 case GDB_SIGNAL_0:
8690 case GDB_SIGNAL_DEFAULT:
8691 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8692 /* Make sure that "all" doesn't print these. */
8693 break;
8694 default:
8695 sigs[signum] = 1;
8696 break;
8697 }
8698 }
c906108c
SS
8699 }
8700
b926417a 8701 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8702 if (sigs[signum])
8703 {
2455069d 8704 signal_cache_update (-1);
adc6a863
PA
8705 target_pass_signals (signal_pass);
8706 target_program_signals (signal_program);
c906108c 8707
3a031f65
PA
8708 if (from_tty)
8709 {
8710 /* Show the results. */
8711 sig_print_header ();
8712 for (; signum < nsigs; signum++)
8713 if (sigs[signum])
aead7601 8714 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8715 }
8716
8717 break;
8718 }
c906108c
SS
8719}
8720
de0bea00
MF
8721/* Complete the "handle" command. */
8722
eb3ff9a5 8723static void
de0bea00 8724handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8725 completion_tracker &tracker,
6f937416 8726 const char *text, const char *word)
de0bea00 8727{
de0bea00
MF
8728 static const char * const keywords[] =
8729 {
8730 "all",
8731 "stop",
8732 "ignore",
8733 "print",
8734 "pass",
8735 "nostop",
8736 "noignore",
8737 "noprint",
8738 "nopass",
8739 NULL,
8740 };
8741
eb3ff9a5
PA
8742 signal_completer (ignore, tracker, text, word);
8743 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8744}
8745
2ea28649
PA
8746enum gdb_signal
8747gdb_signal_from_command (int num)
ed01b82c
PA
8748{
8749 if (num >= 1 && num <= 15)
2ea28649 8750 return (enum gdb_signal) num;
ed01b82c
PA
8751 error (_("Only signals 1-15 are valid as numeric signals.\n\
8752Use \"info signals\" for a list of symbolic signals."));
8753}
8754
c906108c
SS
8755/* Print current contents of the tables set by the handle command.
8756 It is possible we should just be printing signals actually used
8757 by the current target (but for things to work right when switching
8758 targets, all signals should be in the signal tables). */
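/* For example (illustrative usage):

       (gdb) info signals SIGSEGV

   prints just the row for SIGSEGV, while a plain "info signals" prints
   every row followed by a pointer to the "handle" command.  */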
8759
8760static void
1d12d88f 8761info_signals_command (const char *signum_exp, int from_tty)
c906108c 8762{
2ea28649 8763 enum gdb_signal oursig;
abbb1732 8764
c906108c
SS
8765 sig_print_header ();
8766
8767 if (signum_exp)
8768 {
8769 /* First see if this is a symbol name. */
2ea28649 8770 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8771 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8772 {
8773 /* No, try numeric. */
8774 oursig =
2ea28649 8775 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8776 }
8777 sig_print_info (oursig);
8778 return;
8779 }
8780
8781 printf_filtered ("\n");
8782 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8783 for (oursig = GDB_SIGNAL_FIRST;
8784 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8785 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8786 {
8787 QUIT;
8788
a493e3e2
PA
8789 if (oursig != GDB_SIGNAL_UNKNOWN
8790 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8791 sig_print_info (oursig);
8792 }
8793
3e43a32a
MS
8794 printf_filtered (_("\nUse the \"handle\" command "
8795 "to change these tables.\n"));
c906108c 8796}
4aa995e1
PA
8797
8798/* The $_siginfo convenience variable is a bit special. We don't know
8799 for sure the type of the value until we actually have a chance to
7a9dd1b2 8800 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8801 also dependent on which thread you have selected.
8802 This is handled by a combination of:
8803 1. making $_siginfo be an internalvar that creates a new value on
8804 access.
8805
8806 2. making the value of $_siginfo be an lval_computed value. */
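/* For example (illustrative, assuming a Linux target whose gdbarch
   provides a siginfo type):

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr

   Each such access goes through siginfo_value_read /
   siginfo_value_write below via the TARGET_OBJECT_SIGNAL_INFO
   object.  */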
8807
8808/* This function implements the lval_computed support for reading a
8809 $_siginfo value. */
8810
8811static void
8812siginfo_value_read (struct value *v)
8813{
8814 LONGEST transferred;
8815
a911d87a
PA
8816 /* If we can access registers, we can also access $_siginfo,
8817 and vice versa. */
8818 validate_registers_access ();
c709acd1 8819
4aa995e1 8820 transferred =
8b88a78e 8821 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8822 NULL,
8823 value_contents_all_raw (v),
8824 value_offset (v),
8825 TYPE_LENGTH (value_type (v)));
8826
8827 if (transferred != TYPE_LENGTH (value_type (v)))
8828 error (_("Unable to read siginfo"));
8829}
8830
8831/* This function implements the lval_computed support for writing a
8832 $_siginfo value. */
8833
8834static void
8835siginfo_value_write (struct value *v, struct value *fromval)
8836{
8837 LONGEST transferred;
8838
a911d87a
PA
8839 /* If we can access registers, we can also access $_siginfo,
8840 and vice versa. */
8841 validate_registers_access ();
c709acd1 8842
8b88a78e 8843 transferred = target_write (current_top_target (),
4aa995e1
PA
8844 TARGET_OBJECT_SIGNAL_INFO,
8845 NULL,
8846 value_contents_all_raw (fromval),
8847 value_offset (v),
8848 TYPE_LENGTH (value_type (fromval)));
8849
8850 if (transferred != TYPE_LENGTH (value_type (fromval)))
8851 error (_("Unable to write siginfo"));
8852}
8853
c8f2448a 8854static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8855 {
8856 siginfo_value_read,
8857 siginfo_value_write
8858 };
8859
8860/* Return a new value with the correct type for the siginfo object of
78267919
UW
8861 the current thread using architecture GDBARCH. Return a void value
8862 if there's no object available. */
4aa995e1 8863
2c0b251b 8864static struct value *
22d2b532
SDJ
8865siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8866 void *ignore)
4aa995e1 8867{
4aa995e1 8868 if (target_has_stack
d7e15655 8869 && inferior_ptid != null_ptid
78267919 8870 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8871 {
78267919 8872 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8873
78267919 8874 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8875 }
8876
78267919 8877 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8878}
8879
c906108c 8880\f
16c381f0
JK
8881/* infcall_suspend_state contains state about the program itself like its
8882 registers and any signal it received when it last stopped.
8883 This state must be restored regardless of how the inferior function call
8884 ends (either successfully, or after it hits a breakpoint or signal)
8885 if the program is to properly continue where it left off. */
8886
6bf78e29 8887class infcall_suspend_state
7a292a7a 8888{
6bf78e29
AB
8889public:
8890 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
8891 once the inferior function call has finished. */
8892 infcall_suspend_state (struct gdbarch *gdbarch,
8893 const struct thread_info *tp,
8894 struct regcache *regcache)
8895 : m_thread_suspend (tp->suspend),
8896 m_registers (new readonly_detached_regcache (*regcache))
8897 {
8898 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
8899
8900 if (gdbarch_get_siginfo_type_p (gdbarch))
8901 {
8902 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8903 size_t len = TYPE_LENGTH (type);
8904
8905 siginfo_data.reset ((gdb_byte *) xmalloc (len));
8906
8907 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8908 siginfo_data.get (), 0, len) != len)
8909 {
8910 /* Errors ignored. */
8911 siginfo_data.reset (nullptr);
8912 }
8913 }
8914
8915 if (siginfo_data)
8916 {
8917 m_siginfo_gdbarch = gdbarch;
8918 m_siginfo_data = std::move (siginfo_data);
8919 }
8920 }
8921
8922 /* Return a pointer to the stored register state. */
16c381f0 8923
6bf78e29
AB
8924 readonly_detached_regcache *registers () const
8925 {
8926 return m_registers.get ();
8927 }
8928
8929 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
8930
8931 void restore (struct gdbarch *gdbarch,
8932 struct thread_info *tp,
8933 struct regcache *regcache) const
8934 {
8935 tp->suspend = m_thread_suspend;
8936
8937 if (m_siginfo_gdbarch == gdbarch)
8938 {
8939 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8940
8941 /* Errors ignored. */
8942 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8943 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
8944 }
8945
8946 /* The inferior can be gone if the user types "print exit(0)"
8947 (and perhaps other times). */
8948 if (target_has_execution)
8949 /* NB: The register write goes through to the target. */
8950 regcache->restore (registers ());
8951 }
8952
8953private:
8954 /* How the current thread stopped before the inferior function call was
8955 executed. */
8956 struct thread_suspend_state m_thread_suspend;
8957
8958 /* The registers before the inferior function call was executed. */
8959 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 8960
35515841 8961 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 8962 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
8963
8964 /* The inferior format depends on SIGINFO_GDBARCH and has a length of
8965 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
8966 content would be invalid. */
6bf78e29 8967 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
8968};
8969
cb524840
TT
8970infcall_suspend_state_up
8971save_infcall_suspend_state ()
b89667eb 8972{
b89667eb 8973 struct thread_info *tp = inferior_thread ();
1736ad11 8974 struct regcache *regcache = get_current_regcache ();
ac7936df 8975 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 8976
6bf78e29
AB
8977 infcall_suspend_state_up inf_state
8978 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 8979
6bf78e29
AB
8980 /* Having saved the current state, adjust the thread state, discarding
8981 any stop signal information. The stop signal is not useful when
8982 starting an inferior function call, and run_inferior_call will not use
8983 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 8984 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8985
b89667eb
DE
8986 return inf_state;
8987}
8988
8989/* Restore inferior session state to INF_STATE. */
8990
8991void
16c381f0 8992restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8993{
8994 struct thread_info *tp = inferior_thread ();
1736ad11 8995 struct regcache *regcache = get_current_regcache ();
ac7936df 8996 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 8997
6bf78e29 8998 inf_state->restore (gdbarch, tp, regcache);
16c381f0 8999 discard_infcall_suspend_state (inf_state);
b89667eb
DE
9000}
9001
b89667eb 9002void
16c381f0 9003discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 9004{
dd848631 9005 delete inf_state;
b89667eb
DE
9006}
9007
daf6667d 9008readonly_detached_regcache *
16c381f0 9009get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 9010{
6bf78e29 9011 return inf_state->registers ();
b89667eb
DE
9012}
9013
16c381f0
JK
9014/* infcall_control_state contains state regarding gdb's control of the
9015 inferior itself like stepping control. It also contains session state like
9016 the user's currently selected frame. */
b89667eb 9017
16c381f0 9018struct infcall_control_state
b89667eb 9019{
16c381f0
JK
9020 struct thread_control_state thread_control;
9021 struct inferior_control_state inferior_control;
d82142e2
JK
9022
9023 /* Other fields: */
ee841dd8
TT
9024 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
9025 int stopped_by_random_signal = 0;
7a292a7a 9026
b89667eb 9027 /* ID of the selected frame when the inferior function call was made. */
ee841dd8 9028 struct frame_id selected_frame_id {};
7a292a7a
SS
9029};
9030
c906108c 9031/* Save all of the information associated with the inferior<==>gdb
b89667eb 9032 connection. */
c906108c 9033
cb524840
TT
9034infcall_control_state_up
9035save_infcall_control_state ()
c906108c 9036{
cb524840 9037 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 9038 struct thread_info *tp = inferior_thread ();
d6b48e9c 9039 struct inferior *inf = current_inferior ();
7a292a7a 9040
16c381f0
JK
9041 inf_status->thread_control = tp->control;
9042 inf_status->inferior_control = inf->control;
d82142e2 9043
8358c15c 9044 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9045 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9046
16c381f0
JK
9047 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9048 chain. If caller's caller is walking the chain, they'll be happier if we
9049 hand them back the original chain when restore_infcall_control_state is
9050 called. */
9051 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
9052
9053 /* Other fields: */
9054 inf_status->stop_stack_dummy = stop_stack_dummy;
9055 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9056
206415a3 9057 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 9058
7a292a7a 9059 return inf_status;
c906108c
SS
9060}
9061
bf469271
PA
9062static void
9063restore_selected_frame (const frame_id &fid)
c906108c 9064{
bf469271 9065 frame_info *frame = frame_find_by_id (fid);
c906108c 9066
aa0cd9c1
AC
9067 /* frame_find_by_id can fail if the frame with the given ID no longer
9068 exists, e.g. because the stack has changed since the ID was captured. */
101dcfbe 9069 if (frame == NULL)
c906108c 9070 {
8a3fe4f8 9071 warning (_("Unable to restore previously selected frame."));
bf469271 9072 return;
c906108c
SS
9073 }
9074
0f7d239c 9075 select_frame (frame);
c906108c
SS
9076}
9077
b89667eb
DE
9078/* Restore inferior session state to INF_STATUS. */
9079
c906108c 9080void
16c381f0 9081restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9082{
4e1c45ea 9083 struct thread_info *tp = inferior_thread ();
d6b48e9c 9084 struct inferior *inf = current_inferior ();
4e1c45ea 9085
8358c15c
JK
9086 if (tp->control.step_resume_breakpoint)
9087 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9088
5b79abe7
TT
9089 if (tp->control.exception_resume_breakpoint)
9090 tp->control.exception_resume_breakpoint->disposition
9091 = disp_del_at_next_stop;
9092
d82142e2 9093 /* Handle the bpstat_copy of the chain. */
16c381f0 9094 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9095
16c381f0
JK
9096 tp->control = inf_status->thread_control;
9097 inf->control = inf_status->inferior_control;
d82142e2
JK
9098
9099 /* Other fields: */
9100 stop_stack_dummy = inf_status->stop_stack_dummy;
9101 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9102
b89667eb 9103 if (target_has_stack)
c906108c 9104 {
bf469271 9105 /* The point of the try/catch is that if the stack is clobbered,
101dcfbe
AC
9106 walking the stack might encounter a garbage pointer and
9107 error() trying to dereference it. */
a70b8144 9108 try
bf469271
PA
9109 {
9110 restore_selected_frame (inf_status->selected_frame_id);
9111 }
230d2906 9112 catch (const gdb_exception_error &ex)
bf469271
PA
9113 {
9114 exception_fprintf (gdb_stderr, ex,
9115 "Unable to restore previously selected frame:\n");
9116 /* Error in restoring the selected frame. Select the
9117 innermost frame. */
9118 select_frame (get_current_frame ());
9119 }
c906108c 9120 }
c906108c 9121
ee841dd8 9122 delete inf_status;
7a292a7a 9123}
c906108c
SS
9124
9125void
16c381f0 9126discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9127{
8358c15c
JK
9128 if (inf_status->thread_control.step_resume_breakpoint)
9129 inf_status->thread_control.step_resume_breakpoint->disposition
9130 = disp_del_at_next_stop;
9131
5b79abe7
TT
9132 if (inf_status->thread_control.exception_resume_breakpoint)
9133 inf_status->thread_control.exception_resume_breakpoint->disposition
9134 = disp_del_at_next_stop;
9135
1777feb0 9136 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9137 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9138
ee841dd8 9139 delete inf_status;
7a292a7a 9140}
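/* Illustrative pairing for the control state (assumed caller code, not
   part of this file).  Exactly one of restore/discard runs afterwards,
   and both delete the state, so ownership is released from the
   infcall_control_state_up first:

     infcall_control_state_up status = save_infcall_control_state ();
     ... run the inferior call ...
     if (want_precall_context_back)        // hypothetical condition
       restore_infcall_control_state (status.release ());
     else
       discard_infcall_control_state (status.release ());
*/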
b89667eb 9141\f
7f89fd65 9142/* See infrun.h. */
0c557179
SDJ
9143
9144void
9145clear_exit_convenience_vars (void)
9146{
9147 clear_internalvar (lookup_internalvar ("_exitsignal"));
9148 clear_internalvar (lookup_internalvar ("_exitcode"));
9149}
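/* From the user's side these back the $_exitcode and $_exitsignal
   convenience variables.  A sketch of a session after a normal exit
   (process id and exit code are illustrative only):

     (gdb) run
     [Inferior 1 (process 1234) exited with code 01]
     (gdb) print $_exitcode
     $1 = 1
     (gdb) print $_exitsignal
     $2 = void

   Only one of the two is meaningful for a given exit; clearing both
   here keeps stale values from leaking into the next run.  */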
c5aa993b 9150\f
488f131b 9151
b2175913
MS
9152/* User interface for reverse debugging:
9153 Set exec-direction / show exec-direction commands
9154 (setting the direction errors out unless the target supports reverse execution). */
9155
170742de 9156enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
9157static const char exec_forward[] = "forward";
9158static const char exec_reverse[] = "reverse";
9159static const char *exec_direction = exec_forward;
40478521 9160static const char *const exec_direction_names[] = {
b2175913
MS
9161 exec_forward,
9162 exec_reverse,
9163 NULL
9164};
9165
9166static void
eb4c3f4a 9167set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
9168 struct cmd_list_element *cmd)
9169{
9170 if (target_can_execute_reverse)
9171 {
9172 if (!strcmp (exec_direction, exec_forward))
9173 execution_direction = EXEC_FORWARD;
9174 else if (!strcmp (exec_direction, exec_reverse))
9175 execution_direction = EXEC_REVERSE;
9176 }
8bbed405
MS
9177 else
9178 {
9179 exec_direction = exec_forward;
9180 error (_("Target does not support this operation."));
9181 }
b2175913
MS
9182}
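/* Driven from the CLI roughly like this (assuming a target that can
   execute in reverse, e.g. after enabling the "record" target):

     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) step                  (now steps backwards)
     (gdb) set exec-direction forward

   On targets without reverse execution the setter above resets the
   string back to "forward" and reports an error.  */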
9183
9184static void
9185show_exec_direction_func (struct ui_file *out, int from_tty,
9186 struct cmd_list_element *cmd, const char *value)
9187{
9188 switch (execution_direction) {
9189 case EXEC_FORWARD:
9190 fprintf_filtered (out, _("Forward.\n"));
9191 break;
9192 case EXEC_REVERSE:
9193 fprintf_filtered (out, _("Reverse.\n"));
9194 break;
b2175913 9195 default:
d8b34453
PA
9196 internal_error (__FILE__, __LINE__,
9197 _("bogus execution_direction value: %d"),
9198 (int) execution_direction);
b2175913
MS
9199 }
9200}
9201
d4db2f36
PA
9202static void
9203show_schedule_multiple (struct ui_file *file, int from_tty,
9204 struct cmd_list_element *c, const char *value)
9205{
3e43a32a
MS
9206 fprintf_filtered (file, _("Resuming the execution of threads "
9207 "of all processes is %s.\n"), value);
d4db2f36 9208}
ad52ddc6 9209
22d2b532
SDJ
9210/* Implementation of `siginfo' variable. */
9211
9212static const struct internalvar_funcs siginfo_funcs =
9213{
9214 siginfo_make_value,
9215 NULL,
9216 NULL
9217};
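/* These funcs back the $_siginfo convenience variable.  Indicative CLI
   use (the exact field layout depends on the target's siginfo type):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr
*/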
9218
372316f1
PA
9219/* Callback for infrun's target events source. This is marked when a
9220 thread has a pending status to process. */
9221
9222static void
9223infrun_async_inferior_event_handler (gdb_client_data data)
9224{
b1a35af2 9225 inferior_event_handler (INF_REG_EVENT);
372316f1
PA
9226}
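/* The token itself is created in _initialize_infrun below.  A sketch of
   how other infrun code is expected to drive it, assuming the
   async-event-handler API from async-event.h:

     mark_async_event_handler (infrun_async_inferior_event_token);
       ... makes the event loop invoke the handler above ...
     clear_async_event_handler (infrun_async_inferior_event_token);
       ... once all pending thread events have been consumed ...
*/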
9227
6c265988 9228void _initialize_infrun ();
c906108c 9229void
6c265988 9230_initialize_infrun ()
c906108c 9231{
de0bea00 9232 struct cmd_list_element *c;
c906108c 9233
372316f1
PA
9234 /* Register extra event sources in the event loop. */
9235 infrun_async_inferior_event_token
9236 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
9237
11db9430 9238 add_info ("signals", info_signals_command, _("\
1bedd215
AC
9239What debugger does when program gets various signals.\n\
9240Specify a signal as argument to print info on that signal only."));
c906108c
SS
9241 add_info_alias ("handle", "signals", 0);
9242
de0bea00 9243 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9244Specify how to handle signals.\n\
486c7739 9245Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9246Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9247If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
9248will be displayed instead.\n\
9249\n\
c906108c
SS
9250Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9251from 1-15 are allowed for compatibility with old versions of GDB.\n\
9252Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9253The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9254used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9255\n\
1bedd215 9256Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
9257\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9258Stop means reenter debugger if this signal happens (implies print).\n\
9259Print means print a message if this signal happens.\n\
9260Pass means let program see this signal; otherwise program doesn't know.\n\
9261Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
9262Pass and Stop may be combined.\n\
9263\n\
9264Multiple signals may be specified. Signal numbers and signal names\n\
9265may be interspersed with actions, with the actions being performed for\n\
9266all signals cumulatively specified."));
de0bea00 9267 set_cmd_completer (c, handle_completer);
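/* Example invocations of the "handle" command registered above, using
   the actions documented in its help text:

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGSEGV stop print
     (gdb) handle 1-5 print
     (gdb) handle all nostop
*/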
486c7739 9268
c906108c 9269 if (!dbx_commands)
1a966eab
AC
9270 stop_command = add_cmd ("stop", class_obscure,
9271 not_just_help_class_command, _("\
9272There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9273This allows you to set a list of commands to be run each time execution\n\
1a966eab 9274of the program stops."), &cmdlist);
c906108c 9275
ccce17b0 9276 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
9277Set inferior debugging."), _("\
9278Show inferior debugging."), _("\
9279When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
9280 NULL,
9281 show_debug_infrun,
9282 &setdebuglist, &showdebuglist);
527159b7 9283
3e43a32a
MS
9284 add_setshow_boolean_cmd ("displaced", class_maintenance,
9285 &debug_displaced, _("\
237fc4c9
PA
9286Set displaced stepping debugging."), _("\
9287Show displaced stepping debugging."), _("\
9288When non-zero, displaced stepping specific debugging is enabled."),
9289 NULL,
9290 show_debug_displaced,
9291 &setdebuglist, &showdebuglist);
9292
ad52ddc6
PA
9293 add_setshow_boolean_cmd ("non-stop", no_class,
9294 &non_stop_1, _("\
9295Set whether gdb controls the inferior in non-stop mode."), _("\
9296Show whether gdb controls the inferior in non-stop mode."), _("\
9297When debugging a multi-threaded program and this setting is\n\
9298off (the default, also called all-stop mode), when one thread stops\n\
9299(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9300all other threads in the program while you interact with the thread of\n\
9301interest. When you continue or step a thread, you can allow the other\n\
9302threads to run, or have them remain stopped, but while you inspect any\n\
9303thread's state, all threads stop.\n\
9304\n\
9305In non-stop mode, when one thread stops, other threads can continue\n\
9306to run freely. You'll be able to step each thread independently,\n\
9307leave it stopped or free to run as needed."),
9308 set_non_stop,
9309 show_non_stop,
9310 &setlist,
9311 &showlist);
9312
adc6a863 9313 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
9314 {
9315 signal_stop[i] = 1;
9316 signal_print[i] = 1;
9317 signal_program[i] = 1;
ab04a2af 9318 signal_catch[i] = 0;
c906108c
SS
9319 }
9320
4d9d9d04
PA
9321 /* Signals caused by debugger's own actions should not be given to
9322 the program afterwards.
9323
9324 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9325 explicitly specifies that it should be delivered to the target
9326 program. Typically, that would occur when a user is debugging a
9327 target monitor on a simulator: the target monitor sets a
9328 breakpoint; the simulator encounters this breakpoint and halts
9329 the simulation handing control to GDB; GDB, noting that the stop
9330 address doesn't map to any known breakpoint, returns control back
9331 to the simulator; the simulator then delivers the hardware
9332 equivalent of a GDB_SIGNAL_TRAP to the program being
9333 debugged. */
a493e3e2
PA
9334 signal_program[GDB_SIGNAL_TRAP] = 0;
9335 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9336
9337 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9338 signal_stop[GDB_SIGNAL_ALRM] = 0;
9339 signal_print[GDB_SIGNAL_ALRM] = 0;
9340 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9341 signal_print[GDB_SIGNAL_VTALRM] = 0;
9342 signal_stop[GDB_SIGNAL_PROF] = 0;
9343 signal_print[GDB_SIGNAL_PROF] = 0;
9344 signal_stop[GDB_SIGNAL_CHLD] = 0;
9345 signal_print[GDB_SIGNAL_CHLD] = 0;
9346 signal_stop[GDB_SIGNAL_IO] = 0;
9347 signal_print[GDB_SIGNAL_IO] = 0;
9348 signal_stop[GDB_SIGNAL_POLL] = 0;
9349 signal_print[GDB_SIGNAL_POLL] = 0;
9350 signal_stop[GDB_SIGNAL_URG] = 0;
9351 signal_print[GDB_SIGNAL_URG] = 0;
9352 signal_stop[GDB_SIGNAL_WINCH] = 0;
9353 signal_print[GDB_SIGNAL_WINCH] = 0;
9354 signal_stop[GDB_SIGNAL_PRIO] = 0;
9355 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9356
cd0fc7c3
SS
9357 /* These signals are used internally by user-level thread
9358 implementations. (See signal(5) on Solaris.) Like the above
9359 signals, a healthy program receives and handles them as part of
9360 its normal operation. */
a493e3e2
PA
9361 signal_stop[GDB_SIGNAL_LWP] = 0;
9362 signal_print[GDB_SIGNAL_LWP] = 0;
9363 signal_stop[GDB_SIGNAL_WAITING] = 0;
9364 signal_print[GDB_SIGNAL_WAITING] = 0;
9365 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9366 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9367 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9368 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9369
2455069d
UW
9370 /* Update cached state. */
9371 signal_cache_update (-1);
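/* Worked example of the defaults established above: for GDB_SIGNAL_ALRM,
   signal_stop == 0, signal_print == 0 and signal_program == 1, so an
   incoming SIGALRM is passed silently to the inferior.  For
   GDB_SIGNAL_TRAP, signal_stop and signal_print stay 1 while
   signal_program == 0, so GDB stops and reports the trap but does not
   forward it to the program (see the comment above on debugger-generated
   traps).  */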
9372
85c07804
AC
9373 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9374 &stop_on_solib_events, _("\
9375Set stopping for shared library events."), _("\
9376Show stopping for shared library events."), _("\
c906108c
SS
9377If nonzero, gdb will give control to the user when the dynamic linker\n\
9378notifies gdb of shared library events. The most common event of interest\n\
85c07804 9379to the user would be loading/unloading of a new library."),
f9e14852 9380 set_stop_on_solib_events,
920d2a44 9381 show_stop_on_solib_events,
85c07804 9382 &setlist, &showlist);
c906108c 9383
7ab04401
AC
9384 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9385 follow_fork_mode_kind_names,
9386 &follow_fork_mode_string, _("\
9387Set debugger response to a program call of fork or vfork."), _("\
9388Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9389A fork or vfork creates a new process. follow-fork-mode can be:\n\
9390 parent - the original process is debugged after a fork\n\
9391 child - the new process is debugged after a fork\n\
ea1dd7bc 9392The unfollowed process will continue to run.\n\
7ab04401
AC
9393By default, the debugger will follow the parent process."),
9394 NULL,
920d2a44 9395 show_follow_fork_mode_string,
7ab04401
AC
9396 &setlist, &showlist);
9397
6c95b8df
PA
9398 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9399 follow_exec_mode_names,
9400 &follow_exec_mode_string, _("\
9401Set debugger response to a program call of exec."), _("\
9402Show debugger response to a program call of exec."), _("\
9403An exec call replaces the program image of a process.\n\
9404\n\
9405follow-exec-mode can be:\n\
9406\n\
cce7e648 9407 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9408to this new inferior. The program the process was running before\n\
9409the exec call can be restarted afterwards by restarting the original\n\
9410inferior.\n\
9411\n\
9412 same - the debugger keeps the process bound to the same inferior.\n\
9413The new executable image replaces the previous executable loaded in\n\
9414the inferior. Restarting the inferior after the exec call restarts\n\
9415the executable the process was running after the exec call.\n\
9416\n\
9417By default, the debugger will use the same inferior."),
9418 NULL,
9419 show_follow_exec_mode_string,
9420 &setlist, &showlist);
9421
7ab04401
AC
9422 add_setshow_enum_cmd ("scheduler-locking", class_run,
9423 scheduler_enums, &scheduler_mode, _("\
9424Set mode for locking scheduler during execution."), _("\
9425Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9426off == no locking (threads may preempt at any time)\n\
9427on == full locking (no thread except the current thread may run)\n\
9428 This applies to both normal execution and replay mode.\n\
9429step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9430 In this mode, other threads may run during other commands.\n\
9431 This applies to both normal execution and replay mode.\n\
9432replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9433 set_schedlock_func, /* traps on target vector */
920d2a44 9434 show_scheduler_mode,
7ab04401 9435 &setlist, &showlist);
5fbbeb29 9436
d4db2f36
PA
9437 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9438Set mode for resuming threads of all processes."), _("\
9439Show mode for resuming threads of all processes."), _("\
9440When on, execution commands (such as 'continue' or 'next') resume all\n\
9441threads of all processes. When off (which is the default), execution\n\
9442commands only resume the threads of the current process. The set of\n\
9443threads that are resumed is further refined by the scheduler-locking\n\
9444mode (see help set scheduler-locking)."),
9445 NULL,
9446 show_schedule_multiple,
9447 &setlist, &showlist);
9448
5bf193a2
AC
9449 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9450Set mode of the step operation."), _("\
9451Show mode of the step operation."), _("\
9452When set, doing a step over a function without debug line information\n\
9453will stop at the first instruction of that function. Otherwise, the\n\
9454function is skipped and the step command stops at a different source line."),
9455 NULL,
920d2a44 9456 show_step_stop_if_no_debug,
5bf193a2 9457 &setlist, &showlist);
ca6724c1 9458
72d0e2c5
YQ
9459 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9460 &can_use_displaced_stepping, _("\
237fc4c9
PA
9461Set debugger's willingness to use displaced stepping."), _("\
9462Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9463If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9464supported by the target architecture. If off, gdb will not use displaced\n\
9465stepping to step over breakpoints, even if such is supported by the target\n\
9466architecture. If auto (which is the default), gdb will use displaced stepping\n\
9467if the target architecture supports it and non-stop mode is active, but will not\n\
9468use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9469 NULL,
9470 show_can_use_displaced_stepping,
9471 &setlist, &showlist);
237fc4c9 9472
b2175913
MS
9473 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9474 &exec_direction, _("Set direction of execution.\n\
9475Options are 'forward' or 'reverse'."),
9476 _("Show direction of execution (forward/reverse)."),
9477 _("Tells gdb whether to execute forward or backward."),
9478 set_exec_direction_func, show_exec_direction_func,
9479 &setlist, &showlist);
9480
6c95b8df
PA
9481 /* Set/show detach-on-fork: user-settable mode. */
9482
9483 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9484Set whether gdb will detach the child of a fork."), _("\
9485Show whether gdb will detach the child of a fork."), _("\
9486Tells gdb whether to detach the child of a fork."),
9487 NULL, NULL, &setlist, &showlist);
9488
03583c20
UW
9489 /* Set/show disable address space randomization mode. */
9490
9491 add_setshow_boolean_cmd ("disable-randomization", class_support,
9492 &disable_randomization, _("\
9493Set disabling of debuggee's virtual address space randomization."), _("\
9494Show disabling of debuggee's virtual address space randomization."), _("\
9495When this mode is on (which is the default), randomization of the virtual\n\
9496address space is disabled. Standalone programs run with the randomization\n\
9497enabled by default on some platforms."),
9498 &set_disable_randomization,
9499 &show_disable_randomization,
9500 &setlist, &showlist);
9501
ca6724c1 9502 /* ptid initializations */
ca6724c1
KB
9503 inferior_ptid = null_ptid;
9504 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9505
76727919
TT
9506 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
9507 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
9508 gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
9509 gdb::observers::inferior_exit.attach (infrun_inferior_exit);
4aa995e1
PA
9510
9511 /* Explicitly create without lookup, since that tries to create a
9512 value with a void typed value, and when we get here, gdbarch
9513 isn't initialized yet. At this point, we're quite sure there
9514 isn't another convenience variable of the same name. */
22d2b532 9515 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9516
9517 add_setshow_boolean_cmd ("observer", no_class,
9518 &observer_mode_1, _("\
9519Set whether gdb controls the inferior in observer mode."), _("\
9520Show whether gdb controls the inferior in observer mode."), _("\
9521In observer mode, GDB can get data from the inferior, but not\n\
9522affect its execution. Registers and memory may not be changed,\n\
9523breakpoints may not be set, and the program cannot be interrupted\n\
9524or signalled."),
9525 set_observer_mode,
9526 show_observer_mode,
9527 &setlist,
9528 &showlist);
c906108c 9529}