/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static int currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: infrun_async(%d)\n",
                            enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}
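
/* For example, a target back end that has just queued an inferior
   event for the core might call infrun_async (1) so that the event
   loop wakes infrun up, and infrun_async (0) again once no events
   remain; the check above makes repeated calls with the same value
   harmless.  (Illustrative note only, not a list of actual callers.)  */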

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_displaced = false;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}

unsigned int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}


/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)
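
/* For instance, a caller that wants every signal listed in SIGS to
   stop the inferior might write

       SET_SIGS (nsigs, sigs, signal_stop);

   which sets the corresponding entries of the signal_stop table
   above.  (Illustrative sketch; the actual callers are the "handle"
   command helpers elsewhere in this file.)  */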

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}
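
/* The mode above is what the user chooses with "set follow-fork-mode
   parent|child"; together with "set detach-on-fork" (detach_fork
   above) it decides which of the two processes follow_fork_inferior
   below keeps debugging.  */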


/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          push_target (parent_inf->process_target ());
          add_thread_silent (child_inf->process_target (), child_ptid);
          inferior_ptid = child_ptid;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when
             the child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
            parent_inf->waiting_for_vfork_done = 0;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        push_target (target);
      }

      add_thread_silent (target, child_ptid);
      inferior_ptid = child_ptid;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }
    }

  return target_follow_fork (follow_child, detach_fork);
}
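
/* For example, in the default configuration (follow-fork-mode
   parent, detach-on-fork on), the code above prints the "[Detaching
   after fork from child ...]" notice, asks the target to detach the
   child via target_follow_fork, and GDB stays attached to the parent
   only.  */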

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previous selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent_targ, parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: resuming vfork parent thread %s\n",
                            target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}
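
/* proceed_after_vfork_done is used as an iterate_over_threads
   callback: handle_vfork_child_exec_or_exit below passes the vfork
   parent's pid as ARG, so only threads of that process that the user
   wanted running (THREAD_RUNNING but not currently executing) are
   resumed.  */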

/* Save/restore inferior_ptid, current program space and current
   inferior.  Only use this if the current context points at an exited
   inferior (and therefore there's no current thread to save).  */
class scoped_restore_exited_inferior
{
public:
  scoped_restore_exited_inferior ()
    : m_saved_ptid (&inferior_ptid)
  {}

private:
  scoped_restore_tmpl<ptid_t> m_saved_ptid;
  scoped_restore_current_program_space m_pspace;
  scoped_restore_current_inferior m_inferior;
};

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct thread_info *tp;
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          gdb::optional<scoped_restore_exited_inferior>
            maybe_restore_inferior;
          gdb::optional<scoped_restore_current_pspace_and_thread>
            maybe_restore_thread;

          /* If we're handling a child exit, then inferior_ptid points
             at the inferior's pid, not to a thread.  */
          if (!exec)
            maybe_restore_inferior.emplace ();
          else
            maybe_restore_thread.emplace ();

          /* We're letting loose of the parent.  */
          tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to null_ptid while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore restore_ptid
            = make_scoped_restore (&inferior_ptid, null_ptid);

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: resuming vfork parent process %d\n",
                                resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}
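
/* With "set follow-exec-mode new", follow_exec below creates a fresh
   inferior for the post-exec image and keeps the old one around;
   with "same" (the default) it reuses the current inferior.  */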

/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      inferior *org_inferior = current_inferior ();
      switch_to_inferior_no_thread (inf);
      push_target (org_inferior->process_target ());
      thread_info *thr = add_thread (inf->process_target (), ptid);
      switch_to_thread (thr);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *step_over_queue_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
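
/* The two flags may be combined, e.g. (STEP_OVER_BREAKPOINT
   | STEP_OVER_WATCHPOINT) for an instruction that both sits under a
   breakpoint and triggers a non-continuable watchpoint; that is why
   step_over_what is declared as a flag set above.  */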

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The thread's global number.  */
  int thread;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
                        "infrun: clear_step_over_info\n");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}

/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}

/* See infrun.h.  */

int
thread_is_stepping_over_breakpoint (int thread)
{
  return (step_over_info.thread != -1
          && thread == step_over_info.thread);
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static int
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}
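
/* Typical life cycle of the structure above: set_step_over_info is
   called when a step-over starts, breakpoint/watchpoint insertion is
   meant to consult stepping_past_instruction_at and
   stepping_past_nonsteppable_watchpoint (see infrun.h) to skip the
   location being stepped over, and clear_step_over_info runs once the
   step-over finishes or is aborted.  */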
1399
c906108c 1400\f
237fc4c9
PA
1401/* Displaced stepping. */
1402
1403/* In non-stop debugging mode, we must take special care to manage
1404 breakpoints properly; in particular, the traditional strategy for
1405 stepping a thread past a breakpoint it has hit is unsuitable.
1406 'Displaced stepping' is a tactic for stepping one thread past a
1407 breakpoint it has hit while ensuring that other threads running
1408 concurrently will hit the breakpoint as they should.
1409
1410 The traditional way to step a thread T off a breakpoint in a
1411 multi-threaded program in all-stop mode is as follows:
1412
1413 a0) Initially, all threads are stopped, and breakpoints are not
1414 inserted.
1415 a1) We single-step T, leaving breakpoints uninserted.
1416 a2) We insert breakpoints, and resume all threads.
1417
1418 In non-stop debugging, however, this strategy is unsuitable: we
1419 don't want to have to stop all threads in the system in order to
1420 continue or step T past a breakpoint. Instead, we use displaced
1421 stepping:
1422
1423 n0) Initially, T is stopped, other threads are running, and
1424 breakpoints are inserted.
1425 n1) We copy the instruction "under" the breakpoint to a separate
1426 location, outside the main code stream, making any adjustments
1427 to the instruction, register, and memory state as directed by
1428 T's architecture.
1429 n2) We single-step T over the instruction at its new location.
1430 n3) We adjust the resulting register and memory state as directed
1431 by T's architecture. This includes resetting T's PC to point
1432 back into the main instruction stream.
1433 n4) We resume T.
1434
1435 This approach depends on the following gdbarch methods:
1436
1437 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1438 indicate where to copy the instruction, and how much space must
1439 be reserved there. We use these in step n1.
1440
1441 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1442 address, and makes any necessary adjustments to the instruction,
1443 register contents, and memory. We use this in step n1.
1444
1445 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1446 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1447 same effect the instruction would have had if we had executed it
1448 at its original address. We use this in step n3.
1449
237fc4c9
PA
1450 The gdbarch_displaced_step_copy_insn and
1451 gdbarch_displaced_step_fixup functions must be written so that
1452 copying an instruction with gdbarch_displaced_step_copy_insn,
1453 single-stepping across the copied instruction, and then applying
1454 gdbarch_displaced_insn_fixup should have the same effects on the
1455 thread's memory and registers as stepping the instruction in place
1456 would have. Exactly which responsibilities fall to the copy and
1457 which fall to the fixup is up to the author of those functions.
1458
1459 See the comments in gdbarch.sh for details.
1460
1461 Note that displaced stepping and software single-step cannot
1462 currently be used in combination, although with some care I think
1463 they could be made to. Software single-step works by placing
1464 breakpoints on all possible subsequent instructions; if the
1465 displaced instruction is a PC-relative jump, those breakpoints
1466 could fall in very strange places --- on pages that aren't
1467 executable, or at addresses that are not proper instruction
1468 boundaries. (We do generally let other threads run while we wait
1469 to hit the software single-step breakpoint, and they might
1470 encounter such a corrupted instruction.) One way to work around
1471 this would be to have gdbarch_displaced_step_copy_insn fully
1472 simulate the effect of PC-relative instructions (and return NULL)
1473 on architectures that use software single-stepping.
1474
1475 In non-stop mode, we can have independent and simultaneous step
1476 requests, so more than one thread may need to simultaneously step
1477 over a breakpoint. The current implementation assumes there is
1478 only one scratch space per process. In this case, we have to
1479 serialize access to the scratch space. If thread A wants to step
1480 over a breakpoint, but we are currently waiting for some other
1481 thread to complete a displaced step, we leave thread A stopped and
1482 place it in the displaced_step_request_queue. Whenever a displaced
1483 step finishes, we pick the next thread in the queue and start a new
1484 displaced step operation on it. See displaced_step_prepare and
1485 displaced_step_fixup for details. */
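/* As a rough illustration (simplified; the code below also handles
   queuing, signal delivery and fallback to in-line stepping), the life
   cycle of one displaced step, expressed with the helpers defined in
   this file, looks something like:

       if (displaced_step_prepare (tp) == 1)         // n1: copy insn to scratch
         target_resume (tp->ptid, 1, GDB_SIGNAL_0);  // n2: single-step the copy
       ...
       // later, once the step reports back:
       displaced_step_fixup (tp, GDB_SIGNAL_TRAP);   // n3: fix up regs/mem/PC
       keep_going (ecs);                             // n4: resume T normally

   where ECS is the execution_control_state for the reported event.  */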
1486
cfba9872
SM
1487/* Default destructor for displaced_step_closure. */
1488
1489displaced_step_closure::~displaced_step_closure () = default;
1490
fc1cf338
PA
1491/* Get the displaced stepping state of inferior INF. */
1492
39a36629 1493static displaced_step_inferior_state *
00431a78 1494get_displaced_stepping_state (inferior *inf)
fc1cf338 1495{
d20172fc 1496 return &inf->displaced_step_state;
fc1cf338
PA
1497}
1498
372316f1
PA
1499/* Returns true if any inferior has a thread doing a displaced
1500 step. */
1501
39a36629
SM
1502static bool
1503displaced_step_in_progress_any_inferior ()
372316f1 1504{
d20172fc 1505 for (inferior *i : all_inferiors ())
39a36629 1506 {
d20172fc 1507 if (i->displaced_step_state.step_thread != nullptr)
39a36629
SM
1508 return true;
1509 }
372316f1 1510
39a36629 1511 return false;
372316f1
PA
1512}
1513
c0987663
YQ
1514/* Return true if THREAD is doing a displaced step. */
1516
1517static int
00431a78 1518displaced_step_in_progress_thread (thread_info *thread)
c0987663 1519{
00431a78 1520 gdb_assert (thread != NULL);
c0987663 1521
d20172fc 1522 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
c0987663
YQ
1523}
1524
8f572e5c
PA
1525/* Return true if inferior INF has a thread doing a displaced step. */
1526
1527static int
00431a78 1528displaced_step_in_progress (inferior *inf)
8f572e5c 1529{
d20172fc 1530 return get_displaced_stepping_state (inf)->step_thread != nullptr;
fc1cf338
PA
1531}
1532
a42244db
YQ
1533/* If the inferior is displaced stepping, and ADDR equals the starting
1534 address of the copy area, return the corresponding
1535 displaced_step_closure. Otherwise, return NULL. */
1536
1537struct displaced_step_closure*
1538get_displaced_step_closure_by_addr (CORE_ADDR addr)
1539{
d20172fc 1540 displaced_step_inferior_state *displaced
00431a78 1541 = get_displaced_stepping_state (current_inferior ());
a42244db
YQ
1542
1543 /* If we're checking the mode of the displaced instruction in the copy area. */
d20172fc 1544 if (displaced->step_thread != nullptr
00431a78 1545 && displaced->step_copy == addr)
d8d83535 1546 return displaced->step_closure.get ();
a42244db
YQ
1547
1548 return NULL;
1549}
1550
fc1cf338
PA
1551static void
1552infrun_inferior_exit (struct inferior *inf)
1553{
d20172fc 1554 inf->displaced_step_state.reset ();
fc1cf338 1555}
237fc4c9 1556
fff08868
HZ
1557/* If ON, and the architecture supports it, GDB will use displaced
1558 stepping to step over breakpoints. If OFF, or if the architecture
1559 doesn't support it, GDB will instead use the traditional
1560 hold-and-step approach. If AUTO (which is the default), GDB will
1561 decide which technique to use to step over breakpoints depending on
9822cb57 1562 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1563
72d0e2c5 1564static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1565
237fc4c9
PA
1566static void
1567show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1568 struct cmd_list_element *c,
1569 const char *value)
1570{
72d0e2c5 1571 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1572 fprintf_filtered (file,
1573 _("Debugger's willingness to use displaced stepping "
1574 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1575 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1576 else
3e43a32a
MS
1577 fprintf_filtered (file,
1578 _("Debugger's willingness to use displaced stepping "
1579 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1580}
1581
9822cb57
SM
1582/* Return true if the gdbarch implements the required methods to use
1583 displaced stepping. */
1584
1585static bool
1586gdbarch_supports_displaced_stepping (gdbarch *arch)
1587{
1588 /* Only check for the presence of step_copy_insn. Other required methods
1589 are checked by the gdbarch validation. */
1590 return gdbarch_displaced_step_copy_insn_p (arch);
1591}
1592
fff08868 1593/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1594 over breakpoints of thread TP. */
fff08868 1595
9822cb57
SM
1596static bool
1597use_displaced_stepping (thread_info *tp)
237fc4c9 1598{
9822cb57
SM
1599 /* If the user disabled it explicitly, don't use displaced stepping. */
1600 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1601 return false;
1602
1603 /* If "auto", only use displaced stepping if the target operates in a non-stop
1604 way. */
1605 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1606 && !target_is_non_stop_p ())
1607 return false;
1608
1609 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1610
1611 /* If the architecture doesn't implement displaced stepping, don't use
1612 it. */
1613 if (!gdbarch_supports_displaced_stepping (gdbarch))
1614 return false;
1615
1616 /* If recording, don't use displaced stepping. */
1617 if (find_record_target () != nullptr)
1618 return false;
1619
d20172fc
SM
1620 displaced_step_inferior_state *displaced_state
1621 = get_displaced_stepping_state (tp->inf);
3fc8eb30 1622
9822cb57
SM
1623 /* If displaced stepping failed before for this inferior, don't bother trying
1624 again. */
1625 if (displaced_state->failed_before)
1626 return false;
1627
1628 return true;
237fc4c9
PA
1629}
1630
d8d83535
SM
1631/* Simple function wrapper around displaced_step_inferior_state::reset. */
1632
237fc4c9 1633static void
d8d83535 1634displaced_step_reset (displaced_step_inferior_state *displaced)
237fc4c9 1635{
d8d83535 1636 displaced->reset ();
237fc4c9
PA
1637}
1638
d8d83535
SM
1639/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1640 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1641
1642using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
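/* Illustrative usage sketch -- this is the pattern displaced_step_prepare_throw
   uses further below: reset the displaced-stepping state on any error path,
   but keep it if we reach the end of the critical region:

       displaced_step_reset_cleanup cleanup (displaced);
       ... operations that may throw ...
       cleanup.release ();   // success; don't reset after all.  */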
237fc4c9
PA
1643
1644/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1645void
1646displaced_step_dump_bytes (struct ui_file *file,
1647 const gdb_byte *buf,
1648 size_t len)
1649{
1650 int i;
1651
1652 for (i = 0; i < len; i++)
1653 fprintf_unfiltered (file, "%02x ", buf[i]);
1654 fputs_unfiltered ("\n", file);
1655}
1656
1657/* Prepare to single-step, using displaced stepping.
1658
1659 Note that we cannot use displaced stepping when we have a signal to
1660 deliver. If we have a signal to deliver and an instruction to step
1661 over, then after the step, there will be no indication from the
1662 target whether the thread entered a signal handler or ignored the
1663 signal and stepped over the instruction successfully --- both cases
1664 result in a simple SIGTRAP. In the first case we mustn't do a
1665 fixup, and in the second case we must --- but we can't tell which.
1666 Comments in the code for 'random signals' in handle_inferior_event
1667 explain how we handle this case instead.
1668
1669 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1670 stepped now; 0 if displaced stepping this thread got queued; or -1
1671 if this instruction can't be displaced stepped. */
1672
237fc4c9 1673static int
00431a78 1674displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1675{
00431a78 1676 regcache *regcache = get_thread_regcache (tp);
ac7936df 1677 struct gdbarch *gdbarch = regcache->arch ();
8b86c959 1678 const address_space *aspace = regcache->aspace ();
237fc4c9
PA
1679 CORE_ADDR original, copy;
1680 ULONGEST len;
9e529e1d 1681 int status;
237fc4c9
PA
1682
1683 /* We should never reach this function if the architecture does not
1684 support displaced stepping. */
9822cb57 1685 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1686
c2829269
PA
1687 /* Nor if the thread isn't meant to step over a breakpoint. */
1688 gdb_assert (tp->control.trap_expected);
1689
c1e36e3e
PA
1690 /* Disable range stepping while executing in the scratch pad. We
1691 want a single-step even if executing the displaced instruction in
1692 the scratch buffer lands within the stepping range (e.g., a
1693 jump/branch). */
1694 tp->control.may_range_step = 0;
1695
fc1cf338
PA
1696 /* We have to displaced step one thread at a time, as we only have
1697 access to a single scratch space per inferior. */
237fc4c9 1698
d20172fc
SM
1699 displaced_step_inferior_state *displaced
1700 = get_displaced_stepping_state (tp->inf);
fc1cf338 1701
00431a78 1702 if (displaced->step_thread != nullptr)
237fc4c9
PA
1703 {
1704 /* Already waiting for a displaced step to finish. Defer this
1705 request and place in queue. */
237fc4c9
PA
1706
1707 if (debug_displaced)
1708 fprintf_unfiltered (gdb_stdlog,
c2829269 1709 "displaced: deferring step of %s\n",
a068643d 1710 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1711
c2829269 1712 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1713 return 0;
1714 }
1715 else
1716 {
1717 if (debug_displaced)
1718 fprintf_unfiltered (gdb_stdlog,
1719 "displaced: stepping %s now\n",
a068643d 1720 target_pid_to_str (tp->ptid).c_str ());
237fc4c9
PA
1721 }
1722
d8d83535 1723 displaced_step_reset (displaced);
237fc4c9 1724
00431a78
PA
1725 scoped_restore_current_thread restore_thread;
1726
1727 switch_to_thread (tp);
ad53cd71 1728
515630c5 1729 original = regcache_read_pc (regcache);
237fc4c9
PA
1730
1731 copy = gdbarch_displaced_step_location (gdbarch);
1732 len = gdbarch_max_insn_length (gdbarch);
1733
d35ae833
PA
1734 if (breakpoint_in_range_p (aspace, copy, len))
1735 {
1736 /* There's a breakpoint set in the scratch pad location range
1737 (which is usually around the entry point). We'd either
1738 install it before resuming, which would overwrite/corrupt the
1739 scratch pad, or if it was already inserted, this displaced
1740 step would overwrite it. The latter is OK in the sense that
1741 we already assume that no thread is going to execute the code
1742 in the scratch pad range (after initial startup) anyway, but
1743 the former is unacceptable. Simply punt and fall back to
1744 stepping over this breakpoint in-line. */
1745 if (debug_displaced)
1746 {
1747 fprintf_unfiltered (gdb_stdlog,
1748 "displaced: breakpoint set in scratch pad. "
1749 "Stepping over breakpoint in-line instead.\n");
1750 }
1751
d35ae833
PA
1752 return -1;
1753 }
1754
237fc4c9 1755 /* Save the original contents of the copy area. */
d20172fc
SM
1756 displaced->step_saved_copy.resize (len);
1757 status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
9e529e1d
JK
1758 if (status != 0)
1759 throw_error (MEMORY_ERROR,
1760 _("Error accessing memory address %s (%s) for "
1761 "displaced-stepping scratch space."),
1762 paddress (gdbarch, copy), safe_strerror (status));
237fc4c9
PA
1763 if (debug_displaced)
1764 {
5af949e3
UW
1765 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1766 paddress (gdbarch, copy));
fc1cf338 1767 displaced_step_dump_bytes (gdb_stdlog,
d20172fc 1768 displaced->step_saved_copy.data (),
fc1cf338 1769 len);
237fc4c9
PA
1770 };
1771
e8217e61
SM
1772 displaced->step_closure
1773 = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);
1774 if (displaced->step_closure == NULL)
7f03bd92
PA
1775 {
1776 /* The architecture doesn't know how to, or doesn't want to, displaced
1777 step this instruction or instruction sequence. Fall back to
1778 stepping over the breakpoint in-line. */
7f03bd92
PA
1779 return -1;
1780 }
237fc4c9 1781
9f5a595d
UW
1782 /* Save the information we need to fix things up if the step
1783 succeeds. */
00431a78 1784 displaced->step_thread = tp;
fc1cf338 1785 displaced->step_gdbarch = gdbarch;
fc1cf338
PA
1786 displaced->step_original = original;
1787 displaced->step_copy = copy;
9f5a595d 1788
9799571e 1789 {
d8d83535 1790 displaced_step_reset_cleanup cleanup (displaced);
237fc4c9 1791
9799571e
TT
1792 /* Resume execution at the copy. */
1793 regcache_write_pc (regcache, copy);
237fc4c9 1794
9799571e
TT
1795 cleanup.release ();
1796 }
ad53cd71 1797
237fc4c9 1798 if (debug_displaced)
5af949e3
UW
1799 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1800 paddress (gdbarch, copy));
237fc4c9 1801
237fc4c9
PA
1802 return 1;
1803}
1804
3fc8eb30
PA
1805/* Wrapper for displaced_step_prepare_throw that disables further
1806 attempts at displaced stepping if we get a memory error. */
1807
1808static int
00431a78 1809displaced_step_prepare (thread_info *thread)
3fc8eb30
PA
1810{
1811 int prepared = -1;
1812
a70b8144 1813 try
3fc8eb30 1814 {
00431a78 1815 prepared = displaced_step_prepare_throw (thread);
3fc8eb30 1816 }
230d2906 1817 catch (const gdb_exception_error &ex)
3fc8eb30
PA
1818 {
1819 struct displaced_step_inferior_state *displaced_state;
1820
16b41842
PA
1821 if (ex.error != MEMORY_ERROR
1822 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1823 throw;
3fc8eb30
PA
1824
1825 if (debug_infrun)
1826 {
1827 fprintf_unfiltered (gdb_stdlog,
1828 "infrun: disabling displaced stepping: %s\n",
3d6e9d23 1829 ex.what ());
3fc8eb30
PA
1830 }
1831
1832 /* Be verbose if "set displaced-stepping" is "on", silent if
1833 "auto". */
1834 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1835 {
fd7dcb94 1836 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1837 ex.what ());
3fc8eb30
PA
1838 }
1839
1840 /* Disable further displaced stepping attempts. */
1841 displaced_state
00431a78 1842 = get_displaced_stepping_state (thread->inf);
3fc8eb30
PA
1843 displaced_state->failed_before = 1;
1844 }
3fc8eb30
PA
1845
1846 return prepared;
1847}
1848
237fc4c9 1849static void
3e43a32a
MS
1850write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1851 const gdb_byte *myaddr, int len)
237fc4c9 1852{
2989a365 1853 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
abbb1732 1854
237fc4c9
PA
1855 inferior_ptid = ptid;
1856 write_memory (memaddr, myaddr, len);
237fc4c9
PA
1857}
1858
e2d96639
YQ
1859/* Restore the contents of the copy area for thread PTID. */
1860
1861static void
1862displaced_step_restore (struct displaced_step_inferior_state *displaced,
1863 ptid_t ptid)
1864{
1865 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1866
1867 write_memory_ptid (ptid, displaced->step_copy,
d20172fc 1868 displaced->step_saved_copy.data (), len);
e2d96639
YQ
1869 if (debug_displaced)
1870 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
a068643d 1871 target_pid_to_str (ptid).c_str (),
e2d96639
YQ
1872 paddress (displaced->step_gdbarch,
1873 displaced->step_copy));
1874}
1875
372316f1
PA
1876/* If we displaced stepped an instruction successfully, adjust
1877 registers and memory to yield the same effect the instruction would
1878 have had if we had executed it at its original address, and return
1879 1. If the instruction didn't complete, relocate the PC and return
1880 -1. If the thread wasn't displaced stepping, return 0. */
1881
1882static int
00431a78 1883displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1884{
fc1cf338 1885 struct displaced_step_inferior_state *displaced
00431a78 1886 = get_displaced_stepping_state (event_thread->inf);
372316f1 1887 int ret;
fc1cf338 1888
00431a78
PA
1889 /* Was this event for the thread we displaced? */
1890 if (displaced->step_thread != event_thread)
372316f1 1891 return 0;
237fc4c9 1892
cb71640d
PA
1893 /* Fixup may need to read memory/registers. Switch to the thread
1894 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d
TBA
1895 the current thread, and displaced_step_restore performs ptid-dependent
1896 memory accesses using current_inferior() and current_top_target(). */
00431a78 1897 switch_to_thread (event_thread);
cb71640d 1898
d43b7a2d
TBA
1899 displaced_step_reset_cleanup cleanup (displaced);
1900
1901 displaced_step_restore (displaced, displaced->step_thread->ptid);
1902
237fc4c9 1903 /* Did the instruction complete successfully? */
cb71640d
PA
1904 if (signal == GDB_SIGNAL_TRAP
1905 && !(target_stopped_by_watchpoint ()
1906 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1907 || target_have_steppable_watchpoint)))
237fc4c9
PA
1908 {
1909 /* Fix up the resulting state. */
fc1cf338 1910 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
d8d83535 1911 displaced->step_closure.get (),
fc1cf338
PA
1912 displaced->step_original,
1913 displaced->step_copy,
00431a78 1914 get_thread_regcache (displaced->step_thread));
372316f1 1915 ret = 1;
237fc4c9
PA
1916 }
1917 else
1918 {
1919 /* Since the instruction didn't complete, all we can do is
1920 relocate the PC. */
00431a78 1921 struct regcache *regcache = get_thread_regcache (event_thread);
515630c5 1922 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 1923
fc1cf338 1924 pc = displaced->step_original + (pc - displaced->step_copy);
515630c5 1925 regcache_write_pc (regcache, pc);
372316f1 1926 ret = -1;
237fc4c9
PA
1927 }
1928
372316f1 1929 return ret;
c2829269 1930}
1c5cfe86 1931
4d9d9d04
PA
1932/* Data to be passed around while handling an event. This data is
1933 discarded between events. */
1934struct execution_control_state
1935{
5b6d1e4f 1936 process_stratum_target *target;
4d9d9d04
PA
1937 ptid_t ptid;
1938 /* The thread that got the event, if this was a thread event; NULL
1939 otherwise. */
1940 struct thread_info *event_thread;
1941
1942 struct target_waitstatus ws;
1943 int stop_func_filled_in;
1944 CORE_ADDR stop_func_start;
1945 CORE_ADDR stop_func_end;
1946 const char *stop_func_name;
1947 int wait_some_more;
1948
1949 /* True if the event thread hit the single-step breakpoint of
1950 another thread. Thus the event doesn't cause a stop, the thread
1951 needs to be single-stepped past the single-step breakpoint before
1952 we can switch back to the original stepping thread. */
1953 int hit_singlestep_breakpoint;
1954};
1955
1956/* Clear ECS and set it to point at TP. */
c2829269
PA
1957
1958static void
4d9d9d04
PA
1959reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1960{
1961 memset (ecs, 0, sizeof (*ecs));
1962 ecs->event_thread = tp;
1963 ecs->ptid = tp->ptid;
1964}
1965
1966static void keep_going_pass_signal (struct execution_control_state *ecs);
1967static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 1968static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1969static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1970
1971/* Are there any pending step-over requests? If so, run all we can
1972 now and return true. Otherwise, return false. */
1973
1974static int
c2829269
PA
1975start_step_over (void)
1976{
1977 struct thread_info *tp, *next;
1978
372316f1
PA
1979 /* Don't start a new step-over if we already have an in-line
1980 step-over operation ongoing. */
1981 if (step_over_info_valid_p ())
1982 return 0;
1983
c2829269 1984 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 1985 {
4d9d9d04
PA
1986 struct execution_control_state ecss;
1987 struct execution_control_state *ecs = &ecss;
8d297bbf 1988 step_over_what step_what;
372316f1 1989 int must_be_in_line;
c2829269 1990
c65d6b55
PA
1991 gdb_assert (!tp->stop_requested);
1992
c2829269 1993 next = thread_step_over_chain_next (tp);
237fc4c9 1994
c2829269
PA
1995 /* If this inferior already has a displaced step in progress,
1996 don't start a new one. */
00431a78 1997 if (displaced_step_in_progress (tp->inf))
c2829269
PA
1998 continue;
1999
372316f1
PA
2000 step_what = thread_still_needs_step_over (tp);
2001 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2002 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 2003 && !use_displaced_stepping (tp)));
372316f1
PA
2004
2005 /* We currently stop all threads of all processes to step-over
2006 in-line. If we need to start a new in-line step-over, let
2007 any pending displaced steps finish first. */
2008 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
2009 return 0;
2010
c2829269
PA
2011 thread_step_over_chain_remove (tp);
2012
2013 if (step_over_queue_head == NULL)
2014 {
2015 if (debug_infrun)
2016 fprintf_unfiltered (gdb_stdlog,
2017 "infrun: step-over queue now empty\n");
2018 }
2019
372316f1
PA
2020 if (tp->control.trap_expected
2021 || tp->resumed
2022 || tp->executing)
ad53cd71 2023 {
4d9d9d04
PA
2024 internal_error (__FILE__, __LINE__,
2025 "[%s] has inconsistent state: "
372316f1 2026 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 2027 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 2028 tp->control.trap_expected,
372316f1 2029 tp->resumed,
4d9d9d04 2030 tp->executing);
ad53cd71 2031 }
1c5cfe86 2032
4d9d9d04
PA
2033 if (debug_infrun)
2034 fprintf_unfiltered (gdb_stdlog,
2035 "infrun: resuming [%s] for step-over\n",
a068643d 2036 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
2037
2038 /* keep_going_pass_signal skips the step-over if the breakpoint
2039 is no longer inserted. In all-stop, we want to keep looking
2040 for a thread that needs a step-over instead of resuming TP,
2041 because we wouldn't be able to resume anything else until the
2042 target stops again. In non-stop, the resume always resumes
2043 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2044 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2045 continue;
8550d3b3 2046
00431a78 2047 switch_to_thread (tp);
4d9d9d04
PA
2048 reset_ecs (ecs, tp);
2049 keep_going_pass_signal (ecs);
1c5cfe86 2050
4d9d9d04
PA
2051 if (!ecs->wait_some_more)
2052 error (_("Command aborted."));
1c5cfe86 2053
372316f1
PA
2054 gdb_assert (tp->resumed);
2055
2056 /* If we started a new in-line step-over, we're done. */
2057 if (step_over_info_valid_p ())
2058 {
2059 gdb_assert (tp->control.trap_expected);
2060 return 1;
2061 }
2062
fbea99ea 2063 if (!target_is_non_stop_p ())
4d9d9d04
PA
2064 {
2065 /* On all-stop, shouldn't have resumed unless we needed a
2066 step over. */
2067 gdb_assert (tp->control.trap_expected
2068 || tp->step_after_step_resume_breakpoint);
2069
2070 /* With remote targets (at least), in all-stop, we can't
2071 issue any further remote commands until the program stops
2072 again. */
2073 return 1;
1c5cfe86 2074 }
c2829269 2075
4d9d9d04
PA
2076 /* Either the thread no longer needed a step-over, or a new
2077 displaced stepping sequence started. Even in the latter
2078 case, continue looking. Maybe we can also start another
2079 displaced step on a thread of other process. */
237fc4c9 2080 }
4d9d9d04
PA
2081
2082 return 0;
237fc4c9
PA
2083}
2084
5231c1fd
PA
2085/* Update global variables holding ptids to hold NEW_PTID if they were
2086 holding OLD_PTID. */
2087static void
2088infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2089{
d7e15655 2090 if (inferior_ptid == old_ptid)
5231c1fd 2091 inferior_ptid = new_ptid;
5231c1fd
PA
2092}
2093
237fc4c9 2094\f
c906108c 2095
53904c9e
AC
2096static const char schedlock_off[] = "off";
2097static const char schedlock_on[] = "on";
2098static const char schedlock_step[] = "step";
f2665db5 2099static const char schedlock_replay[] = "replay";
40478521 2100static const char *const scheduler_enums[] = {
ef346e04
AC
2101 schedlock_off,
2102 schedlock_on,
2103 schedlock_step,
f2665db5 2104 schedlock_replay,
ef346e04
AC
2105 NULL
2106};
f2665db5 2107static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2108static void
2109show_scheduler_mode (struct ui_file *file, int from_tty,
2110 struct cmd_list_element *c, const char *value)
2111{
3e43a32a
MS
2112 fprintf_filtered (file,
2113 _("Mode for locking scheduler "
2114 "during execution is \"%s\".\n"),
920d2a44
AC
2115 value);
2116}
c906108c
SS
2117
2118static void
eb4c3f4a 2119set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2120{
eefe576e
AC
2121 if (!target_can_lock_scheduler)
2122 {
2123 scheduler_mode = schedlock_off;
2124 error (_("Target '%s' cannot support this command."), target_shortname);
2125 }
c906108c
SS
2126}
2127
d4db2f36
PA
2128/* True if execution commands resume all threads of all processes by
2129 default; otherwise, resume only threads of the current inferior
2130 process. */
491144b5 2131bool sched_multi = false;
d4db2f36 2132
2facfe5c
DD
2133/* Try to set up software single-stepping over the specified location.
2134 Return 1 if target_resume() should use hardware single step.
2135
2136 GDBARCH the current gdbarch.
2137 PC the location to step over. */
2138
2139static int
2140maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2141{
2142 int hw_step = 1;
2143
f02253f1 2144 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2145 && gdbarch_software_single_step_p (gdbarch))
2146 hw_step = !insert_single_step_breakpoints (gdbarch);
2147
2facfe5c
DD
2148 return hw_step;
2149}
c906108c 2150
f3263aa4
PA
2151/* See infrun.h. */
2152
09cee04b
PA
2153ptid_t
2154user_visible_resume_ptid (int step)
2155{
f3263aa4 2156 ptid_t resume_ptid;
09cee04b 2157
09cee04b
PA
2158 if (non_stop)
2159 {
2160 /* With non-stop mode on, threads are always handled
2161 individually. */
2162 resume_ptid = inferior_ptid;
2163 }
2164 else if ((scheduler_mode == schedlock_on)
03d46957 2165 || (scheduler_mode == schedlock_step && step))
09cee04b 2166 {
f3263aa4
PA
2167 /* User-settable 'scheduler' mode requires solo thread
2168 resume. */
09cee04b
PA
2169 resume_ptid = inferior_ptid;
2170 }
f2665db5
MM
2171 else if ((scheduler_mode == schedlock_replay)
2172 && target_record_will_replay (minus_one_ptid, execution_direction))
2173 {
2174 /* User-settable 'scheduler' mode requires solo thread resume in replay
2175 mode. */
2176 resume_ptid = inferior_ptid;
2177 }
f3263aa4
PA
2178 else if (!sched_multi && target_supports_multi_process ())
2179 {
2180 /* Resume all threads of the current process (and none of other
2181 processes). */
e99b03dc 2182 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2183 }
2184 else
2185 {
2186 /* Resume all threads of all processes. */
2187 resume_ptid = RESUME_ALL;
2188 }
09cee04b
PA
2189
2190 return resume_ptid;
2191}
2192
5b6d1e4f
PA
2193/* See infrun.h. */
2194
2195process_stratum_target *
2196user_visible_resume_target (ptid_t resume_ptid)
2197{
2198 return (resume_ptid == minus_one_ptid && sched_multi
2199 ? NULL
2200 : current_inferior ()->process_target ());
2201}
2202
fbea99ea
PA
2203/* Return a ptid representing the set of threads that we will resume,
2204 in the perspective of the target, assuming run control handling
2205 does not require leaving some threads stopped (e.g., stepping past
2206 breakpoint). USER_STEP indicates whether we're about to start the
2207 target for a stepping command. */
2208
2209static ptid_t
2210internal_resume_ptid (int user_step)
2211{
2212 /* In non-stop, we always control threads individually. Note that
2213 the target may always work in non-stop mode even with "set
2214 non-stop off", in which case user_visible_resume_ptid could
2215 return a wildcard ptid. */
2216 if (target_is_non_stop_p ())
2217 return inferior_ptid;
2218 else
2219 return user_visible_resume_ptid (user_step);
2220}
2221
64ce06e4
PA
2222/* Wrapper for target_resume, that handles infrun-specific
2223 bookkeeping. */
2224
2225static void
2226do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2227{
2228 struct thread_info *tp = inferior_thread ();
2229
c65d6b55
PA
2230 gdb_assert (!tp->stop_requested);
2231
64ce06e4 2232 /* Install inferior's terminal modes. */
223ffa71 2233 target_terminal::inferior ();
64ce06e4
PA
2234
2235 /* Avoid confusing the next resume, if the next stop/resume
2236 happens to apply to another thread. */
2237 tp->suspend.stop_signal = GDB_SIGNAL_0;
2238
8f572e5c
PA
2239 /* Advise target which signals may be handled silently.
2240
2241 If we have removed breakpoints because we are stepping over one
2242 in-line (in any thread), we need to receive all signals to avoid
2243 accidentally skipping a breakpoint during execution of a signal
2244 handler.
2245
2246 Likewise if we're displaced stepping, otherwise a trap for a
2247 breakpoint in a signal handler might be confused with the
2248 displaced step finishing. We don't make the displaced_step_fixup
2249 step distinguish the cases instead, because:
2250
2251 - a backtrace while stopped in the signal handler would show the
2252 scratch pad as frame older than the signal handler, instead of
2253 the real mainline code.
2254
2255 - when the thread is later resumed, the signal handler would
2256 return to the scratch pad area, which would no longer be
2257 valid. */
2258 if (step_over_info_valid_p ()
00431a78 2259 || displaced_step_in_progress (tp->inf))
adc6a863 2260 target_pass_signals ({});
64ce06e4 2261 else
adc6a863 2262 target_pass_signals (signal_pass);
64ce06e4
PA
2263
2264 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2265
2266 target_commit_resume ();
5b6d1e4f
PA
2267
2268 if (target_can_async_p ())
2269 target_async (1);
64ce06e4
PA
2270}
2271
d930703d 2272/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2273 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2274 call 'resume', which handles exceptions. */
c906108c 2275
71d378ae
PA
2276static void
2277resume_1 (enum gdb_signal sig)
c906108c 2278{
515630c5 2279 struct regcache *regcache = get_current_regcache ();
ac7936df 2280 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2281 struct thread_info *tp = inferior_thread ();
515630c5 2282 CORE_ADDR pc = regcache_read_pc (regcache);
8b86c959 2283 const address_space *aspace = regcache->aspace ();
b0f16a3e 2284 ptid_t resume_ptid;
856e7dd6
PA
2285 /* This represents the user's step vs continue request. When
2286 deciding whether "set scheduler-locking step" applies, it's the
2287 user's intention that counts. */
2288 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2289 /* This represents what we'll actually request the target to do.
2290 This can decay from a step to a continue, if e.g., we need to
2291 implement single-stepping with breakpoints (software
2292 single-step). */
6b403daa 2293 int step;
c7e8a53c 2294
c65d6b55 2295 gdb_assert (!tp->stop_requested);
c2829269
PA
2296 gdb_assert (!thread_is_in_step_over_chain (tp));
2297
372316f1
PA
2298 if (tp->suspend.waitstatus_pending_p)
2299 {
2300 if (debug_infrun)
2301 {
23fdd69e
SM
2302 std::string statstr
2303 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2304
372316f1 2305 fprintf_unfiltered (gdb_stdlog,
23fdd69e
SM
2306 "infrun: resume: thread %s has pending wait "
2307 "status %s (currently_stepping=%d).\n",
a068643d
TT
2308 target_pid_to_str (tp->ptid).c_str (),
2309 statstr.c_str (),
372316f1 2310 currently_stepping (tp));
372316f1
PA
2311 }
2312
5b6d1e4f 2313 tp->inf->process_target ()->threads_executing = true;
719546c4 2314 tp->resumed = true;
372316f1
PA
2315
2316 /* FIXME: What should we do if we are supposed to resume this
2317 thread with a signal? Maybe we should maintain a queue of
2318 pending signals to deliver. */
2319 if (sig != GDB_SIGNAL_0)
2320 {
fd7dcb94 2321 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2322 gdb_signal_to_name (sig),
2323 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2324 }
2325
2326 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2327
2328 if (target_can_async_p ())
9516f85a
AB
2329 {
2330 target_async (1);
2331 /* Tell the event loop we have an event to process. */
2332 mark_async_event_handler (infrun_async_inferior_event_token);
2333 }
372316f1
PA
2334 return;
2335 }
2336
2337 tp->stepped_breakpoint = 0;
2338
6b403daa
PA
2339 /* Depends on stepped_breakpoint. */
2340 step = currently_stepping (tp);
2341
74609e71
YQ
2342 if (current_inferior ()->waiting_for_vfork_done)
2343 {
48f9886d
PA
2344 /* Don't try to single-step a vfork parent that is waiting for
2345 the child to get out of the shared memory region (by exec'ing
2346 or exiting). This is particularly important on software
2347 single-step archs, as the child process would trip on the
2348 software single step breakpoint inserted for the parent
2349 process. Since the parent will not actually execute any
2350 instruction until the child is out of the shared region (such
2351 are vfork's semantics), it is safe to simply continue it.
2352 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2353 the parent, and tell it to `keep_going', which automatically
2354 re-sets it stepping. */
74609e71
YQ
2355 if (debug_infrun)
2356 fprintf_unfiltered (gdb_stdlog,
2357 "infrun: resume : clear step\n");
a09dd441 2358 step = 0;
74609e71
YQ
2359 }
2360
527159b7 2361 if (debug_infrun)
237fc4c9 2362 fprintf_unfiltered (gdb_stdlog,
c9737c08 2363 "infrun: resume (step=%d, signal=%s), "
0d9a9a5f 2364 "trap_expected=%d, current thread [%s] at %s\n",
c9737c08
PA
2365 step, gdb_signal_to_symbol_string (sig),
2366 tp->control.trap_expected,
a068643d 2367 target_pid_to_str (inferior_ptid).c_str (),
0d9a9a5f 2368 paddress (gdbarch, pc));
c906108c 2369
c2c6d25f
JM
2370 /* Normally, by the time we reach `resume', the breakpoints are either
2371 removed or inserted, as appropriate. The exception is if we're sitting
2372 at a permanent breakpoint; we need to step over it, but permanent
2373 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2374 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2375 {
af48d08f
PA
2376 if (sig != GDB_SIGNAL_0)
2377 {
2378 /* We have a signal to pass to the inferior. The resume
2379 may, or may not take us to the signal handler. If this
2380 is a step, we'll need to stop in the signal handler, if
2381 there's one, (if the target supports stepping into
2382 handlers), or in the next mainline instruction, if
2383 there's no handler. If this is a continue, we need to be
2384 sure to run the handler with all breakpoints inserted.
2385 In all cases, set a breakpoint at the current address
2386 (where the handler returns to), and once that breakpoint
2387 is hit, resume skipping the permanent breakpoint. If
2388 that breakpoint isn't hit, then we've stepped into the
2389 signal handler (or hit some other event). We'll delete
2390 the step-resume breakpoint then. */
2391
2392 if (debug_infrun)
2393 fprintf_unfiltered (gdb_stdlog,
2394 "infrun: resume: skipping permanent breakpoint, "
2395 "deliver signal first\n");
2396
2397 clear_step_over_info ();
2398 tp->control.trap_expected = 0;
2399
2400 if (tp->control.step_resume_breakpoint == NULL)
2401 {
2402 /* Set a "high-priority" step-resume, as we don't want
2403 user breakpoints at PC to trigger (again) when this
2404 hits. */
2405 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2406 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2407
2408 tp->step_after_step_resume_breakpoint = step;
2409 }
2410
2411 insert_breakpoints ();
2412 }
2413 else
2414 {
2415 /* There's no signal to pass, we can go ahead and skip the
2416 permanent breakpoint manually. */
2417 if (debug_infrun)
2418 fprintf_unfiltered (gdb_stdlog,
2419 "infrun: resume: skipping permanent breakpoint\n");
2420 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2421 /* Update pc to reflect the new address from which we will
2422 execute instructions. */
2423 pc = regcache_read_pc (regcache);
2424
2425 if (step)
2426 {
2427 /* We've already advanced the PC, so the stepping part
2428 is done. Now we need to arrange for a trap to be
2429 reported to handle_inferior_event. Set a breakpoint
2430 at the current PC, and run to it. Don't update
2431 prev_pc, because if we end up in
44a1ee51
PA
2432 switch_back_to_stepped_thread, we want the "expected
2433 thread advanced also" branch to be taken. IOW, we
2434 don't want this thread to step further from PC
af48d08f 2435 (overstep). */
1ac806b8 2436 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2437 insert_single_step_breakpoint (gdbarch, aspace, pc);
2438 insert_breakpoints ();
2439
fbea99ea 2440 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2441 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
719546c4 2442 tp->resumed = true;
af48d08f
PA
2443 return;
2444 }
2445 }
6d350bb5 2446 }
c2c6d25f 2447
c1e36e3e
PA
2448 /* If we have a breakpoint to step over, make sure to do a single
2449 step only. Same if we have software watchpoints. */
2450 if (tp->control.trap_expected || bpstat_should_step ())
2451 tp->control.may_range_step = 0;
2452
7da6a5b9
LM
2453 /* If displaced stepping is enabled, step over breakpoints by executing a
2454 copy of the instruction at a different address.
237fc4c9
PA
2455
2456 We can't use displaced stepping when we have a signal to deliver;
2457 the comments for displaced_step_prepare explain why. The
2458 comments in handle_inferior_event for dealing with 'random
74609e71
YQ
2459 signals' explain what we do instead.
2460
2461 We can't use displaced stepping when we are waiting for a vfork_done
2462 event either; displaced stepping would break the vfork child in the
2463 same way a software single-step breakpoint would. */
3fc8eb30
PA
2464 if (tp->control.trap_expected
2465 && use_displaced_stepping (tp)
cb71640d 2466 && !step_over_info_valid_p ()
a493e3e2 2467 && sig == GDB_SIGNAL_0
74609e71 2468 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2469 {
00431a78 2470 int prepared = displaced_step_prepare (tp);
fc1cf338 2471
3fc8eb30 2472 if (prepared == 0)
d56b7306 2473 {
4d9d9d04
PA
2474 if (debug_infrun)
2475 fprintf_unfiltered (gdb_stdlog,
2476 "Got placed in step-over queue\n");
2477
2478 tp->control.trap_expected = 0;
d56b7306
VP
2479 return;
2480 }
3fc8eb30
PA
2481 else if (prepared < 0)
2482 {
2483 /* Fall back to stepping over the breakpoint in-line. */
2484
2485 if (target_is_non_stop_p ())
2486 stop_all_threads ();
2487
a01bda52 2488 set_step_over_info (regcache->aspace (),
21edc42f 2489 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2490
2491 step = maybe_software_singlestep (gdbarch, pc);
2492
2493 insert_breakpoints ();
2494 }
2495 else if (prepared > 0)
2496 {
2497 struct displaced_step_inferior_state *displaced;
99e40580 2498
3fc8eb30
PA
2499 /* Update pc to reflect the new address from which we will
2500 execute instructions due to displaced stepping. */
00431a78 2501 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2502
00431a78 2503 displaced = get_displaced_stepping_state (tp->inf);
d8d83535
SM
2504 step = gdbarch_displaced_step_hw_singlestep
2505 (gdbarch, displaced->step_closure.get ());
3fc8eb30 2506 }
237fc4c9
PA
2507 }
2508
2facfe5c 2509 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2510 else if (step)
2facfe5c 2511 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2512
30852783
UW
2513 /* Currently, our software single-step implementation leads to different
2514 results than hardware single-stepping in one situation: when stepping
2515 into delivering a signal which has an associated signal handler,
2516 hardware single-step will stop at the first instruction of the handler,
2517 while software single-step will simply skip execution of the handler.
2518
2519 For now, this difference in behavior is accepted since there is no
2520 easy way to actually implement single-stepping into a signal handler
2521 without kernel support.
2522
2523 However, there is one scenario where this difference leads to follow-on
2524 problems: if we're stepping off a breakpoint by removing all breakpoints
2525 and then single-stepping. In this case, the software single-step
2526 behavior means that even if there is a *breakpoint* in the signal
2527 handler, GDB still would not stop.
2528
2529 Fortunately, we can at least fix this particular issue. We detect
2530 here the case where we are about to deliver a signal while software
2531 single-stepping with breakpoints removed. In this situation, we
2532 revert the decisions to remove all breakpoints and insert single-
2533 step breakpoints, and instead we install a step-resume breakpoint
2534 at the current address, deliver the signal without stepping, and
2535 once we arrive back at the step-resume breakpoint, actually step
2536 over the breakpoint we originally wanted to step over. */
34b7e8a6 2537 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2538 && sig != GDB_SIGNAL_0
2539 && step_over_info_valid_p ())
30852783
UW
2540 {
2541 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2542 immediately after a handler returns, we might already have
30852783
UW
2543 a step-resume breakpoint set on the earlier handler. We cannot
2544 set another step-resume breakpoint; just continue on until the
2545 original breakpoint is hit. */
2546 if (tp->control.step_resume_breakpoint == NULL)
2547 {
2c03e5be 2548 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2549 tp->step_after_step_resume_breakpoint = 1;
2550 }
2551
34b7e8a6 2552 delete_single_step_breakpoints (tp);
30852783 2553
31e77af2 2554 clear_step_over_info ();
30852783 2555 tp->control.trap_expected = 0;
31e77af2
PA
2556
2557 insert_breakpoints ();
30852783
UW
2558 }
2559
b0f16a3e
SM
2560 /* If STEP is set, it's a request to use hardware stepping
2561 facilities. But in that case, we should never
2562 use singlestep breakpoint. */
34b7e8a6 2563 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2564
fbea99ea 2565 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2566 if (tp->control.trap_expected)
b0f16a3e
SM
2567 {
2568 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2569 hit, either by single-stepping the thread with the breakpoint
2570 removed, or by displaced stepping, with the breakpoint inserted.
2571 In the former case, we need to single-step only this thread,
2572 and keep others stopped, as they can miss this breakpoint if
2573 allowed to run. That's not really a problem for displaced
2574 stepping, but, we still keep other threads stopped, in case
2575 another thread is also stopped for a breakpoint waiting for
2576 its turn in the displaced stepping queue. */
b0f16a3e
SM
2577 resume_ptid = inferior_ptid;
2578 }
fbea99ea
PA
2579 else
2580 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2581
7f5ef605
PA
2582 if (execution_direction != EXEC_REVERSE
2583 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2584 {
372316f1
PA
2585 /* There are two cases where we currently need to step a
2586 breakpoint instruction when we have a signal to deliver:
2587
2588 - See handle_signal_stop where we handle random signals that
2589 could take us out of the stepping range. Normally, in
2590 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2591 signal handler with a breakpoint at PC, but there are cases
2592 where we should _always_ single-step, even if we have a
2593 step-resume breakpoint, like when a software watchpoint is
2594 set. Assuming single-stepping and delivering a signal at the
2595 same time would take us to the signal handler, then we could
2596 have removed the breakpoint at PC to step over it. However,
2597 some hardware step targets (like e.g., Mac OS) can't step
2598 into signal handlers, and for those, we need to leave the
2599 breakpoint at PC inserted, as otherwise if the handler
2600 recurses and executes PC again, it'll miss the breakpoint.
2601 So we leave the breakpoint inserted anyway, but we need to
2602 record that we tried to step a breakpoint instruction, so
372316f1
PA
2603 that adjust_pc_after_break doesn't end up confused.
2604
2605 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2606 in one thread after another thread that was stepping had been
2607 momentarily paused for a step-over. When we re-resume the
2608 stepping thread, it may be resumed from that address with a
2609 breakpoint that hasn't trapped yet. Seen with
2610 gdb.threads/non-stop-fair-events.exp, on targets that don't
2611 do displaced stepping. */
2612
2613 if (debug_infrun)
2614 fprintf_unfiltered (gdb_stdlog,
2615 "infrun: resume: [%s] stepped breakpoint\n",
a068643d 2616 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2617
2618 tp->stepped_breakpoint = 1;
2619
b0f16a3e
SM
2620 /* Most targets can step a breakpoint instruction, thus
2621 executing it normally. But if this one cannot, just
2622 continue and we will hit it anyway. */
7f5ef605 2623 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2624 step = 0;
2625 }
ef5cf84e 2626
b0f16a3e 2627 if (debug_displaced
cb71640d 2628 && tp->control.trap_expected
3fc8eb30 2629 && use_displaced_stepping (tp)
cb71640d 2630 && !step_over_info_valid_p ())
b0f16a3e 2631 {
00431a78 2632 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2633 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2634 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2635 gdb_byte buf[4];
2636
2637 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2638 paddress (resume_gdbarch, actual_pc));
2639 read_memory (actual_pc, buf, sizeof (buf));
2640 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2641 }
237fc4c9 2642
b0f16a3e
SM
2643 if (tp->control.may_range_step)
2644 {
2645 /* If we're resuming a thread with the PC out of the step
2646 range, then we're doing some nested/finer run control
2647 operation, like stepping the thread out of the dynamic
2648 linker or the displaced stepping scratch pad. We
2649 shouldn't have allowed a range step then. */
2650 gdb_assert (pc_in_thread_step_range (pc, tp));
2651 }
c1e36e3e 2652
64ce06e4 2653 do_target_resume (resume_ptid, step, sig);
719546c4 2654 tp->resumed = true;
c906108c 2655}
71d378ae
PA
2656
2657/* Resume the inferior. SIG is the signal to give the inferior
2658 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2659 rolls back state on error. */
2660
aff4e175 2661static void
71d378ae
PA
2662resume (gdb_signal sig)
2663{
a70b8144 2664 try
71d378ae
PA
2665 {
2666 resume_1 (sig);
2667 }
230d2906 2668 catch (const gdb_exception &ex)
71d378ae
PA
2669 {
2670 /* If resuming is being aborted for any reason, delete any
2671 single-step breakpoint resume_1 may have created, to avoid
2672 confusing the following resumption, and to avoid leaving
2673 single-step breakpoints perturbing other threads, in case
2674 we're running in non-stop mode. */
2675 if (inferior_ptid != null_ptid)
2676 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2677 throw;
71d378ae 2678 }
71d378ae
PA
2679}
2680
c906108c 2681\f
237fc4c9 2682/* Proceeding. */
c906108c 2683
4c2f2a79
PA
2684/* See infrun.h. */
2685
2686/* Counter that tracks number of user visible stops. This can be used
2687 to tell whether a command has proceeded the inferior past the
2688 current location. This allows e.g., inferior function calls in
2689 breakpoint commands to not interrupt the command list. When the
2690 call finishes successfully, the inferior is standing at the same
2691 breakpoint as if nothing happened (and so we don't call
2692 normal_stop). */
2693static ULONGEST current_stop_id;
2694
2695/* See infrun.h. */
2696
2697ULONGEST
2698get_stop_id (void)
2699{
2700 return current_stop_id;
2701}
2702
2703/* Called when we report a user visible stop. */
2704
2705static void
2706new_stop_id (void)
2707{
2708 current_stop_id++;
2709}
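/* Illustrative sketch (a hypothetical caller, not code from this file):
   a command can tell whether a user-visible stop happened while it ran
   something in the inferior by comparing stop ids:

       ULONGEST stop_id_before = get_stop_id ();
       ... call a function in the inferior, etc. ...
       if (get_stop_id () != stop_id_before)
         {
           // normal_stop ran in between; any saved context may be stale.
         }
*/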
2710
c906108c
SS
2711/* Clear out all variables saying what to do when inferior is continued.
2712 First do this, then set the ones you want, then call `proceed'. */
2713
a7212384
UW
2714static void
2715clear_proceed_status_thread (struct thread_info *tp)
c906108c 2716{
a7212384
UW
2717 if (debug_infrun)
2718 fprintf_unfiltered (gdb_stdlog,
2719 "infrun: clear_proceed_status_thread (%s)\n",
a068643d 2720 target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2721
372316f1
PA
2722 /* If we're starting a new sequence, then the previous finished
2723 single-step is no longer relevant. */
2724 if (tp->suspend.waitstatus_pending_p)
2725 {
2726 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2727 {
2728 if (debug_infrun)
2729 fprintf_unfiltered (gdb_stdlog,
2730 "infrun: clear_proceed_status: pending "
2731 "event of %s was a finished step. "
2732 "Discarding.\n",
a068643d 2733 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2734
2735 tp->suspend.waitstatus_pending_p = 0;
2736 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2737 }
2738 else if (debug_infrun)
2739 {
23fdd69e
SM
2740 std::string statstr
2741 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2742
372316f1
PA
2743 fprintf_unfiltered (gdb_stdlog,
2744 "infrun: clear_proceed_status_thread: thread %s "
2745 "has pending wait status %s "
2746 "(currently_stepping=%d).\n",
a068643d
TT
2747 target_pid_to_str (tp->ptid).c_str (),
2748 statstr.c_str (),
372316f1 2749 currently_stepping (tp));
372316f1
PA
2750 }
2751 }
2752
70509625
PA
2753 /* If this signal should not be seen by the program, give it zero.
2754 Used for debugging signals. */
2755 if (!signal_pass_state (tp->suspend.stop_signal))
2756 tp->suspend.stop_signal = GDB_SIGNAL_0;
2757
46e3ed7f 2758 delete tp->thread_fsm;
243a9253
PA
2759 tp->thread_fsm = NULL;
2760
16c381f0
JK
2761 tp->control.trap_expected = 0;
2762 tp->control.step_range_start = 0;
2763 tp->control.step_range_end = 0;
c1e36e3e 2764 tp->control.may_range_step = 0;
16c381f0
JK
2765 tp->control.step_frame_id = null_frame_id;
2766 tp->control.step_stack_frame_id = null_frame_id;
2767 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2768 tp->control.step_start_function = NULL;
a7212384 2769 tp->stop_requested = 0;
4e1c45ea 2770
16c381f0 2771 tp->control.stop_step = 0;
32400beb 2772
16c381f0 2773 tp->control.proceed_to_finish = 0;
414c69f7 2774
856e7dd6 2775 tp->control.stepping_command = 0;
17b2616c 2776
a7212384 2777 /* Discard any remaining commands or status from previous stop. */
16c381f0 2778 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2779}
32400beb 2780
a7212384 2781void
70509625 2782clear_proceed_status (int step)
a7212384 2783{
f2665db5
MM
2784 /* With scheduler-locking replay, stop replaying other threads if we're
2785 not replaying the user-visible resume ptid.
2786
2787 This is a convenience feature to not require the user to explicitly
2788 stop replaying the other threads. We're assuming that the user's
2789 intent is to resume tracing the recorded process. */
2790 if (!non_stop && scheduler_mode == schedlock_replay
2791 && target_record_is_replaying (minus_one_ptid)
2792 && !target_record_will_replay (user_visible_resume_ptid (step),
2793 execution_direction))
2794 target_record_stop_replaying ();
2795
08036331 2796 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2797 {
08036331 2798 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2799 process_stratum_target *resume_target
2800 = user_visible_resume_target (resume_ptid);
70509625
PA
2801
2802 /* In all-stop mode, delete the per-thread status of all threads
2803 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2804 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2805 clear_proceed_status_thread (tp);
6c95b8df
PA
2806 }
2807
d7e15655 2808 if (inferior_ptid != null_ptid)
a7212384
UW
2809 {
2810 struct inferior *inferior;
2811
2812 if (non_stop)
2813 {
6c95b8df
PA
2814 /* If in non-stop mode, only delete the per-thread status of
2815 the current thread. */
a7212384
UW
2816 clear_proceed_status_thread (inferior_thread ());
2817 }
6c95b8df 2818
d6b48e9c 2819 inferior = current_inferior ();
16c381f0 2820 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2821 }
2822
76727919 2823 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2824}
2825
99619bea
PA
2826/* Returns true if TP is still stopped at a breakpoint that needs
2827 stepping-over in order to make progress. If the breakpoint is gone
2828 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2829
2830static int
6c4cfb24 2831thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2832{
2833 if (tp->stepping_over_breakpoint)
2834 {
00431a78 2835 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2836
a01bda52 2837 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2838 regcache_read_pc (regcache))
2839 == ordinary_breakpoint_here)
99619bea
PA
2840 return 1;
2841
2842 tp->stepping_over_breakpoint = 0;
2843 }
2844
2845 return 0;
2846}
2847
6c4cfb24
PA
2848/* Check whether thread TP still needs to start a step-over in order
2849 to make progress when resumed. Returns a bitwise or of enum
2850 step_over_what bits, indicating what needs to be stepped over. */
2851
8d297bbf 2852static step_over_what
6c4cfb24
PA
2853thread_still_needs_step_over (struct thread_info *tp)
2854{
8d297bbf 2855 step_over_what what = 0;
6c4cfb24
PA
2856
2857 if (thread_still_needs_step_over_bp (tp))
2858 what |= STEP_OVER_BREAKPOINT;
2859
2860 if (tp->stepping_over_watchpoint
2861 && !target_have_steppable_watchpoint)
2862 what |= STEP_OVER_WATCHPOINT;
2863
2864 return what;
2865}
2866
483805cf
PA
2867/* Returns true if scheduler locking applies to thread TP, which we're
2868 about to resume (possibly for a step/next-like command). */
2869
2870static int
856e7dd6 2871schedlock_applies (struct thread_info *tp)
483805cf
PA
2872{
2873 return (scheduler_mode == schedlock_on
2874 || (scheduler_mode == schedlock_step
f2665db5
MM
2875 && tp->control.stepping_command)
2876 || (scheduler_mode == schedlock_replay
2877 && target_record_will_replay (minus_one_ptid,
2878 execution_direction)));
483805cf
PA
2879}
2880
5b6d1e4f
PA
2881/* Calls target_commit_resume on all targets. */
2882
2883static void
2884commit_resume_all_targets ()
2885{
2886 scoped_restore_current_thread restore_thread;
2887
2888 /* Map between process_target and a representative inferior. This
2889 is to avoid committing a resume in the same target more than
2890 once. Resumptions must be idempotent, so this is an
2891 optimization. */
2892 std::unordered_map<process_stratum_target *, inferior *> conn_inf;
2893
2894 for (inferior *inf : all_non_exited_inferiors ())
2895 if (inf->has_execution ())
2896 conn_inf[inf->process_target ()] = inf;
2897
2898 for (const auto &ci : conn_inf)
2899 {
2900 inferior *inf = ci.second;
2901 switch_to_inferior_no_thread (inf);
2902 target_commit_resume ();
2903 }
2904}
2905
2f4fcf00
PA
2906/* Check that all the targets we're about to resume are in non-stop
2907 mode. Ideally, we'd only care whether all targets support
2908 target-async, but we're not there yet. E.g., stop_all_threads
2909 doesn't know how to handle all-stop targets. Also, the remote
2910 protocol in all-stop mode is synchronous, irrespective of
2911 target-async, which means that things like a breakpoint re-set
2912 triggered by one target would try to read memory from all targets
2913 and fail. */
2914
2915static void
2916check_multi_target_resumption (process_stratum_target *resume_target)
2917{
2918 if (!non_stop && resume_target == nullptr)
2919 {
2920 scoped_restore_current_thread restore_thread;
2921
2922 /* This is used to track whether we're resuming more than one
2923 target. */
2924 process_stratum_target *first_connection = nullptr;
2925
2926 /* The first inferior we see with a target that does not work in
2927 always-non-stop mode. */
2928 inferior *first_not_non_stop = nullptr;
2929
2930 for (inferior *inf : all_non_exited_inferiors (resume_target))
2931 {
2932 switch_to_inferior_no_thread (inf);
2933
2934 if (!target_has_execution)
2935 continue;
2936
2937 process_stratum_target *proc_target
2938 = current_inferior ()->process_target();
2939
2940 if (!target_is_non_stop_p ())
2941 first_not_non_stop = inf;
2942
2943 if (first_connection == nullptr)
2944 first_connection = proc_target;
2945 else if (first_connection != proc_target
2946 && first_not_non_stop != nullptr)
2947 {
2948 switch_to_inferior_no_thread (first_not_non_stop);
2949
2950 proc_target = current_inferior ()->process_target();
2951
2952 error (_("Connection %d (%s) does not support "
2953 "multi-target resumption."),
2954 proc_target->connection_number,
2955 make_target_connection_string (proc_target).c_str ());
2956 }
2957 }
2958 }
2959}
2960
c906108c
SS
2961/* Basic routine for continuing the program in various fashions.
2962
2963 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2964 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2965 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2966
2967 You should call clear_proceed_status before calling proceed. */
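/* Sketch of a typical caller sequence (this mirrors how the "continue"
   command drives it):

       clear_proceed_status (0);
       proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);  */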
2968
2969void
64ce06e4 2970proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2971{
e58b0e63
PA
2972 struct regcache *regcache;
2973 struct gdbarch *gdbarch;
e58b0e63 2974 CORE_ADDR pc;
4d9d9d04
PA
2975 struct execution_control_state ecss;
2976 struct execution_control_state *ecs = &ecss;
4d9d9d04 2977 int started;
c906108c 2978
e58b0e63
PA
2979 /* If we're stopped at a fork/vfork, follow the branch set by the
2980 "set follow-fork-mode" command; otherwise, we'll just proceed
2981 resuming the current thread. */
2982 if (!follow_fork ())
2983 {
2984 /* The target for some reason decided not to resume. */
2985 normal_stop ();
f148b27e
PA
2986 if (target_can_async_p ())
2987 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
e58b0e63
PA
2988 return;
2989 }
2990
842951eb
PA
2991 /* We'll update this if & when we switch to a new thread. */
2992 previous_inferior_ptid = inferior_ptid;
2993
e58b0e63 2994 regcache = get_current_regcache ();
ac7936df 2995 gdbarch = regcache->arch ();
8b86c959
YQ
2996 const address_space *aspace = regcache->aspace ();
2997
e58b0e63 2998 pc = regcache_read_pc (regcache);
08036331 2999 thread_info *cur_thr = inferior_thread ();
e58b0e63 3000
99619bea 3001 /* Fill in with reasonable starting values. */
08036331 3002 init_thread_stepping_state (cur_thr);
99619bea 3003
08036331 3004 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 3005
5b6d1e4f
PA
3006 ptid_t resume_ptid
3007 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3008 process_stratum_target *resume_target
3009 = user_visible_resume_target (resume_ptid);
3010
2f4fcf00
PA
3011 check_multi_target_resumption (resume_target);
3012
2acceee2 3013 if (addr == (CORE_ADDR) -1)
c906108c 3014 {
08036331 3015 if (pc == cur_thr->suspend.stop_pc
af48d08f 3016 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 3017 && execution_direction != EXEC_REVERSE)
3352ef37
AC
3018 /* There is a breakpoint at the address we will resume at,
3019 step one instruction before inserting breakpoints so that
3020 we do not stop right away (and report a second hit at this
b2175913
MS
3021 breakpoint).
3022
3023 Note, we don't do this in reverse, because we won't
3024 actually be executing the breakpoint insn anyway.
3025 We'll be (un-)executing the previous instruction. */
08036331 3026 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
3027 else if (gdbarch_single_step_through_delay_p (gdbarch)
3028 && gdbarch_single_step_through_delay (gdbarch,
3029 get_current_frame ()))
3352ef37
AC
3030 /* We stepped onto an instruction that needs to be stepped
3031 again before re-inserting the breakpoint, do so. */
08036331 3032 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
3033 }
3034 else
3035 {
515630c5 3036 regcache_write_pc (regcache, addr);
c906108c
SS
3037 }
3038
70509625 3039 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 3040 cur_thr->suspend.stop_signal = siggnal;
70509625 3041
4d9d9d04
PA
3042 /* If an exception is thrown from this point on, make sure to
3043 propagate GDB's knowledge of the executing state to the
3044 frontend/user running state. */
5b6d1e4f 3045 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
3046
3047 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3048 threads (e.g., we might need to set threads stepping over
3049 breakpoints first), from the user/frontend's point of view, all
3050 threads in RESUME_PTID are now running. Unless we're calling an
3051 inferior function, in which case we pretend the inferior
3052 doesn't run at all. */
08036331 3053 if (!cur_thr->control.in_infcall)
719546c4 3054 set_running (resume_target, resume_ptid, true);
17b2616c 3055
527159b7 3056 if (debug_infrun)
8a9de0e4 3057 fprintf_unfiltered (gdb_stdlog,
64ce06e4 3058 "infrun: proceed (addr=%s, signal=%s)\n",
c9737c08 3059 paddress (gdbarch, addr),
64ce06e4 3060 gdb_signal_to_symbol_string (siggnal));
527159b7 3061
4d9d9d04
PA
3062 annotate_starting ();
3063
3064 /* Make sure that output from GDB appears before output from the
3065 inferior. */
3066 gdb_flush (gdb_stdout);
3067
d930703d
PA
3068 /* Since we've marked the inferior running, give it the terminal. A
3069 QUIT/Ctrl-C from here on is forwarded to the target (which can
3070 still detect attempts to unblock a stuck connection with repeated
3071 Ctrl-C from within target_pass_ctrlc). */
3072 target_terminal::inferior ();
3073
4d9d9d04
PA
3074 /* In a multi-threaded task we may select another thread and
3075 then continue or step.
3076
3077 But if a thread that we're resuming had stopped at a breakpoint,
3078 it will immediately cause another breakpoint stop without any
3079 execution (i.e. it will report a breakpoint hit incorrectly). So
3080 we must step over it first.
3081
3082 Look for threads other than the current one (CUR_THR) that reported a
3083 breakpoint hit and haven't been resumed since then. */
3084
3085 /* If scheduler locking applies, we can avoid iterating over all
3086 threads. */
08036331 3087 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3088 {
5b6d1e4f
PA
3089 for (thread_info *tp : all_non_exited_threads (resume_target,
3090 resume_ptid))
08036331 3091 {
f3f8ece4
PA
3092 switch_to_thread_no_regs (tp);
3093
4d9d9d04
PA
3094 /* Ignore the current thread here. It's handled
3095 afterwards. */
08036331 3096 if (tp == cur_thr)
4d9d9d04 3097 continue;
c906108c 3098
4d9d9d04
PA
3099 if (!thread_still_needs_step_over (tp))
3100 continue;
3101
3102 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3103
99619bea
PA
3104 if (debug_infrun)
3105 fprintf_unfiltered (gdb_stdlog,
3106 "infrun: need to step-over [%s] first\n",
a068643d 3107 target_pid_to_str (tp->ptid).c_str ());
99619bea 3108
4d9d9d04 3109 thread_step_over_chain_enqueue (tp);
2adfaa28 3110 }
f3f8ece4
PA
3111
3112 switch_to_thread (cur_thr);
30852783
UW
3113 }
3114
4d9d9d04
PA
3115 /* Enqueue the current thread last, so that we move all other
3116 threads over their breakpoints first. */
08036331
PA
3117 if (cur_thr->stepping_over_breakpoint)
3118 thread_step_over_chain_enqueue (cur_thr);
30852783 3119
4d9d9d04
PA
3120 /* If the thread isn't started, we'll still need to set its prev_pc,
3121 so that switch_back_to_stepped_thread knows the thread hasn't
3122 advanced. Must do this before resuming any thread, as in
3123 all-stop/remote, once we resume we can't send any other packet
3124 until the target stops again. */
08036331 3125 cur_thr->prev_pc = regcache_read_pc (regcache);
99619bea 3126
a9bc57b9
TT
3127 {
3128 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 3129
a9bc57b9 3130 started = start_step_over ();
c906108c 3131
a9bc57b9
TT
3132 if (step_over_info_valid_p ())
3133 {
3134 /* Either this thread started a new in-line step over, or some
3135 other thread was already doing one. In either case, don't
3136 resume anything else until the step-over is finished. */
3137 }
3138 else if (started && !target_is_non_stop_p ())
3139 {
3140 /* A new displaced stepping sequence was started. In all-stop,
3141 we can't talk to the target anymore until it next stops. */
3142 }
3143 else if (!non_stop && target_is_non_stop_p ())
3144 {
3145 /* In all-stop, but the target is always in non-stop mode.
3146 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3147 for (thread_info *tp : all_non_exited_threads (resume_target,
3148 resume_ptid))
3149 {
3150 switch_to_thread_no_regs (tp);
3151
f9fac3c8
SM
3152 if (!tp->inf->has_execution ())
3153 {
3154 if (debug_infrun)
3155 fprintf_unfiltered (gdb_stdlog,
3156 "infrun: proceed: [%s] target has "
3157 "no execution\n",
3158 target_pid_to_str (tp->ptid).c_str ());
3159 continue;
3160 }
f3f8ece4 3161
f9fac3c8
SM
3162 if (tp->resumed)
3163 {
3164 if (debug_infrun)
3165 fprintf_unfiltered (gdb_stdlog,
3166 "infrun: proceed: [%s] resumed\n",
3167 target_pid_to_str (tp->ptid).c_str ());
3168 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3169 continue;
3170 }
fbea99ea 3171
f9fac3c8
SM
3172 if (thread_is_in_step_over_chain (tp))
3173 {
3174 if (debug_infrun)
3175 fprintf_unfiltered (gdb_stdlog,
3176 "infrun: proceed: [%s] needs step-over\n",
3177 target_pid_to_str (tp->ptid).c_str ());
3178 continue;
3179 }
fbea99ea 3180
f9fac3c8
SM
3181 if (debug_infrun)
3182 fprintf_unfiltered (gdb_stdlog,
3183 "infrun: proceed: resuming %s\n",
3184 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3185
f9fac3c8
SM
3186 reset_ecs (ecs, tp);
3187 switch_to_thread (tp);
3188 keep_going_pass_signal (ecs);
3189 if (!ecs->wait_some_more)
3190 error (_("Command aborted."));
3191 }
a9bc57b9 3192 }
08036331 3193 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3194 {
3195 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3196 reset_ecs (ecs, cur_thr);
3197 switch_to_thread (cur_thr);
a9bc57b9
TT
3198 keep_going_pass_signal (ecs);
3199 if (!ecs->wait_some_more)
3200 error (_("Command aborted."));
3201 }
3202 }
c906108c 3203
5b6d1e4f 3204 commit_resume_all_targets ();
85ad3aaf 3205
731f534f 3206 finish_state.release ();
c906108c 3207
873657b9
PA
3208 /* If we've switched threads above, switch back to the previously
3209 current thread. We don't want the user to see a different
3210 selected thread. */
3211 switch_to_thread (cur_thr);
3212
0b333c5e
PA
3213 /* Tell the event loop to wait for it to stop. If the target
3214 supports asynchronous execution, it'll do this from within
3215 target_resume. */
362646f5 3216 if (!target_can_async_p ())
0b333c5e 3217 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3218}
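/* [Illustrative aside -- not part of infrun.c.]  A sketch of the calling
   convention documented above proceed: reset the per-thread proceed state
   first, then resume.  This is roughly what a "continue"-style command
   driver does; it is shown here only to make the contract concrete.

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   ADDR of -1 means "resume where stopped", and GDB_SIGNAL_DEFAULT means
   "act according to how the thread stopped".  */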
c906108c
SS
3219\f
3220
3221/* Start remote-debugging of a machine over a serial link. */
96baa820 3222
c906108c 3223void
8621d6a9 3224start_remote (int from_tty)
c906108c 3225{
5b6d1e4f
PA
3226 inferior *inf = current_inferior ();
3227 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3228
1777feb0 3229 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3230 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3231 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3232 nothing is returned (instead of just blocking). Because of this,
3233 targets expecting an immediate response need to, internally, set
3234 things up so that the target_wait() is forced to eventually
1777feb0 3235 timeout. */
6426a772
JM
3236 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3237 indicate to its caller what the state of the target is after
3238 the initial open has been performed. Here we're assuming that
3239 the target has stopped. It should be possible to eventually have
3240 target_open() return to the caller an indication that the target
3241 is currently running and GDB state should be set to the same as
1777feb0 3242 for an async run. */
5b6d1e4f 3243 wait_for_inferior (inf);
8621d6a9
DJ
3244
3245 /* Now that the inferior has stopped, do any bookkeeping like
3246 loading shared libraries. We want to do this before normal_stop,
3247 so that the displayed frame is up to date. */
8b88a78e 3248 post_create_inferior (current_top_target (), from_tty);
8621d6a9 3249
6426a772 3250 normal_stop ();
c906108c
SS
3251}
3252
3253/* Initialize static vars when a new inferior begins. */
3254
3255void
96baa820 3256init_wait_for_inferior (void)
c906108c
SS
3257{
3258 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3259
c906108c
SS
3260 breakpoint_init_inferior (inf_starting);
3261
70509625 3262 clear_proceed_status (0);
9f976b41 3263
ab1ddbcf 3264 nullify_last_target_wait_ptid ();
237fc4c9 3265
842951eb 3266 previous_inferior_ptid = inferior_ptid;
c906108c 3267}
237fc4c9 3268
c906108c 3269\f
488f131b 3270
ec9499be 3271static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3272
568d6575
UW
3273static void handle_step_into_function (struct gdbarch *gdbarch,
3274 struct execution_control_state *ecs);
3275static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3276 struct execution_control_state *ecs);
4f5d7f63 3277static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3278static void check_exception_resume (struct execution_control_state *,
28106bc2 3279 struct frame_info *);
611c83ae 3280
bdc36728 3281static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3282static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3283static void keep_going (struct execution_control_state *ecs);
94c57d6a 3284static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3285static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3286
252fbfc8
PA
3287/* This function is attached as a "thread_stop_requested" observer.
3288 Clean up local state that assumed the PTID was to be resumed, and
3289 report the stop to the frontend. */
3290
2c0b251b 3291static void
252fbfc8
PA
3292infrun_thread_stop_requested (ptid_t ptid)
3293{
5b6d1e4f
PA
3294 process_stratum_target *curr_target = current_inferior ()->process_target ();
3295
c65d6b55
PA
3296 /* PTID was requested to stop. If the thread was already stopped,
3297 but the user/frontend doesn't know about that yet (e.g., the
3298 thread had been temporarily paused for some step-over), set up
3299 for reporting the stop now. */
5b6d1e4f 3300 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3301 {
3302 if (tp->state != THREAD_RUNNING)
3303 continue;
3304 if (tp->executing)
3305 continue;
c65d6b55 3306
08036331
PA
3307 /* Remove matching threads from the step-over queue, so
3308 start_step_over doesn't try to resume them
3309 automatically. */
3310 if (thread_is_in_step_over_chain (tp))
3311 thread_step_over_chain_remove (tp);
c65d6b55 3312
08036331
PA
3313 /* If the thread is stopped, but the user/frontend doesn't
3314 know about that yet, queue a pending event, as if the
3315 thread had just stopped now. Unless the thread already had
3316 a pending event. */
3317 if (!tp->suspend.waitstatus_pending_p)
3318 {
3319 tp->suspend.waitstatus_pending_p = 1;
3320 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3321 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3322 }
c65d6b55 3323
08036331
PA
3324 /* Clear the inline-frame state, since we're re-processing the
3325 stop. */
5b6d1e4f 3326 clear_inline_frame_state (tp);
c65d6b55 3327
08036331
PA
3328 /* If this thread was paused because some other thread was
3329 doing an inline-step over, let that finish first. Once
3330 that happens, we'll restart all threads and consume pending
3331 stop events then. */
3332 if (step_over_info_valid_p ())
3333 continue;
3334
3335 /* Otherwise we can process the (new) pending event now. Set
3336 it so this pending event is considered by
3337 do_target_wait. */
719546c4 3338 tp->resumed = true;
08036331 3339 }
252fbfc8
PA
3340}
3341
a07daef3
PA
3342static void
3343infrun_thread_thread_exit (struct thread_info *tp, int silent)
3344{
5b6d1e4f
PA
3345 if (target_last_proc_target == tp->inf->process_target ()
3346 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3347 nullify_last_target_wait_ptid ();
3348}
3349
0cbcdb96
PA
3350/* Delete the step resume, single-step and longjmp/exception resume
3351 breakpoints of TP. */
4e1c45ea 3352
0cbcdb96
PA
3353static void
3354delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3355{
0cbcdb96
PA
3356 delete_step_resume_breakpoint (tp);
3357 delete_exception_resume_breakpoint (tp);
34b7e8a6 3358 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3359}
3360
0cbcdb96
PA
3361/* If the target still has execution, call FUNC for each thread that
3362 just stopped. In all-stop, that's all the non-exited threads; in
3363 non-stop, that's the current thread, only. */
3364
3365typedef void (*for_each_just_stopped_thread_callback_func)
3366 (struct thread_info *tp);
4e1c45ea
PA
3367
3368static void
0cbcdb96 3369for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3370{
d7e15655 3371 if (!target_has_execution || inferior_ptid == null_ptid)
4e1c45ea
PA
3372 return;
3373
fbea99ea 3374 if (target_is_non_stop_p ())
4e1c45ea 3375 {
0cbcdb96
PA
3376 /* If in non-stop mode, only the current thread stopped. */
3377 func (inferior_thread ());
4e1c45ea
PA
3378 }
3379 else
0cbcdb96 3380 {
0cbcdb96 3381 /* In all-stop mode, all threads have stopped. */
08036331
PA
3382 for (thread_info *tp : all_non_exited_threads ())
3383 func (tp);
0cbcdb96
PA
3384 }
3385}
3386
3387/* Delete the step resume and longjmp/exception resume breakpoints of
3388 the threads that just stopped. */
3389
3390static void
3391delete_just_stopped_threads_infrun_breakpoints (void)
3392{
3393 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3394}
3395
3396/* Delete the single-step breakpoints of the threads that just
3397 stopped. */
7c16b83e 3398
34b7e8a6
PA
3399static void
3400delete_just_stopped_threads_single_step_breakpoints (void)
3401{
3402 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3403}
3404
221e1a37 3405/* See infrun.h. */
223698f8 3406
221e1a37 3407void
223698f8
DE
3408print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3409 const struct target_waitstatus *ws)
3410{
23fdd69e 3411 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3412 string_file stb;
223698f8
DE
3413
3414 /* The text is split over several lines because it was getting too long.
3415 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3416 output as a unit; we want only one timestamp printed if debug_timestamp
3417 is set. */
3418
d7e74731 3419 stb.printf ("infrun: target_wait (%d.%ld.%ld",
e99b03dc 3420 waiton_ptid.pid (),
e38504b3 3421 waiton_ptid.lwp (),
cc6bcb54 3422 waiton_ptid.tid ());
e99b03dc 3423 if (waiton_ptid.pid () != -1)
a068643d 3424 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731
PA
3425 stb.printf (", status) =\n");
3426 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
e99b03dc 3427 result_ptid.pid (),
e38504b3 3428 result_ptid.lwp (),
cc6bcb54 3429 result_ptid.tid (),
a068643d 3430 target_pid_to_str (result_ptid).c_str ());
23fdd69e 3431 stb.printf ("infrun: %s\n", status_string.c_str ());
223698f8
DE
3432
3433 /* This uses %s in part to handle %'s in the text, but also to avoid
3434 a gcc error: the format attribute requires a string literal. */
d7e74731 3435 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3436}
3437
372316f1
PA
3438/* Select a thread at random, out of those which are resumed and have
3439 had events. */
3440
3441static struct thread_info *
5b6d1e4f 3442random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3443{
372316f1 3444 int num_events = 0;
08036331 3445
5b6d1e4f 3446 auto has_event = [&] (thread_info *tp)
08036331 3447 {
5b6d1e4f
PA
3448 return (tp->ptid.matches (waiton_ptid)
3449 && tp->resumed
08036331
PA
3450 && tp->suspend.waitstatus_pending_p);
3451 };
372316f1
PA
3452
3453 /* First see how many events we have. Count only resumed threads
3454 that have an event pending. */
5b6d1e4f 3455 for (thread_info *tp : inf->non_exited_threads ())
08036331 3456 if (has_event (tp))
372316f1
PA
3457 num_events++;
3458
3459 if (num_events == 0)
3460 return NULL;
3461
3462 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3463 int random_selector = (int) ((num_events * (double) rand ())
3464 / (RAND_MAX + 1.0));
372316f1
PA
3465
3466 if (debug_infrun && num_events > 1)
3467 fprintf_unfiltered (gdb_stdlog,
3468 "infrun: Found %d events, selecting #%d\n",
3469 num_events, random_selector);
3470
3471 /* Select the Nth thread that has had an event. */
5b6d1e4f 3472 for (thread_info *tp : inf->non_exited_threads ())
08036331 3473 if (has_event (tp))
372316f1 3474 if (random_selector-- == 0)
08036331 3475 return tp;
372316f1 3476
08036331 3477 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3478}
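/* [Illustrative aside -- not part of infrun.c.]  The selection above is the
   classic two-pass scheme: count the candidates, draw a uniform index in
   [0, count), then walk the list again and return the index-th candidate.
   A self-contained sketch over a plain vector, with a hypothetical
   demo_thread type:  */

#include <cstdio>
#include <cstdlib>
#include <vector>

struct demo_thread
{
  int id;
  bool has_pending_event;
};

static demo_thread *
pick_random_pending (std::vector<demo_thread> &threads)
{
  int num_events = 0;
  for (demo_thread &t : threads)
    if (t.has_pending_event)
      num_events++;

  if (num_events == 0)
    return nullptr;

  /* Uniform index in [0, num_events), same arithmetic as above.  */
  int selector = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

  for (demo_thread &t : threads)
    if (t.has_pending_event && selector-- == 0)
      return &t;

  return nullptr;   /* Not reached if the two passes agree.  */
}

int
main ()
{
  std::vector<demo_thread> threads = { {1, false}, {2, true}, {3, true} };
  demo_thread *t = pick_random_pending (threads);
  if (t != nullptr)
    printf ("picked thread %d\n", t->id);
  return 0;
}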
3479
3480/* Wrapper for target_wait that first checks whether threads have
3481 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3482 more events. INF is the inferior we're using to call target_wait
3483 on. */
372316f1
PA
3484
3485static ptid_t
5b6d1e4f
PA
3486do_target_wait_1 (inferior *inf, ptid_t ptid,
3487 target_waitstatus *status, int options)
372316f1
PA
3488{
3489 ptid_t event_ptid;
3490 struct thread_info *tp;
3491
24ed6739
AB
3492 /* We know that we are looking for an event in the target of inferior
3493 INF, but we don't know which thread the event might come from. As
3494 such we want to make sure that INFERIOR_PTID is reset so that none of
3495 the wait code relies on it - doing so is always a mistake. */
3496 switch_to_inferior_no_thread (inf);
3497
372316f1
PA
3498 /* First check if there is a resumed thread with a wait status
3499 pending. */
d7e15655 3500 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3501 {
5b6d1e4f 3502 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3503 }
3504 else
3505 {
3506 if (debug_infrun)
3507 fprintf_unfiltered (gdb_stdlog,
3508 "infrun: Waiting for specific thread %s.\n",
a068643d 3509 target_pid_to_str (ptid).c_str ());
372316f1
PA
3510
3511 /* We have a specific thread to check. */
5b6d1e4f 3512 tp = find_thread_ptid (inf, ptid);
372316f1
PA
3513 gdb_assert (tp != NULL);
3514 if (!tp->suspend.waitstatus_pending_p)
3515 tp = NULL;
3516 }
3517
3518 if (tp != NULL
3519 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3520 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3521 {
00431a78 3522 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3523 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3524 CORE_ADDR pc;
3525 int discard = 0;
3526
3527 pc = regcache_read_pc (regcache);
3528
3529 if (pc != tp->suspend.stop_pc)
3530 {
3531 if (debug_infrun)
3532 fprintf_unfiltered (gdb_stdlog,
3533 "infrun: PC of %s changed. was=%s, now=%s\n",
a068643d 3534 target_pid_to_str (tp->ptid).c_str (),
defd2172 3535 paddress (gdbarch, tp->suspend.stop_pc),
372316f1
PA
3536 paddress (gdbarch, pc));
3537 discard = 1;
3538 }
a01bda52 3539 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1
PA
3540 {
3541 if (debug_infrun)
3542 fprintf_unfiltered (gdb_stdlog,
3543 "infrun: previous breakpoint of %s, at %s gone\n",
a068643d 3544 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
3545 paddress (gdbarch, pc));
3546
3547 discard = 1;
3548 }
3549
3550 if (discard)
3551 {
3552 if (debug_infrun)
3553 fprintf_unfiltered (gdb_stdlog,
3554 "infrun: pending event of %s cancelled.\n",
a068643d 3555 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3556
3557 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3558 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3559 }
3560 }
3561
3562 if (tp != NULL)
3563 {
3564 if (debug_infrun)
3565 {
23fdd69e
SM
3566 std::string statstr
3567 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 3568
372316f1
PA
3569 fprintf_unfiltered (gdb_stdlog,
3570 "infrun: Using pending wait status %s for %s.\n",
23fdd69e 3571 statstr.c_str (),
a068643d 3572 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3573 }
3574
3575 /* Now that we've selected our final event LWP, un-adjust its PC
3576 if it was a software breakpoint (and the target doesn't
3577 always adjust the PC itself). */
3578 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3579 && !target_supports_stopped_by_sw_breakpoint ())
3580 {
3581 struct regcache *regcache;
3582 struct gdbarch *gdbarch;
3583 int decr_pc;
3584
00431a78 3585 regcache = get_thread_regcache (tp);
ac7936df 3586 gdbarch = regcache->arch ();
372316f1
PA
3587
3588 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3589 if (decr_pc != 0)
3590 {
3591 CORE_ADDR pc;
3592
3593 pc = regcache_read_pc (regcache);
3594 regcache_write_pc (regcache, pc + decr_pc);
3595 }
3596 }
3597
3598 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3599 *status = tp->suspend.waitstatus;
3600 tp->suspend.waitstatus_pending_p = 0;
3601
3602 /* Wake up the event loop again, until all pending events are
3603 processed. */
3604 if (target_is_async_p ())
3605 mark_async_event_handler (infrun_async_inferior_event_token);
3606 return tp->ptid;
3607 }
3608
3609 /* But if we don't find one, we'll have to wait. */
3610
3611 if (deprecated_target_wait_hook)
3612 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3613 else
3614 event_ptid = target_wait (ptid, status, options);
3615
3616 return event_ptid;
3617}
3618
5b6d1e4f
PA
3619/* Returns true if INF has any resumed thread with a status
3620 pending. */
3621
3622static bool
3623threads_are_resumed_pending_p (inferior *inf)
3624{
3625 for (thread_info *tp : inf->non_exited_threads ())
3626 if (tp->resumed
3627 && tp->suspend.waitstatus_pending_p)
3628 return true;
3629
3630 return false;
3631}
3632
3633/* Wrapper for target_wait that first checks whether threads have
3634 pending statuses to report before actually asking the target for
3635 more events. Polls for events from all inferiors/targets. */
3636
3637static bool
3638do_target_wait (ptid_t wait_ptid, execution_control_state *ecs, int options)
3639{
3640 int num_inferiors = 0;
3641 int random_selector;
3642
3643 /* For fairness, we pick the first inferior/target to poll at
3644 random, and then continue polling the rest of the inferior list
3645 starting from that one in a circular fashion until the whole list
3646 is polled once. */
3647
3648 auto inferior_matches = [&wait_ptid] (inferior *inf)
3649 {
3650 return (inf->process_target () != NULL
3651 && (threads_are_executing (inf->process_target ())
3652 || threads_are_resumed_pending_p (inf))
3653 && ptid_t (inf->pid).matches (wait_ptid));
3654 };
3655
3656 /* First see how many resumed inferiors we have. */
3657 for (inferior *inf : all_inferiors ())
3658 if (inferior_matches (inf))
3659 num_inferiors++;
3660
3661 if (num_inferiors == 0)
3662 {
3663 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3664 return false;
3665 }
3666
3667 /* Now randomly pick an inferior out of those that were resumed. */
3668 random_selector = (int)
3669 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3670
3671 if (debug_infrun && num_inferiors > 1)
3672 fprintf_unfiltered (gdb_stdlog,
3673 "infrun: Found %d inferiors, starting at #%d\n",
3674 num_inferiors, random_selector);
3675
3676 /* Select the Nth inferior that was resumed. */
3677
3678 inferior *selected = nullptr;
3679
3680 for (inferior *inf : all_inferiors ())
3681 if (inferior_matches (inf))
3682 if (random_selector-- == 0)
3683 {
3684 selected = inf;
3685 break;
3686 }
3687
3688 /* Now poll for events out of each of the resumed inferior's
3689 targets, starting from the selected one. */
3690
3691 auto do_wait = [&] (inferior *inf)
3692 {
5b6d1e4f
PA
3693 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
3694 ecs->target = inf->process_target ();
3695 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3696 };
3697
3698 /* Needed in all-stop+target-non-stop mode, because we end up here
3699 spuriously after the target is all stopped and we've already
3700 reported the stop to the user, polling for events. */
3701 scoped_restore_current_thread restore_thread;
3702
3703 int inf_num = selected->num;
3704 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3705 if (inferior_matches (inf))
3706 if (do_wait (inf))
3707 return true;
3708
3709 for (inferior *inf = inferior_list;
3710 inf != NULL && inf->num < inf_num;
3711 inf = inf->next)
3712 if (inferior_matches (inf))
3713 if (do_wait (inf))
3714 return true;
3715
3716 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3717 return false;
3718}
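/* [Illustrative aside -- not part of infrun.c.]  The fairness scheme above
   starts polling at a randomly chosen inferior and wraps around until each
   candidate has been polled once.  With a random-access container the same
   wrap-around is a single modular loop; a self-contained sketch that only
   prints the visiting order:  */

#include <cstdio>
#include <cstdlib>
#include <vector>

int
main ()
{
  std::vector<int> inferior_nums = { 1, 2, 3, 4 };

  size_t n = inferior_nums.size ();
  size_t start = (size_t) ((n * (double) rand ()) / (RAND_MAX + 1.0));

  /* Visit each inferior exactly once, starting at START and wrapping.  */
  for (size_t i = 0; i < n; i++)
    {
      size_t idx = (start + i) % n;
      printf ("poll inferior %d\n", inferior_nums[idx]);
      /* A real implementation would return as soon as a poll yields an
         event; here we only show the order.  */
    }
  return 0;
}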
3719
24291992
PA
3720/* Prepare and stabilize the inferior for detaching it. E.g.,
3721 detaching while a thread is displaced stepping is a recipe for
3722 crashing it, as nothing would readjust the PC out of the scratch
3723 pad. */
3724
3725void
3726prepare_for_detach (void)
3727{
3728 struct inferior *inf = current_inferior ();
f2907e49 3729 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3730
00431a78 3731 displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);
24291992
PA
3732
3733 /* Is any thread of this process displaced stepping? If not,
3734 there's nothing else to do. */
d20172fc 3735 if (displaced->step_thread == nullptr)
24291992
PA
3736 return;
3737
3738 if (debug_infrun)
3739 fprintf_unfiltered (gdb_stdlog,
3740 "displaced-stepping in-process while detaching");
3741
9bcb1f16 3742 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3743
00431a78 3744 while (displaced->step_thread != nullptr)
24291992 3745 {
24291992
PA
3746 struct execution_control_state ecss;
3747 struct execution_control_state *ecs;
3748
3749 ecs = &ecss;
3750 memset (ecs, 0, sizeof (*ecs));
3751
3752 overlay_cache_invalid = 1;
f15cb84a
YQ
3753 /* Flush target cache before starting to handle each event.
3754 Target was running and cache could be stale. This is just a
3755 heuristic. Running threads may modify target memory, but we
3756 don't get any event. */
3757 target_dcache_invalidate ();
24291992 3758
5b6d1e4f 3759 do_target_wait (pid_ptid, ecs, 0);
24291992
PA
3760
3761 if (debug_infrun)
3762 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3763
3764 /* If an error happens while handling the event, propagate GDB's
3765 knowledge of the executing state to the frontend/user running
3766 state. */
5b6d1e4f
PA
3767 scoped_finish_thread_state finish_state (inf->process_target (),
3768 minus_one_ptid);
24291992
PA
3769
3770 /* Now figure out what to do with the result. */
3771 handle_inferior_event (ecs);
3772
3773 /* No error, don't finish the state yet. */
731f534f 3774 finish_state.release ();
24291992
PA
3775
3776 /* Breakpoints and watchpoints are not installed on the target
3777 at this point, and signals are passed directly to the
3778 inferior, so this must mean the process is gone. */
3779 if (!ecs->wait_some_more)
3780 {
9bcb1f16 3781 restore_detaching.release ();
24291992
PA
3782 error (_("Program exited while detaching"));
3783 }
3784 }
3785
9bcb1f16 3786 restore_detaching.release ();
24291992
PA
3787}
3788
cd0fc7c3 3789/* Wait for control to return from inferior to debugger.
ae123ec6 3790
cd0fc7c3
SS
3791 If inferior gets a signal, we may decide to start it up again
3792 instead of returning. That is why there is a loop in this function.
3793 When this function actually returns it means the inferior
3794 should be left stopped and GDB should read more commands. */
3795
5b6d1e4f
PA
3796static void
3797wait_for_inferior (inferior *inf)
cd0fc7c3 3798{
527159b7 3799 if (debug_infrun)
ae123ec6 3800 fprintf_unfiltered
e4c8541f 3801 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
527159b7 3802
4c41382a 3803 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3804
e6f5c25b
PA
3805 /* If an error happens while handling the event, propagate GDB's
3806 knowledge of the executing state to the frontend/user running
3807 state. */
5b6d1e4f
PA
3808 scoped_finish_thread_state finish_state
3809 (inf->process_target (), minus_one_ptid);
e6f5c25b 3810
c906108c
SS
3811 while (1)
3812 {
ae25568b
PA
3813 struct execution_control_state ecss;
3814 struct execution_control_state *ecs = &ecss;
29f49a6a 3815
ae25568b
PA
3816 memset (ecs, 0, sizeof (*ecs));
3817
ec9499be 3818 overlay_cache_invalid = 1;
ec9499be 3819
f15cb84a
YQ
3820 /* Flush target cache before starting to handle each event.
3821 Target was running and cache could be stale. This is just a
3822 heuristic. Running threads may modify target memory, but we
3823 don't get any event. */
3824 target_dcache_invalidate ();
3825
5b6d1e4f
PA
3826 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3827 ecs->target = inf->process_target ();
c906108c 3828
f00150c9 3829 if (debug_infrun)
5b6d1e4f 3830 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 3831
cd0fc7c3
SS
3832 /* Now figure out what to do with the result. */
3833 handle_inferior_event (ecs);
c906108c 3834
cd0fc7c3
SS
3835 if (!ecs->wait_some_more)
3836 break;
3837 }
4e1c45ea 3838
e6f5c25b 3839 /* No error, don't finish the state yet. */
731f534f 3840 finish_state.release ();
cd0fc7c3 3841}
c906108c 3842
d3d4baed
PA
3843/* Cleanup that reinstalls the readline callback handler, if the
3844 target is running in the background. If while handling the target
3845 event something triggered a secondary prompt, like e.g., a
3846 pagination prompt, we'll have removed the callback handler (see
3847 gdb_readline_wrapper_line). Need to do this as we go back to the
3848 event loop, ready to process further input. Note this has no
3849 effect if the handler hasn't actually been removed, because calling
3850 rl_callback_handler_install resets the line buffer, thus losing
3851 input. */
3852
3853static void
d238133d 3854reinstall_readline_callback_handler_cleanup ()
d3d4baed 3855{
3b12939d
PA
3856 struct ui *ui = current_ui;
3857
3858 if (!ui->async)
6c400b59
PA
3859 {
3860 /* We're not going back to the top level event loop yet. Don't
3861 install the readline callback, as it'd prep the terminal,
3862 readline-style (raw, noecho) (e.g., --batch). We'll install
3863 it the next time the prompt is displayed, when we're ready
3864 for input. */
3865 return;
3866 }
3867
3b12939d 3868 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3869 gdb_rl_callback_handler_reinstall ();
3870}
3871
243a9253
PA
3872/* Clean up the FSMs of threads that are now stopped. In non-stop,
3873 that's just the event thread. In all-stop, that's all threads. */
3874
3875static void
3876clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3877{
08036331
PA
3878 if (ecs->event_thread != NULL
3879 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3880 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3881
3882 if (!non_stop)
3883 {
08036331 3884 for (thread_info *thr : all_non_exited_threads ())
243a9253
PA
3885 {
3886 if (thr->thread_fsm == NULL)
3887 continue;
3888 if (thr == ecs->event_thread)
3889 continue;
3890
00431a78 3891 switch_to_thread (thr);
46e3ed7f 3892 thr->thread_fsm->clean_up (thr);
243a9253
PA
3893 }
3894
3895 if (ecs->event_thread != NULL)
00431a78 3896 switch_to_thread (ecs->event_thread);
243a9253
PA
3897 }
3898}
3899
3b12939d
PA
3900/* Helper for all_uis_check_sync_execution_done that works on the
3901 current UI. */
3902
3903static void
3904check_curr_ui_sync_execution_done (void)
3905{
3906 struct ui *ui = current_ui;
3907
3908 if (ui->prompt_state == PROMPT_NEEDED
3909 && ui->async
3910 && !gdb_in_secondary_prompt_p (ui))
3911 {
223ffa71 3912 target_terminal::ours ();
76727919 3913 gdb::observers::sync_execution_done.notify ();
3eb7562a 3914 ui_register_input_event_handler (ui);
3b12939d
PA
3915 }
3916}
3917
3918/* See infrun.h. */
3919
3920void
3921all_uis_check_sync_execution_done (void)
3922{
0e454242 3923 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3924 {
3925 check_curr_ui_sync_execution_done ();
3926 }
3927}
3928
a8836c93
PA
3929/* See infrun.h. */
3930
3931void
3932all_uis_on_sync_execution_starting (void)
3933{
0e454242 3934 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3935 {
3936 if (current_ui->prompt_state == PROMPT_NEEDED)
3937 async_disable_stdin ();
3938 }
3939}
3940
1777feb0 3941/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3942 event loop whenever a change of state is detected on the file
1777feb0
MS
3943 descriptor corresponding to the target. It can be called more than
3944 once to complete a single execution command. In such cases we need
3945 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3946 that this function is called for a single execution command, then
3947 report to the user that the inferior has stopped, and do the
1777feb0 3948 necessary cleanups. */
43ff13b4
JM
3949
3950void
fba45db2 3951fetch_inferior_event (void *client_data)
43ff13b4 3952{
0d1e5fa7 3953 struct execution_control_state ecss;
a474d7c2 3954 struct execution_control_state *ecs = &ecss;
0f641c01 3955 int cmd_done = 0;
43ff13b4 3956
0d1e5fa7
PA
3957 memset (ecs, 0, sizeof (*ecs));
3958
c61db772
PA
3959 /* Events are always processed with the main UI as current UI. This
3960 way, warnings, debug output, etc. are always consistently sent to
3961 the main console. */
4b6749b9 3962 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3963
d3d4baed 3964 /* End up with readline processing input, if necessary. */
d238133d
TT
3965 {
3966 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3967
3968 /* We're handling a live event, so make sure we're doing live
3969 debugging. If we're looking at traceframes while the target is
3970 running, we're going to need to get back to that mode after
3971 handling the event. */
3972 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3973 if (non_stop)
3974 {
3975 maybe_restore_traceframe.emplace ();
3976 set_current_traceframe (-1);
3977 }
43ff13b4 3978
873657b9
PA
3979 /* The user/frontend should not notice a thread switch due to
3980 internal events. Make sure we revert to the user selected
3981 thread and frame after handling the event and running any
3982 breakpoint commands. */
3983 scoped_restore_current_thread restore_thread;
d238133d
TT
3984
3985 overlay_cache_invalid = 1;
3986 /* Flush target cache before starting to handle each event. Target
3987 was running and cache could be stale. This is just a heuristic.
3988 Running threads may modify target memory, but we don't get any
3989 event. */
3990 target_dcache_invalidate ();
3991
3992 scoped_restore save_exec_dir
3993 = make_scoped_restore (&execution_direction,
3994 target_execution_direction ());
3995
5b6d1e4f
PA
3996 if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
3997 return;
3998
3999 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
4000
4001 /* Switch to the target that generated the event, so we can do
4002 target calls. Any inferior bound to the target will do, so we
4003 just switch to the first we find. */
4004 for (inferior *inf : all_inferiors (ecs->target))
4005 {
4006 switch_to_inferior_no_thread (inf);
4007 break;
4008 }
d238133d
TT
4009
4010 if (debug_infrun)
5b6d1e4f 4011 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
4012
4013 /* If an error happens while handling the event, propagate GDB's
4014 knowledge of the executing state to the frontend/user running
4015 state. */
4016 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 4017 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 4018
979a0d13 4019 /* These run before the scoped_restore_current_thread above, so they
d238133d
TT
4020 still apply to the thread that threw the exception. */
4021 auto defer_bpstat_clear
4022 = make_scope_exit (bpstat_clear_actions);
4023 auto defer_delete_threads
4024 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4025
4026 /* Now figure out what to do with the result. */
4027 handle_inferior_event (ecs);
4028
4029 if (!ecs->wait_some_more)
4030 {
5b6d1e4f 4031 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
d238133d
TT
4032 int should_stop = 1;
4033 struct thread_info *thr = ecs->event_thread;
d6b48e9c 4034
d238133d 4035 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 4036
d238133d
TT
4037 if (thr != NULL)
4038 {
4039 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 4040
d238133d 4041 if (thread_fsm != NULL)
46e3ed7f 4042 should_stop = thread_fsm->should_stop (thr);
d238133d 4043 }
243a9253 4044
d238133d
TT
4045 if (!should_stop)
4046 {
4047 keep_going (ecs);
4048 }
4049 else
4050 {
46e3ed7f 4051 bool should_notify_stop = true;
d238133d 4052 int proceeded = 0;
1840d81a 4053
d238133d 4054 clean_up_just_stopped_threads_fsms (ecs);
243a9253 4055
d238133d 4056 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 4057 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 4058
d238133d
TT
4059 if (should_notify_stop)
4060 {
4061 /* We may not find an inferior if this was a process exit. */
4062 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
4063 proceeded = normal_stop ();
4064 }
243a9253 4065
d238133d
TT
4066 if (!proceeded)
4067 {
4068 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
4069 cmd_done = 1;
4070 }
873657b9
PA
4071
4072 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4073 previously selected thread is gone. We have two
4074 choices - switch to no thread selected, or restore the
4075 previously selected thread (now exited). We chose the
4076 later, just because that's what GDB used to do. After
4077 this, "info threads" says "The current thread <Thread
4078 ID 2> has terminated." instead of "No thread
4079 selected.". */
4080 if (!non_stop
4081 && cmd_done
4082 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
4083 restore_thread.dont_restore ();
d238133d
TT
4084 }
4085 }
4f8d22e3 4086
d238133d
TT
4087 defer_delete_threads.release ();
4088 defer_bpstat_clear.release ();
29f49a6a 4089
d238133d
TT
4090 /* No error, don't finish the thread states yet. */
4091 finish_state.release ();
731f534f 4092
d238133d
TT
4093 /* This scope is used to ensure that readline callbacks are
4094 reinstalled here. */
4095 }
4f8d22e3 4096
3b12939d
PA
4097 /* If a UI was in sync execution mode, and now isn't, restore its
4098 prompt (a synchronous execution command has finished, and we're
4099 ready for input). */
4100 all_uis_check_sync_execution_done ();
0f641c01
PA
4101
4102 if (cmd_done
0f641c01 4103 && exec_done_display_p
00431a78
PA
4104 && (inferior_ptid == null_ptid
4105 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 4106 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
4107}
4108
29734269
SM
4109/* See infrun.h. */
4110
edb3359d 4111void
29734269
SM
4112set_step_info (thread_info *tp, struct frame_info *frame,
4113 struct symtab_and_line sal)
edb3359d 4114{
29734269
SM
4115 /* This can be removed once this function no longer implicitly relies on the
4116 inferior_ptid value. */
4117 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4118
16c381f0
JK
4119 tp->control.step_frame_id = get_frame_id (frame);
4120 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4121
4122 tp->current_symtab = sal.symtab;
4123 tp->current_line = sal.line;
4124}
4125
0d1e5fa7
PA
4126/* Clear context switchable stepping state. */
4127
4128void
4e1c45ea 4129init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4130{
7f5ef605 4131 tss->stepped_breakpoint = 0;
0d1e5fa7 4132 tss->stepping_over_breakpoint = 0;
963f9c80 4133 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4134 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4135}
4136
ab1ddbcf 4137/* See infrun.h. */
c32c64b7 4138
6efcd9a8 4139void
5b6d1e4f
PA
4140set_last_target_status (process_stratum_target *target, ptid_t ptid,
4141 target_waitstatus status)
c32c64b7 4142{
5b6d1e4f 4143 target_last_proc_target = target;
c32c64b7
DE
4144 target_last_wait_ptid = ptid;
4145 target_last_waitstatus = status;
4146}
4147
ab1ddbcf 4148/* See infrun.h. */
e02bc4cc
DS
4149
4150void
5b6d1e4f
PA
4151get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4152 target_waitstatus *status)
e02bc4cc 4153{
5b6d1e4f
PA
4154 if (target != nullptr)
4155 *target = target_last_proc_target;
ab1ddbcf
PA
4156 if (ptid != nullptr)
4157 *ptid = target_last_wait_ptid;
4158 if (status != nullptr)
4159 *status = target_last_waitstatus;
e02bc4cc
DS
4160}
4161
ab1ddbcf
PA
4162/* See infrun.h. */
4163
ac264b3b
MS
4164void
4165nullify_last_target_wait_ptid (void)
4166{
5b6d1e4f 4167 target_last_proc_target = nullptr;
ac264b3b 4168 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4169 target_last_waitstatus = {};
ac264b3b
MS
4170}
4171
dcf4fbde 4172/* Switch thread contexts. */
dd80620e
MS
4173
4174static void
00431a78 4175context_switch (execution_control_state *ecs)
dd80620e 4176{
00431a78
PA
4177 if (debug_infrun
4178 && ecs->ptid != inferior_ptid
5b6d1e4f
PA
4179 && (inferior_ptid == null_ptid
4180 || ecs->event_thread != inferior_thread ()))
fd48f117
DJ
4181 {
4182 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
a068643d 4183 target_pid_to_str (inferior_ptid).c_str ());
fd48f117 4184 fprintf_unfiltered (gdb_stdlog, "to %s\n",
a068643d 4185 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4186 }
4187
00431a78 4188 switch_to_thread (ecs->event_thread);
dd80620e
MS
4189}
4190
d8dd4d5f
PA
4191/* If the target can't tell whether we've hit breakpoints
4192 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4193 check whether that could have been caused by a breakpoint. If so,
4194 adjust the PC, per gdbarch_decr_pc_after_break. */
4195
4fa8626c 4196static void
d8dd4d5f
PA
4197adjust_pc_after_break (struct thread_info *thread,
4198 struct target_waitstatus *ws)
4fa8626c 4199{
24a73cce
UW
4200 struct regcache *regcache;
4201 struct gdbarch *gdbarch;
118e6252 4202 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4203
4fa8626c
DJ
4204 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4205 we aren't, just return.
9709f61c
DJ
4206
4207 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4208 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4209 implemented by software breakpoints should be handled through the normal
4210 breakpoint layer.
8fb3e588 4211
4fa8626c
DJ
4212 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4213 different signals (SIGILL or SIGEMT for instance), but it is less
4214 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4215 gdbarch_decr_pc_after_break. I don't know any specific target that
4216 generates these signals at breakpoints (the code has been in GDB since at
4217 least 1992) so I cannot guess how to handle them here.
8fb3e588 4218
e6cf7916
UW
4219 In earlier versions of GDB, a target with
4220 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4221 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4222 target with both of these set in GDB history, and it seems unlikely to be
4223 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4224
d8dd4d5f 4225 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4226 return;
4227
d8dd4d5f 4228 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4229 return;
4230
4058b839
PA
4231 /* In reverse execution, when a breakpoint is hit, the instruction
4232 under it has already been de-executed. The reported PC always
4233 points at the breakpoint address, so adjusting it further would
4234 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4235 architecture:
4236
4237 B1 0x08000000 : INSN1
4238 B2 0x08000001 : INSN2
4239 0x08000002 : INSN3
4240 PC -> 0x08000003 : INSN4
4241
4242 Say you're stopped at 0x08000003 as above. Reverse continuing
4243 from that point should hit B2 as below. Reading the PC when the
4244 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4245 been de-executed already.
4246
4247 B1 0x08000000 : INSN1
4248 B2 PC -> 0x08000001 : INSN2
4249 0x08000002 : INSN3
4250 0x08000003 : INSN4
4251
4252 We can't apply the same logic as for forward execution, because
4253 we would wrongly adjust the PC to 0x08000000, since there's a
4254 breakpoint at PC - 1. We'd then report a hit on B1, although
4255 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4256 behaviour. */
4257 if (execution_direction == EXEC_REVERSE)
4258 return;
4259
1cf4d951
PA
4260 /* If the target can tell whether the thread hit a SW breakpoint,
4261 trust it. Targets that can tell also adjust the PC
4262 themselves. */
4263 if (target_supports_stopped_by_sw_breakpoint ())
4264 return;
4265
4266 /* Note that relying on whether a breakpoint is planted in memory to
4267 determine this can fail. E.g., the breakpoint could have been
4268 removed since. Or the thread could have been told to step an
4269 instruction the size of a breakpoint instruction, and only
4270 _after_ was a breakpoint inserted at its address. */
4271
24a73cce
UW
4272 /* If this target does not decrement the PC after breakpoints, then
4273 we have nothing to do. */
00431a78 4274 regcache = get_thread_regcache (thread);
ac7936df 4275 gdbarch = regcache->arch ();
118e6252 4276
527a273a 4277 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4278 if (decr_pc == 0)
24a73cce
UW
4279 return;
4280
8b86c959 4281 const address_space *aspace = regcache->aspace ();
6c95b8df 4282
8aad930b
AC
4283 /* Find the location where (if we've hit a breakpoint) the
4284 breakpoint would be. */
118e6252 4285 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4286
1cf4d951
PA
4287 /* If the target can't tell whether a software breakpoint triggered,
4288 fall back to figuring it out based on breakpoints we think were
4289 inserted in the target, and on whether the thread was stepped or
4290 continued. */
4291
1c5cfe86
PA
4292 /* Check whether there actually is a software breakpoint inserted at
4293 that location.
4294
4295 If in non-stop mode, a race condition is possible where we've
4296 removed a breakpoint, but stop events for that breakpoint were
4297 already queued and arrive later. To suppress those spurious
4298 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4299 and retire them after a number of stop events are reported. Note
4300 this is a heuristic and can thus get confused. The real fix is
4301 to get the "stopped by SW BP and needs adjustment" info out of
4302 the target/kernel (and thus never reach here; see above). */
6c95b8df 4303 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4304 || (target_is_non_stop_p ()
4305 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4306 {
07036511 4307 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4308
8213266a 4309 if (record_full_is_used ())
07036511
TT
4310 restore_operation_disable.emplace
4311 (record_full_gdb_operation_disable_set ());
96429cc8 4312
1c0fdd0e
UW
4313 /* When using hardware single-step, a SIGTRAP is reported for both
4314 a completed single-step and a software breakpoint. Need to
4315 differentiate between the two, as the latter needs adjusting
4316 but the former does not.
4317
4318 The SIGTRAP can be due to a completed hardware single-step only if
4319 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4320 - this thread is currently being stepped
4321
4322 If any of these events did not occur, we must have stopped due
4323 to hitting a software breakpoint, and have to back up to the
4324 breakpoint address.
4325
4326 As a special case, we could have hardware single-stepped a
4327 software breakpoint. In this case (prev_pc == breakpoint_pc),
4328 we also need to back up to the breakpoint address. */
4329
d8dd4d5f
PA
4330 if (thread_has_single_step_breakpoints_set (thread)
4331 || !currently_stepping (thread)
4332 || (thread->stepped_breakpoint
4333 && thread->prev_pc == breakpoint_pc))
515630c5 4334 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4335 }
4fa8626c
DJ
4336}
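/* [Illustrative aside -- not part of infrun.c.]  The heart of the adjustment
   above is plain address arithmetic: on a decr_pc_after_break architecture
   the reported PC sits just past the breakpoint instruction, so the
   breakpoint address is PC - decr_pc, and that is what gets written back
   once we decide the SIGTRAP really came from a software breakpoint.  A
   self-contained sketch with x86-style numbers (decr_pc == 1):  */

#include <cassert>
#include <cstdint>

int
main ()
{
  const uint64_t bp_address = 0x08000000;   /* Where the breakpoint lives.  */
  const uint64_t decr_pc = 1;               /* gdbarch_decr_pc_after_break.  */

  /* After the trap, the target reports the address past the breakpoint.  */
  uint64_t reported_pc = bp_address + decr_pc;

  /* The adjustment recovers the breakpoint address.  */
  uint64_t breakpoint_pc = reported_pc - decr_pc;
  assert (breakpoint_pc == bp_address);
  return 0;
}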
4337
edb3359d
DJ
4338static int
4339stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4340{
4341 for (frame = get_prev_frame (frame);
4342 frame != NULL;
4343 frame = get_prev_frame (frame))
4344 {
4345 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4346 return 1;
4347 if (get_frame_type (frame) != INLINE_FRAME)
4348 break;
4349 }
4350
4351 return 0;
4352}
4353
4a4c04f1
BE
4354/* Look for an inline frame that is marked for skip.
4355 If PREV_FRAME is TRUE start at the previous frame,
4356 otherwise start at the current frame. Stop at the
4357 first non-inline frame, or at the frame where the
4358 step started. */
4359
4360static bool
4361inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4362{
4363 struct frame_info *frame = get_current_frame ();
4364
4365 if (prev_frame)
4366 frame = get_prev_frame (frame);
4367
4368 for (; frame != NULL; frame = get_prev_frame (frame))
4369 {
4370 const char *fn = NULL;
4371 symtab_and_line sal;
4372 struct symbol *sym;
4373
4374 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4375 break;
4376 if (get_frame_type (frame) != INLINE_FRAME)
4377 break;
4378
4379 sal = find_frame_sal (frame);
4380 sym = get_frame_function (frame);
4381
4382 if (sym != NULL)
4383 fn = sym->print_name ();
4384
4385 if (sal.line != 0
4386 && function_name_is_marked_for_skip (fn, sal))
4387 return true;
4388 }
4389
4390 return false;
4391}
4392
c65d6b55
PA
4393/* If the event thread has the stop requested flag set, pretend it
4394 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4395 target_stop). */
4396
4397static bool
4398handle_stop_requested (struct execution_control_state *ecs)
4399{
4400 if (ecs->event_thread->stop_requested)
4401 {
4402 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4403 ecs->ws.value.sig = GDB_SIGNAL_0;
4404 handle_signal_stop (ecs);
4405 return true;
4406 }
4407 return false;
4408}
4409
a96d9b2e
SDJ
4410/* Auxiliary function that handles syscall entry/return events.
4411 It returns 1 if the inferior should keep going (and GDB
4412 should ignore the event), or 0 if the event deserves to be
4413 processed. */
ca2163eb 4414
a96d9b2e 4415static int
ca2163eb 4416handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4417{
ca2163eb 4418 struct regcache *regcache;
ca2163eb
PA
4419 int syscall_number;
4420
00431a78 4421 context_switch (ecs);
ca2163eb 4422
00431a78 4423 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4424 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4425 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4426
a96d9b2e
SDJ
4427 if (catch_syscall_enabled () > 0
4428 && catching_syscall_number (syscall_number) > 0)
4429 {
4430 if (debug_infrun)
4431 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4432 syscall_number);
a96d9b2e 4433
16c381f0 4434 ecs->event_thread->control.stop_bpstat
a01bda52 4435 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4436 ecs->event_thread->suspend.stop_pc,
4437 ecs->event_thread, &ecs->ws);
ab04a2af 4438
c65d6b55
PA
4439 if (handle_stop_requested (ecs))
4440 return 0;
4441
ce12b012 4442 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4443 {
4444 /* Catchpoint hit. */
ca2163eb
PA
4445 return 0;
4446 }
a96d9b2e 4447 }
ca2163eb 4448
c65d6b55
PA
4449 if (handle_stop_requested (ecs))
4450 return 0;
4451
ca2163eb 4452 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4453 keep_going (ecs);
4454 return 1;
a96d9b2e
SDJ
4455}
4456
7e324e48
GB
4457/* Lazily fill in the execution_control_state's stop_func_* fields. */
4458
4459static void
4460fill_in_stop_func (struct gdbarch *gdbarch,
4461 struct execution_control_state *ecs)
4462{
4463 if (!ecs->stop_func_filled_in)
4464 {
98a617f8
KB
4465 const block *block;
4466
7e324e48
GB
4467 /* Don't care about return value; stop_func_start and stop_func_name
4468 will both be 0 if it doesn't work. */
98a617f8
KB
4469 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4470 &ecs->stop_func_name,
4471 &ecs->stop_func_start,
4472 &ecs->stop_func_end,
4473 &block);
4474
4475 /* The call to find_pc_partial_function, above, will set
4476 stop_func_start and stop_func_end to the start and end
4477 of the range containing the stop pc. If this range
4478 contains the entry pc for the block (which is always the
4479 case for contiguous blocks), advance stop_func_start past
4480 the function's start offset and entrypoint. Note that
4481 stop_func_start is NOT advanced when in a range of a
4482 non-contiguous block that does not contain the entry pc. */
4483 if (block != nullptr
4484 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4485 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4486 {
4487 ecs->stop_func_start
4488 += gdbarch_deprecated_function_start_offset (gdbarch);
4489
4490 if (gdbarch_skip_entrypoint_p (gdbarch))
4491 ecs->stop_func_start
4492 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4493 }
591a12a1 4494
7e324e48
GB
4495 ecs->stop_func_filled_in = 1;
4496 }
4497}
4498
4f5d7f63 4499
00431a78 4500/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4501
4502static enum stop_kind
00431a78 4503get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4504{
5b6d1e4f 4505 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4506
4507 gdb_assert (inf != NULL);
4508 return inf->control.stop_soon;
4509}
4510
5b6d1e4f
PA
4511/* Poll for one event out of the current target. Store the resulting
4512 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4513
4514static ptid_t
5b6d1e4f 4515poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4516{
4517 ptid_t event_ptid;
372316f1
PA
4518
4519 overlay_cache_invalid = 1;
4520
4521 /* Flush target cache before starting to handle each event.
4522 Target was running and cache could be stale. This is just a
4523 heuristic. Running threads may modify target memory, but we
4524 don't get any event. */
4525 target_dcache_invalidate ();
4526
4527 if (deprecated_target_wait_hook)
5b6d1e4f 4528 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4529 else
5b6d1e4f 4530 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4531
4532 if (debug_infrun)
5b6d1e4f 4533 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4534
4535 return event_ptid;
4536}
4537
5b6d1e4f
PA
4538/* An event reported by wait_one. */
4539
4540struct wait_one_event
4541{
4542 /* The target the event came out of. */
4543 process_stratum_target *target;
4544
4545 /* The PTID the event was for. */
4546 ptid_t ptid;
4547
4548 /* The waitstatus. */
4549 target_waitstatus ws;
4550};
4551
4552/* Wait for one event out of any target. */
4553
4554static wait_one_event
4555wait_one ()
4556{
4557 while (1)
4558 {
4559 for (inferior *inf : all_inferiors ())
4560 {
4561 process_stratum_target *target = inf->process_target ();
4562 if (target == NULL
4563 || !target->is_async_p ()
4564 || !target->threads_executing)
4565 continue;
4566
4567 switch_to_inferior_no_thread (inf);
4568
4569 wait_one_event event;
4570 event.target = target;
4571 event.ptid = poll_one_curr_target (&event.ws);
4572
4573 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4574 {
4575 /* If nothing is resumed, remove the target from the
4576 event loop. */
4577 target_async (0);
4578 }
4579 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4580 return event;
4581 }
4582
4583 /* Block waiting for some event. */
4584
4585 fd_set readfds;
4586 int nfds = 0;
4587
4588 FD_ZERO (&readfds);
4589
4590 for (inferior *inf : all_inferiors ())
4591 {
4592 process_stratum_target *target = inf->process_target ();
4593 if (target == NULL
4594 || !target->is_async_p ()
4595 || !target->threads_executing)
4596 continue;
4597
4598 int fd = target->async_wait_fd ();
4599 FD_SET (fd, &readfds);
4600 if (nfds <= fd)
4601 nfds = fd + 1;
4602 }
4603
4604 if (nfds == 0)
4605 {
4606 /* No waitable targets left. All must be stopped. */
4607 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4608 }
4609
4610 QUIT;
4611
4612 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4613 if (numfds < 0)
4614 {
4615 if (errno == EINTR)
4616 continue;
4617 else
4618 perror_with_name ("interruptible_select");
4619 }
4620 }
4621}
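/* A minimal, abridged sketch of how a caller consumes wait_one's
   result; stop_all_threads below follows essentially this pattern in
   its stop-and-wait loop:

     wait_one_event event = wait_one ();

     if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED
	 || event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
	 || event.ws.kind == TARGET_WAITKIND_EXITED
	 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
       {
	 // Nothing left resumed, or a whole thread/process went away.
       }
     else
       {
	 thread_info *t = find_thread_ptid (event.target, event.ptid);
	 if (t == NULL)
	   t = add_thread (event.target, event.ptid);
	 // ... record event.ws on T for later processing ...
       }
*/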
4622
372316f1
PA
4623/* Generate a wrapper for target_stopped_by_REASON that works on PTID
4624 instead of the current thread. */
4625#define THREAD_STOPPED_BY(REASON) \
4626static int \
4627thread_stopped_by_ ## REASON (ptid_t ptid) \
4628{ \
2989a365 4629 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
4630 inferior_ptid = ptid; \
4631 \
2989a365 4632 return target_stopped_by_ ## REASON (); \
4633}
4634
4635/* Generate thread_stopped_by_watchpoint. */
4636THREAD_STOPPED_BY (watchpoint)
4637/* Generate thread_stopped_by_sw_breakpoint. */
4638THREAD_STOPPED_BY (sw_breakpoint)
4639/* Generate thread_stopped_by_hw_breakpoint. */
4640THREAD_STOPPED_BY (hw_breakpoint)
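/* For illustration, the first instantiation above expands to roughly
   the following wrapper; the scoped_restore puts inferior_ptid back
   when the wrapper returns:

     static int
     thread_stopped_by_watchpoint (ptid_t ptid)
     {
       scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
       inferior_ptid = ptid;

       return target_stopped_by_watchpoint ();
     }
*/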
4641
372316f1
PA
4642/* Save the thread's event and stop reason to process it later. */
4643
4644static void
5b6d1e4f 4645save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4646{
372316f1
PA
4647 if (debug_infrun)
4648 {
23fdd69e 4649 std::string statstr = target_waitstatus_to_string (ws);
372316f1 4650
372316f1
PA
4651 fprintf_unfiltered (gdb_stdlog,
4652 "infrun: saving status %s for %d.%ld.%ld\n",
23fdd69e 4653 statstr.c_str (),
e99b03dc 4654 tp->ptid.pid (),
e38504b3 4655 tp->ptid.lwp (),
cc6bcb54 4656 tp->ptid.tid ());
372316f1
PA
4657 }
4658
4659 /* Record for later. */
4660 tp->suspend.waitstatus = *ws;
4661 tp->suspend.waitstatus_pending_p = 1;
4662
00431a78 4663 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4664 const address_space *aspace = regcache->aspace ();
372316f1
PA
4665
4666 if (ws->kind == TARGET_WAITKIND_STOPPED
4667 && ws->value.sig == GDB_SIGNAL_TRAP)
4668 {
4669 CORE_ADDR pc = regcache_read_pc (regcache);
4670
4671 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4672
4673 if (thread_stopped_by_watchpoint (tp->ptid))
4674 {
4675 tp->suspend.stop_reason
4676 = TARGET_STOPPED_BY_WATCHPOINT;
4677 }
4678 else if (target_supports_stopped_by_sw_breakpoint ()
4679 && thread_stopped_by_sw_breakpoint (tp->ptid))
4680 {
4681 tp->suspend.stop_reason
4682 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4683 }
4684 else if (target_supports_stopped_by_hw_breakpoint ()
4685 && thread_stopped_by_hw_breakpoint (tp->ptid))
4686 {
4687 tp->suspend.stop_reason
4688 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4689 }
4690 else if (!target_supports_stopped_by_hw_breakpoint ()
4691 && hardware_breakpoint_inserted_here_p (aspace,
4692 pc))
4693 {
4694 tp->suspend.stop_reason
4695 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4696 }
4697 else if (!target_supports_stopped_by_sw_breakpoint ()
4698 && software_breakpoint_inserted_here_p (aspace,
4699 pc))
4700 {
4701 tp->suspend.stop_reason
4702 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4703 }
4704 else if (!thread_has_single_step_breakpoints_set (tp)
4705 && currently_stepping (tp))
4706 {
4707 tp->suspend.stop_reason
4708 = TARGET_STOPPED_BY_SINGLE_STEP;
4709 }
4710 }
4711}
4712
6efcd9a8 4713/* See infrun.h. */
372316f1 4714
6efcd9a8 4715void
372316f1
PA
4716stop_all_threads (void)
4717{
4718 /* We may need multiple passes to discover all threads. */
4719 int pass;
4720 int iterations = 0;
372316f1 4721
53cccef1 4722 gdb_assert (exists_non_stop_target ());
372316f1
PA
4723
4724 if (debug_infrun)
4725 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
4726
00431a78 4727 scoped_restore_current_thread restore_thread;
372316f1 4728
65706a29 4729 target_thread_events (1);
9885e6bb 4730 SCOPE_EXIT { target_thread_events (0); };
65706a29 4731
372316f1
PA
4732 /* Request threads to stop, and then wait for the stops. Because
4733 threads we already know about can spawn more threads while we're
4734 trying to stop them, and we only learn about new threads when we
4735 update the thread list, do this in a loop, and keep iterating
4736 until two passes find no threads that need to be stopped. */
4737 for (pass = 0; pass < 2; pass++, iterations++)
4738 {
4739 if (debug_infrun)
4740 fprintf_unfiltered (gdb_stdlog,
4741 "infrun: stop_all_threads, pass=%d, "
4742 "iterations=%d\n", pass, iterations);
4743 while (1)
4744 {
372316f1 4745 int need_wait = 0;
372316f1
PA
4746
4747 update_thread_list ();
4748
4749 /* Go through all threads looking for threads that we need
4750 to tell the target to stop. */
08036331 4751 for (thread_info *t : all_non_exited_threads ())
372316f1 4752 {
53cccef1
TBA
4753 /* For a single-target setting with an all-stop target,
4754 we would not even arrive here. For a multi-target
4755 setting, until GDB is able to handle a mixture of
4756 all-stop and non-stop targets, simply skip all-stop
4757 targets' threads. This should be fine due to the
4758 protection of 'check_multi_target_resumption'. */
4759
4760 switch_to_thread_no_regs (t);
4761 if (!target_is_non_stop_p ())
4762 continue;
4763
372316f1
PA
4764 if (t->executing)
4765 {
4766 /* If already stopping, don't request a stop again.
4767 We just haven't seen the notification yet. */
4768 if (!t->stop_requested)
4769 {
4770 if (debug_infrun)
4771 fprintf_unfiltered (gdb_stdlog,
4772 "infrun: %s executing, "
4773 "need stop\n",
a068643d 4774 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4775 target_stop (t->ptid);
4776 t->stop_requested = 1;
4777 }
4778 else
4779 {
4780 if (debug_infrun)
4781 fprintf_unfiltered (gdb_stdlog,
4782 "infrun: %s executing, "
4783 "already stopping\n",
a068643d 4784 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4785 }
4786
4787 if (t->stop_requested)
4788 need_wait = 1;
4789 }
4790 else
4791 {
4792 if (debug_infrun)
4793 fprintf_unfiltered (gdb_stdlog,
4794 "infrun: %s not executing\n",
a068643d 4795 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4796
4797 /* The thread may not be executing, but still be
4798 resumed with a pending status to process. */
719546c4 4799 t->resumed = false;
372316f1
PA
4800 }
4801 }
4802
4803 if (!need_wait)
4804 break;
4805
4806 /* If we find new threads on the second iteration, start
4807 over. We want to see two iterations in a row with all
4808 threads stopped. */
4809 if (pass > 0)
4810 pass = -1;
4811
5b6d1e4f
PA
4812 wait_one_event event = wait_one ();
4813
c29705b7 4814 if (debug_infrun)
372316f1 4815 {
c29705b7
PW
4816 fprintf_unfiltered (gdb_stdlog,
4817 "infrun: stop_all_threads %s %s\n",
4818 target_waitstatus_to_string (&event.ws).c_str (),
4819 target_pid_to_str (event.ptid).c_str ());
372316f1 4820 }
372316f1 4821
5b6d1e4f
PA
4822 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED
4823 || event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4824 || event.ws.kind == TARGET_WAITKIND_EXITED
4825 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
4826 {
4827 /* All resumed threads exited
4828 or one thread/process exited/signalled. */
372316f1
PA
4829 }
4830 else
4831 {
5b6d1e4f 4832 thread_info *t = find_thread_ptid (event.target, event.ptid);
372316f1 4833 if (t == NULL)
5b6d1e4f 4834 t = add_thread (event.target, event.ptid);
372316f1
PA
4835
4836 t->stop_requested = 0;
4837 t->executing = 0;
719546c4 4838 t->resumed = false;
372316f1
PA
4839 t->control.may_range_step = 0;
4840
6efcd9a8
PA
4841 /* This may be the first time we see the inferior report
4842 a stop. */
5b6d1e4f 4843 inferior *inf = find_inferior_ptid (event.target, event.ptid);
6efcd9a8
PA
4844 if (inf->needs_setup)
4845 {
4846 switch_to_thread_no_regs (t);
4847 setup_inferior (0);
4848 }
4849
5b6d1e4f
PA
4850 if (event.ws.kind == TARGET_WAITKIND_STOPPED
4851 && event.ws.value.sig == GDB_SIGNAL_0)
4852 {
4853 /* We caught the event that we intended to catch, so
4854 there's no event pending. */
4855 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4856 t->suspend.waitstatus_pending_p = 0;
4857
00431a78 4858 if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
372316f1
PA
4859 {
4860 /* Add it back to the step-over queue. */
4861 if (debug_infrun)
4862 {
4863 fprintf_unfiltered (gdb_stdlog,
4864 "infrun: displaced-step of %s "
4865 "canceled: adding back to the "
4866 "step-over queue\n",
a068643d 4867 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4868 }
4869 t->control.trap_expected = 0;
4870 thread_step_over_chain_enqueue (t);
4871 }
4872 }
4873 else
4874 {
4875 enum gdb_signal sig;
4876 struct regcache *regcache;
372316f1
PA
4877
4878 if (debug_infrun)
4879 {
5b6d1e4f 4880 std::string statstr = target_waitstatus_to_string (&event.ws);
372316f1 4881
372316f1
PA
4882 fprintf_unfiltered (gdb_stdlog,
4883 "infrun: target_wait %s, saving "
4884 "status for %d.%ld.%ld\n",
23fdd69e 4885 statstr.c_str (),
e99b03dc 4886 t->ptid.pid (),
e38504b3 4887 t->ptid.lwp (),
cc6bcb54 4888 t->ptid.tid ());
372316f1
PA
4889 }
4890
4891 /* Record for later. */
5b6d1e4f 4892 save_waitstatus (t, &event.ws);
372316f1 4893
5b6d1e4f
PA
4894 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
4895 ? event.ws.value.sig : GDB_SIGNAL_0);
372316f1 4896
00431a78 4897 if (displaced_step_fixup (t, sig) < 0)
372316f1
PA
4898 {
4899 /* Add it back to the step-over queue. */
4900 t->control.trap_expected = 0;
4901 thread_step_over_chain_enqueue (t);
4902 }
4903
00431a78 4904 regcache = get_thread_regcache (t);
372316f1
PA
4905 t->suspend.stop_pc = regcache_read_pc (regcache);
4906
4907 if (debug_infrun)
4908 {
4909 fprintf_unfiltered (gdb_stdlog,
4910 "infrun: saved stop_pc=%s for %s "
4911 "(currently_stepping=%d)\n",
4912 paddress (target_gdbarch (),
4913 t->suspend.stop_pc),
a068643d 4914 target_pid_to_str (t->ptid).c_str (),
372316f1
PA
4915 currently_stepping (t));
4916 }
4917 }
4918 }
4919 }
4920 }
4921
372316f1
PA
4922 if (debug_infrun)
4923 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
4924}
4925
f4836ba9
PA
4926/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4927
4928static int
4929handle_no_resumed (struct execution_control_state *ecs)
4930{
3b12939d 4931 if (target_can_async_p ())
f4836ba9 4932 {
3b12939d
PA
4933 struct ui *ui;
4934 int any_sync = 0;
f4836ba9 4935
3b12939d
PA
4936 ALL_UIS (ui)
4937 {
4938 if (ui->prompt_state == PROMPT_BLOCKED)
4939 {
4940 any_sync = 1;
4941 break;
4942 }
4943 }
4944 if (!any_sync)
4945 {
4946 /* There were no unwaited-for children left in the target, but
4947 we're not synchronously waiting for events either. Just
4948 ignore. */
4949
4950 if (debug_infrun)
4951 fprintf_unfiltered (gdb_stdlog,
4952 "infrun: TARGET_WAITKIND_NO_RESUMED "
4953 "(ignoring: bg)\n");
4954 prepare_to_wait (ecs);
4955 return 1;
4956 }
f4836ba9
PA
4957 }
4958
4959 /* Otherwise, if we were running a synchronous execution command, we
4960 may need to cancel it and give the user back the terminal.
4961
4962 In non-stop mode, the target can't tell whether we've already
4963 consumed previous stop events, so it can end up sending us a
4964 no-resumed event like so:
4965
4966 #0 - thread 1 is left stopped
4967
4968 #1 - thread 2 is resumed and hits breakpoint
4969 -> TARGET_WAITKIND_STOPPED
4970
4971 #2 - thread 3 is resumed and exits
4972 this is the last resumed thread, so
4973 -> TARGET_WAITKIND_NO_RESUMED
4974
4975 #3 - gdb processes stop for thread 2 and decides to re-resume
4976 it.
4977
4978 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4979 thread 2 is now resumed, so the event should be ignored.
4980
4981 IOW, if the stop for thread 2 doesn't end a foreground command,
4982 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4983 event. But it could be that the event meant that thread 2 itself
4984 (or whatever other thread was the last resumed thread) exited.
4985
4986 To address this we refresh the thread list and check whether we
4987 have resumed threads _now_. In the example above, this removes
4988 thread 3 from the thread list. If thread 2 was re-resumed, we
4989 ignore this event. If we find no thread resumed, then we cancel
4990 the synchronous command and show "no unwaited-for " to the user. */
4991 update_thread_list ();
4992
5b6d1e4f 4993 for (thread_info *thread : all_non_exited_threads (ecs->target))
f4836ba9
PA
4994 {
4995 if (thread->executing
4996 || thread->suspend.waitstatus_pending_p)
4997 {
4998 /* There were no unwaited-for children left in the target at
4999 some point, but there are now. Just ignore. */
5000 if (debug_infrun)
5001 fprintf_unfiltered (gdb_stdlog,
5002 "infrun: TARGET_WAITKIND_NO_RESUMED "
5003 "(ignoring: found resumed)\n");
5004 prepare_to_wait (ecs);
5005 return 1;
5006 }
5007 }
5008
5009 /* Note however that we may find no resumed thread because the whole
5010 process exited meanwhile (thus updating the thread list results
5011 in an empty thread list). In this case we know we'll be getting
5012 a process exit event shortly. */
5b6d1e4f 5013 for (inferior *inf : all_non_exited_inferiors (ecs->target))
f4836ba9 5014 {
08036331 5015 thread_info *thread = any_live_thread_of_inferior (inf);
f4836ba9
PA
5016 if (thread == NULL)
5017 {
5018 if (debug_infrun)
5019 fprintf_unfiltered (gdb_stdlog,
5020 "infrun: TARGET_WAITKIND_NO_RESUMED "
5021 "(expect process exit)\n");
5022 prepare_to_wait (ecs);
5023 return 1;
5024 }
5025 }
5026
5027 /* Go ahead and report the event. */
5028 return 0;
5029}
5030
05ba8510
PA
5031/* Given an execution control state that has been freshly filled in by
5032 an event from the inferior, figure out what it means and take
5033 appropriate action.
5034
5035 The alternatives are:
5036
22bcd14b 5037 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5038 debugger.
5039
5040 2) keep_going and return; to wait for the next event (set
5041 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5042 once). */
c906108c 5043
ec9499be 5044static void
595915c1 5045handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5046{
595915c1
TT
5047 /* Make sure that all temporary struct value objects that were
5048 created during the handling of the event get deleted at the
5049 end. */
5050 scoped_value_mark free_values;
5051
d6b48e9c
PA
5052 enum stop_kind stop_soon;
5053
c29705b7
PW
5054 if (debug_infrun)
5055 fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
5056 target_waitstatus_to_string (&ecs->ws).c_str ());
5057
28736962
PA
5058 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5059 {
5060 /* We had an event in the inferior, but we are not interested in
5061 handling it at this level. The lower layers have already
5062 done what needs to be done, if anything.
5063
5064 One of the possible circumstances for this is when the
5065 inferior produces output for the console. The inferior has
5066 not stopped, and we are ignoring the event. Another possible
5067 circumstance is any event which the lower level knows will be
5068 reported multiple times without an intervening resume. */
28736962
PA
5069 prepare_to_wait (ecs);
5070 return;
5071 }
5072
65706a29
PA
5073 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5074 {
65706a29
PA
5075 prepare_to_wait (ecs);
5076 return;
5077 }
5078
0e5bf2a8 5079 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5080 && handle_no_resumed (ecs))
5081 return;
0e5bf2a8 5082
5b6d1e4f
PA
5083 /* Cache the last target/ptid/waitstatus. */
5084 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5085
ca005067 5086 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5087 stop_stack_dummy = STOP_NONE;
ca005067 5088
0e5bf2a8
PA
5089 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5090 {
5091 /* No unwaited-for children left. IOW, all resumed children
5092 have exited. */
0e5bf2a8 5093 stop_print_frame = 0;
22bcd14b 5094 stop_waiting (ecs);
0e5bf2a8
PA
5095 return;
5096 }
5097
8c90c137 5098 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5099 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5100 {
5b6d1e4f 5101 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5102 /* If it's a new thread, add it to the thread database. */
5103 if (ecs->event_thread == NULL)
5b6d1e4f 5104 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5105
5106 /* Disable range stepping. If the next step request could use a
5107 range, this will end up re-enabled then. */
5108 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5109 }
88ed393a
JK
5110
5111 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5112 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5113
5114 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5115 reinit_frame_cache ();
5116
28736962
PA
5117 breakpoint_retire_moribund ();
5118
2b009048
DJ
5119 /* First, distinguish signals caused by the debugger from signals
5120 that have to do with the program's own actions. Note that
5121 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5122 on the operating system version. Here we detect when a SIGILL or
5123 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5124 something similar for SIGSEGV, since a SIGSEGV will be generated
5125 when we're trying to execute a breakpoint instruction on a
5126 non-executable stack. This happens for call dummy breakpoints
5127 for architectures like SPARC that place call dummies on the
5128 stack. */
2b009048 5129 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
5130 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5131 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5132 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5133 {
00431a78 5134 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5135
a01bda52 5136 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5137 regcache_read_pc (regcache)))
5138 {
5139 if (debug_infrun)
5140 fprintf_unfiltered (gdb_stdlog,
5141 "infrun: Treating signal as SIGTRAP\n");
a493e3e2 5142 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5143 }
2b009048
DJ
5144 }
5145
28736962
PA
5146 /* Mark the non-executing threads accordingly. In all-stop, all
5147 threads of all processes are stopped when we get any event
e1316e60 5148 reported. In non-stop mode, only the event thread stops. */
372316f1
PA
5149 {
5150 ptid_t mark_ptid;
5151
fbea99ea 5152 if (!target_is_non_stop_p ())
372316f1
PA
5153 mark_ptid = minus_one_ptid;
5154 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
5155 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
5156 {
5157 /* If we're handling a process exit in non-stop mode, even
5158 though threads haven't been deleted yet, one would think
5159 that there is nothing to do, as threads of the dead process
5160 will be soon deleted, and threads of any other process were
5161 left running. However, on some targets, threads survive a
5162 process exit event. E.g., for the "checkpoint" command,
5163 when the current checkpoint/fork exits, linux-fork.c
5164 automatically switches to another fork from within
5165 target_mourn_inferior, by associating the same
5166 inferior/thread to another fork. We haven't mourned yet at
5167 this point, but we must mark any threads left in the
5168 process as not-executing so that finish_thread_state marks
5169 them stopped (in the user's perspective) if/when we present
5170 the stop to the user. */
e99b03dc 5171 mark_ptid = ptid_t (ecs->ptid.pid ());
372316f1
PA
5172 }
5173 else
5174 mark_ptid = ecs->ptid;
5175
719546c4 5176 set_executing (ecs->target, mark_ptid, false);
372316f1
PA
5177
5178 /* Likewise the resumed flag. */
719546c4 5179 set_resumed (ecs->target, mark_ptid, false);
372316f1 5180 }
8c90c137 5181
488f131b
JB
5182 switch (ecs->ws.kind)
5183 {
5184 case TARGET_WAITKIND_LOADED:
00431a78 5185 context_switch (ecs);
b0f4b84b
DJ
5186 /* Ignore gracefully during startup of the inferior, as it might
5187 be the shell which has just loaded some objects; otherwise,
5188 add the symbols for the newly loaded objects. Also ignore at
5189 the beginning of an attach or remote session; we will query
5190 the full list of libraries once the connection is
5191 established. */
4f5d7f63 5192
00431a78 5193 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 5194 if (stop_soon == NO_STOP_QUIETLY)
488f131b 5195 {
edcc5120
TT
5196 struct regcache *regcache;
5197
00431a78 5198 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
5199
5200 handle_solib_event ();
5201
5202 ecs->event_thread->control.stop_bpstat
a01bda52 5203 = bpstat_stop_status (regcache->aspace (),
5204 ecs->event_thread->suspend.stop_pc,
5205 ecs->event_thread, &ecs->ws);
ab04a2af 5206
c65d6b55
PA
5207 if (handle_stop_requested (ecs))
5208 return;
5209
ce12b012 5210 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
5211 {
5212 /* A catchpoint triggered. */
94c57d6a
PA
5213 process_event_stop_test (ecs);
5214 return;
edcc5120 5215 }
488f131b 5216
b0f4b84b
DJ
5217 /* If requested, stop when the dynamic linker notifies
5218 gdb of events. This allows the user to get control
5219 and place breakpoints in initializer routines for
5220 dynamically loaded objects (among other things). */
a493e3e2 5221 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
5222 if (stop_on_solib_events)
5223 {
55409f9d
DJ
5224 /* Make sure we print "Stopped due to solib-event" in
5225 normal_stop. */
5226 stop_print_frame = 1;
5227
22bcd14b 5228 stop_waiting (ecs);
b0f4b84b
DJ
5229 return;
5230 }
488f131b 5231 }
b0f4b84b
DJ
5232
5233 /* If we are skipping through a shell, or through shared library
5234 loading that we aren't interested in, resume the program. If
5c09a2c5 5235 we're running the program normally, also resume. */
b0f4b84b
DJ
5236 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5237 {
74960c60
VP
5238 /* Loading of shared libraries might have changed breakpoint
5239 addresses. Make sure new breakpoints are inserted. */
a25a5a45 5240 if (stop_soon == NO_STOP_QUIETLY)
74960c60 5241 insert_breakpoints ();
64ce06e4 5242 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
5243 prepare_to_wait (ecs);
5244 return;
5245 }
5246
5c09a2c5
PA
5247 /* But stop if we're attaching or setting up a remote
5248 connection. */
5249 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5250 || stop_soon == STOP_QUIETLY_REMOTE)
5251 {
5252 if (debug_infrun)
5253 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
22bcd14b 5254 stop_waiting (ecs);
5c09a2c5
PA
5255 return;
5256 }
5257
5258 internal_error (__FILE__, __LINE__,
5259 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 5260
488f131b 5261 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5262 if (handle_stop_requested (ecs))
5263 return;
00431a78 5264 context_switch (ecs);
64ce06e4 5265 resume (GDB_SIGNAL_0);
488f131b
JB
5266 prepare_to_wait (ecs);
5267 return;
c5aa993b 5268
65706a29 5269 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5270 if (handle_stop_requested (ecs))
5271 return;
00431a78 5272 context_switch (ecs);
65706a29
PA
5273 if (!switch_back_to_stepped_thread (ecs))
5274 keep_going (ecs);
5275 return;
5276
488f131b 5277 case TARGET_WAITKIND_EXITED:
940c3c06 5278 case TARGET_WAITKIND_SIGNALLED:
fb66883a 5279 inferior_ptid = ecs->ptid;
5b6d1e4f 5280 set_current_inferior (find_inferior_ptid (ecs->target, ecs->ptid));
6c95b8df
PA
5281 set_current_program_space (current_inferior ()->pspace);
5282 handle_vfork_child_exec_or_exit (0);
223ffa71 5283 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5284
0c557179
SDJ
5285 /* Clear any previous state of convenience variables. */
5286 clear_exit_convenience_vars ();
5287
940c3c06
PA
5288 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5289 {
5290 /* Record the exit code in the convenience variable $_exitcode, so
5291 that the user can inspect this again later. */
5292 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5293 (LONGEST) ecs->ws.value.integer);
5294
5295 /* Also record this in the inferior itself. */
5296 current_inferior ()->has_exit_code = 1;
5297 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5298
98eb56a4
PA
5299 /* Support the --return-child-result option. */
5300 return_child_result_value = ecs->ws.value.integer;
5301
76727919 5302 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5303 }
5304 else
0c557179 5305 {
00431a78 5306 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5307
5308 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5309 {
5310 /* Set the value of the internal variable $_exitsignal,
5311 which holds the signal uncaught by the inferior. */
5312 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5313 gdbarch_gdb_signal_to_target (gdbarch,
5314 ecs->ws.value.sig));
5315 }
5316 else
5317 {
5318 /* We don't have access to the target's method used for
5319 converting between signal numbers (GDB's internal
5320 representation <-> target's representation).
5321 Therefore, we cannot do a good job at displaying this
5322 information to the user. It's better to just warn
5323 her about it (if infrun debugging is enabled), and
5324 give up. */
5325 if (debug_infrun)
5326 fprintf_filtered (gdb_stdlog, _("\
5327Cannot fill $_exitsignal with the correct signal number.\n"));
5328 }
5329
76727919 5330 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5331 }
8cf64490 5332
488f131b 5333 gdb_flush (gdb_stdout);
bc1e6c81 5334 target_mourn_inferior (inferior_ptid);
488f131b 5335 stop_print_frame = 0;
22bcd14b 5336 stop_waiting (ecs);
488f131b 5337 return;
c5aa993b 5338
488f131b 5339 case TARGET_WAITKIND_FORKED:
deb3b17b 5340 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5341 /* Check whether the inferior is displaced stepping. */
5342 {
00431a78 5343 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5344 struct gdbarch *gdbarch = regcache->arch ();
e2d96639
YQ
5345
5346 /* If displaced stepping is supported, check whether thread
5347 ecs->ptid is in the middle of a displaced step. */
00431a78 5348 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639
YQ
5349 {
5350 struct inferior *parent_inf
5b6d1e4f 5351 = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639
YQ
5352 struct regcache *child_regcache;
5353 CORE_ADDR parent_pc;
5354
d8d83535
SM
5355 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
5356 {
5357 struct displaced_step_inferior_state *displaced
5358 = get_displaced_stepping_state (parent_inf);
5359
5360 /* Restore scratch pad for child process. */
5361 displaced_step_restore (displaced, ecs->ws.value.related_pid);
5362 }
5363
e2d96639
YQ
5364 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5365 indicating that the displaced stepping of syscall instruction
5366 has been done. Perform cleanup for parent process here. Note
5367 that this operation also cleans up the child process for vfork,
5368 because their pages are shared. */
00431a78 5369 displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5370 /* Start a new step-over in another thread if there's one
5371 that needs it. */
5372 start_step_over ();
e2d96639 5373
e2d96639
YQ
5374 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5375 the child's PC is also within the scratchpad. Set the child's PC
5376 to the parent's PC value, which has already been fixed up.
5377 FIXME: we use the parent's aspace here, although we're touching
5378 the child, because the child hasn't been added to the inferior
5379 list yet at this point. */
5380
5381 child_regcache
5382 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5383 ecs->ws.value.related_pid,
5384 gdbarch,
5385 parent_inf->aspace);
5386 /* Read PC value of parent process. */
5387 parent_pc = regcache_read_pc (regcache);
5388
5389 if (debug_displaced)
5390 fprintf_unfiltered (gdb_stdlog,
5391 "displaced: write child pc from %s to %s\n",
5392 paddress (gdbarch,
5393 regcache_read_pc (child_regcache)),
5394 paddress (gdbarch, parent_pc));
5395
5396 regcache_write_pc (child_regcache, parent_pc);
5397 }
5398 }
5399
00431a78 5400 context_switch (ecs);
5a2901d9 5401
b242c3c2
PA
5402 /* Immediately detach breakpoints from the child before there's
5403 any chance of letting the user delete breakpoints from the
5404 breakpoint lists. If we don't do this early, it's easy to
5405 leave left over traps in the child, vis: "break foo; catch
5406 fork; c; <fork>; del; c; <child calls foo>". We only follow
5407 the fork on the last `continue', and by that time the
5408 breakpoint at "foo" is long gone from the breakpoint table.
5409 If we vforked, then we don't need to unpatch here, since both
5410 parent and child are sharing the same memory pages; we'll
5411 need to unpatch at follow/detach time instead to be certain
5412 that new breakpoints added between catchpoint hit time and
5413 vfork follow are detached. */
5414 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5415 {
b242c3c2
PA
5416 /* This won't actually modify the breakpoint list, but will
5417 physically remove the breakpoints from the child. */
d80ee84f 5418 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5419 }
5420
34b7e8a6 5421 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5422
e58b0e63
PA
5423 /* In case the event is caught by a catchpoint, remember that
5424 the event is to be followed at the next resume of the thread,
5425 and not immediately. */
5426 ecs->event_thread->pending_follow = ecs->ws;
5427
f2ffa92b
PA
5428 ecs->event_thread->suspend.stop_pc
5429 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5430
16c381f0 5431 ecs->event_thread->control.stop_bpstat
a01bda52 5432 = bpstat_stop_status (get_current_regcache ()->aspace (),
5433 ecs->event_thread->suspend.stop_pc,
5434 ecs->event_thread, &ecs->ws);
675bf4cb 5435
c65d6b55
PA
5436 if (handle_stop_requested (ecs))
5437 return;
5438
ce12b012
PA
5439 /* If no catchpoint triggered for this, then keep going. Note
5440 that we're interested in knowing whether the bpstat actually causes a
5441 stop, not just if it may explain the signal. Software
5442 watchpoints, for example, always appear in the bpstat. */
5443 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5444 {
5ab2fbf1 5445 bool follow_child
3e43a32a 5446 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5447
a493e3e2 5448 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5449
5b6d1e4f
PA
5450 process_stratum_target *targ
5451 = ecs->event_thread->inf->process_target ();
5452
5ab2fbf1 5453 bool should_resume = follow_fork ();
e58b0e63 5454
5b6d1e4f
PA
5455 /* Note that one of these may be an invalid pointer,
5456 depending on detach_fork. */
00431a78 5457 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5458 thread_info *child
5459 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5460
a2077e25
PA
5461 /* At this point, the parent is marked running, and the
5462 child is marked stopped. */
5463
5464 /* If not resuming the parent, mark it stopped. */
5465 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5466 parent->set_running (false);
a2077e25
PA
5467
5468 /* If resuming the child, mark it running. */
5469 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5470 child->set_running (true);
a2077e25 5471
6c95b8df 5472 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5473 if (!detach_fork && (non_stop
5474 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5475 {
5476 if (follow_child)
5477 switch_to_thread (parent);
5478 else
5479 switch_to_thread (child);
5480
5481 ecs->event_thread = inferior_thread ();
5482 ecs->ptid = inferior_ptid;
5483 keep_going (ecs);
5484 }
5485
5486 if (follow_child)
5487 switch_to_thread (child);
5488 else
5489 switch_to_thread (parent);
5490
e58b0e63
PA
5491 ecs->event_thread = inferior_thread ();
5492 ecs->ptid = inferior_ptid;
5493
5494 if (should_resume)
5495 keep_going (ecs);
5496 else
22bcd14b 5497 stop_waiting (ecs);
04e68871
DJ
5498 return;
5499 }
94c57d6a
PA
5500 process_event_stop_test (ecs);
5501 return;
488f131b 5502
6c95b8df
PA
5503 case TARGET_WAITKIND_VFORK_DONE:
5504 /* Done with the shared memory region. Re-insert breakpoints in
5505 the parent, and keep going. */
5506
00431a78 5507 context_switch (ecs);
6c95b8df
PA
5508
5509 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5510 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5511
5512 if (handle_stop_requested (ecs))
5513 return;
5514
6c95b8df
PA
5515 /* This also takes care of reinserting breakpoints in the
5516 previously locked inferior. */
5517 keep_going (ecs);
5518 return;
5519
488f131b 5520 case TARGET_WAITKIND_EXECD:
488f131b 5521
cbd2b4e3
PA
5522 /* Note we can't read registers yet (the stop_pc), because we
5523 don't yet know the inferior's post-exec architecture.
5524 'stop_pc' is explicitly read below instead. */
00431a78 5525 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5526
6c95b8df
PA
5527 /* Do whatever is necessary to the parent branch of the vfork. */
5528 handle_vfork_child_exec_or_exit (1);
5529
795e548f
PA
5530 /* This causes the eventpoints and symbol table to be reset.
5531 Must do this now, before trying to determine whether to
5532 stop. */
71b43ef8 5533 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5534
17d8546e
DB
5535 /* In follow_exec we may have deleted the original thread and
5536 created a new one. Make sure that the event thread is the
5537 execd thread for that case (this is a nop otherwise). */
5538 ecs->event_thread = inferior_thread ();
5539
f2ffa92b
PA
5540 ecs->event_thread->suspend.stop_pc
5541 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5542
16c381f0 5543 ecs->event_thread->control.stop_bpstat
a01bda52 5544 = bpstat_stop_status (get_current_regcache ()->aspace (),
5545 ecs->event_thread->suspend.stop_pc,
5546 ecs->event_thread, &ecs->ws);
795e548f 5547
71b43ef8
PA
5548 /* Note that this may be referenced from inside
5549 bpstat_stop_status above, through inferior_has_execd. */
5550 xfree (ecs->ws.value.execd_pathname);
5551 ecs->ws.value.execd_pathname = NULL;
5552
c65d6b55
PA
5553 if (handle_stop_requested (ecs))
5554 return;
5555
04e68871 5556 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5557 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5558 {
a493e3e2 5559 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5560 keep_going (ecs);
5561 return;
5562 }
94c57d6a
PA
5563 process_event_stop_test (ecs);
5564 return;
488f131b 5565
b4dc5ffa
MK
5566 /* Be careful not to try to gather much state about a thread
5567 that's in a syscall. It's frequently a losing proposition. */
488f131b 5568 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5569 /* Getting the current syscall number. */
94c57d6a
PA
5570 if (handle_syscall_event (ecs) == 0)
5571 process_event_stop_test (ecs);
5572 return;
c906108c 5573
488f131b
JB
5574 /* Before examining the threads further, step this thread to
5575 get it entirely out of the syscall. (We get notice of the
5576 event when the thread is just on the verge of exiting a
5577 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5578 into user code.) */
488f131b 5579 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5580 if (handle_syscall_event (ecs) == 0)
5581 process_event_stop_test (ecs);
5582 return;
c906108c 5583
488f131b 5584 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5585 handle_signal_stop (ecs);
5586 return;
c906108c 5587
b2175913
MS
5588 case TARGET_WAITKIND_NO_HISTORY:
5589 /* Reverse execution: target ran out of history info. */
eab402df 5590
d1988021 5591 /* Switch to the stopped thread. */
00431a78 5592 context_switch (ecs);
d1988021
MM
5593 if (debug_infrun)
5594 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5595
34b7e8a6 5596 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5597 ecs->event_thread->suspend.stop_pc
5598 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5599
5600 if (handle_stop_requested (ecs))
5601 return;
5602
76727919 5603 gdb::observers::no_history.notify ();
22bcd14b 5604 stop_waiting (ecs);
b2175913 5605 return;
488f131b 5606 }
4f5d7f63
PA
5607}
5608
372316f1
PA
5609/* Restart threads back to what they were trying to do when we
5610 paused them for an in-line step-over. The EVENT_THREAD thread is
5611 ignored. */
4d9d9d04
PA
5612
5613static void
372316f1
PA
5614restart_threads (struct thread_info *event_thread)
5615{
372316f1
PA
5616 /* In case the instruction just stepped spawned a new thread. */
5617 update_thread_list ();
5618
08036331 5619 for (thread_info *tp : all_non_exited_threads ())
372316f1 5620 {
f3f8ece4
PA
5621 switch_to_thread_no_regs (tp);
5622
372316f1
PA
5623 if (tp == event_thread)
5624 {
5625 if (debug_infrun)
5626 fprintf_unfiltered (gdb_stdlog,
5627 "infrun: restart threads: "
5628 "[%s] is event thread\n",
a068643d 5629 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5630 continue;
5631 }
5632
5633 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5634 {
5635 if (debug_infrun)
5636 fprintf_unfiltered (gdb_stdlog,
5637 "infrun: restart threads: "
5638 "[%s] not meant to be running\n",
a068643d 5639 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5640 continue;
5641 }
5642
5643 if (tp->resumed)
5644 {
5645 if (debug_infrun)
5646 fprintf_unfiltered (gdb_stdlog,
5647 "infrun: restart threads: [%s] resumed\n",
a068643d 5648 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5649 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5650 continue;
5651 }
5652
5653 if (thread_is_in_step_over_chain (tp))
5654 {
5655 if (debug_infrun)
5656 fprintf_unfiltered (gdb_stdlog,
5657 "infrun: restart threads: "
5658 "[%s] needs step-over\n",
a068643d 5659 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5660 gdb_assert (!tp->resumed);
5661 continue;
5662 }
5663
5664
5665 if (tp->suspend.waitstatus_pending_p)
5666 {
5667 if (debug_infrun)
5668 fprintf_unfiltered (gdb_stdlog,
5669 "infrun: restart threads: "
5670 "[%s] has pending status\n",
a068643d 5671 target_pid_to_str (tp->ptid).c_str ());
719546c4 5672 tp->resumed = true;
372316f1
PA
5673 continue;
5674 }
5675
c65d6b55
PA
5676 gdb_assert (!tp->stop_requested);
5677
372316f1
PA
5678 /* If some thread needs to start a step-over at this point, it
5679 should still be in the step-over queue, and thus skipped
5680 above. */
5681 if (thread_still_needs_step_over (tp))
5682 {
5683 internal_error (__FILE__, __LINE__,
5684 "thread [%s] needs a step-over, but not in "
5685 "step-over queue\n",
a068643d 5686 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5687 }
5688
5689 if (currently_stepping (tp))
5690 {
5691 if (debug_infrun)
5692 fprintf_unfiltered (gdb_stdlog,
5693 "infrun: restart threads: [%s] was stepping\n",
a068643d 5694 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5695 keep_going_stepped_thread (tp);
5696 }
5697 else
5698 {
5699 struct execution_control_state ecss;
5700 struct execution_control_state *ecs = &ecss;
5701
5702 if (debug_infrun)
5703 fprintf_unfiltered (gdb_stdlog,
5704 "infrun: restart threads: [%s] continuing\n",
a068643d 5705 target_pid_to_str (tp->ptid).c_str ());
372316f1 5706 reset_ecs (ecs, tp);
00431a78 5707 switch_to_thread (tp);
372316f1
PA
5708 keep_going_pass_signal (ecs);
5709 }
5710 }
5711}
5712
5713/* Callback for iterate_over_threads. Find a resumed thread that has
5714 a pending waitstatus. */
5715
5716static int
5717resumed_thread_with_pending_status (struct thread_info *tp,
5718 void *arg)
5719{
5720 return (tp->resumed
5721 && tp->suspend.waitstatus_pending_p);
5722}
5723
5724/* Called when we get an event that may finish an in-line or
5725 out-of-line (displaced stepping) step-over started previously.
5726 Return true if the event is processed and we should go back to the
5727 event loop; false if the caller should continue processing the
5728 event. */
5729
5730static int
4d9d9d04
PA
5731finish_step_over (struct execution_control_state *ecs)
5732{
372316f1
PA
5733 int had_step_over_info;
5734
00431a78 5735 displaced_step_fixup (ecs->event_thread,
5736 ecs->event_thread->suspend.stop_signal);
5737
372316f1
PA
5738 had_step_over_info = step_over_info_valid_p ();
5739
5740 if (had_step_over_info)
4d9d9d04
PA
5741 {
5742 /* If we're stepping over a breakpoint with all threads locked,
5743 then only the thread that was stepped should be reporting
5744 back an event. */
5745 gdb_assert (ecs->event_thread->control.trap_expected);
5746
c65d6b55 5747 clear_step_over_info ();
4d9d9d04
PA
5748 }
5749
fbea99ea 5750 if (!target_is_non_stop_p ())
372316f1 5751 return 0;
4d9d9d04
PA
5752
5753 /* Start a new step-over in another thread if there's one that
5754 needs it. */
5755 start_step_over ();
372316f1
PA
5756
5757 /* If we were stepping over a breakpoint before, and haven't started
5758 a new in-line step-over sequence, then restart all other threads
5759 (except the event thread). We can't do this in all-stop, as then
5760 e.g., we wouldn't be able to issue any other remote packet until
5761 these other threads stop. */
5762 if (had_step_over_info && !step_over_info_valid_p ())
5763 {
5764 struct thread_info *pending;
5765
5766 /* If we only have threads with pending statuses, the restart
5767 below won't restart any thread and so nothing re-inserts the
5768 breakpoint we just stepped over. But we need it inserted
5769 when we later process the pending events, otherwise if
5770 another thread has a pending event for this breakpoint too,
5771 we'd discard its event (because the breakpoint that
5772 originally caused the event was no longer inserted). */
00431a78 5773 context_switch (ecs);
372316f1
PA
5774 insert_breakpoints ();
5775
5776 restart_threads (ecs->event_thread);
5777
5778 /* If we have events pending, go through handle_inferior_event
5779 again, picking up a pending event at random. This avoids
5780 thread starvation. */
5781
5782 /* But not if we just stepped over a watchpoint in order to let
5783 the instruction execute so we can evaluate its expression.
5784 The set of watchpoints that triggered is recorded in the
5785 breakpoint objects themselves (see bp->watchpoint_triggered).
5786 If we processed another event first, that other event could
5787 clobber this info. */
5788 if (ecs->event_thread->stepping_over_watchpoint)
5789 return 0;
5790
5791 pending = iterate_over_threads (resumed_thread_with_pending_status,
5792 NULL);
5793 if (pending != NULL)
5794 {
5795 struct thread_info *tp = ecs->event_thread;
5796 struct regcache *regcache;
5797
5798 if (debug_infrun)
5799 {
5800 fprintf_unfiltered (gdb_stdlog,
5801 "infrun: found resumed threads with "
5802 "pending events, saving status\n");
5803 }
5804
5805 gdb_assert (pending != tp);
5806
5807 /* Record the event thread's event for later. */
5808 save_waitstatus (tp, &ecs->ws);
5809 /* This was cleared early, by handle_inferior_event. Set it
5810 so this pending event is considered by
5811 do_target_wait. */
719546c4 5812 tp->resumed = true;
372316f1
PA
5813
5814 gdb_assert (!tp->executing);
5815
00431a78 5816 regcache = get_thread_regcache (tp);
372316f1
PA
5817 tp->suspend.stop_pc = regcache_read_pc (regcache);
5818
5819 if (debug_infrun)
5820 {
5821 fprintf_unfiltered (gdb_stdlog,
5822 "infrun: saved stop_pc=%s for %s "
5823 "(currently_stepping=%d)\n",
5824 paddress (target_gdbarch (),
5825 tp->suspend.stop_pc),
a068643d 5826 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
5827 currently_stepping (tp));
5828 }
5829
5830 /* This in-line step-over finished; clear this so we won't
5831 start a new one. This is what handle_signal_stop would
5832 do, if we returned false. */
5833 tp->stepping_over_breakpoint = 0;
5834
5835 /* Wake up the event loop again. */
5836 mark_async_event_handler (infrun_async_inferior_event_token);
5837
5838 prepare_to_wait (ecs);
5839 return 1;
5840 }
5841 }
5842
5843 return 0;
4d9d9d04
PA
5844}
5845
4f5d7f63
PA
5846/* Come here when the program has stopped with a signal. */
5847
5848static void
5849handle_signal_stop (struct execution_control_state *ecs)
5850{
5851 struct frame_info *frame;
5852 struct gdbarch *gdbarch;
5853 int stopped_by_watchpoint;
5854 enum stop_kind stop_soon;
5855 int random_signal;
c906108c 5856
f0407826
DE
5857 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5858
c65d6b55
PA
5859 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5860
f0407826
DE
5861 /* Do we need to clean up the state of a thread that has
5862 completed a displaced single-step? (Doing so usually affects
5863 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5864 if (finish_step_over (ecs))
5865 return;
f0407826
DE
5866
5867 /* If we either finished a single-step or hit a breakpoint, but
5868 the user wanted this thread to be stopped, pretend we got a
5869 SIG0 (generic unsignaled stop). */
5870 if (ecs->event_thread->stop_requested
5871 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5872 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5873
f2ffa92b
PA
5874 ecs->event_thread->suspend.stop_pc
5875 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5876
527159b7 5877 if (debug_infrun)
237fc4c9 5878 {
00431a78 5879 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5880 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5881
f3f8ece4 5882 switch_to_thread (ecs->event_thread);
5af949e3
UW
5883
5884 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
b926417a 5885 paddress (reg_gdbarch,
f2ffa92b 5886 ecs->event_thread->suspend.stop_pc));
d92524f1 5887 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5888 {
5889 CORE_ADDR addr;
abbb1732 5890
237fc4c9
PA
5891 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5892
8b88a78e 5893 if (target_stopped_data_address (current_top_target (), &addr))
237fc4c9 5894 fprintf_unfiltered (gdb_stdlog,
5af949e3 5895 "infrun: stopped data address = %s\n",
b926417a 5896 paddress (reg_gdbarch, addr));
237fc4c9
PA
5897 else
5898 fprintf_unfiltered (gdb_stdlog,
5899 "infrun: (no data address available)\n");
5900 }
5901 }
527159b7 5902
36fa8042
PA
5903 /* This originates from start_remote(), start_inferior() and
5904 shared library hook functions. */
00431a78 5905 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5906 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5907 {
00431a78 5908 context_switch (ecs);
36fa8042
PA
5909 if (debug_infrun)
5910 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5911 stop_print_frame = 1;
22bcd14b 5912 stop_waiting (ecs);
36fa8042
PA
5913 return;
5914 }
5915
36fa8042
PA
5916 /* This originates from attach_command(). We need to overwrite
5917 the stop_signal here, because some kernels don't ignore a
5918 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5919 See more comments in inferior.h. On the other hand, if we
5920 get a non-SIGSTOP, report it to the user - assume the backend
5921 will handle the SIGSTOP if it should show up later.
5922
5923 Also consider that the attach is complete when we see a
5924 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5925 target extended-remote report it instead of a SIGSTOP
5926 (e.g. gdbserver). We already rely on SIGTRAP being our
5927 signal, so this is no exception.
5928
5929 Also consider that the attach is complete when we see a
5930 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5931 the target to stop all threads of the inferior, in case the
5932 low level attach operation doesn't stop them implicitly. If
5933 they weren't stopped implicitly, then the stub will report a
5934 GDB_SIGNAL_0, meaning: stopped for no particular reason
5935 other than GDB's request. */
5936 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5937 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5938 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5939 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5940 {
5941 stop_print_frame = 1;
22bcd14b 5942 stop_waiting (ecs);
36fa8042
PA
5943 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5944 return;
5945 }
5946
488f131b 5947 /* See if something interesting happened to the non-current thread. If
b40c7d58 5948 so, then switch to that thread. */
d7e15655 5949 if (ecs->ptid != inferior_ptid)
488f131b 5950 {
527159b7 5951 if (debug_infrun)
8a9de0e4 5952 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
527159b7 5953
00431a78 5954 context_switch (ecs);
c5aa993b 5955
9a4105ab 5956 if (deprecated_context_hook)
00431a78 5957 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5958 }
c906108c 5959
568d6575
UW
5960 /* At this point, get hold of the now-current thread's frame. */
5961 frame = get_current_frame ();
5962 gdbarch = get_frame_arch (frame);
5963
2adfaa28 5964 /* Pull the single step breakpoints out of the target. */
af48d08f 5965 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5966 {
af48d08f 5967 struct regcache *regcache;
af48d08f 5968 CORE_ADDR pc;
2adfaa28 5969
00431a78 5970 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5971 const address_space *aspace = regcache->aspace ();
5972
af48d08f 5973 pc = regcache_read_pc (regcache);
34b7e8a6 5974
af48d08f
PA
5975 /* However, before doing so, if this single-step breakpoint was
5976 actually for another thread, set this thread up for moving
5977 past it. */
5978 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5979 aspace, pc))
5980 {
5981 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28
PA
5982 {
5983 if (debug_infrun)
5984 {
5985 fprintf_unfiltered (gdb_stdlog,
af48d08f 5986 "infrun: [%s] hit another thread's "
34b7e8a6 5987 "single-step breakpoint\n",
a068643d 5988 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5989 }
af48d08f
PA
5990 ecs->hit_singlestep_breakpoint = 1;
5991 }
5992 }
5993 else
5994 {
5995 if (debug_infrun)
5996 {
5997 fprintf_unfiltered (gdb_stdlog,
5998 "infrun: [%s] hit its "
5999 "single-step breakpoint\n",
a068643d 6000 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28
PA
6001 }
6002 }
488f131b 6003 }
af48d08f 6004 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 6005
963f9c80
PA
6006 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6007 && ecs->event_thread->control.trap_expected
6008 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
6009 stopped_by_watchpoint = 0;
6010 else
6011 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
6012
6013 /* If necessary, step over this watchpoint. We'll be back to display
6014 it in a moment. */
6015 if (stopped_by_watchpoint
d92524f1 6016 && (target_have_steppable_watchpoint
568d6575 6017 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 6018 {
488f131b
JB
6019 /* At this point, we are stopped at an instruction which has
6020 attempted to write to a piece of memory under control of
6021 a watchpoint. The instruction hasn't actually executed
6022 yet. If we were to evaluate the watchpoint expression
6023 now, we would get the old value, and therefore no change
6024 would seem to have occurred.
6025
6026 In order to make watchpoints work `right', we really need
6027 to complete the memory write, and then evaluate the
d983da9c
DJ
6028 watchpoint expression. We do this by single-stepping the
6029 target.
6030
7f89fd65 6031 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
6032 it. For example, the PA can (with some kernel cooperation)
6033 single step over a watchpoint without disabling the watchpoint.
6034
6035 It is far more common to need to disable a watchpoint to step
6036 the inferior over it. If we have non-steppable watchpoints,
6037 we must disable the current watchpoint; it's simplest to
963f9c80
PA
6038 disable all watchpoints.
6039
6040 Any breakpoint at PC must also be stepped over -- if there's
6041 one, it will have already triggered before the watchpoint
6042 triggered, and we either already reported it to the user, or
6043 it didn't cause a stop and we called keep_going. In either
6044 case, if there was a breakpoint at PC, we must be trying to
6045 step past it. */
6046 ecs->event_thread->stepping_over_watchpoint = 1;
6047 keep_going (ecs);
488f131b
JB
6048 return;
6049 }
6050
4e1c45ea 6051 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 6052 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
6053 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6054 ecs->event_thread->control.stop_step = 0;
488f131b 6055 stop_print_frame = 1;
488f131b 6056 stopped_by_random_signal = 0;
ddfe970e 6057 bpstat stop_chain = NULL;
488f131b 6058
edb3359d
DJ
6059 /* Hide inlined functions starting here, unless we just performed stepi or
6060 nexti. After stepi and nexti, always show the innermost frame (not any
6061 inline function call sites). */
16c381f0 6062 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 6063 {
00431a78
PA
6064 const address_space *aspace
6065 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
6066
6067 /* skip_inline_frames is expensive, so we avoid it if we can
6068 determine that the address is one where functions cannot have
6069 been inlined. This improves performance with inferiors that
6070 load a lot of shared libraries, because the solib event
6071 breakpoint is defined as the address of a function (i.e. not
6072 inline). Note that we have to check the previous PC as well
6073 as the current one to catch cases when we have just
6074 single-stepped off a breakpoint prior to reinstating it.
6075 Note that we're assuming that the code we single-step to is
6076 not inline, but that's not definitive: there's nothing
6077 preventing the event breakpoint function from containing
6078 inlined code, and the single-step ending up there. If the
6079 user had set a breakpoint on that inlined code, the missing
6080 skip_inline_frames call would break things. Fortunately
6081 that's an extremely unlikely scenario. */
f2ffa92b
PA
6082 if (!pc_at_non_inline_function (aspace,
6083 ecs->event_thread->suspend.stop_pc,
6084 &ecs->ws)
6085 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6086 && ecs->event_thread->control.trap_expected
6087 && pc_at_non_inline_function (aspace,
6088 ecs->event_thread->prev_pc,
09ac7c10 6089 &ecs->ws)))
1c5a993e 6090 {
f2ffa92b
PA
6091 stop_chain = build_bpstat_chain (aspace,
6092 ecs->event_thread->suspend.stop_pc,
6093 &ecs->ws);
00431a78 6094 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
6095
6096 /* Re-fetch current thread's frame in case that invalidated
6097 the frame cache. */
6098 frame = get_current_frame ();
6099 gdbarch = get_frame_arch (frame);
6100 }
0574c78f 6101 }
edb3359d 6102
a493e3e2 6103 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 6104 && ecs->event_thread->control.trap_expected
568d6575 6105 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6106 && currently_stepping (ecs->event_thread))
3352ef37 6107 {
b50d7442 6108 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6109 also on an instruction that needs to be stepped multiple
1777feb0 6110 times before it's been fully executed. E.g., architectures
3352ef37
AC
6111 with a delay slot. It needs to be stepped twice, once for
6112 the instruction and once for the delay slot. */
6113 int step_through_delay
568d6575 6114 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6115
527159b7 6116 if (debug_infrun && step_through_delay)
8a9de0e4 6117 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
16c381f0
JK
6118 if (ecs->event_thread->control.step_range_end == 0
6119 && step_through_delay)
3352ef37
AC
6120 {
6121 /* The user issued a continue when stopped at a breakpoint.
6122 Set up for another trap and get out of here. */
4e1c45ea 6123 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6124 keep_going (ecs);
6125 return;
6126 }
6127 else if (step_through_delay)
6128 {
6129 /* The user issued a step when stopped at a breakpoint.
6130 Maybe we should stop, maybe we should not - the delay
6131 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6132 case, don't decide that here, just set
6133 ecs->stepping_over_breakpoint, making sure we
6134 single-step again before breakpoints are re-inserted. */
4e1c45ea 6135 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6136 }
6137 }
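/* Editorial sketch (not GDB code): on a delay-slot architecture like the
   ones mentioned above, the branch and its delay slot must both be
   stepped before the breakpointed instruction is fully done, so getting
   off the breakpoint costs two hardware single-steps instead of one.  */
#if 0
static int
single_steps_needed_sketch (int insn_has_delay_slot)
{
  /* One step for the instruction itself, one more for the delay slot.  */
  return insn_has_delay_slot ? 2 : 1;
}
#endif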
6138
ab04a2af
TT
6139 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6140 handles this event. */
6141 ecs->event_thread->control.stop_bpstat
a01bda52 6142 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
6143 ecs->event_thread->suspend.stop_pc,
6144 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6145
ab04a2af
TT
6146 /* Needed in case the breakpoint condition called a
6147 function. */
6148 stop_print_frame = 1;
73dd234f 6149
ab04a2af
TT
6150 /* This is where we handle "moribund" watchpoints. Unlike
6151 software breakpoints traps, hardware watchpoint traps are
6152 always distinguishable from random traps. If no high-level
6153 watchpoint is associated with the reported stop data address
6154 anymore, then the bpstat does not explain the signal ---
6155 simply make sure to ignore it if `stopped_by_watchpoint' is
6156 set. */
6157
6158 if (debug_infrun
6159 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6160 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6161 GDB_SIGNAL_TRAP)
ab04a2af
TT
6162 && stopped_by_watchpoint)
6163 fprintf_unfiltered (gdb_stdlog,
6164 "infrun: no user watchpoint explains "
6165 "watchpoint SIGTRAP, ignoring\n");
73dd234f 6166
bac7d97b 6167 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6168 at one stage in the past included checks for an inferior
6169 function call's call dummy's return breakpoint. The original
6170 comment, that went with the test, read:
03cebad2 6171
ab04a2af
TT
6172 ``End of a stack dummy. Some systems (e.g. Sony news) give
6173 another signal besides SIGTRAP, so check here as well as
6174 above.''
73dd234f 6175
ab04a2af
TT
6176 If someone ever tries to get call dummies on a
6177 non-executable stack to work (where the target would stop
6178 with something like a SIGSEGV), then those tests might need
6179 to be re-instated. Given, however, that the tests were only
6180 enabled when momentary breakpoints were not being used, I
6181 suspect that it won't be the case.
488f131b 6182
ab04a2af
TT
6183 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6184 be necessary for call dummies on a non-executable stack on
6185 SPARC. */
488f131b 6186
bac7d97b 6187 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6188 random_signal
6189 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6190 ecs->event_thread->suspend.stop_signal);
bac7d97b 6191
1cf4d951
PA
6192 /* Maybe this was a trap for a software breakpoint that has since
6193 been removed. */
6194 if (random_signal && target_stopped_by_sw_breakpoint ())
6195 {
5133a315
LM
6196 if (gdbarch_program_breakpoint_here_p (gdbarch,
6197 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6198 {
6199 struct regcache *regcache;
6200 int decr_pc;
6201
6202 /* Re-adjust PC to what the program would see if GDB was not
6203 debugging it. */
00431a78 6204 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6205 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6206 if (decr_pc != 0)
6207 {
07036511
TT
6208 gdb::optional<scoped_restore_tmpl<int>>
6209 restore_operation_disable;
1cf4d951
PA
6210
6211 if (record_full_is_used ())
07036511
TT
6212 restore_operation_disable.emplace
6213 (record_full_gdb_operation_disable_set ());
1cf4d951 6214
f2ffa92b
PA
6215 regcache_write_pc (regcache,
6216 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6217 }
6218 }
6219 else
6220 {
6221 /* A delayed software breakpoint event. Ignore the trap. */
6222 if (debug_infrun)
6223 fprintf_unfiltered (gdb_stdlog,
6224 "infrun: delayed software breakpoint "
6225 "trap, ignoring\n");
6226 random_signal = 0;
6227 }
6228 }
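/* Editorial sketch (not GDB code): how the "re-adjust PC" step above
   works in the common x86-style case, where the trap for a software
   breakpoint reports an address just past the breakpoint instruction.
   The actual offset comes from gdbarch_decr_pc_after_break.  */
#if 0
static unsigned long
undo_breakpoint_pc_sketch (unsigned long reported_pc, unsigned long decr_pc)
{
  /* E.g. on x86, reported_pc == breakpoint_address + 1 and decr_pc == 1,
     so the program sees the PC it would have had without GDB.  */
  return reported_pc - decr_pc;
}
#endif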
6229
6230 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6231 has since been removed. */
6232 if (random_signal && target_stopped_by_hw_breakpoint ())
6233 {
6234 /* A delayed hardware breakpoint event. Ignore the trap. */
6235 if (debug_infrun)
6236 fprintf_unfiltered (gdb_stdlog,
6237 "infrun: delayed hardware breakpoint/watchpoint "
6238 "trap, ignoring\n");
6239 random_signal = 0;
6240 }
6241
bac7d97b
PA
6242 /* If not, perhaps stepping/nexting can. */
6243 if (random_signal)
6244 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6245 && currently_stepping (ecs->event_thread));
ab04a2af 6246
2adfaa28
PA
6247 /* Perhaps the thread hit a single-step breakpoint of _another_
6248 thread. Single-step breakpoints are transparent to the
6249 breakpoints module. */
6250 if (random_signal)
6251 random_signal = !ecs->hit_singlestep_breakpoint;
6252
bac7d97b
PA
6253 /* No? Perhaps we got a moribund watchpoint. */
6254 if (random_signal)
6255 random_signal = !stopped_by_watchpoint;
ab04a2af 6256
c65d6b55
PA
6257 /* Always stop if the user explicitly requested this thread to
6258 remain stopped. */
6259 if (ecs->event_thread->stop_requested)
6260 {
6261 random_signal = 1;
6262 if (debug_infrun)
6263 fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
6264 }
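/* Editorial condensation of the checks above (hypothetical helper, not
   GDB's API): a trap is treated as a "random" signal only when nothing
   GDB planted explains it, and a user-requested stop always wins.  */
#if 0
static int
signal_is_random_sketch (int explained_by_bpstat, int stepping_trap,
			 int other_threads_sss_breakpoint,
			 int moribund_watchpoint, int user_stop_requested)
{
  if (user_stop_requested)
    return 1;
  return (!explained_by_bpstat
	  && !stepping_trap
	  && !other_threads_sss_breakpoint
	  && !moribund_watchpoint);
}
#endif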
6265
488f131b
JB
6266 /* For the program's own signals, act according to
6267 the signal handling tables. */
6268
ce12b012 6269 if (random_signal)
488f131b
JB
6270 {
6271 /* Signal not for debugging purposes. */
5b6d1e4f 6272 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
c9737c08 6273 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6274
527159b7 6275 if (debug_infrun)
c9737c08
PA
6276 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
6277 gdb_signal_to_symbol_string (stop_signal));
527159b7 6278
488f131b
JB
6279 stopped_by_random_signal = 1;
6280
252fbfc8
PA
6281 /* Always stop on signals if we're either just gaining control
6282 of the program, or the user explicitly requested this thread
6283 to remain stopped. */
d6b48e9c 6284 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6285 || ecs->event_thread->stop_requested
24291992 6286 || (!inf->detaching
16c381f0 6287 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 6288 {
22bcd14b 6289 stop_waiting (ecs);
488f131b
JB
6290 return;
6291 }
b57bacec
PA
6292
6293 /* Notify observers the signal has "handle print" set. Note we
6294 returned early above if stopping; normal_stop handles the
6295 printing in that case. */
6296 if (signal_print[ecs->event_thread->suspend.stop_signal])
6297 {
6298 /* The signal table tells us to print about this signal. */
223ffa71 6299 target_terminal::ours_for_output ();
76727919 6300 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6301 target_terminal::inferior ();
b57bacec 6302 }
488f131b
JB
6303
6304 /* Clear the signal if it should not be passed. */
16c381f0 6305 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6306 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6307
f2ffa92b 6308 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6309 && ecs->event_thread->control.trap_expected
8358c15c 6310 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6311 {
6312 /* We were just starting a new sequence, attempting to
6313 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6314 Instead this signal arrives. This signal will take us out
68f53502
AC
6315 of the stepping range so GDB needs to remember to, when
6316 the signal handler returns, resume stepping off that
6317 breakpoint. */
6318 /* To simplify things, "continue" is forced to use the same
6319 code paths as single-step - set a breakpoint at the
6320 signal return address and then, once hit, step off that
6321 breakpoint. */
237fc4c9
PA
6322 if (debug_infrun)
6323 fprintf_unfiltered (gdb_stdlog,
6324 "infrun: signal arrived while stepping over "
6325 "breakpoint\n");
d3169d93 6326
2c03e5be 6327 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6328 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6329 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6330 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6331
6332 /* If we were nexting/stepping some other thread, switch to
6333 it, so that we don't continue it, losing control. */
6334 if (!switch_back_to_stepped_thread (ecs))
6335 keep_going (ecs);
9d799f85 6336 return;
68f53502 6337 }
9d799f85 6338
e5f8a7cc 6339 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6340 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6341 ecs->event_thread)
e5f8a7cc 6342 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6343 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6344 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6345 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6346 {
6347 /* The inferior is about to take a signal that will take it
6348 out of the single step range. Set a breakpoint at the
6349 current PC (which is presumably where the signal handler
6350 will eventually return) and then allow the inferior to
6351 run free.
6352
6353 Note that this is only needed for a signal delivered
6354 while in the single-step range. Nested signals aren't a
6355 problem as they eventually all return. */
237fc4c9
PA
6356 if (debug_infrun)
6357 fprintf_unfiltered (gdb_stdlog,
6358 "infrun: signal may take us out of "
6359 "single-step range\n");
6360
372316f1 6361 clear_step_over_info ();
2c03e5be 6362 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6363 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6364 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6365 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6366 keep_going (ecs);
6367 return;
d303a6c7 6368 }
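/* Editorial sketch (not GDB code) of the recovery strategy shared by the
   two branches above: when a signal shows up mid-step, plant a breakpoint
   where the handler will eventually return (the current PC), let the
   inferior run the handler, and pick the step back up on the way out.
   The helpers named below are hypothetical.  */
#if 0
static void
signal_during_step_sketch (unsigned long current_pc)
{
  plant_step_resume_breakpoint (current_pc);	/* hypothetical helper */
  resume_inferior_without_stepping ();		/* hypothetical helper */
  /* When the breakpoint at CURRENT_PC is hit again, the handler has
     returned and single-stepping continues from where it left off.  */
}
#endif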
9d799f85 6369
85102364 6370 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6371 when either there's a nested signal, or when there's a
6372 pending signal enabled just as the signal handler returns
6373 (leaving the inferior at the step-resume-breakpoint without
6374 actually executing it). Either way continue until the
6375 breakpoint is really hit. */
c447ac0b
PA
6376
6377 if (!switch_back_to_stepped_thread (ecs))
6378 {
6379 if (debug_infrun)
6380 fprintf_unfiltered (gdb_stdlog,
6381 "infrun: random signal, keep going\n");
6382
6383 keep_going (ecs);
6384 }
6385 return;
488f131b 6386 }
94c57d6a
PA
6387
6388 process_event_stop_test (ecs);
6389}
6390
6391/* Come here when we've got some debug event / signal we can explain
6392 (IOW, not a random signal), and test whether it should cause a
6393 stop, or whether we should resume the inferior (transparently).
6394 E.g., could be a breakpoint whose condition evaluates false; we
6395 could be still stepping within the line; etc. */
6396
6397static void
6398process_event_stop_test (struct execution_control_state *ecs)
6399{
6400 struct symtab_and_line stop_pc_sal;
6401 struct frame_info *frame;
6402 struct gdbarch *gdbarch;
cdaa5b73
PA
6403 CORE_ADDR jmp_buf_pc;
6404 struct bpstat_what what;
94c57d6a 6405
cdaa5b73 6406 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6407
cdaa5b73
PA
6408 frame = get_current_frame ();
6409 gdbarch = get_frame_arch (frame);
fcf3daef 6410
cdaa5b73 6411 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6412
cdaa5b73
PA
6413 if (what.call_dummy)
6414 {
6415 stop_stack_dummy = what.call_dummy;
6416 }
186c406b 6417
243a9253
PA
6418 /* A few breakpoint types have callbacks associated (e.g.,
6419 bp_jit_event). Run them now. */
6420 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6421
cdaa5b73
PA
6422 /* If we hit an internal event that triggers symbol changes, the
6423 current frame will be invalidated within bpstat_what (e.g., if we
6424 hit an internal solib event). Re-fetch it. */
6425 frame = get_current_frame ();
6426 gdbarch = get_frame_arch (frame);
e2e4d78b 6427
cdaa5b73
PA
6428 switch (what.main_action)
6429 {
6430 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6431 /* If we hit the breakpoint at longjmp while stepping, we
6432 install a momentary breakpoint at the target of the
6433 jmp_buf. */
186c406b 6434
cdaa5b73
PA
6435 if (debug_infrun)
6436 fprintf_unfiltered (gdb_stdlog,
6437 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
186c406b 6438
cdaa5b73 6439 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6440
cdaa5b73
PA
6441 if (what.is_longjmp)
6442 {
6443 struct value *arg_value;
6444
6445 /* If we set the longjmp breakpoint via a SystemTap probe,
6446 then use it to extract the arguments. The destination PC
6447 is the third argument to the probe. */
6448 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6449 if (arg_value)
8fa0c4f8
AA
6450 {
6451 jmp_buf_pc = value_as_address (arg_value);
6452 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6453 }
cdaa5b73
PA
6454 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6455 || !gdbarch_get_longjmp_target (gdbarch,
6456 frame, &jmp_buf_pc))
e2e4d78b 6457 {
cdaa5b73
PA
6458 if (debug_infrun)
6459 fprintf_unfiltered (gdb_stdlog,
6460 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6461 "(!gdbarch_get_longjmp_target)\n");
6462 keep_going (ecs);
6463 return;
e2e4d78b 6464 }
e2e4d78b 6465
cdaa5b73
PA
6466 /* Insert a breakpoint at resume address. */
6467 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6468 }
6469 else
6470 check_exception_resume (ecs, frame);
6471 keep_going (ecs);
6472 return;
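/* Editorial sketch (not GDB code): the two sources for the longjmp
   destination used above -- the third SystemTap probe argument (index 2,
   zero-based) when a probe is present, otherwise the target-specific
   jmp_buf decoder.  The accessors named below are hypothetical.  */
#if 0
static int
longjmp_target_sketch (int have_probe, unsigned long *jmp_buf_pc)
{
  if (have_probe)
    {
      *jmp_buf_pc = read_probe_argument (2);	/* hypothetical accessor */
      return 1;
    }
  /* Otherwise decode the jmp_buf the architecture-specific way; if that
     is not possible either, the caller just keeps going.  */
  return decode_jmp_buf_pc (jmp_buf_pc);	/* hypothetical accessor */
}
#endif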
e81a37f7 6473
cdaa5b73
PA
6474 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6475 {
6476 struct frame_info *init_frame;
e81a37f7 6477
cdaa5b73 6478 /* There are several cases to consider.
c906108c 6479
cdaa5b73
PA
6480 1. The initiating frame no longer exists. In this case we
6481 must stop, because the exception or longjmp has gone too
6482 far.
2c03e5be 6483
cdaa5b73
PA
6484 2. The initiating frame exists, and is the same as the
6485 current frame. We stop, because the exception or longjmp
6486 has been caught.
2c03e5be 6487
cdaa5b73
PA
6488 3. The initiating frame exists and is different from the
6489 current frame. This means the exception or longjmp has
6490 been caught beneath the initiating frame, so keep going.
c906108c 6491
cdaa5b73
PA
6492 4. longjmp breakpoint has been placed just to protect
6493 against stale dummy frames and user is not interested in
6494 stopping around longjmps. */
c5aa993b 6495
cdaa5b73
PA
6496 if (debug_infrun)
6497 fprintf_unfiltered (gdb_stdlog,
6498 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
c5aa993b 6499
cdaa5b73
PA
6500 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6501 != NULL);
6502 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6503
cdaa5b73
PA
6504 if (what.is_longjmp)
6505 {
b67a2c6f 6506 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6507
cdaa5b73 6508 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6509 {
cdaa5b73
PA
6510 /* Case 4. */
6511 keep_going (ecs);
6512 return;
e5ef252a 6513 }
cdaa5b73 6514 }
c5aa993b 6515
cdaa5b73 6516 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6517
cdaa5b73
PA
6518 if (init_frame)
6519 {
6520 struct frame_id current_id
6521 = get_frame_id (get_current_frame ());
6522 if (frame_id_eq (current_id,
6523 ecs->event_thread->initiating_frame))
6524 {
6525 /* Case 2. Fall through. */
6526 }
6527 else
6528 {
6529 /* Case 3. */
6530 keep_going (ecs);
6531 return;
6532 }
68f53502 6533 }
488f131b 6534
cdaa5b73
PA
6535 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6536 exists. */
6537 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6538
bdc36728 6539 end_stepping_range (ecs);
cdaa5b73
PA
6540 }
6541 return;
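/* Editorial decision table (not GDB code) for the four cases enumerated
   in the comment above.  Returns nonzero when the thread should stop.  */
#if 0
static int
clear_longjmp_resume_should_stop_sketch (int protect_only,
					 int initiating_frame_exists,
					 int initiating_is_current)
{
  if (protect_only)		/* case 4: user doesn't care about longjmps */
    return 0;
  if (!initiating_frame_exists)	/* case 1: exception/longjmp went too far */
    return 1;
  if (initiating_is_current)	/* case 2: it was caught right here */
    return 1;
  return 0;			/* case 3: caught beneath us, keep going */
}
#endif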
e5ef252a 6542
cdaa5b73
PA
6543 case BPSTAT_WHAT_SINGLE:
6544 if (debug_infrun)
6545 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6546 ecs->event_thread->stepping_over_breakpoint = 1;
6547 /* Still need to check other stuff, at least the case where we
6548 are stepping and step out of the right range. */
6549 break;
e5ef252a 6550
cdaa5b73
PA
6551 case BPSTAT_WHAT_STEP_RESUME:
6552 if (debug_infrun)
6553 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
e5ef252a 6554
cdaa5b73
PA
6555 delete_step_resume_breakpoint (ecs->event_thread);
6556 if (ecs->event_thread->control.proceed_to_finish
6557 && execution_direction == EXEC_REVERSE)
6558 {
6559 struct thread_info *tp = ecs->event_thread;
6560
6561 /* We are finishing a function in reverse, and just hit the
6562 step-resume breakpoint at the start address of the
6563 function, and we're almost there -- just need to back up
6564 by one more single-step, which should take us back to the
6565 function call. */
6566 tp->control.step_range_start = tp->control.step_range_end = 1;
6567 keep_going (ecs);
e5ef252a 6568 return;
cdaa5b73
PA
6569 }
6570 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6571 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6572 && execution_direction == EXEC_REVERSE)
6573 {
6574 /* We are stepping over a function call in reverse, and just
6575 hit the step-resume breakpoint at the start address of
6576 the function. Go back to single-stepping, which should
6577 take us back to the function call. */
6578 ecs->event_thread->stepping_over_breakpoint = 1;
6579 keep_going (ecs);
6580 return;
6581 }
6582 break;
e5ef252a 6583
cdaa5b73
PA
6584 case BPSTAT_WHAT_STOP_NOISY:
6585 if (debug_infrun)
6586 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6587 stop_print_frame = 1;
e5ef252a 6588
99619bea
PA
6589 /* Assume the thread stopped for a breakpoint. We'll still check
6590 whether a/the breakpoint is there when the thread is next
6591 resumed. */
6592 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6593
22bcd14b 6594 stop_waiting (ecs);
cdaa5b73 6595 return;
e5ef252a 6596
cdaa5b73
PA
6597 case BPSTAT_WHAT_STOP_SILENT:
6598 if (debug_infrun)
6599 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6600 stop_print_frame = 0;
e5ef252a 6601
99619bea
PA
6602 /* Assume the thread stopped for a breakpoint. We'll still check
6603 whether a/the breakpoint is there when the thread is next
6604 resumed. */
6605 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6606 stop_waiting (ecs);
cdaa5b73
PA
6607 return;
6608
6609 case BPSTAT_WHAT_HP_STEP_RESUME:
6610 if (debug_infrun)
6611 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6612
6613 delete_step_resume_breakpoint (ecs->event_thread);
6614 if (ecs->event_thread->step_after_step_resume_breakpoint)
6615 {
6616 /* Back when the step-resume breakpoint was inserted, we
6617 were trying to single-step off a breakpoint. Go back to
6618 doing that. */
6619 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6620 ecs->event_thread->stepping_over_breakpoint = 1;
6621 keep_going (ecs);
6622 return;
e5ef252a 6623 }
cdaa5b73
PA
6624 break;
6625
6626 case BPSTAT_WHAT_KEEP_CHECKING:
6627 break;
e5ef252a 6628 }
c906108c 6629
af48d08f
PA
6630 /* If we stepped a permanent breakpoint and we had a high priority
6631 step-resume breakpoint for the address we stepped, but we didn't
6632 hit it, then we must have stepped into the signal handler. The
6633 step-resume was only necessary to catch the case of _not_
6634 stepping into the handler, so delete it, and fall through to
6635 checking whether the step finished. */
6636 if (ecs->event_thread->stepped_breakpoint)
6637 {
6638 struct breakpoint *sr_bp
6639 = ecs->event_thread->control.step_resume_breakpoint;
6640
8d707a12
PA
6641 if (sr_bp != NULL
6642 && sr_bp->loc->permanent
af48d08f
PA
6643 && sr_bp->type == bp_hp_step_resume
6644 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6645 {
6646 if (debug_infrun)
6647 fprintf_unfiltered (gdb_stdlog,
6648 "infrun: stepped permanent breakpoint, stopped in "
6649 "handler\n");
6650 delete_step_resume_breakpoint (ecs->event_thread);
6651 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6652 }
6653 }
6654
cdaa5b73
PA
6655 /* We come here if we hit a breakpoint but should not stop for it.
6656 Possibly we also were stepping and should stop for that. So fall
6657 through and test for stepping. But, if not stepping, do not
6658 stop. */
c906108c 6659
a7212384
UW
6660 /* In all-stop mode, if we're currently stepping but have stopped in
6661 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6662 if (switch_back_to_stepped_thread (ecs))
6663 return;
776f04fa 6664
8358c15c 6665 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6666 {
527159b7 6667 if (debug_infrun)
d3169d93
DJ
6668 fprintf_unfiltered (gdb_stdlog,
6669 "infrun: step-resume breakpoint is inserted\n");
527159b7 6670
488f131b
JB
6671 /* Having a step-resume breakpoint overrides anything
6672 else having to do with stepping commands until
6673 that breakpoint is reached. */
488f131b
JB
6674 keep_going (ecs);
6675 return;
6676 }
c5aa993b 6677
16c381f0 6678 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6679 {
527159b7 6680 if (debug_infrun)
8a9de0e4 6681 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
488f131b 6682 /* Likewise if we aren't even stepping. */
488f131b
JB
6683 keep_going (ecs);
6684 return;
6685 }
c5aa993b 6686
4b7703ad
JB
6687 /* Re-fetch current thread's frame in case the code above caused
6688 the frame cache to be re-initialized, making our FRAME variable
6689 a dangling pointer. */
6690 frame = get_current_frame ();
628fe4e4 6691 gdbarch = get_frame_arch (frame);
7e324e48 6692 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6693
488f131b 6694 /* If stepping through a line, keep going if still within it.
c906108c 6695
488f131b
JB
6696 Note that step_range_end is the address of the first instruction
6697 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6698 within it!
6699
6700 Note also that during reverse execution, we may be stepping
6701 through a function epilogue and therefore must detect when
6702 the current-frame changes in the middle of a line. */
6703
f2ffa92b
PA
6704 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6705 ecs->event_thread)
31410e84 6706 && (execution_direction != EXEC_REVERSE
388a8562 6707 || frame_id_eq (get_frame_id (frame),
16c381f0 6708 ecs->event_thread->control.step_frame_id)))
488f131b 6709 {
527159b7 6710 if (debug_infrun)
5af949e3
UW
6711 fprintf_unfiltered
6712 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
16c381f0
JK
6713 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6714 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6715
c1e36e3e
PA
6716 /* Tentatively re-enable range stepping; `resume' disables it if
6717 necessary (e.g., if we're stepping over a breakpoint or we
6718 have software watchpoints). */
6719 ecs->event_thread->control.may_range_step = 1;
6720
b2175913
MS
6721 /* When stepping backward, stop at beginning of line range
6722 (unless it's the function entry point, in which case
6723 keep going back to the call point). */
f2ffa92b 6724 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6725 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6726 && stop_pc != ecs->stop_func_start
6727 && execution_direction == EXEC_REVERSE)
bdc36728 6728 end_stepping_range (ecs);
b2175913
MS
6729 else
6730 keep_going (ecs);
6731
488f131b
JB
6732 return;
6733 }
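/* Editorial sketch (not GDB code): the step range is half-open, so the
   "still within the line" test above is PC >= start && PC < end, with
   END being the first address past the range, as the comment notes.  */
#if 0
static int
pc_in_step_range_sketch (unsigned long pc, unsigned long start,
			 unsigned long end)
{
  return pc >= start && pc < end;
}
#endif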
c5aa993b 6734
488f131b 6735 /* We stepped out of the stepping range. */
c906108c 6736
488f131b 6737 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6738 loader dynamic symbol resolution code...
6739
6740 EXEC_FORWARD: we keep on single stepping until we exit the run
6741 time loader code and reach the callee's address.
6742
6743 EXEC_REVERSE: we've already executed the callee (backward), and
6744 the runtime loader code is handled just like any other
6745 undebuggable function call. Now we need only keep stepping
6746 backward through the trampoline code, and that's handled further
6747 down, so there is nothing for us to do here. */
6748
6749 if (execution_direction != EXEC_REVERSE
16c381f0 6750 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6751 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6752 {
4c8c40e6 6753 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6754 gdbarch_skip_solib_resolver (gdbarch,
6755 ecs->event_thread->suspend.stop_pc);
c906108c 6756
527159b7 6757 if (debug_infrun)
3e43a32a
MS
6758 fprintf_unfiltered (gdb_stdlog,
6759 "infrun: stepped into dynsym resolve code\n");
527159b7 6760
488f131b
JB
6761 if (pc_after_resolver)
6762 {
6763 /* Set up a step-resume breakpoint at the address
6764 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6765 symtab_and_line sr_sal;
488f131b 6766 sr_sal.pc = pc_after_resolver;
6c95b8df 6767 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6768
a6d9a66e
UW
6769 insert_step_resume_breakpoint_at_sal (gdbarch,
6770 sr_sal, null_frame_id);
c5aa993b 6771 }
c906108c 6772
488f131b
JB
6773 keep_going (ecs);
6774 return;
6775 }
c906108c 6776
1d509aa6
MM
6777 /* Step through an indirect branch thunk. */
6778 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6779 && gdbarch_in_indirect_branch_thunk (gdbarch,
6780 ecs->event_thread->suspend.stop_pc))
1d509aa6
MM
6781 {
6782 if (debug_infrun)
6783 fprintf_unfiltered (gdb_stdlog,
6784 "infrun: stepped into indirect branch thunk\n");
6785 keep_going (ecs);
6786 return;
6787 }
6788
16c381f0
JK
6789 if (ecs->event_thread->control.step_range_end != 1
6790 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6791 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6792 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6793 {
527159b7 6794 if (debug_infrun)
3e43a32a
MS
6795 fprintf_unfiltered (gdb_stdlog,
6796 "infrun: stepped into signal trampoline\n");
42edda50 6797 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6798 a signal trampoline (either by a signal being delivered or by
6799 the signal handler returning). Just single-step until the
6800 inferior leaves the trampoline (either by calling the handler
6801 or returning). */
488f131b
JB
6802 keep_going (ecs);
6803 return;
6804 }
c906108c 6805
14132e89
MR
6806 /* If we're in the return path from a shared library trampoline,
6807 we want to proceed through the trampoline when stepping. */
6808 /* macro/2012-04-25: This needs to come before the subroutine
6809 call check below as on some targets return trampolines look
6810 like subroutine calls (MIPS16 return thunks). */
6811 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6812 ecs->event_thread->suspend.stop_pc,
6813 ecs->stop_func_name)
14132e89
MR
6814 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6815 {
6816 /* Determine where this trampoline returns. */
f2ffa92b
PA
6817 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6818 CORE_ADDR real_stop_pc
6819 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89
MR
6820
6821 if (debug_infrun)
6822 fprintf_unfiltered (gdb_stdlog,
6823 "infrun: stepped into solib return tramp\n");
6824
6825 /* Only proceed through if we know where it's going. */
6826 if (real_stop_pc)
6827 {
6828 /* And put the step-breakpoint there and go until there. */
51abb421 6829 symtab_and_line sr_sal;
14132e89
MR
6830 sr_sal.pc = real_stop_pc;
6831 sr_sal.section = find_pc_overlay (sr_sal.pc);
6832 sr_sal.pspace = get_frame_program_space (frame);
6833
6834 /* Do not specify what the fp should be when we stop since
6835 on some machines the prologue is where the new fp value
6836 is established. */
6837 insert_step_resume_breakpoint_at_sal (gdbarch,
6838 sr_sal, null_frame_id);
6839
6840 /* Restart without fiddling with the step ranges or
6841 other state. */
6842 keep_going (ecs);
6843 return;
6844 }
6845 }
6846
c17eaafe
DJ
6847 /* Check for subroutine calls. The check for the current frame
6848 equalling the step ID is not necessary - the check of the
6849 previous frame's ID is sufficient - but it is a common case and
6850 cheaper than checking the previous frame's ID.
14e60db5
DJ
6851
6852 NOTE: frame_id_eq will never report two invalid frame IDs as
6853 being equal, so to get into this block, both the current and
6854 previous frame must have valid frame IDs. */
005ca36a
JB
6855 /* The outer_frame_id check is a heuristic to detect stepping
6856 through startup code. If we step over an instruction which
6857 sets the stack pointer from an invalid value to a valid value,
6858 we may detect that as a subroutine call from the mythical
6859 "outermost" function. This could be fixed by marking
6860 outermost frames as !stack_p,code_p,special_p. Then the
6861 initial outermost frame, before sp was valid, would
ce6cca6d 6862 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6863 for more. */
edb3359d 6864 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6865 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6866 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6867 ecs->event_thread->control.step_stack_frame_id)
6868 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6869 outer_frame_id)
885eeb5b 6870 || (ecs->event_thread->control.step_start_function
f2ffa92b 6871 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6872 {
f2ffa92b 6873 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6874 CORE_ADDR real_stop_pc;
8fb3e588 6875
527159b7 6876 if (debug_infrun)
8a9de0e4 6877 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
527159b7 6878
b7a084be 6879 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6880 {
6881 /* I presume that step_over_calls is only 0 when we're
6882 supposed to be stepping at the assembly language level
6883 ("stepi"). Just stop. */
388a8562 6884 /* And this works the same backward as frontward. MVS */
bdc36728 6885 end_stepping_range (ecs);
95918acb
AC
6886 return;
6887 }
8fb3e588 6888
388a8562
MS
6889 /* Reverse stepping through solib trampolines. */
6890
6891 if (execution_direction == EXEC_REVERSE
16c381f0 6892 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6893 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6894 || (ecs->stop_func_start == 0
6895 && in_solib_dynsym_resolve_code (stop_pc))))
6896 {
6897 /* Any solib trampoline code can be handled in reverse
6898 by simply continuing to single-step. We have already
6899 executed the solib function (backwards), and a few
6900 steps will take us back through the trampoline to the
6901 caller. */
6902 keep_going (ecs);
6903 return;
6904 }
6905
16c381f0 6906 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6907 {
b2175913
MS
6908 /* We're doing a "next".
6909
6910 Normal (forward) execution: set a breakpoint at the
6911 callee's return address (the address at which the caller
6912 will resume).
6913
6914 Reverse (backward) execution: set the step-resume
6915 breakpoint at the start of the function that we just
6916 stepped into (backwards), and continue to there. When we
6130d0b7 6917 get there, we'll need to single-step back to the caller. */
b2175913
MS
6918
6919 if (execution_direction == EXEC_REVERSE)
6920 {
acf9414f
JK
6921 /* If we're already at the start of the function, we've either
6922 just stepped backward into a single instruction function,
6923 or stepped back out of a signal handler to the first instruction
6924 of the function. Just keep going, which will single-step back
6925 to the caller. */
58c48e72 6926 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6927 {
acf9414f 6928 /* Normal function call return (static or dynamic). */
51abb421 6929 symtab_and_line sr_sal;
acf9414f
JK
6930 sr_sal.pc = ecs->stop_func_start;
6931 sr_sal.pspace = get_frame_program_space (frame);
6932 insert_step_resume_breakpoint_at_sal (gdbarch,
6933 sr_sal, null_frame_id);
6934 }
b2175913
MS
6935 }
6936 else
568d6575 6937 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6938
8567c30f
AC
6939 keep_going (ecs);
6940 return;
6941 }
a53c66de 6942
95918acb 6943 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6944 calling routine and the real function), locate the real
6945 function. That's what tells us (a) whether we want to step
6946 into it at all, and (b) what prologue we want to run to the
6947 end of, if we do step into it. */
568d6575 6948 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6949 if (real_stop_pc == 0)
568d6575 6950 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6951 if (real_stop_pc != 0)
6952 ecs->stop_func_start = real_stop_pc;
8fb3e588 6953
db5f024e 6954 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6955 {
51abb421 6956 symtab_and_line sr_sal;
1b2bfbb9 6957 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6958 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6959
a6d9a66e
UW
6960 insert_step_resume_breakpoint_at_sal (gdbarch,
6961 sr_sal, null_frame_id);
8fb3e588
AC
6962 keep_going (ecs);
6963 return;
1b2bfbb9
RC
6964 }
6965
95918acb 6966 /* If we have line number information for the function we are
1bfeeb0f
JL
6967 thinking of stepping into and the function isn't on the skip
6968 list, step into it.
95918acb 6969
8fb3e588
AC
6970 If there are several symtabs at that PC (e.g. with include
6971 files), just want to know whether *any* of them have line
6972 numbers. find_pc_line handles this. */
95918acb
AC
6973 {
6974 struct symtab_and_line tmp_sal;
8fb3e588 6975
95918acb 6976 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6977 if (tmp_sal.line != 0
85817405 6978 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6979 tmp_sal)
6980 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6981 {
b2175913 6982 if (execution_direction == EXEC_REVERSE)
568d6575 6983 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6984 else
568d6575 6985 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6986 return;
6987 }
6988 }
6989
6990 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6991 set, we stop the step so that the user has a chance to switch
6992 in assembly mode. */
16c381f0 6993 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6994 && step_stop_if_no_debug)
95918acb 6995 {
bdc36728 6996 end_stepping_range (ecs);
95918acb
AC
6997 return;
6998 }
6999
b2175913
MS
7000 if (execution_direction == EXEC_REVERSE)
7001 {
acf9414f
JK
7002 /* If we're already at the start of the function, we've either just
7003 stepped backward into a single instruction function without line
7004 number info, or stepped back out of a signal handler to the first
7005 instruction of the function without line number info. Just keep
7006 going, which will single-step back to the caller. */
7007 if (ecs->stop_func_start != stop_pc)
7008 {
7009 /* Set a breakpoint at callee's start address.
7010 From there we can step once and be back in the caller. */
51abb421 7011 symtab_and_line sr_sal;
acf9414f
JK
7012 sr_sal.pc = ecs->stop_func_start;
7013 sr_sal.pspace = get_frame_program_space (frame);
7014 insert_step_resume_breakpoint_at_sal (gdbarch,
7015 sr_sal, null_frame_id);
7016 }
b2175913
MS
7017 }
7018 else
7019 /* Set a breakpoint at callee's return address (the address
7020 at which the caller will resume). */
568d6575 7021 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7022
95918acb 7023 keep_going (ecs);
488f131b 7024 return;
488f131b 7025 }
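/* Editorial sketch (not GDB code) of the heuristic guarding the block
   above: the stop is treated as "stepped into a subroutine" when the
   current stack frame is new but its caller is the frame the step
   started in, with the outer_frame_id/function-changed special case
   catching the startup-code situation the comment describes.  */
#if 0
static int
stepped_into_subroutine_sketch (int frame_changed,
				int caller_is_step_frame,
				int step_frame_is_outermost,
				int function_changed)
{
  return frame_changed
	 && caller_is_step_frame
	 && (!step_frame_is_outermost || function_changed);
}
#endif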
c906108c 7026
fdd654f3
MS
7027 /* Reverse stepping through solib trampolines. */
7028
7029 if (execution_direction == EXEC_REVERSE
16c381f0 7030 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 7031 {
f2ffa92b
PA
7032 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7033
fdd654f3
MS
7034 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7035 || (ecs->stop_func_start == 0
7036 && in_solib_dynsym_resolve_code (stop_pc)))
7037 {
7038 /* Any solib trampoline code can be handled in reverse
7039 by simply continuing to single-step. We have already
7040 executed the solib function (backwards), and a few
7041 steps will take us back through the trampoline to the
7042 caller. */
7043 keep_going (ecs);
7044 return;
7045 }
7046 else if (in_solib_dynsym_resolve_code (stop_pc))
7047 {
7048 /* Stepped backward into the solib dynsym resolver.
7049 Set a breakpoint at its start and continue, then
7050 one more step will take us out. */
51abb421 7051 symtab_and_line sr_sal;
fdd654f3 7052 sr_sal.pc = ecs->stop_func_start;
9d1807c3 7053 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
7054 insert_step_resume_breakpoint_at_sal (gdbarch,
7055 sr_sal, null_frame_id);
7056 keep_going (ecs);
7057 return;
7058 }
7059 }
7060
8c95582d
AB
7061 /* This always returns the sal for the inner-most frame when we are in a
7062 stack of inlined frames, even if GDB actually believes that it is in a
7063 more outer frame. This is checked for below by calls to
7064 inline_skipped_frames. */
f2ffa92b 7065 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 7066
1b2bfbb9
RC
7067 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7068 the trampoline processing logic, however, there are some trampolines
7069 that have no names, so we should do trampoline handling first. */
16c381f0 7070 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 7071 && ecs->stop_func_name == NULL
2afb61aa 7072 && stop_pc_sal.line == 0)
1b2bfbb9 7073 {
527159b7 7074 if (debug_infrun)
3e43a32a
MS
7075 fprintf_unfiltered (gdb_stdlog,
7076 "infrun: stepped into undebuggable function\n");
527159b7 7077
1b2bfbb9 7078 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
7079 undebuggable function (where there is no debugging information
7080 and no line number corresponding to the address where the
1b2bfbb9
RC
7081 inferior stopped). Since we want to skip this kind of code,
7082 we keep going until the inferior returns from this
14e60db5
DJ
7083 function - unless the user has asked us not to (via
7084 set step-mode) or we no longer know how to get back
7085 to the call site. */
7086 if (step_stop_if_no_debug
c7ce8faa 7087 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
7088 {
7089 /* If we have no line number and the step-stop-if-no-debug
7090 is set, we stop the step so that the user has a chance to
7091 switch in assembly mode. */
bdc36728 7092 end_stepping_range (ecs);
1b2bfbb9
RC
7093 return;
7094 }
7095 else
7096 {
7097 /* Set a breakpoint at callee's return address (the address
7098 at which the caller will resume). */
568d6575 7099 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
7100 keep_going (ecs);
7101 return;
7102 }
7103 }
7104
16c381f0 7105 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
7106 {
7107 /* It is stepi or nexti. We always want to stop stepping after
7108 one instruction. */
527159b7 7109 if (debug_infrun)
8a9de0e4 7110 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
bdc36728 7111 end_stepping_range (ecs);
1b2bfbb9
RC
7112 return;
7113 }
7114
2afb61aa 7115 if (stop_pc_sal.line == 0)
488f131b
JB
7116 {
7117 /* We have no line number information. That means to stop
7118 stepping (does this always happen right after one instruction,
7119 when we do "s" in a function with no line numbers,
7120 or can this happen as a result of a return or longjmp?). */
527159b7 7121 if (debug_infrun)
8a9de0e4 7122 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
bdc36728 7123 end_stepping_range (ecs);
488f131b
JB
7124 return;
7125 }
c906108c 7126
edb3359d
DJ
7127 /* Look for "calls" to inlined functions, part one. If the inline
7128 frame machinery detected some skipped call sites, we have entered
7129 a new inline function. */
7130
7131 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7132 ecs->event_thread->control.step_frame_id)
00431a78 7133 && inline_skipped_frames (ecs->event_thread))
edb3359d 7134 {
edb3359d
DJ
7135 if (debug_infrun)
7136 fprintf_unfiltered (gdb_stdlog,
7137 "infrun: stepped into inlined function\n");
7138
51abb421 7139 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 7140
16c381f0 7141 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
7142 {
7143 /* For "step", we're going to stop. But if the call site
7144 for this inlined function is on the same source line as
7145 we were previously stepping, go down into the function
7146 first. Otherwise stop at the call site. */
7147
7148 if (call_sal.line == ecs->event_thread->current_line
7149 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7150 {
7151 step_into_inline_frame (ecs->event_thread);
7152 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7153 {
7154 keep_going (ecs);
7155 return;
7156 }
7157 }
edb3359d 7158
bdc36728 7159 end_stepping_range (ecs);
edb3359d
DJ
7160 return;
7161 }
7162 else
7163 {
7164 /* For "next", we should stop at the call site if it is on a
7165 different source line. Otherwise continue through the
7166 inlined function. */
7167 if (call_sal.line == ecs->event_thread->current_line
7168 && call_sal.symtab == ecs->event_thread->current_symtab)
7169 keep_going (ecs);
7170 else
bdc36728 7171 end_stepping_range (ecs);
edb3359d
DJ
7172 return;
7173 }
7174 }
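/* Editorial sketch (not GDB code) of the decision above.  For "next",
   an inlined call whose call site is on the line being stepped is simply
   continued through; otherwise we stop at the call site.  For "step" we
   stop either way, but when the call site is on the current line we first
   descend into the inlined frame so the stop is reported there.  */
#if 0
static int
next_should_stop_at_inlined_call_sketch (int call_site_on_same_line)
{
  return !call_site_on_same_line;
}
#endif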
7175
7176 /* Look for "calls" to inlined functions, part two. If we are still
7177 in the same real function we were stepping through, but we have
7178 to go further up to find the exact frame ID, we are stepping
7179 through a more inlined call beyond its call site. */
7180
7181 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7182 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7183 ecs->event_thread->control.step_frame_id)
edb3359d 7184 && stepped_in_from (get_current_frame (),
16c381f0 7185 ecs->event_thread->control.step_frame_id))
edb3359d
DJ
7186 {
7187 if (debug_infrun)
7188 fprintf_unfiltered (gdb_stdlog,
7189 "infrun: stepping through inlined function\n");
7190
4a4c04f1
BE
7191 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7192 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7193 keep_going (ecs);
7194 else
bdc36728 7195 end_stepping_range (ecs);
edb3359d
DJ
7196 return;
7197 }
7198
8c95582d 7199 bool refresh_step_info = true;
f2ffa92b 7200 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
7201 && (ecs->event_thread->current_line != stop_pc_sal.line
7202 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7203 {
8c95582d
AB
7204 if (stop_pc_sal.is_stmt)
7205 {
7206 /* We are at the start of a different line. So stop. Note that
7207 we don't stop if we step into the middle of a different line.
7208 That is said to make things like for (;;) statements work
7209 better. */
7210 if (debug_infrun)
7211 fprintf_unfiltered (gdb_stdlog,
7212 "infrun: stepped to a different line\n");
7213 end_stepping_range (ecs);
7214 return;
7215 }
7216 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7217 ecs->event_thread->control.step_frame_id))
7218 {
7219 /* We are at the start of a different line, however, this line is
7220 not marked as a statement, and we have not changed frame. We
7221 ignore this line table entry, and continue stepping forward,
7222 looking for a better place to stop. */
7223 refresh_step_info = false;
7224 if (debug_infrun)
7225 fprintf_unfiltered (gdb_stdlog,
7226 "infrun: stepped to a different line, but "
7227 "it's not the start of a statement\n");
7228 }
488f131b 7229 }
c906108c 7230
488f131b 7231 /* We aren't done stepping.
c906108c 7232
488f131b
JB
7233 Optimize by setting the stepping range to the line.
7234 (We might not be in the original line, but if we entered a
7235 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7236 things like for(;;) statements work better.)
7237
7238 If we entered a SAL that indicates a non-statement line table entry,
7239 then we update the stepping range, but we don't update the step info,
7240 which includes things like the line number we are stepping away from.
7241 This means we will stop when we find a line table entry that is marked
7242 as is-statement, even if it matches the non-statement one we just
7243 stepped into. */
c906108c 7244
16c381f0
JK
7245 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7246 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7247 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7248 if (refresh_step_info)
7249 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7250
527159b7 7251 if (debug_infrun)
8a9de0e4 7252 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
488f131b 7253 keep_going (ecs);
104c1213
JM
7254}
7255
c447ac0b
PA
7256/* In all-stop mode, if we're currently stepping but have stopped in
7257 some other thread, we may need to switch back to the stepped
7258 thread. Returns true if we set the inferior running, false if we left
7259 it stopped (and the event needs further processing). */
7260
7261static int
7262switch_back_to_stepped_thread (struct execution_control_state *ecs)
7263{
fbea99ea 7264 if (!target_is_non_stop_p ())
c447ac0b 7265 {
99619bea
PA
7266 struct thread_info *stepping_thread;
7267
7268 /* If any thread is blocked on some internal breakpoint, and we
7269 simply need to step over that breakpoint to get it going
7270 again, do that first. */
7271
7272 /* However, if we see an event for the stepping thread, then we
7273 know all other threads have been moved past their breakpoints
7274 already. Let the caller check whether the step is finished,
7275 etc., before deciding to move it past a breakpoint. */
7276 if (ecs->event_thread->control.step_range_end != 0)
7277 return 0;
7278
7279 /* Check if the current thread is blocked on an incomplete
7280 step-over, interrupted by a random signal. */
7281 if (ecs->event_thread->control.trap_expected
7282 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7283 {
99619bea
PA
7284 if (debug_infrun)
7285 {
7286 fprintf_unfiltered (gdb_stdlog,
7287 "infrun: need to finish step-over of [%s]\n",
a068643d 7288 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea
PA
7289 }
7290 keep_going (ecs);
7291 return 1;
7292 }
2adfaa28 7293
99619bea
PA
7294 /* Check if the current thread is blocked by a single-step
7295 breakpoint of another thread. */
7296 if (ecs->hit_singlestep_breakpoint)
7297 {
7298 if (debug_infrun)
7299 {
7300 fprintf_unfiltered (gdb_stdlog,
7301 "infrun: need to step [%s] over single-step "
7302 "breakpoint\n",
a068643d 7303 target_pid_to_str (ecs->ptid).c_str ());
99619bea
PA
7304 }
7305 keep_going (ecs);
7306 return 1;
7307 }
7308
4d9d9d04
PA
7309 /* If this thread needs yet another step-over (e.g., stepping
7310 through a delay slot), do it first before moving on to
7311 another thread. */
7312 if (thread_still_needs_step_over (ecs->event_thread))
7313 {
7314 if (debug_infrun)
7315 {
7316 fprintf_unfiltered (gdb_stdlog,
7317 "infrun: thread [%s] still needs step-over\n",
a068643d 7318 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04
PA
7319 }
7320 keep_going (ecs);
7321 return 1;
7322 }
70509625 7323
483805cf
PA
7324 /* If scheduler locking applies even if not stepping, there's no
7325 need to walk over threads. Above we've checked whether the
7326 current thread is stepping. If some other thread not the
7327 event thread is stepping, then it must be that scheduler
7328 locking is not in effect. */
856e7dd6 7329 if (schedlock_applies (ecs->event_thread))
483805cf
PA
7330 return 0;
7331
4d9d9d04
PA
7332 /* Otherwise, we no longer expect a trap in the current thread.
7333 Clear the trap_expected flag before switching back -- this is
7334 what keep_going does as well, if we call it. */
7335 ecs->event_thread->control.trap_expected = 0;
7336
7337 /* Likewise, clear the signal if it should not be passed. */
7338 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7339 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7340
7341 /* Do all pending step-overs before actually proceeding with
483805cf 7342 step/next/etc. */
4d9d9d04
PA
7343 if (start_step_over ())
7344 {
7345 prepare_to_wait (ecs);
7346 return 1;
7347 }
7348
7349 /* Look for the stepping/nexting thread. */
483805cf 7350 stepping_thread = NULL;
4d9d9d04 7351
08036331 7352 for (thread_info *tp : all_non_exited_threads ())
483805cf 7353 {
f3f8ece4
PA
7354 switch_to_thread_no_regs (tp);
7355
fbea99ea
PA
7356 /* Ignore threads of processes the caller is not
7357 resuming. */
483805cf 7358 if (!sched_multi
5b6d1e4f
PA
7359 && (tp->inf->process_target () != ecs->target
7360 || tp->inf->pid != ecs->ptid.pid ()))
483805cf
PA
7361 continue;
7362
7363 /* When stepping over a breakpoint, we lock all threads
7364 except the one that needs to move past the breakpoint.
7365 If a non-event thread has this set, the "incomplete
7366 step-over" check above should have caught it earlier. */
372316f1
PA
7367 if (tp->control.trap_expected)
7368 {
7369 internal_error (__FILE__, __LINE__,
7370 "[%s] has inconsistent state: "
7371 "trap_expected=%d\n",
a068643d 7372 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
7373 tp->control.trap_expected);
7374 }
483805cf
PA
7375
7376 /* Did we find the stepping thread? */
7377 if (tp->control.step_range_end)
7378 {
7380 /* Yep. There should be only one, though. */
7380 gdb_assert (stepping_thread == NULL);
7381
7382 /* The event thread is handled at the top, before we
7383 enter this loop. */
7384 gdb_assert (tp != ecs->event_thread);
7385
7386 /* If some thread other than the event thread is
7387 stepping, then scheduler locking can't be in effect,
7388 otherwise we wouldn't have resumed the current event
7389 thread in the first place. */
856e7dd6 7390 gdb_assert (!schedlock_applies (tp));
483805cf
PA
7391
7392 stepping_thread = tp;
7393 }
99619bea
PA
7394 }
7395
483805cf 7396 if (stepping_thread != NULL)
99619bea 7397 {
c447ac0b
PA
7398 if (debug_infrun)
7399 fprintf_unfiltered (gdb_stdlog,
7400 "infrun: switching back to stepped thread\n");
7401
2ac7589c
PA
7402 if (keep_going_stepped_thread (stepping_thread))
7403 {
7404 prepare_to_wait (ecs);
7405 return 1;
7406 }
7407 }
f3f8ece4
PA
7408
7409 switch_to_thread (ecs->event_thread);
2ac7589c 7410 }
2adfaa28 7411
2ac7589c
PA
7412 return 0;
7413}
2adfaa28 7414
2ac7589c
PA
7415/* Set a previously stepped thread back to stepping. Returns true on
7416 success, false if the resume is not possible (e.g., the thread
7417 vanished). */
7418
7419static int
7420keep_going_stepped_thread (struct thread_info *tp)
7421{
7422 struct frame_info *frame;
2ac7589c
PA
7423 struct execution_control_state ecss;
7424 struct execution_control_state *ecs = &ecss;
2adfaa28 7425
2ac7589c
PA
7426 /* If the stepping thread exited, then don't try to switch back and
7427 resume it, which could fail in several different ways depending
7428 on the target. Instead, just keep going.
2adfaa28 7429
2ac7589c
PA
7430 We can find a stepping dead thread in the thread list in two
7431 cases:
2adfaa28 7432
2ac7589c
PA
7433 - The target supports thread exit events, and when the target
7434 tries to delete the thread from the thread list, inferior_ptid
7435 pointed at the exiting thread. In such case, calling
7436 delete_thread does not really remove the thread from the list;
7437 instead, the thread is left listed, with 'exited' state.
64ce06e4 7438
2ac7589c
PA
7439 - The target's debug interface does not support thread exit
7440 events, and so we have no idea whatsoever if the previously
7441 stepping thread is still alive. For that reason, we need to
7442 synchronously query the target now. */
2adfaa28 7443
00431a78 7444 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c
PA
7445 {
7446 if (debug_infrun)
7447 fprintf_unfiltered (gdb_stdlog,
7448 "infrun: not resuming previously "
7449 "stepped thread, it has vanished\n");
7450
00431a78 7451 delete_thread (tp);
2ac7589c 7452 return 0;
c447ac0b 7453 }
2ac7589c
PA
7454
7455 if (debug_infrun)
7456 fprintf_unfiltered (gdb_stdlog,
7457 "infrun: resuming previously stepped thread\n");
7458
7459 reset_ecs (ecs, tp);
00431a78 7460 switch_to_thread (tp);
2ac7589c 7461
f2ffa92b 7462 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7463 frame = get_current_frame ();
2ac7589c
PA
7464
7465 /* If the PC of the thread we were trying to single-step has
7466 changed, then that thread has trapped or been signaled, but the
7467 event has not been reported to GDB yet. Re-poll the target
7468 looking for this particular thread's event (i.e. temporarily
7469 enable schedlock) by:
7470
7471 - setting a break at the current PC
7472 - resuming that particular thread, only (by setting trap
7473 expected)
7474
7475 This prevents us from continuously moving the single-step breakpoint
7476 forward, one instruction at a time, overstepping. */
7477
f2ffa92b 7478 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7479 {
7480 ptid_t resume_ptid;
7481
7482 if (debug_infrun)
7483 fprintf_unfiltered (gdb_stdlog,
7484 "infrun: expected thread advanced also (%s -> %s)\n",
7485 paddress (target_gdbarch (), tp->prev_pc),
f2ffa92b 7486 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7487
7488 /* Clear the info of the previous step-over, as it's no longer
7489 valid (if the thread was trying to step over a breakpoint, it
7490 has already succeeded). It's what keep_going would do too,
7491 if we called it. Do this before trying to insert the sss
7492 breakpoint, otherwise if we were previously trying to step
7493 over this exact address in another thread, the breakpoint is
7494 skipped. */
7495 clear_step_over_info ();
7496 tp->control.trap_expected = 0;
7497
7498 insert_single_step_breakpoint (get_frame_arch (frame),
7499 get_frame_address_space (frame),
f2ffa92b 7500 tp->suspend.stop_pc);
2ac7589c 7501
719546c4 7502 tp->resumed = true;
fbea99ea 7503 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7504 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7505 }
7506 else
7507 {
7508 if (debug_infrun)
7509 fprintf_unfiltered (gdb_stdlog,
7510 "infrun: expected thread still hasn't advanced\n");
7511
7512 keep_going_pass_signal (ecs);
7513 }
7514 return 1;
c447ac0b
PA
7515}
7516
8b061563
PA
7517/* Is thread TP in the middle of (software or hardware)
7518 single-stepping? (Note the result of this function must never be
7519 passed directly as target_resume's STEP parameter.) */
104c1213 7520
a289b8f6 7521static int
b3444185 7522currently_stepping (struct thread_info *tp)
a7212384 7523{
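  /* A thread counts as stepping if it has an active step range (and is
     not parked at a step-resume breakpoint), is in the middle of
     stepping over a breakpoint, has just single-stepped past a
     breakpoint instruction, or a software watchpoint forces
     single-stepping.  */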
8358c15c
JK
7524 return ((tp->control.step_range_end
7525 && tp->control.step_resume_breakpoint == NULL)
7526 || tp->control.trap_expected
af48d08f 7527 || tp->stepped_breakpoint
8358c15c 7528 || bpstat_should_step ());
a7212384
UW
7529}
7530
b2175913
MS
7531/* Inferior has stepped into a subroutine call with source code that
7532 we should not step over. Do step to the first line of code in
7533 it. */
c2c6d25f
JM
7534
7535static void
568d6575
UW
7536handle_step_into_function (struct gdbarch *gdbarch,
7537 struct execution_control_state *ecs)
c2c6d25f 7538{
7e324e48
GB
7539 fill_in_stop_func (gdbarch, ecs);
7540
f2ffa92b
PA
7541 compunit_symtab *cust
7542 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7543 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7544 ecs->stop_func_start
7545 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7546
51abb421 7547 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7548 /* Use the step_resume_break to step until the end of the prologue,
7549 even if that involves jumps (as it seems to on the vax under
7550 4.2). */
7551 /* If the prologue ends in the middle of a source line, continue to
7552 the end of that source line (if it is still within the function).
7553 Otherwise, just go to end of prologue. */
2afb61aa
PA
7554 if (stop_func_sal.end
7555 && stop_func_sal.pc != ecs->stop_func_start
7556 && stop_func_sal.end < ecs->stop_func_end)
7557 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7558
2dbd5e30
KB
7559 /* Architectures which require breakpoint adjustment might not be able
7560 to place a breakpoint at the computed address. If so, the test
7561 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7562 ecs->stop_func_start to an address at which a breakpoint may be
7563 legitimately placed.
8fb3e588 7564
2dbd5e30
KB
7565 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7566 made, GDB will enter an infinite loop when stepping through
7567 optimized code consisting of VLIW instructions which contain
7568 subinstructions corresponding to different source lines. On
7569 FR-V, it's not permitted to place a breakpoint on any but the
7570 first subinstruction of a VLIW instruction. When a breakpoint is
7571 set, GDB will adjust the breakpoint address to the beginning of
7572 the VLIW instruction. Thus, we need to make the corresponding
7573 adjustment here when computing the stop address. */
8fb3e588 7574
568d6575 7575 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7576 {
7577 ecs->stop_func_start
568d6575 7578 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7579 ecs->stop_func_start);
2dbd5e30
KB
7580 }
7581
f2ffa92b 7582 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7583 {
7584 /* We are already there: stop now. */
bdc36728 7585 end_stepping_range (ecs);
c2c6d25f
JM
7586 return;
7587 }
7588 else
7589 {
7590 /* Put the step-breakpoint there and go until there. */
51abb421 7591 symtab_and_line sr_sal;
c2c6d25f
JM
7592 sr_sal.pc = ecs->stop_func_start;
7593 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7594 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7595
c2c6d25f 7596 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7597 some machines the prologue is where the new fp value is
7598 established. */
a6d9a66e 7599 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7600
7601 /* And make sure stepping stops right away then. */
16c381f0
JK
7602 ecs->event_thread->control.step_range_end
7603 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7604 }
7605 keep_going (ecs);
7606}
d4f3574e 7607
b2175913
MS
7608/* Inferior has stepped backward into a subroutine call with source
7609 code that we should not step over. Do step to the beginning of the
7610 last line of code in it. */
7611
7612static void
568d6575
UW
7613handle_step_into_function_backward (struct gdbarch *gdbarch,
7614 struct execution_control_state *ecs)
b2175913 7615{
43f3e411 7616 struct compunit_symtab *cust;
167e4384 7617 struct symtab_and_line stop_func_sal;
b2175913 7618
7e324e48
GB
7619 fill_in_stop_func (gdbarch, ecs);
7620
f2ffa92b 7621 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7622 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7623 ecs->stop_func_start
7624 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7625
f2ffa92b 7626 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7627
7628 /* OK, we're just going to keep stepping here. */
f2ffa92b 7629 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7630 {
7631 /* We're there already. Just stop stepping now. */
bdc36728 7632 end_stepping_range (ecs);
b2175913
MS
7633 }
7634 else
7635 {
7636 /* Else just reset the step range and keep going.
7637 No step-resume breakpoint, they don't work for
7638 epilogues, which can have multiple entry paths. */
16c381f0
JK
7639 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7640 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7641 keep_going (ecs);
7642 }
7643 return;
7644}
7645
d3169d93 7646/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
 7647 This is used both to step over functions and to skip over code. */
7648
7649static void
2c03e5be
PA
7650insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7651 struct symtab_and_line sr_sal,
7652 struct frame_id sr_id,
7653 enum bptype sr_type)
44cbf7b5 7654{
611c83ae
PA
7655 /* There should never be more than one step-resume or longjmp-resume
7656 breakpoint per thread, so we should never be setting a new
44cbf7b5 7657 step_resume_breakpoint when one is already active. */
8358c15c 7658 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7659 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93
DJ
7660
7661 if (debug_infrun)
7662 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7663 "infrun: inserting step-resume breakpoint at %s\n",
7664 paddress (gdbarch, sr_sal.pc));
d3169d93 7665
8358c15c 7666 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7667 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7668}
7669
9da8c2a0 7670void
2c03e5be
PA
7671insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7672 struct symtab_and_line sr_sal,
7673 struct frame_id sr_id)
7674{
7675 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7676 sr_sal, sr_id,
7677 bp_step_resume);
44cbf7b5 7678}
7ce450bd 7679
2c03e5be
PA
7680/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7681 This is used to skip a potential signal handler.
7ce450bd 7682
14e60db5
DJ
7683 This is called with the interrupted function's frame. The signal
7684 handler, when it returns, will resume the interrupted function at
7685 RETURN_FRAME.pc. */
d303a6c7
AC
7686
7687static void
2c03e5be 7688insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7689{
f4c1edd8 7690 gdb_assert (return_frame != NULL);
d303a6c7 7691
51abb421
PA
7692 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7693
7694 symtab_and_line sr_sal;
568d6575 7695 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7696 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7697 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7698
2c03e5be
PA
7699 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7700 get_stack_frame_id (return_frame),
7701 bp_hp_step_resume);
d303a6c7
AC
7702}
7703
2c03e5be
PA
7704/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7705 is used to skip a function after stepping into it (for "next" or if
7706 the called function has no debugging information).
14e60db5
DJ
7707
7708 The current function has almost always been reached by single
7709 stepping a call or return instruction. NEXT_FRAME belongs to the
7710 current function, and the breakpoint will be set at the caller's
7711 resume address.
7712
7713 This is a separate function rather than reusing
2c03e5be 7714 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7715 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7716 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7717
7718static void
7719insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7720{
14e60db5
DJ
7721 /* We shouldn't have gotten here if we don't know where the call site
7722 is. */
c7ce8faa 7723 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7724
51abb421 7725 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7726
51abb421 7727 symtab_and_line sr_sal;
c7ce8faa
DJ
7728 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7729 frame_unwind_caller_pc (next_frame));
14e60db5 7730 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7731 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7732
a6d9a66e 7733 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7734 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7735}
7736
611c83ae
PA
7737/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7738 new breakpoint at the target of a jmp_buf. The handling of
7739 longjmp-resume uses the same mechanisms used for handling
7740 "step-resume" breakpoints. */
7741
7742static void
a6d9a66e 7743insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7744{
e81a37f7
TT
7745 /* There should never be more than one longjmp-resume breakpoint per
7746 thread, so we should never be setting a new
611c83ae 7747 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7748 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae
PA
7749
7750 if (debug_infrun)
7751 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7752 "infrun: inserting longjmp-resume breakpoint at %s\n",
7753 paddress (gdbarch, pc));
611c83ae 7754
e81a37f7 7755 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7756 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7757}
7758
186c406b
TT
7759/* Insert an exception resume breakpoint. TP is the thread throwing
7760 the exception. The block B is the block of the unwinder debug hook
7761 function. FRAME is the frame corresponding to the call to this
7762 function. SYM is the symbol of the function argument holding the
7763 target PC of the exception. */
7764
7765static void
7766insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7767 const struct block *b,
186c406b
TT
7768 struct frame_info *frame,
7769 struct symbol *sym)
7770{
a70b8144 7771 try
186c406b 7772 {
63e43d3a 7773 struct block_symbol vsym;
186c406b
TT
7774 struct value *value;
7775 CORE_ADDR handler;
7776 struct breakpoint *bp;
7777
987012b8 7778 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7779 b, VAR_DOMAIN);
63e43d3a 7780 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7781 /* If the value was optimized out, revert to the old behavior. */
7782 if (! value_optimized_out (value))
7783 {
7784 handler = value_as_address (value);
7785
7786 if (debug_infrun)
7787 fprintf_unfiltered (gdb_stdlog,
7788 "infrun: exception resume at %lx\n",
7789 (unsigned long) handler);
7790
7791 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7792 handler,
7793 bp_exception_resume).release ();
c70a6932
JK
7794
7795 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7796 frame = NULL;
7797
5d5658a1 7798 bp->thread = tp->global_num;
186c406b
TT
7799 inferior_thread ()->control.exception_resume_breakpoint = bp;
7800 }
7801 }
230d2906 7802 catch (const gdb_exception_error &e)
492d29ea
PA
7803 {
7804 /* We want to ignore errors here. */
7805 }
186c406b
TT
7806}
7807
28106bc2
SDJ
7808/* A helper for check_exception_resume that sets an
7809 exception-breakpoint based on a SystemTap probe. */
7810
7811static void
7812insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7813 const struct bound_probe *probe,
28106bc2
SDJ
7814 struct frame_info *frame)
7815{
7816 struct value *arg_value;
7817 CORE_ADDR handler;
7818 struct breakpoint *bp;
7819
7820 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7821 if (!arg_value)
7822 return;
7823
7824 handler = value_as_address (arg_value);
7825
7826 if (debug_infrun)
7827 fprintf_unfiltered (gdb_stdlog,
7828 "infrun: exception resume at %s\n",
08feed99 7829 paddress (probe->objfile->arch (),
28106bc2
SDJ
7830 handler));
7831
7832 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7833 handler, bp_exception_resume).release ();
5d5658a1 7834 bp->thread = tp->global_num;
28106bc2
SDJ
7835 inferior_thread ()->control.exception_resume_breakpoint = bp;
7836}
7837
186c406b
TT
7838/* This is called when an exception has been intercepted. Check to
7839 see whether the exception's destination is of interest, and if so,
7840 set an exception resume breakpoint there. */
7841
7842static void
7843check_exception_resume (struct execution_control_state *ecs,
28106bc2 7844 struct frame_info *frame)
186c406b 7845{
729662a5 7846 struct bound_probe probe;
28106bc2
SDJ
7847 struct symbol *func;
7848
7849 /* First see if this exception unwinding breakpoint was set via a
7850 SystemTap probe point. If so, the probe has two arguments: the
7851 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7852 set a breakpoint there. */
6bac7473 7853 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7854 if (probe.prob)
28106bc2 7855 {
729662a5 7856 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7857 return;
7858 }
7859
7860 func = get_frame_function (frame);
7861 if (!func)
7862 return;
186c406b 7863
a70b8144 7864 try
186c406b 7865 {
3977b71f 7866 const struct block *b;
8157b174 7867 struct block_iterator iter;
186c406b
TT
7868 struct symbol *sym;
7869 int argno = 0;
7870
7871 /* The exception breakpoint is a thread-specific breakpoint on
7872 the unwinder's debug hook, declared as:
7873
7874 void _Unwind_DebugHook (void *cfa, void *handler);
7875
7876 The CFA argument indicates the frame to which control is
7877 about to be transferred. HANDLER is the destination PC.
7878
7879 We ignore the CFA and set a temporary breakpoint at HANDLER.
7880 This is not extremely efficient but it avoids issues in gdb
7881 with computing the DWARF CFA, and it also works even in weird
7882 cases such as throwing an exception from inside a signal
7883 handler. */
7884
7885 b = SYMBOL_BLOCK_VALUE (func);
7886 ALL_BLOCK_SYMBOLS (b, iter, sym)
7887 {
7888 if (!SYMBOL_IS_ARGUMENT (sym))
7889 continue;
7890
7891 if (argno == 0)
7892 ++argno;
7893 else
7894 {
7895 insert_exception_resume_breakpoint (ecs->event_thread,
7896 b, frame, sym);
7897 break;
7898 }
7899 }
7900 }
230d2906 7901 catch (const gdb_exception_error &e)
492d29ea
PA
7902 {
7903 }
186c406b
TT
7904}
7905
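/* Record that we are done waiting for inferior events: clear
   ECS->wait_some_more so the caller presents the stop to the user, and,
   when running all-stop on top of a non-stop target, stop all threads
   first.  */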
104c1213 7906static void
22bcd14b 7907stop_waiting (struct execution_control_state *ecs)
104c1213 7908{
527159b7 7909 if (debug_infrun)
22bcd14b 7910 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
527159b7 7911
cd0fc7c3
SS
7912 /* Let callers know we don't want to wait for the inferior anymore. */
7913 ecs->wait_some_more = 0;
fbea99ea 7914
53cccef1 7915 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 7916 threads now that we're presenting the stop to the user. */
53cccef1 7917 if (!non_stop && exists_non_stop_target ())
fbea99ea 7918 stop_all_threads ();
cd0fc7c3
SS
7919}
7920
4d9d9d04
PA
7921/* Like keep_going, but passes the signal to the inferior, even if the
7922 signal is set to nopass. */
d4f3574e
SS
7923
7924static void
4d9d9d04 7925keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7926{
d7e15655 7927 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7928 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7929
d4f3574e 7930 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7931 ecs->event_thread->prev_pc
00431a78 7932 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
d4f3574e 7933
4d9d9d04 7934 if (ecs->event_thread->control.trap_expected)
d4f3574e 7935 {
4d9d9d04
PA
7936 struct thread_info *tp = ecs->event_thread;
7937
7938 if (debug_infrun)
7939 fprintf_unfiltered (gdb_stdlog,
7940 "infrun: %s has trap_expected set, "
7941 "resuming to collect trap\n",
a068643d 7942 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7943
a9ba6bae
PA
7944 /* We haven't yet gotten our trap, and either: intercepted a
7945 non-signal event (e.g., a fork); or took a signal which we
7946 are supposed to pass through to the inferior. Simply
7947 continue. */
64ce06e4 7948 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7949 }
372316f1
PA
7950 else if (step_over_info_valid_p ())
7951 {
7952 /* Another thread is stepping over a breakpoint in-line. If
7953 this thread needs a step-over too, queue the request. In
7954 either case, this resume must be deferred for later. */
7955 struct thread_info *tp = ecs->event_thread;
7956
7957 if (ecs->hit_singlestep_breakpoint
7958 || thread_still_needs_step_over (tp))
7959 {
7960 if (debug_infrun)
7961 fprintf_unfiltered (gdb_stdlog,
7962 "infrun: step-over already in progress: "
7963 "step-over for %s deferred\n",
a068643d 7964 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
7965 thread_step_over_chain_enqueue (tp);
7966 }
7967 else
7968 {
7969 if (debug_infrun)
7970 fprintf_unfiltered (gdb_stdlog,
7971 "infrun: step-over in progress: "
7972 "resume of %s deferred\n",
a068643d 7973 target_pid_to_str (tp->ptid).c_str ());
372316f1 7974 }
372316f1 7975 }
d4f3574e
SS
7976 else
7977 {
31e77af2 7978 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7979 int remove_bp;
7980 int remove_wps;
8d297bbf 7981 step_over_what step_what;
31e77af2 7982
d4f3574e 7983 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7984 anyway (if we got a signal, the user asked it be passed to
7985 the child)
7986 -- or --
7987 We got our expected trap, but decided we should resume from
7988 it.
d4f3574e 7989
a9ba6bae 7990 We're going to run this baby now!
d4f3574e 7991
c36b740a
VP
7992 Note that insert_breakpoints won't try to re-insert
7993 already inserted breakpoints. Therefore, we don't
7994 care if breakpoints were already inserted, or not. */
a9ba6bae 7995
31e77af2
PA
7996 /* If we need to step over a breakpoint, and we're not using
7997 displaced stepping to do so, insert all breakpoints
7998 (watchpoints, etc.) but the one we're stepping over, step one
7999 instruction, and then re-insert the breakpoint when that step
8000 is finished. */
963f9c80 8001
6c4cfb24
PA
8002 step_what = thread_still_needs_step_over (ecs->event_thread);
8003
963f9c80 8004 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
8005 || (step_what & STEP_OVER_BREAKPOINT));
8006 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 8007
cb71640d
PA
8008 /* We can't use displaced stepping if we need to step past a
8009 watchpoint. The instruction copied to the scratch pad would
8010 still trigger the watchpoint. */
8011 if (remove_bp
3fc8eb30 8012 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 8013 {
a01bda52 8014 set_step_over_info (regcache->aspace (),
21edc42f
YQ
8015 regcache_read_pc (regcache), remove_wps,
8016 ecs->event_thread->global_num);
45e8c884 8017 }
963f9c80 8018 else if (remove_wps)
21edc42f 8019 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
8020
8021 /* If we now need to do an in-line step-over, we need to stop
8022 all other threads. Note this must be done before
8023 insert_breakpoints below, because that removes the breakpoint
8024 we're about to step over, otherwise other threads could miss
8025 it. */
fbea99ea 8026 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 8027 stop_all_threads ();
abbb1732 8028
31e77af2 8029 /* Stop stepping if inserting breakpoints fails. */
a70b8144 8030 try
31e77af2
PA
8031 {
8032 insert_breakpoints ();
8033 }
230d2906 8034 catch (const gdb_exception_error &e)
31e77af2
PA
8035 {
8036 exception_print (gdb_stderr, e);
22bcd14b 8037 stop_waiting (ecs);
bdf2a94a 8038 clear_step_over_info ();
31e77af2 8039 return;
d4f3574e
SS
8040 }
8041
963f9c80 8042 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 8043
64ce06e4 8044 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
8045 }
8046
488f131b 8047 prepare_to_wait (ecs);
d4f3574e
SS
8048}
8049
4d9d9d04
PA
8050/* Called when we should continue running the inferior, because the
8051 current event doesn't cause a user visible stop. This does the
8052 resuming part; waiting for the next event is done elsewhere. */
8053
8054static void
8055keep_going (struct execution_control_state *ecs)
8056{
8057 if (ecs->event_thread->control.trap_expected
8058 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
8059 ecs->event_thread->control.trap_expected = 0;
8060
8061 if (!signal_program[ecs->event_thread->suspend.stop_signal])
8062 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
8063 keep_going_pass_signal (ecs);
8064}
8065
104c1213
JM
8066/* This function normally comes after a resume, before
8067 handle_inferior_event exits. It takes care of any last bits of
8068 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 8069
104c1213
JM
8070static void
8071prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 8072{
527159b7 8073 if (debug_infrun)
8a9de0e4 8074 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
104c1213 8075
104c1213 8076 ecs->wait_some_more = 1;
0b333c5e
PA
8077
8078 if (!target_is_async_p ())
8079 mark_infrun_async_event_handler ();
c906108c 8080}
11cf8741 8081
fd664c91 8082/* We are done with the step range of a step/next/si/ni command.
b57bacec 8083 Called once for each n of a "step n" operation. */
fd664c91
PA
8084
8085static void
bdc36728 8086end_stepping_range (struct execution_control_state *ecs)
fd664c91 8087{
bdc36728 8088 ecs->event_thread->control.stop_step = 1;
bdc36728 8089 stop_waiting (ecs);
fd664c91
PA
8090}
8091
33d62d64
JK
8092/* Several print_*_reason functions to print why the inferior has stopped.
8093 We always print something when the inferior exits, or receives a signal.
8094 The rest of the cases are dealt with later on in normal_stop and
8095 print_it_typical. Ideally there should be a call to one of these
 8096 print_*_reason functions from handle_inferior_event each time
22bcd14b 8097 stop_waiting is called.
33d62d64 8098
fd664c91
PA
8099 Note that we don't call these directly, instead we delegate that to
8100 the interpreters, through observers. Interpreters then call these
8101 with whatever uiout is right. */
33d62d64 8102
fd664c91
PA
8103void
8104print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 8105{
fd664c91 8106 /* For CLI-like interpreters, print nothing. */
33d62d64 8107
112e8700 8108 if (uiout->is_mi_like_p ())
fd664c91 8109 {
112e8700 8110 uiout->field_string ("reason",
fd664c91
PA
8111 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8112 }
8113}
33d62d64 8114
fd664c91
PA
8115void
8116print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 8117{
33d62d64 8118 annotate_signalled ();
112e8700
SM
8119 if (uiout->is_mi_like_p ())
8120 uiout->field_string
8121 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
8122 uiout->text ("\nProgram terminated with signal ");
33d62d64 8123 annotate_signal_name ();
112e8700 8124 uiout->field_string ("signal-name",
2ea28649 8125 gdb_signal_to_name (siggnal));
33d62d64 8126 annotate_signal_name_end ();
112e8700 8127 uiout->text (", ");
33d62d64 8128 annotate_signal_string ();
112e8700 8129 uiout->field_string ("signal-meaning",
2ea28649 8130 gdb_signal_to_string (siggnal));
33d62d64 8131 annotate_signal_string_end ();
112e8700
SM
8132 uiout->text (".\n");
8133 uiout->text ("The program no longer exists.\n");
33d62d64
JK
8134}
8135
fd664c91
PA
8136void
8137print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 8138{
fda326dd 8139 struct inferior *inf = current_inferior ();
a068643d 8140 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 8141
33d62d64
JK
8142 annotate_exited (exitstatus);
8143 if (exitstatus)
8144 {
112e8700
SM
8145 if (uiout->is_mi_like_p ())
8146 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
8147 std::string exit_code_str
8148 = string_printf ("0%o", (unsigned int) exitstatus);
8149 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
8150 plongest (inf->num), pidstr.c_str (),
8151 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
8152 }
8153 else
11cf8741 8154 {
112e8700
SM
8155 if (uiout->is_mi_like_p ())
8156 uiout->field_string
8157 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
8158 uiout->message ("[Inferior %s (%s) exited normally]\n",
8159 plongest (inf->num), pidstr.c_str ());
33d62d64 8160 }
33d62d64
JK
8161}
8162
012b3a21
WT
8163/* Some targets/architectures can do extra processing/display of
8164 segmentation faults. E.g., Intel MPX boundary faults.
8165 Call the architecture dependent function to handle the fault. */
8166
8167static void
8168handle_segmentation_fault (struct ui_out *uiout)
8169{
8170 struct regcache *regcache = get_current_regcache ();
ac7936df 8171 struct gdbarch *gdbarch = regcache->arch ();
012b3a21
WT
8172
8173 if (gdbarch_handle_segmentation_fault_p (gdbarch))
8174 gdbarch_handle_segmentation_fault (gdbarch, uiout);
8175}
8176
fd664c91
PA
8177void
8178print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 8179{
f303dbd6
PA
8180 struct thread_info *thr = inferior_thread ();
8181
33d62d64
JK
8182 annotate_signal ();
8183
112e8700 8184 if (uiout->is_mi_like_p ())
f303dbd6
PA
8185 ;
8186 else if (show_thread_that_caused_stop ())
33d62d64 8187 {
f303dbd6 8188 const char *name;
33d62d64 8189
112e8700 8190 uiout->text ("\nThread ");
33eca680 8191 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
8192
8193 name = thr->name != NULL ? thr->name : target_thread_name (thr);
8194 if (name != NULL)
8195 {
112e8700 8196 uiout->text (" \"");
33eca680 8197 uiout->field_string ("name", name);
112e8700 8198 uiout->text ("\"");
f303dbd6 8199 }
33d62d64 8200 }
f303dbd6 8201 else
112e8700 8202 uiout->text ("\nProgram");
f303dbd6 8203
112e8700
SM
8204 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8205 uiout->text (" stopped");
33d62d64
JK
8206 else
8207 {
112e8700 8208 uiout->text (" received signal ");
8b93c638 8209 annotate_signal_name ();
112e8700
SM
8210 if (uiout->is_mi_like_p ())
8211 uiout->field_string
8212 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8213 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8214 annotate_signal_name_end ();
112e8700 8215 uiout->text (", ");
8b93c638 8216 annotate_signal_string ();
112e8700 8217 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
8218
8219 if (siggnal == GDB_SIGNAL_SEGV)
8220 handle_segmentation_fault (uiout);
8221
8b93c638 8222 annotate_signal_string_end ();
33d62d64 8223 }
112e8700 8224 uiout->text (".\n");
33d62d64 8225}
252fbfc8 8226
fd664c91
PA
8227void
8228print_no_history_reason (struct ui_out *uiout)
33d62d64 8229{
112e8700 8230 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8231}
43ff13b4 8232
0c7e1a46
PA
8233/* Print current location without a level number, if we have changed
8234 functions or hit a breakpoint. Print source line if we have one.
8235 bpstat_print contains the logic deciding in detail what to print,
8236 based on the event(s) that just occurred. */
8237
243a9253
PA
8238static void
8239print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8240{
8241 int bpstat_ret;
f486487f 8242 enum print_what source_flag;
0c7e1a46
PA
8243 int do_frame_printing = 1;
8244 struct thread_info *tp = inferior_thread ();
8245
8246 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8247 switch (bpstat_ret)
8248 {
8249 case PRINT_UNKNOWN:
8250 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8251 should) carry around the function and does (or should) use
8252 that when doing a frame comparison. */
8253 if (tp->control.stop_step
8254 && frame_id_eq (tp->control.step_frame_id,
8255 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8256 && (tp->control.step_start_function
8257 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8258 {
8259 /* Finished step, just print source line. */
8260 source_flag = SRC_LINE;
8261 }
8262 else
8263 {
8264 /* Print location and source line. */
8265 source_flag = SRC_AND_LOC;
8266 }
8267 break;
8268 case PRINT_SRC_AND_LOC:
8269 /* Print location and source line. */
8270 source_flag = SRC_AND_LOC;
8271 break;
8272 case PRINT_SRC_ONLY:
8273 source_flag = SRC_LINE;
8274 break;
8275 case PRINT_NOTHING:
8276 /* Something bogus. */
8277 source_flag = SRC_LINE;
8278 do_frame_printing = 0;
8279 break;
8280 default:
8281 internal_error (__FILE__, __LINE__, _("Unknown value."));
8282 }
8283
8284 /* The behavior of this routine with respect to the source
8285 flag is:
8286 SRC_LINE: Print only source line
8287 LOCATION: Print only location
8288 SRC_AND_LOC: Print location and source line. */
8289 if (do_frame_printing)
8290 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8291}
8292
243a9253
PA
8293/* See infrun.h. */
8294
8295void
4c7d57e7 8296print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8297{
243a9253 8298 struct target_waitstatus last;
243a9253
PA
8299 struct thread_info *tp;
8300
5b6d1e4f 8301 get_last_target_status (nullptr, nullptr, &last);
243a9253 8302
67ad9399
TT
8303 {
8304 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8305
67ad9399 8306 print_stop_location (&last);
243a9253 8307
67ad9399 8308 /* Display the auto-display expressions. */
4c7d57e7
TT
8309 if (displays)
8310 do_displays ();
67ad9399 8311 }
243a9253
PA
8312
8313 tp = inferior_thread ();
8314 if (tp->thread_fsm != NULL
46e3ed7f 8315 && tp->thread_fsm->finished_p ())
243a9253
PA
8316 {
8317 struct return_value_info *rv;
8318
46e3ed7f 8319 rv = tp->thread_fsm->return_value ();
243a9253
PA
8320 if (rv != NULL)
8321 print_return_value (uiout, rv);
8322 }
0c7e1a46
PA
8323}
8324
388a7084
PA
8325/* See infrun.h. */
8326
8327void
8328maybe_remove_breakpoints (void)
8329{
8330 if (!breakpoints_should_be_inserted_now () && target_has_execution)
8331 {
8332 if (remove_breakpoints ())
8333 {
223ffa71 8334 target_terminal::ours_for_output ();
388a7084
PA
8335 printf_filtered (_("Cannot remove breakpoints because "
8336 "program is no longer writable.\nFurther "
8337 "execution is probably impossible.\n"));
8338 }
8339 }
8340}
8341
4c2f2a79
PA
8342/* The execution context that just caused a normal stop. */
8343
8344struct stop_context
8345{
2d844eaf
TT
8346 stop_context ();
8347 ~stop_context ();
8348
8349 DISABLE_COPY_AND_ASSIGN (stop_context);
8350
8351 bool changed () const;
8352
4c2f2a79
PA
8353 /* The stop ID. */
8354 ULONGEST stop_id;
c906108c 8355
4c2f2a79 8356 /* The event PTID. */
c906108c 8357
4c2f2a79
PA
8358 ptid_t ptid;
8359
 8360 /* If stopped for a thread event, this is the thread that caused the
8361 stop. */
8362 struct thread_info *thread;
8363
8364 /* The inferior that caused the stop. */
8365 int inf_num;
8366};
8367
2d844eaf 8368/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8369 takes a strong reference to the thread. */
8370
2d844eaf 8371stop_context::stop_context ()
4c2f2a79 8372{
2d844eaf
TT
8373 stop_id = get_stop_id ();
8374 ptid = inferior_ptid;
8375 inf_num = current_inferior ()->num;
4c2f2a79 8376
d7e15655 8377 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8378 {
8379 /* Take a strong reference so that the thread can't be deleted
8380 yet. */
2d844eaf
TT
8381 thread = inferior_thread ();
8382 thread->incref ();
4c2f2a79
PA
8383 }
8384 else
2d844eaf 8385 thread = NULL;
4c2f2a79
PA
8386}
8387
 8388/* Release a stop context previously created by the constructor above.
 8389 Releases the strong reference to the thread as well. */
8390
2d844eaf 8391stop_context::~stop_context ()
4c2f2a79 8392{
2d844eaf
TT
8393 if (thread != NULL)
8394 thread->decref ();
4c2f2a79
PA
8395}
8396
8397/* Return true if the current context no longer matches the saved stop
8398 context. */
8399
2d844eaf
TT
8400bool
8401stop_context::changed () const
8402{
8403 if (ptid != inferior_ptid)
8404 return true;
8405 if (inf_num != current_inferior ()->num)
8406 return true;
8407 if (thread != NULL && thread->state != THREAD_STOPPED)
8408 return true;
8409 if (get_stop_id () != stop_id)
8410 return true;
8411 return false;
4c2f2a79
PA
8412}
8413
8414/* See infrun.h. */
8415
8416int
96baa820 8417normal_stop (void)
c906108c 8418{
73b65bb0 8419 struct target_waitstatus last;
73b65bb0 8420
5b6d1e4f 8421 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8422
4c2f2a79
PA
8423 new_stop_id ();
8424
29f49a6a
PA
8425 /* If an exception is thrown from this point on, make sure to
8426 propagate GDB's knowledge of the executing state to the
8427 frontend/user running state. A QUIT is an easy exception to see
8428 here, so do this before any filtered output. */
731f534f 8429
5b6d1e4f 8430 ptid_t finish_ptid = null_ptid;
731f534f 8431
c35b1492 8432 if (!non_stop)
5b6d1e4f 8433 finish_ptid = minus_one_ptid;
e1316e60
PA
8434 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8435 || last.kind == TARGET_WAITKIND_EXITED)
8436 {
8437 /* On some targets, we may still have live threads in the
8438 inferior when we get a process exit event. E.g., for
8439 "checkpoint", when the current checkpoint/fork exits,
8440 linux-fork.c automatically switches to another fork from
8441 within target_mourn_inferior. */
731f534f 8442 if (inferior_ptid != null_ptid)
5b6d1e4f 8443 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8444 }
8445 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8446 finish_ptid = inferior_ptid;
8447
8448 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8449 if (finish_ptid != null_ptid)
8450 {
8451 maybe_finish_thread_state.emplace
8452 (user_visible_resume_target (finish_ptid), finish_ptid);
8453 }
29f49a6a 8454
b57bacec
PA
8455 /* As we're presenting a stop, and potentially removing breakpoints,
8456 update the thread list so we can tell whether there are threads
8457 running on the target. With target remote, for example, we can
8458 only learn about new threads when we explicitly update the thread
8459 list. Do this before notifying the interpreters about signal
8460 stops, end of stepping ranges, etc., so that the "new thread"
8461 output is emitted before e.g., "Program received signal FOO",
8462 instead of after. */
8463 update_thread_list ();
8464
8465 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8466 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8467
c906108c
SS
8468 /* As with the notification of thread events, we want to delay
8469 notifying the user that we've switched thread context until
8470 the inferior actually stops.
8471
73b65bb0
DJ
8472 There's no point in saying anything if the inferior has exited.
8473 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8474 "received a signal".
8475
8476 Also skip saying anything in non-stop mode. In that mode, as we
8477 don't want GDB to switch threads behind the user's back, to avoid
8478 races where the user is typing a command to apply to thread x,
8479 but GDB switches to thread y before the user finishes entering
8480 the command, fetch_inferior_event installs a cleanup to restore
8481 the current thread back to the thread the user had selected right
8482 after this event is handled, so we're not really switching, only
8483 informing of a stop. */
4f8d22e3 8484 if (!non_stop
731f534f 8485 && previous_inferior_ptid != inferior_ptid
73b65bb0
DJ
8486 && target_has_execution
8487 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8488 && last.kind != TARGET_WAITKIND_EXITED
8489 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8490 {
0e454242 8491 SWITCH_THRU_ALL_UIS ()
3b12939d 8492 {
223ffa71 8493 target_terminal::ours_for_output ();
3b12939d 8494 printf_filtered (_("[Switching to %s]\n"),
a068643d 8495 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8496 annotate_thread_changed ();
8497 }
39f77062 8498 previous_inferior_ptid = inferior_ptid;
c906108c 8499 }
c906108c 8500
0e5bf2a8
PA
8501 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8502 {
0e454242 8503 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8504 if (current_ui->prompt_state == PROMPT_BLOCKED)
8505 {
223ffa71 8506 target_terminal::ours_for_output ();
3b12939d
PA
8507 printf_filtered (_("No unwaited-for children left.\n"));
8508 }
0e5bf2a8
PA
8509 }
8510
b57bacec 8511 /* Note: this depends on the update_thread_list call above. */
388a7084 8512 maybe_remove_breakpoints ();
c906108c 8513
c906108c
SS
8514 /* If an auto-display called a function and that got a signal,
8515 delete that auto-display to avoid an infinite recursion. */
8516
8517 if (stopped_by_random_signal)
8518 disable_current_display ();
8519
0e454242 8520 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8521 {
8522 async_enable_stdin ();
8523 }
c906108c 8524
388a7084 8525 /* Let the user/frontend see the threads as stopped. */
731f534f 8526 maybe_finish_thread_state.reset ();
388a7084
PA
8527
8528 /* Select innermost stack frame - i.e., current frame is frame 0,
8529 and current location is based on that. Handle the case where the
8530 dummy call is returning after being stopped. E.g. the dummy call
8531 previously hit a breakpoint. (If the dummy call returns
8532 normally, we won't reach here.) Do this before the stop hook is
8533 run, so that it doesn't get to see the temporary dummy frame,
8534 which is not where we'll present the stop. */
8535 if (has_stack_frames ())
8536 {
8537 if (stop_stack_dummy == STOP_STACK_DUMMY)
8538 {
8539 /* Pop the empty frame that contains the stack dummy. This
8540 also restores inferior state prior to the call (struct
8541 infcall_suspend_state). */
8542 struct frame_info *frame = get_current_frame ();
8543
8544 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8545 frame_pop (frame);
8546 /* frame_pop calls reinit_frame_cache as the last thing it
8547 does which means there's now no selected frame. */
8548 }
8549
8550 select_frame (get_current_frame ());
8551
8552 /* Set the current source location. */
8553 set_current_sal_from_frame (get_current_frame ());
8554 }
dd7e2d2b
PA
8555
8556 /* Look up the hook_stop and run it (CLI internally handles problem
8557 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8558 if (stop_command != NULL)
8559 {
2d844eaf 8560 stop_context saved_context;
4c2f2a79 8561
a70b8144 8562 try
bf469271
PA
8563 {
8564 execute_cmd_pre_hook (stop_command);
8565 }
230d2906 8566 catch (const gdb_exception &ex)
bf469271
PA
8567 {
8568 exception_fprintf (gdb_stderr, ex,
8569 "Error while running hook_stop:\n");
8570 }
4c2f2a79
PA
8571
8572 /* If the stop hook resumes the target, then there's no point in
8573 trying to notify about the previous stop; its context is
8574 gone. Likewise if the command switches thread or inferior --
8575 the observers would print a stop for the wrong
8576 thread/inferior. */
2d844eaf
TT
8577 if (saved_context.changed ())
8578 return 1;
4c2f2a79 8579 }
dd7e2d2b 8580
388a7084
PA
8581 /* Notify observers about the stop. This is where the interpreters
8582 print the stop event. */
d7e15655 8583 if (inferior_ptid != null_ptid)
76727919 8584 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8585 stop_print_frame);
8586 else
76727919 8587 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8588
243a9253
PA
8589 annotate_stopped ();
8590
48844aa6
PA
8591 if (target_has_execution)
8592 {
8593 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8594 && last.kind != TARGET_WAITKIND_EXITED
8595 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8596 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8597 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8598 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8599 }
6c95b8df
PA
8600
8601 /* Try to get rid of automatically added inferiors that are no
8602 longer needed. Keeping those around slows down things linearly.
8603 Note that this never removes the current inferior. */
8604 prune_inferiors ();
4c2f2a79
PA
8605
8606 return 0;
c906108c 8607}
c906108c 8608\f
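/* Accessors for the "handle" command's per-signal tables; SIGNO is a
   signal in GDB's own numbering (enum gdb_signal).  */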
c5aa993b 8609int
96baa820 8610signal_stop_state (int signo)
c906108c 8611{
d6b48e9c 8612 return signal_stop[signo];
c906108c
SS
8613}
8614
c5aa993b 8615int
96baa820 8616signal_print_state (int signo)
c906108c
SS
8617{
8618 return signal_print[signo];
8619}
8620
c5aa993b 8621int
96baa820 8622signal_pass_state (int signo)
c906108c
SS
8623{
8624 return signal_program[signo];
8625}
8626
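/* Recompute the cached signal_pass entry for SIGNO from the stop,
   print, program and catch tables; SIGNO == -1 refreshes the whole
   cache.  */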
2455069d
UW
8627static void
8628signal_cache_update (int signo)
8629{
8630 if (signo == -1)
8631 {
a493e3e2 8632 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8633 signal_cache_update (signo);
8634
8635 return;
8636 }
8637
8638 signal_pass[signo] = (signal_stop[signo] == 0
8639 && signal_print[signo] == 0
ab04a2af
TT
8640 && signal_program[signo] == 1
8641 && signal_catch[signo] == 0);
2455069d
UW
8642}
8643
488f131b 8644int
7bda5e4a 8645signal_stop_update (int signo, int state)
d4f3574e
SS
8646{
8647 int ret = signal_stop[signo];
abbb1732 8648
d4f3574e 8649 signal_stop[signo] = state;
2455069d 8650 signal_cache_update (signo);
d4f3574e
SS
8651 return ret;
8652}
8653
488f131b 8654int
7bda5e4a 8655signal_print_update (int signo, int state)
d4f3574e
SS
8656{
8657 int ret = signal_print[signo];
abbb1732 8658
d4f3574e 8659 signal_print[signo] = state;
2455069d 8660 signal_cache_update (signo);
d4f3574e
SS
8661 return ret;
8662}
8663
488f131b 8664int
7bda5e4a 8665signal_pass_update (int signo, int state)
d4f3574e
SS
8666{
8667 int ret = signal_program[signo];
abbb1732 8668
d4f3574e 8669 signal_program[signo] = state;
2455069d 8670 signal_cache_update (signo);
d4f3574e
SS
8671 return ret;
8672}
8673
ab04a2af
TT
8674/* Update the global 'signal_catch' from INFO and notify the
8675 target. */
8676
8677void
8678signal_catch_update (const unsigned int *info)
8679{
8680 int i;
8681
8682 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8683 signal_catch[i] = info[i] > 0;
8684 signal_cache_update (-1);
adc6a863 8685 target_pass_signals (signal_pass);
ab04a2af
TT
8686}
8687
c906108c 8688static void
96baa820 8689sig_print_header (void)
c906108c 8690{
3e43a32a
MS
8691 printf_filtered (_("Signal Stop\tPrint\tPass "
8692 "to program\tDescription\n"));
c906108c
SS
8693}
8694
8695static void
2ea28649 8696sig_print_info (enum gdb_signal oursig)
c906108c 8697{
2ea28649 8698 const char *name = gdb_signal_to_name (oursig);
c906108c 8699 int name_padding = 13 - strlen (name);
96baa820 8700
c906108c
SS
8701 if (name_padding <= 0)
8702 name_padding = 0;
8703
8704 printf_filtered ("%s", name);
488f131b 8705 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8706 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8707 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8708 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8709 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8710}
8711
8712/* Specify how various signals in the inferior should be handled. */
8713
8714static void
0b39b52e 8715handle_command (const char *args, int from_tty)
c906108c 8716{
c906108c 8717 int digits, wordlen;
b926417a 8718 int sigfirst, siglast;
2ea28649 8719 enum gdb_signal oursig;
c906108c 8720 int allsigs;
c906108c
SS
8721
8722 if (args == NULL)
8723 {
e2e0b3e5 8724 error_no_arg (_("signal to handle"));
c906108c
SS
8725 }
8726
1777feb0 8727 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8728
adc6a863
PA
8729 const size_t nsigs = GDB_SIGNAL_LAST;
8730 unsigned char sigs[nsigs] {};
c906108c 8731
1777feb0 8732 /* Break the command line up into args. */
c906108c 8733
773a1edc 8734 gdb_argv built_argv (args);
c906108c
SS
8735
8736 /* Walk through the args, looking for signal oursigs, signal names, and
8737 actions. Signal numbers and signal names may be interspersed with
8738 actions, with the actions being performed for all signals cumulatively
1777feb0 8739 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
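  /* For example, "handle SIGUSR1 SIGUSR2 nostop noprint pass" applies
     all three actions to both signals.  */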
c906108c 8740
773a1edc 8741 for (char *arg : built_argv)
c906108c 8742 {
773a1edc
TT
8743 wordlen = strlen (arg);
8744 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8745 {;
8746 }
8747 allsigs = 0;
8748 sigfirst = siglast = -1;
8749
773a1edc 8750 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8751 {
8752 /* Apply action to all signals except those used by the
1777feb0 8753 debugger. Silently skip those. */
c906108c
SS
8754 allsigs = 1;
8755 sigfirst = 0;
8756 siglast = nsigs - 1;
8757 }
773a1edc 8758 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8759 {
8760 SET_SIGS (nsigs, sigs, signal_stop);
8761 SET_SIGS (nsigs, sigs, signal_print);
8762 }
773a1edc 8763 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8764 {
8765 UNSET_SIGS (nsigs, sigs, signal_program);
8766 }
773a1edc 8767 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8768 {
8769 SET_SIGS (nsigs, sigs, signal_print);
8770 }
773a1edc 8771 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8772 {
8773 SET_SIGS (nsigs, sigs, signal_program);
8774 }
773a1edc 8775 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8776 {
8777 UNSET_SIGS (nsigs, sigs, signal_stop);
8778 }
773a1edc 8779 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8780 {
8781 SET_SIGS (nsigs, sigs, signal_program);
8782 }
773a1edc 8783 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8784 {
8785 UNSET_SIGS (nsigs, sigs, signal_print);
8786 UNSET_SIGS (nsigs, sigs, signal_stop);
8787 }
773a1edc 8788 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8789 {
8790 UNSET_SIGS (nsigs, sigs, signal_program);
8791 }
8792 else if (digits > 0)
8793 {
8794 /* It is numeric. The numeric signal refers to our own
8795 internal signal numbering from target.h, not to host/target
8796 signal number. This is a feature; users really should be
8797 using symbolic names anyway, and the common ones like
8798 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8799
8800 sigfirst = siglast = (int)
773a1edc
TT
8801 gdb_signal_from_command (atoi (arg));
8802 if (arg[digits] == '-')
c906108c
SS
8803 {
8804 siglast = (int)
773a1edc 8805 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8806 }
8807 if (sigfirst > siglast)
8808 {
1777feb0 8809 /* Bet he didn't figure we'd think of this case... */
b926417a 8810 std::swap (sigfirst, siglast);
c906108c
SS
8811 }
8812 }
8813 else
8814 {
773a1edc 8815 oursig = gdb_signal_from_name (arg);
a493e3e2 8816 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8817 {
8818 sigfirst = siglast = (int) oursig;
8819 }
8820 else
8821 {
8822 /* Not a number and not a recognized flag word => complain. */
773a1edc 8823 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8824 }
8825 }
8826
8827 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8828 which signals to apply actions to. */
c906108c 8829
b926417a 8830 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8831 {
2ea28649 8832 switch ((enum gdb_signal) signum)
c906108c 8833 {
a493e3e2
PA
8834 case GDB_SIGNAL_TRAP:
8835 case GDB_SIGNAL_INT:
c906108c
SS
8836 if (!allsigs && !sigs[signum])
8837 {
9e2f0ad4 8838 if (query (_("%s is used by the debugger.\n\
3e43a32a 8839Are you sure you want to change it? "),
2ea28649 8840 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8841 {
8842 sigs[signum] = 1;
8843 }
8844 else
c119e040 8845 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8846 }
8847 break;
a493e3e2
PA
8848 case GDB_SIGNAL_0:
8849 case GDB_SIGNAL_DEFAULT:
8850 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8851 /* Make sure that "all" doesn't print these. */
8852 break;
8853 default:
8854 sigs[signum] = 1;
8855 break;
8856 }
8857 }
c906108c
SS
8858 }
8859
b926417a 8860 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8861 if (sigs[signum])
8862 {
2455069d 8863 signal_cache_update (-1);
adc6a863
PA
8864 target_pass_signals (signal_pass);
8865 target_program_signals (signal_program);
c906108c 8866
3a031f65
PA
8867 if (from_tty)
8868 {
8869 /* Show the results. */
8870 sig_print_header ();
8871 for (; signum < nsigs; signum++)
8872 if (sigs[signum])
aead7601 8873 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8874 }
8875
8876 break;
8877 }
c906108c
SS
8878}
8879
de0bea00
MF
8880/* Complete the "handle" command. */
8881
eb3ff9a5 8882static void
de0bea00 8883handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8884 completion_tracker &tracker,
6f937416 8885 const char *text, const char *word)
de0bea00 8886{
de0bea00
MF
8887 static const char * const keywords[] =
8888 {
8889 "all",
8890 "stop",
8891 "ignore",
8892 "print",
8893 "pass",
8894 "nostop",
8895 "noignore",
8896 "noprint",
8897 "nopass",
8898 NULL,
8899 };
8900
eb3ff9a5
PA
8901 signal_completer (ignore, tracker, text, word);
8902 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8903}
8904
2ea28649
PA
8905enum gdb_signal
8906gdb_signal_from_command (int num)
ed01b82c
PA
8907{
8908 if (num >= 1 && num <= 15)
2ea28649 8909 return (enum gdb_signal) num;
ed01b82c
PA
8910 error (_("Only signals 1-15 are valid as numeric signals.\n\
8911Use \"info signals\" for a list of symbolic signals."));
8912}
8913
c906108c
SS
8914/* Print current contents of the tables set by the handle command.
8915 It is possible we should just be printing signals actually used
8916 by the current target (but for things to work right when switching
8917 targets, all signals should be in the signal tables). */
8918
8919static void
1d12d88f 8920info_signals_command (const char *signum_exp, int from_tty)
c906108c 8921{
2ea28649 8922 enum gdb_signal oursig;
abbb1732 8923
c906108c
SS
8924 sig_print_header ();
8925
8926 if (signum_exp)
8927 {
8928 /* First see if this is a symbol name. */
2ea28649 8929 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8930 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8931 {
8932 /* No, try numeric. */
8933 oursig =
2ea28649 8934 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8935 }
8936 sig_print_info (oursig);
8937 return;
8938 }
8939
8940 printf_filtered ("\n");
8941 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8942 for (oursig = GDB_SIGNAL_FIRST;
8943 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8944 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8945 {
8946 QUIT;
8947
a493e3e2
PA
8948 if (oursig != GDB_SIGNAL_UNKNOWN
8949 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8950 sig_print_info (oursig);
8951 }
8952
3e43a32a
MS
8953 printf_filtered (_("\nUse the \"handle\" command "
8954 "to change these tables.\n"));
c906108c 8955}
4aa995e1
PA
8956
8957/* The $_siginfo convenience variable is a bit special. We don't know
8958 for sure the type of the value until we actually have a chance to
7a9dd1b2 8959 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
 8960 also dependent on which thread you have selected. This is handled by:
8961
8962 1. making $_siginfo be an internalvar that creates a new value on
8963 access.
8964
8965 2. making the value of $_siginfo be an lval_computed value. */
8966
8967/* This function implements the lval_computed support for reading a
8968 $_siginfo value. */
8969
8970static void
8971siginfo_value_read (struct value *v)
8972{
8973 LONGEST transferred;
8974
a911d87a
PA
8975 /* If we can access registers, so can we access $_siginfo. Likewise
8976 vice versa. */
8977 validate_registers_access ();
c709acd1 8978
4aa995e1 8979 transferred =
8b88a78e 8980 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8981 NULL,
8982 value_contents_all_raw (v),
8983 value_offset (v),
8984 TYPE_LENGTH (value_type (v)));
8985
8986 if (transferred != TYPE_LENGTH (value_type (v)))
8987 error (_("Unable to read siginfo"));
8988}
8989
8990/* This function implements the lval_computed support for writing a
8991 $_siginfo value. */
8992
8993static void
8994siginfo_value_write (struct value *v, struct value *fromval)
8995{
8996 LONGEST transferred;
8997
a911d87a
PA
8998 /* If we can access registers, so can we access $_siginfo. Likewise
8999 vice versa. */
9000 validate_registers_access ();
c709acd1 9001
8b88a78e 9002 transferred = target_write (current_top_target (),
4aa995e1
PA
9003 TARGET_OBJECT_SIGNAL_INFO,
9004 NULL,
9005 value_contents_all_raw (fromval),
9006 value_offset (v),
9007 TYPE_LENGTH (value_type (fromval)));
9008
9009 if (transferred != TYPE_LENGTH (value_type (fromval)))
9010 error (_("Unable to write siginfo"));
9011}
9012
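/* The lval_computed read/write callbacks used by values of $_siginfo.  */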
c8f2448a 9013static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
9014 {
9015 siginfo_value_read,
9016 siginfo_value_write
9017 };
9018
9019/* Return a new value with the correct type for the siginfo object of
78267919
UW
9020 the current thread using architecture GDBARCH. Return a void value
9021 if there's no object available. */
4aa995e1 9022
2c0b251b 9023static struct value *
22d2b532
SDJ
9024siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9025 void *ignore)
4aa995e1 9026{
4aa995e1 9027 if (target_has_stack
d7e15655 9028 && inferior_ptid != null_ptid
78267919 9029 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 9030 {
78267919 9031 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 9032
78267919 9033 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
9034 }
9035
78267919 9036 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
9037}
9038
c906108c 9039\f
16c381f0
JK
9040/* infcall_suspend_state contains state about the program itself like its
9041 registers and any signal it received when it last stopped.
9042 This state must be restored regardless of how the inferior function call
9043 ends (either successfully, or after it hits a breakpoint or signal)
9044 if the program is to properly continue where it left off. */
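/* An illustrative sketch (not the authoritative caller, which lives in
   infcall.c) of how this state is meant to be used:

     infcall_suspend_state_up saved = save_infcall_suspend_state ();
     ...set up and run the inferior function call...
     restore_infcall_suspend_state (saved.release ());

   Both helpers are defined later in this file.  */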
9045
6bf78e29 9046class infcall_suspend_state
7a292a7a 9047{
6bf78e29
AB
9048public:
9049 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9050 once the inferior function call has finished. */
9051 infcall_suspend_state (struct gdbarch *gdbarch,
9052 const struct thread_info *tp,
9053 struct regcache *regcache)
9054 : m_thread_suspend (tp->suspend),
9055 m_registers (new readonly_detached_regcache (*regcache))
9056 {
9057 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
9058
9059 if (gdbarch_get_siginfo_type_p (gdbarch))
9060 {
9061 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9062 size_t len = TYPE_LENGTH (type);
9063
9064 siginfo_data.reset ((gdb_byte *) xmalloc (len));
9065
9066 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
9067 siginfo_data.get (), 0, len) != len)
9068 {
9069 /* Errors ignored. */
9070 siginfo_data.reset (nullptr);
9071 }
9072 }
9073
9074 if (siginfo_data)
9075 {
9076 m_siginfo_gdbarch = gdbarch;
9077 m_siginfo_data = std::move (siginfo_data);
9078 }
9079 }
9080
9081 /* Return a pointer to the stored register state. */
16c381f0 9082
6bf78e29
AB
9083 readonly_detached_regcache *registers () const
9084 {
9085 return m_registers.get ();
9086 }
9087
9088 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9089
9090 void restore (struct gdbarch *gdbarch,
9091 struct thread_info *tp,
9092 struct regcache *regcache) const
9093 {
9094 tp->suspend = m_thread_suspend;
9095
9096 if (m_siginfo_gdbarch == gdbarch)
9097 {
9098 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9099
9100 /* Errors ignored. */
9101 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
9102 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
9103 }
9104
9105 /* The inferior can be gone if the user types "print exit(0)"
9106 (and perhaps other times). */
9107 if (target_has_execution)
9108 /* NB: The register write goes through to the target. */
9109 regcache->restore (registers ());
9110 }
9111
9112private:
9113 /* How the current thread stopped before the inferior function call was
9114 executed. */
9115 struct thread_suspend_state m_thread_suspend;
9116
9117 /* The registers before the inferior function call was executed. */
9118 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 9119
35515841 9120 /* Architecture describing the format of M_SIGINFO_DATA, or NULL if no siginfo was saved. */
6bf78e29 9121 struct gdbarch *m_siginfo_gdbarch = nullptr;
9122
 9123 /* The siginfo data saved from the inferior. Its layout depends on
 9124 M_SIGINFO_GDBARCH and its length is TYPE_LENGTH
 9125 (gdbarch_get_siginfo_type ()); for a different gdbarch the contents would be invalid. */
6bf78e29 9126 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
9127};
9128
9129infcall_suspend_state_up
9130save_infcall_suspend_state ()
b89667eb 9131{
b89667eb 9132 struct thread_info *tp = inferior_thread ();
1736ad11 9133 struct regcache *regcache = get_current_regcache ();
ac7936df 9134 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 9135
9136 infcall_suspend_state_up inf_state
9137 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 9138
9139 /* Having saved the current state, adjust the thread state, discarding
9140 any stop signal information. The stop signal is not useful when
9141 starting an inferior function call, and run_inferior_call will not use
9142 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 9143 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 9144
9145 return inf_state;
9146}
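
/* A minimal sketch of how the suspend-state helpers above and below are
   meant to be paired around an inferior function call.  This is not the
   actual caller (the inferior-call code lives elsewhere); error paths
   are omitted:

     infcall_suspend_state_up saved = save_infcall_suspend_state ();
     ... set up and run the dummy-frame call, wait for it to stop ...
     restore_infcall_suspend_state (saved.release ());

   restore_infcall_suspend_state deletes the state object, so the raw
   pointer must not be used afterwards; discard_infcall_suspend_state is
   used instead when the caller wants the inferior to keep whatever
   state the call left behind.  */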
9147
9148/* Restore inferior session state to INF_STATE. */
9149
9150void
16c381f0 9151restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
9152{
9153 struct thread_info *tp = inferior_thread ();
1736ad11 9154 struct regcache *regcache = get_current_regcache ();
ac7936df 9155 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 9156
6bf78e29 9157 inf_state->restore (gdbarch, tp, regcache);
16c381f0 9158 discard_infcall_suspend_state (inf_state);
9159}
9160
b89667eb 9161void
16c381f0 9162discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 9163{
dd848631 9164 delete inf_state;
9165}
9166
daf6667d 9167readonly_detached_regcache *
16c381f0 9168get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 9169{
6bf78e29 9170 return inf_state->registers ();
9171}
9172
9173/* infcall_control_state contains state regarding gdb's control of the
9174 inferior itself like stepping control. It also contains session state like
9175 the user's currently selected frame. */
b89667eb 9176
16c381f0 9177struct infcall_control_state
b89667eb 9178{
9179 struct thread_control_state thread_control;
9180 struct inferior_control_state inferior_control;
9181
9182 /* Other fields: */
9183 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
9184 int stopped_by_random_signal = 0;
7a292a7a 9185
b89667eb 9186 /* ID of the selected frame when the inferior function call was made. */
ee841dd8 9187 struct frame_id selected_frame_id {};
9188};
9189
c906108c 9190/* Save all of the information associated with the inferior<==>gdb
b89667eb 9191 connection. */
c906108c 9192
9193infcall_control_state_up
9194save_infcall_control_state ()
c906108c 9195{
cb524840 9196 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 9197 struct thread_info *tp = inferior_thread ();
d6b48e9c 9198 struct inferior *inf = current_inferior ();
7a292a7a 9199
9200 inf_status->thread_control = tp->control;
9201 inf_status->inferior_control = inf->control;
d82142e2 9202
8358c15c 9203 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9204 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9205
9206 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9207 chain. If caller's caller is walking the chain, they'll be happier if we
9208 hand them back the original chain when restore_infcall_control_state is
9209 called. */
9210 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
9211
9212 /* Other fields: */
9213 inf_status->stop_stack_dummy = stop_stack_dummy;
9214 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9215
206415a3 9216 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 9217
7a292a7a 9218 return inf_status;
9219}
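
/* Sketch of how the control-state helpers are paired (illustrative; the
   real caller is the inferior-call code, not this file, and
   "call_finished_normally" below is a hypothetical condition):

     infcall_control_state_up status = save_infcall_control_state ();
     ... run the inferior function call ...
     if (call_finished_normally)
       restore_infcall_control_state (status.release ());
     else
       discard_infcall_control_state (status.release ());

   Both paths free the object; they differ in whether the stepping
   state, the stop bpstat and the selected frame are put back.  */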
9220
9221static void
9222restore_selected_frame (const frame_id &fid)
c906108c 9223{
bf469271 9224 frame_info *frame = frame_find_by_id (fid);
c906108c 9225
 9226 /* If the frame identified by FID can no longer be found, the
 9227 previously selected frame is gone. */
101dcfbe 9228 if (frame == NULL)
c906108c 9229 {
8a3fe4f8 9230 warning (_("Unable to restore previously selected frame."));
bf469271 9231 return;
9232 }
9233
0f7d239c 9234 select_frame (frame);
9235}
9236
9237/* Restore inferior session state to INF_STATUS. */
9238
c906108c 9239void
16c381f0 9240restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9241{
4e1c45ea 9242 struct thread_info *tp = inferior_thread ();
d6b48e9c 9243 struct inferior *inf = current_inferior ();
4e1c45ea 9244
9245 if (tp->control.step_resume_breakpoint)
9246 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9247
9248 if (tp->control.exception_resume_breakpoint)
9249 tp->control.exception_resume_breakpoint->disposition
9250 = disp_del_at_next_stop;
9251
d82142e2 9252 /* Handle the bpstat_copy of the chain. */
16c381f0 9253 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9254
9255 tp->control = inf_status->thread_control;
9256 inf->control = inf_status->inferior_control;
9257
9258 /* Other fields: */
9259 stop_stack_dummy = inf_status->stop_stack_dummy;
9260 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9261
b89667eb 9262 if (target_has_stack)
c906108c 9263 {
bf469271 9264 /* The point of the try/catch is that if the stack is clobbered,
9265 walking the stack might encounter a garbage pointer and
9266 error() trying to dereference it. */
a70b8144 9267 try
9268 {
9269 restore_selected_frame (inf_status->selected_frame_id);
9270 }
230d2906 9271 catch (const gdb_exception_error &ex)
9272 {
9273 exception_fprintf (gdb_stderr, ex,
9274 "Unable to restore previously selected frame:\n");
9275 /* Error in restoring the selected frame. Select the
9276 innermost frame. */
9277 select_frame (get_current_frame ());
9278 }
c906108c 9279 }
c906108c 9280
ee841dd8 9281 delete inf_status;
7a292a7a 9282}
9283
9284void
16c381f0 9285discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9286{
9287 if (inf_status->thread_control.step_resume_breakpoint)
9288 inf_status->thread_control.step_resume_breakpoint->disposition
9289 = disp_del_at_next_stop;
9290
9291 if (inf_status->thread_control.exception_resume_breakpoint)
9292 inf_status->thread_control.exception_resume_breakpoint->disposition
9293 = disp_del_at_next_stop;
9294
1777feb0 9295 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9296 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9297
ee841dd8 9298 delete inf_status;
7a292a7a 9299}
b89667eb 9300\f
7f89fd65 9301/* See infrun.h. */
9302
9303void
9304clear_exit_convenience_vars (void)
9305{
9306 clear_internalvar (lookup_internalvar ("_exitsignal"));
9307 clear_internalvar (lookup_internalvar ("_exitcode"));
9308}
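
/* Sketch of typical use of the convenience variables cleared above:
   after a normal exit, $_exitcode holds the inferior's exit status;
   after death by signal, $_exitsignal holds the signal number.  E.g.:

     (gdb) run
     ... inferior exits ...
     (gdb) print $_exitcode

   (Illustrative only; not output captured from a real session.)  */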
c5aa993b 9309\f
488f131b 9310
9311/* User interface for reverse debugging:
9312 Set exec-direction / show exec-direction commands
9313 (returns error unless target implements to_set_exec_direction method). */
9314
170742de 9315enum exec_direction_kind execution_direction = EXEC_FORWARD;
9316static const char exec_forward[] = "forward";
9317static const char exec_reverse[] = "reverse";
9318static const char *exec_direction = exec_forward;
40478521 9319static const char *const exec_direction_names[] = {
9320 exec_forward,
9321 exec_reverse,
9322 NULL
9323};
9324
9325static void
eb4c3f4a 9326set_exec_direction_func (const char *args, int from_tty,
9327 struct cmd_list_element *cmd)
9328{
9329 if (target_can_execute_reverse)
9330 {
9331 if (!strcmp (exec_direction, exec_forward))
9332 execution_direction = EXEC_FORWARD;
9333 else if (!strcmp (exec_direction, exec_reverse))
9334 execution_direction = EXEC_REVERSE;
9335 }
9336 else
9337 {
9338 exec_direction = exec_forward;
9339 error (_("Target does not support this operation."));
9340 }
9341}
9342
9343static void
9344show_exec_direction_func (struct ui_file *out, int from_tty,
9345 struct cmd_list_element *cmd, const char *value)
9346{
9347 switch (execution_direction) {
9348 case EXEC_FORWARD:
9349 fprintf_filtered (out, _("Forward.\n"));
9350 break;
9351 case EXEC_REVERSE:
9352 fprintf_filtered (out, _("Reverse.\n"));
9353 break;
b2175913 9354 default:
9355 internal_error (__FILE__, __LINE__,
9356 _("bogus execution_direction value: %d"),
9357 (int) execution_direction);
9358 }
9359}
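
/* Sketch of the user-level interface implemented by the two functions
   above (reverse execution requires a target that supports it, e.g. one
   recorded with "record full"):

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.
     (gdb) set exec-direction forward

   (Illustrative sketch only.)  */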
9360
9361static void
9362show_schedule_multiple (struct ui_file *file, int from_tty,
9363 struct cmd_list_element *c, const char *value)
9364{
9365 fprintf_filtered (file, _("Resuming the execution of threads "
9366 "of all processes is %s.\n"), value);
d4db2f36 9367}
ad52ddc6 9368
9369/* Implementation of `siginfo' variable. */
9370
9371static const struct internalvar_funcs siginfo_funcs =
9372{
9373 siginfo_make_value,
9374 NULL,
9375 NULL
9376};
9377
9378/* Callback for infrun's target events source. This is marked when a
9379 thread has a pending status to process. */
9380
9381static void
9382infrun_async_inferior_event_handler (gdb_client_data data)
9383{
9384 inferior_event_handler (INF_REG_EVENT, NULL);
9385}
9386
6c265988 9387void _initialize_infrun ();
c906108c 9388void
6c265988 9389_initialize_infrun ()
c906108c 9390{
de0bea00 9391 struct cmd_list_element *c;
c906108c 9392
9393 /* Register extra event sources in the event loop. */
9394 infrun_async_inferior_event_token
9395 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
9396
11db9430 9397 add_info ("signals", info_signals_command, _("\
9398What debugger does when program gets various signals.\n\
9399Specify a signal as argument to print info on that signal only."));
9400 add_info_alias ("handle", "signals", 0);
9401
de0bea00 9402 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9403Specify how to handle signals.\n\
486c7739 9404Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9405Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9406If no actions are specified, the current settings for the specified signals\n\
9407will be displayed instead.\n\
9408\n\
9409Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9410from 1-15 are allowed for compatibility with old versions of GDB.\n\
9411Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9412The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9413used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9414\n\
1bedd215 9415Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
9416\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9417Stop means reenter debugger if this signal happens (implies print).\n\
9418Print means print a message if this signal happens.\n\
9419Pass means let program see this signal; otherwise program doesn't know.\n\
9420Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
9421Pass and Stop may be combined.\n\
9422\n\
9423Multiple signals may be specified. Signal numbers and signal names\n\
9424may be interspersed with actions, with the actions being performed for\n\
9425all signals cumulatively specified."));
de0bea00 9426 set_cmd_completer (c, handle_completer);
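
  /* For example (an illustrative sketch of the command registered just
     above, not code):

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle SIGSEGV stop print nopass
       (gdb) handle 14 15 noprint

     Actions may be combined, and several signals (by name, number or
     range) may be given in one invocation, as the help text says.  */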
486c7739 9427
c906108c 9428 if (!dbx_commands)
9429 stop_command = add_cmd ("stop", class_obscure,
9430 not_just_help_class_command, _("\
9431There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9432This allows you to set a list of commands to be run each time execution\n\
1a966eab 9433of the program stops."), &cmdlist);
c906108c 9434
ccce17b0 9435 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
9436Set inferior debugging."), _("\
9437Show inferior debugging."), _("\
9438When non-zero, inferior specific debugging is enabled."),
9439 NULL,
9440 show_debug_infrun,
9441 &setdebuglist, &showdebuglist);
527159b7 9442
9443 add_setshow_boolean_cmd ("displaced", class_maintenance,
9444 &debug_displaced, _("\
9445Set displaced stepping debugging."), _("\
9446Show displaced stepping debugging."), _("\
9447When non-zero, displaced stepping specific debugging is enabled."),
9448 NULL,
9449 show_debug_displaced,
9450 &setdebuglist, &showdebuglist);
9451
9452 add_setshow_boolean_cmd ("non-stop", no_class,
9453 &non_stop_1, _("\
9454Set whether gdb controls the inferior in non-stop mode."), _("\
9455Show whether gdb controls the inferior in non-stop mode."), _("\
9456When debugging a multi-threaded program and this setting is\n\
9457off (the default, also called all-stop mode), when one thread stops\n\
9458(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9459all other threads in the program while you interact with the thread of\n\
9460interest. When you continue or step a thread, you can allow the other\n\
9461threads to run, or have them remain stopped, but while you inspect any\n\
9462thread's state, all threads stop.\n\
9463\n\
9464In non-stop mode, when one thread stops, other threads can continue\n\
9465to run freely. You'll be able to step each thread independently,\n\
9466leave it stopped or free to run as needed."),
9467 set_non_stop,
9468 show_non_stop,
9469 &setlist,
9470 &showlist);
9471
adc6a863 9472 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
9473 {
9474 signal_stop[i] = 1;
9475 signal_print[i] = 1;
9476 signal_program[i] = 1;
ab04a2af 9477 signal_catch[i] = 0;
9478 }
9479
9480 /* Signals caused by debugger's own actions should not be given to
9481 the program afterwards.
9482
9483 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9484 explicitly specifies that it should be delivered to the target
9485 program. Typically, that would occur when a user is debugging a
9486 target monitor on a simulator: the target monitor sets a
9487 breakpoint; the simulator encounters this breakpoint and halts
9488 the simulation handing control to GDB; GDB, noting that the stop
9489 address doesn't map to any known breakpoint, returns control back
9490 to the simulator; the simulator then delivers the hardware
9491 equivalent of a GDB_SIGNAL_TRAP to the program being
9492 debugged. */
9493 signal_program[GDB_SIGNAL_TRAP] = 0;
9494 signal_program[GDB_SIGNAL_INT] = 0;
9495
9496 /* Signals that are not errors should not normally enter the debugger. */
9497 signal_stop[GDB_SIGNAL_ALRM] = 0;
9498 signal_print[GDB_SIGNAL_ALRM] = 0;
9499 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9500 signal_print[GDB_SIGNAL_VTALRM] = 0;
9501 signal_stop[GDB_SIGNAL_PROF] = 0;
9502 signal_print[GDB_SIGNAL_PROF] = 0;
9503 signal_stop[GDB_SIGNAL_CHLD] = 0;
9504 signal_print[GDB_SIGNAL_CHLD] = 0;
9505 signal_stop[GDB_SIGNAL_IO] = 0;
9506 signal_print[GDB_SIGNAL_IO] = 0;
9507 signal_stop[GDB_SIGNAL_POLL] = 0;
9508 signal_print[GDB_SIGNAL_POLL] = 0;
9509 signal_stop[GDB_SIGNAL_URG] = 0;
9510 signal_print[GDB_SIGNAL_URG] = 0;
9511 signal_stop[GDB_SIGNAL_WINCH] = 0;
9512 signal_print[GDB_SIGNAL_WINCH] = 0;
9513 signal_stop[GDB_SIGNAL_PRIO] = 0;
9514 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9515
9516 /* These signals are used internally by user-level thread
9517 implementations. (See signal(5) on Solaris.) Like the above
9518 signals, a healthy program receives and handles them as part of
9519 its normal operation. */
9520 signal_stop[GDB_SIGNAL_LWP] = 0;
9521 signal_print[GDB_SIGNAL_LWP] = 0;
9522 signal_stop[GDB_SIGNAL_WAITING] = 0;
9523 signal_print[GDB_SIGNAL_WAITING] = 0;
9524 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9525 signal_print[GDB_SIGNAL_CANCEL] = 0;
9526 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9527 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9528
9529 /* Update cached state. */
9530 signal_cache_update (-1);
9531
9532 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9533 &stop_on_solib_events, _("\
9534Set stopping for shared library events."), _("\
9535Show stopping for shared library events."), _("\
9536If nonzero, gdb will give control to the user when the dynamic linker\n\
9537notifies gdb of shared library events. The most common event of interest\n\
85c07804 9538to the user would be loading/unloading of a new library."),
f9e14852 9539 set_stop_on_solib_events,
920d2a44 9540 show_stop_on_solib_events,
85c07804 9541 &setlist, &showlist);
c906108c 9542
9543 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9544 follow_fork_mode_kind_names,
9545 &follow_fork_mode_string, _("\
9546Set debugger response to a program call of fork or vfork."), _("\
9547Show debugger response to a program call of fork or vfork."), _("\
9548A fork or vfork creates a new process. follow-fork-mode can be:\n\
9549 parent - the original process is debugged after a fork\n\
9550 child - the new process is debugged after a fork\n\
ea1dd7bc 9551The unfollowed process will continue to run.\n\
9552By default, the debugger will follow the parent process."),
9553 NULL,
920d2a44 9554 show_follow_fork_mode_string,
9555 &setlist, &showlist);
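
  /* Sketch of typical use of the setting registered above, together
     with "set detach-on-fork" (registered further below):

       (gdb) set follow-fork-mode child
       (gdb) set detach-on-fork off

     With these, GDB follows the child after a fork while keeping the
     parent attached and suspended.  (Illustrative only.)  */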
9556
9557 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9558 follow_exec_mode_names,
9559 &follow_exec_mode_string, _("\
9560Set debugger response to a program call of exec."), _("\
9561Show debugger response to a program call of exec."), _("\
9562An exec call replaces the program image of a process.\n\
9563\n\
9564follow-exec-mode can be:\n\
9565\n\
cce7e648 9566 new - the debugger creates a new inferior and rebinds the process\n\
9567to this new inferior. The program the process was running before\n\
9568the exec call can be restarted afterwards by restarting the original\n\
9569inferior.\n\
9570\n\
9571 same - the debugger keeps the process bound to the same inferior.\n\
9572The new executable image replaces the previous executable loaded in\n\
9573the inferior. Restarting the inferior after the exec call restarts\n\
9574the executable the process was running after the exec call.\n\
9575\n\
9576By default, the debugger will use the same inferior."),
9577 NULL,
9578 show_follow_exec_mode_string,
9579 &setlist, &showlist);
9580
9581 add_setshow_enum_cmd ("scheduler-locking", class_run,
9582 scheduler_enums, &scheduler_mode, _("\
9583Set mode for locking scheduler during execution."), _("\
9584Show mode for locking scheduler during execution."), _("\
9585off == no locking (threads may preempt at any time)\n\
9586on == full locking (no thread except the current thread may run)\n\
9587 This applies to both normal execution and replay mode.\n\
9588step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9589 In this mode, other threads may run during other commands.\n\
9590 This applies to both normal execution and replay mode.\n\
9591replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9592 set_schedlock_func, /* traps on target vector */
920d2a44 9593 show_scheduler_mode,
7ab04401 9594 &setlist, &showlist);
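
  /* For instance (sketch):

       (gdb) set scheduler-locking step

     makes stepping commands resume only the current thread, while
     "continue" still resumes all threads, as the help text above
     describes.  */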
5fbbeb29 9595
9596 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9597Set mode for resuming threads of all processes."), _("\
9598Show mode for resuming threads of all processes."), _("\
9599When on, execution commands (such as 'continue' or 'next') resume all\n\
9600threads of all processes. When off (which is the default), execution\n\
9601commands only resume the threads of the current process. The set of\n\
9602threads that are resumed is further refined by the scheduler-locking\n\
9603mode (see help set scheduler-locking)."),
9604 NULL,
9605 show_schedule_multiple,
9606 &setlist, &showlist);
9607
9608 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9609Set mode of the step operation."), _("\
9610Show mode of the step operation."), _("\
9611When set, doing a step over a function without debug line information\n\
9612will stop at the first instruction of that function. Otherwise, the\n\
9613function is skipped and the step command stops at a different source line."),
9614 NULL,
920d2a44 9615 show_step_stop_if_no_debug,
5bf193a2 9616 &setlist, &showlist);
ca6724c1 9617
9618 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9619 &can_use_displaced_stepping, _("\
9620Set debugger's willingness to use displaced stepping."), _("\
9621Show debugger's willingness to use displaced stepping."), _("\
9622If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9623supported by the target architecture. If off, gdb will not use displaced\n\
9624stepping to step over breakpoints, even if such is supported by the target\n\
9625architecture. If auto (which is the default), gdb will use displaced stepping\n\
9626if the target architecture supports it and non-stop mode is active, but will not\n\
9627use it in all-stop mode (see help set non-stop)."),
9628 NULL,
9629 show_can_use_displaced_stepping,
9630 &setlist, &showlist);
237fc4c9 9631
9632 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9633 &exec_direction, _("Set direction of execution.\n\
9634Options are 'forward' or 'reverse'."),
9635 _("Show direction of execution (forward/reverse)."),
9636 _("Tells gdb whether to execute forward or backward."),
9637 set_exec_direction_func, show_exec_direction_func,
9638 &setlist, &showlist);
9639
9640 /* Set/show detach-on-fork: user-settable mode. */
9641
9642 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9643Set whether gdb will detach the child of a fork."), _("\
9644Show whether gdb will detach the child of a fork."), _("\
9645Tells gdb whether to detach the child of a fork."),
9646 NULL, NULL, &setlist, &showlist);
9647
9648 /* Set/show disable address space randomization mode. */
9649
9650 add_setshow_boolean_cmd ("disable-randomization", class_support,
9651 &disable_randomization, _("\
9652Set disabling of debuggee's virtual address space randomization."), _("\
9653Show disabling of debuggee's virtual address space randomization."), _("\
9654When this mode is on (which is the default), randomization of the virtual\n\
9655address space is disabled. Standalone programs run with the randomization\n\
9656enabled by default on some platforms."),
9657 &set_disable_randomization,
9658 &show_disable_randomization,
9659 &setlist, &showlist);
9660
KB
9662 inferior_ptid = null_ptid;
9663 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9664
9665 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
9666 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
9667 gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
9668 gdb::observers::inferior_exit.attach (infrun_inferior_exit);
9669
9670 /* Explicitly create without lookup, since that tries to create a
9671 value with a void typed value, and when we get here, gdbarch
9672 isn't initialized yet. At this point, we're quite sure there
9673 isn't another convenience variable of the same name. */
22d2b532 9674 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
9675
9676 add_setshow_boolean_cmd ("observer", no_class,
9677 &observer_mode_1, _("\
9678Set whether gdb controls the inferior in observer mode."), _("\
9679Show whether gdb controls the inferior in observer mode."), _("\
9680In observer mode, GDB can get data from the inferior, but not\n\
9681affect its execution. Registers and memory may not be changed,\n\
9682breakpoints may not be set, and the program cannot be interrupted\n\
9683or signalled."),
9684 set_observer_mode,
9685 show_observer_mode,
9686 &setlist,
9687 &showlist);
c906108c 9688}