Use all_non_exited_inferiors in infrun.c
[deliverable/binutils-gdb.git] / gdb / infrun.c
CommitLineData
ca557f44
AC
1/* Target-struct-independent code to start (run) and stop an inferior
2 process.
8926118c 3
b811d2c2 4 Copyright (C) 1986-2020 Free Software Foundation, Inc.
c906108c 5
c5aa993b 6 This file is part of GDB.
c906108c 7
c5aa993b
JM
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
c5aa993b 11 (at your option) any later version.
c906108c 12
c5aa993b
JM
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
c906108c 17
c5aa993b 18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
c906108c
SS
20
21#include "defs.h"
45741a9c 22#include "infrun.h"
c906108c
SS
23#include <ctype.h>
24#include "symtab.h"
25#include "frame.h"
26#include "inferior.h"
27#include "breakpoint.h"
c906108c
SS
28#include "gdbcore.h"
29#include "gdbcmd.h"
30#include "target.h"
31#include "gdbthread.h"
32#include "annotate.h"
1adeb98a 33#include "symfile.h"
7a292a7a 34#include "top.h"
2acceee2 35#include "inf-loop.h"
4e052eda 36#include "regcache.h"
fd0407d6 37#include "value.h"
76727919 38#include "observable.h"
f636b87d 39#include "language.h"
a77053c2 40#include "solib.h"
f17517ea 41#include "main.h"
186c406b 42#include "block.h"
034dad6f 43#include "mi/mi-common.h"
4f8d22e3 44#include "event-top.h"
96429cc8 45#include "record.h"
d02ed0bb 46#include "record-full.h"
edb3359d 47#include "inline-frame.h"
4efc6507 48#include "jit.h"
06cd862c 49#include "tracepoint.h"
1bfeeb0f 50#include "skip.h"
28106bc2
SDJ
51#include "probe.h"
52#include "objfiles.h"
de0bea00 53#include "completer.h"
9107fc8d 54#include "target-descriptions.h"
f15cb84a 55#include "target-dcache.h"
d83ad864 56#include "terminal.h"
ff862be4 57#include "solist.h"
372316f1 58#include "event-loop.h"
243a9253 59#include "thread-fsm.h"
268a13a5 60#include "gdbsupport/enum-flags.h"
5ed8105e 61#include "progspace-and-thread.h"
268a13a5 62#include "gdbsupport/gdb_optional.h"
46a62268 63#include "arch-utils.h"
268a13a5
TT
64#include "gdbsupport/scope-exit.h"
65#include "gdbsupport/forward-scope-exit.h"
c906108c
SS
66
67/* Prototypes for local functions */
68
2ea28649 69static void sig_print_info (enum gdb_signal);
c906108c 70
96baa820 71static void sig_print_header (void);
c906108c 72
4ef3f3be 73static int follow_fork (void);
96baa820 74
d83ad864
DB
75static int follow_fork_inferior (int follow_child, int detach_fork);
76
77static void follow_inferior_reset_breakpoints (void);
78
a289b8f6
JK
79static int currently_stepping (struct thread_info *tp);
80
2c03e5be 81static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
2484c66b
UW
82
83static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
84
2484c66b
UW
85static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
86
8550d3b3
YQ
87static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
88
aff4e175
AB
89static void resume (gdb_signal sig);
90
372316f1
PA
91/* Asynchronous signal handler registered as event loop source for
92 when we have pending events ready to be passed to the core. */
93static struct async_event_handler *infrun_async_inferior_event_token;
94
95/* Stores whether infrun_async was previously enabled or disabled.
96 Starts off as -1, indicating "never enabled/disabled". */
97static int infrun_is_async = -1;
98
99/* See infrun.h. */
100
101void
102infrun_async (int enable)
103{
104 if (infrun_is_async != enable)
105 {
106 infrun_is_async = enable;
107
108 if (debug_infrun)
109 fprintf_unfiltered (gdb_stdlog,
110 "infrun: infrun_async(%d)\n",
111 enable);
112
113 if (enable)
114 mark_async_event_handler (infrun_async_inferior_event_token);
115 else
116 clear_async_event_handler (infrun_async_inferior_event_token);
117 }
118}
119
0b333c5e
PA
120/* See infrun.h. */
121
122void
123mark_infrun_async_event_handler (void)
124{
125 mark_async_event_handler (infrun_async_inferior_event_token);
126}
127
5fbbeb29
CF
128/* When set, stop the 'step' command if we enter a function which has
129 no line number information. The normal behavior is that we step
130 over such function. */
491144b5 131bool step_stop_if_no_debug = false;
920d2a44
AC
132static void
133show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135{
136 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
137}
5fbbeb29 138
b9f437de
PA
139/* proceed and normal_stop use this to notify the user when the
140 inferior stopped in a different thread than it had been running
141 in. */
96baa820 142
39f77062 143static ptid_t previous_inferior_ptid;
7a292a7a 144
07107ca6
LM
145/* If set (default for legacy reasons), when following a fork, GDB
146 will detach from one of the fork branches, child or parent.
147 Exactly which branch is detached depends on 'set follow-fork-mode'
148 setting. */
149
491144b5 150static bool detach_fork = true;
6c95b8df 151
491144b5 152bool debug_displaced = false;
237fc4c9
PA
153static void
154show_debug_displaced (struct ui_file *file, int from_tty,
155 struct cmd_list_element *c, const char *value)
156{
157 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
158}
159
ccce17b0 160unsigned int debug_infrun = 0;
920d2a44
AC
161static void
162show_debug_infrun (struct ui_file *file, int from_tty,
163 struct cmd_list_element *c, const char *value)
164{
165 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
166}
527159b7 167
03583c20
UW
168
169/* Support for disabling address space randomization. */
170
491144b5 171bool disable_randomization = true;
03583c20
UW
172
173static void
174show_disable_randomization (struct ui_file *file, int from_tty,
175 struct cmd_list_element *c, const char *value)
176{
177 if (target_supports_disable_randomization ())
178 fprintf_filtered (file,
179 _("Disabling randomization of debuggee's "
180 "virtual address space is %s.\n"),
181 value);
182 else
183 fputs_filtered (_("Disabling randomization of debuggee's "
184 "virtual address space is unsupported on\n"
185 "this platform.\n"), file);
186}
187
188static void
eb4c3f4a 189set_disable_randomization (const char *args, int from_tty,
03583c20
UW
190 struct cmd_list_element *c)
191{
192 if (!target_supports_disable_randomization ())
193 error (_("Disabling randomization of debuggee's "
194 "virtual address space is unsupported on\n"
195 "this platform."));
196}
197
d32dc48e
PA
198/* User interface for non-stop mode. */
199
491144b5
CB
200bool non_stop = false;
201static bool non_stop_1 = false;
d32dc48e
PA
202
203static void
eb4c3f4a 204set_non_stop (const char *args, int from_tty,
d32dc48e
PA
205 struct cmd_list_element *c)
206{
207 if (target_has_execution)
208 {
209 non_stop_1 = non_stop;
210 error (_("Cannot change this setting while the inferior is running."));
211 }
212
213 non_stop = non_stop_1;
214}
215
216static void
217show_non_stop (struct ui_file *file, int from_tty,
218 struct cmd_list_element *c, const char *value)
219{
220 fprintf_filtered (file,
221 _("Controlling the inferior in non-stop mode is %s.\n"),
222 value);
223}
224
d914c394
SS
225/* "Observer mode" is somewhat like a more extreme version of
226 non-stop, in which all GDB operations that might affect the
227 target's execution have been disabled. */
228
491144b5
CB
229bool observer_mode = false;
230static bool observer_mode_1 = false;
d914c394
SS
231
232static void
eb4c3f4a 233set_observer_mode (const char *args, int from_tty,
d914c394
SS
234 struct cmd_list_element *c)
235{
d914c394
SS
236 if (target_has_execution)
237 {
238 observer_mode_1 = observer_mode;
239 error (_("Cannot change this setting while the inferior is running."));
240 }
241
242 observer_mode = observer_mode_1;
243
244 may_write_registers = !observer_mode;
245 may_write_memory = !observer_mode;
246 may_insert_breakpoints = !observer_mode;
247 may_insert_tracepoints = !observer_mode;
248 /* We can insert fast tracepoints in or out of observer mode,
249 but enable them if we're going into this mode. */
250 if (observer_mode)
491144b5 251 may_insert_fast_tracepoints = true;
d914c394
SS
252 may_stop = !observer_mode;
253 update_target_permissions ();
254
255 /* Going *into* observer mode we must force non-stop, then
256 going out we leave it that way. */
257 if (observer_mode)
258 {
d914c394 259 pagination_enabled = 0;
491144b5 260 non_stop = non_stop_1 = true;
d914c394
SS
261 }
262
263 if (from_tty)
264 printf_filtered (_("Observer mode is now %s.\n"),
265 (observer_mode ? "on" : "off"));
266}
267
268static void
269show_observer_mode (struct ui_file *file, int from_tty,
270 struct cmd_list_element *c, const char *value)
271{
272 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
273}
274
275/* This updates the value of observer mode based on changes in
276 permissions. Note that we are deliberately ignoring the values of
277 may-write-registers and may-write-memory, since the user may have
278 reason to enable these during a session, for instance to turn on a
279 debugging-related global. */
280
281void
282update_observer_mode (void)
283{
491144b5
CB
284 bool newval = (!may_insert_breakpoints
285 && !may_insert_tracepoints
286 && may_insert_fast_tracepoints
287 && !may_stop
288 && non_stop);
d914c394
SS
289
290 /* Let the user know if things change. */
291 if (newval != observer_mode)
292 printf_filtered (_("Observer mode is now %s.\n"),
293 (newval ? "on" : "off"));
294
295 observer_mode = observer_mode_1 = newval;
296}
c2c6d25f 297
c906108c
SS
298/* Tables of how to react to signals; the user sets them. */
299
adc6a863
PA
300static unsigned char signal_stop[GDB_SIGNAL_LAST];
301static unsigned char signal_print[GDB_SIGNAL_LAST];
302static unsigned char signal_program[GDB_SIGNAL_LAST];
c906108c 303
ab04a2af
TT
304/* Table of signals that are registered with "catch signal". A
305 non-zero entry indicates that the signal is caught by some "catch
adc6a863
PA
306 signal" command. */
307static unsigned char signal_catch[GDB_SIGNAL_LAST];
ab04a2af 308
2455069d
UW
309/* Table of signals that the target may silently handle.
310 This is automatically determined from the flags above,
311 and simply cached here. */
adc6a863 312static unsigned char signal_pass[GDB_SIGNAL_LAST];
2455069d 313
c906108c
SS
314#define SET_SIGS(nsigs,sigs,flags) \
315 do { \
316 int signum = (nsigs); \
317 while (signum-- > 0) \
318 if ((sigs)[signum]) \
319 (flags)[signum] = 1; \
320 } while (0)
321
322#define UNSET_SIGS(nsigs,sigs,flags) \
323 do { \
324 int signum = (nsigs); \
325 while (signum-- > 0) \
326 if ((sigs)[signum]) \
327 (flags)[signum] = 0; \
328 } while (0)
329
9b224c5e
PA
330/* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
331 this function is to avoid exporting `signal_program'. */
332
333void
334update_signals_program_target (void)
335{
adc6a863 336 target_program_signals (signal_program);
9b224c5e
PA
337}
338
1777feb0 339/* Value to pass to target_resume() to cause all threads to resume. */
39f77062 340
edb3359d 341#define RESUME_ALL minus_one_ptid
c906108c
SS
342
343/* Command list pointer for the "stop" placeholder. */
344
345static struct cmd_list_element *stop_command;
346
c906108c
SS
347/* Nonzero if we want to give control to the user when we're notified
348 of shared library events by the dynamic linker. */
628fe4e4 349int stop_on_solib_events;
f9e14852
GB
350
351/* Enable or disable optional shared library event breakpoints
352 as appropriate when the above flag is changed. */
353
354static void
eb4c3f4a
TT
355set_stop_on_solib_events (const char *args,
356 int from_tty, struct cmd_list_element *c)
f9e14852
GB
357{
358 update_solib_breakpoints ();
359}
360
920d2a44
AC
361static void
362show_stop_on_solib_events (struct ui_file *file, int from_tty,
363 struct cmd_list_element *c, const char *value)
364{
365 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
366 value);
367}
c906108c 368
c906108c
SS
369/* Nonzero after stop if current stack frame should be printed. */
370
371static int stop_print_frame;
372
e02bc4cc 373/* This is a cached copy of the pid/waitstatus of the last event
9a4105ab
AC
374 returned by target_wait()/deprecated_target_wait_hook(). This
375 information is returned by get_last_target_status(). */
39f77062 376static ptid_t target_last_wait_ptid;
e02bc4cc
DS
377static struct target_waitstatus target_last_waitstatus;
378
4e1c45ea 379void init_thread_stepping_state (struct thread_info *tss);
0d1e5fa7 380
53904c9e
AC
381static const char follow_fork_mode_child[] = "child";
382static const char follow_fork_mode_parent[] = "parent";
383
40478521 384static const char *const follow_fork_mode_kind_names[] = {
53904c9e
AC
385 follow_fork_mode_child,
386 follow_fork_mode_parent,
387 NULL
ef346e04 388};
c906108c 389
53904c9e 390static const char *follow_fork_mode_string = follow_fork_mode_parent;
920d2a44
AC
391static void
392show_follow_fork_mode_string (struct ui_file *file, int from_tty,
393 struct cmd_list_element *c, const char *value)
394{
3e43a32a
MS
395 fprintf_filtered (file,
396 _("Debugger response to a program "
397 "call of fork or vfork is \"%s\".\n"),
920d2a44
AC
398 value);
399}
c906108c
SS
400\f
401
d83ad864
DB
402/* Handle changes to the inferior list based on the type of fork,
403 which process is being followed, and whether the other process
404 should be detached. On entry inferior_ptid must be the ptid of
405 the fork parent. At return inferior_ptid is the ptid of the
406 followed inferior. */
407
408static int
409follow_fork_inferior (int follow_child, int detach_fork)
410{
411 int has_vforked;
79639e11 412 ptid_t parent_ptid, child_ptid;
d83ad864
DB
413
414 has_vforked = (inferior_thread ()->pending_follow.kind
415 == TARGET_WAITKIND_VFORKED);
79639e11
PA
416 parent_ptid = inferior_ptid;
417 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
d83ad864
DB
418
419 if (has_vforked
420 && !non_stop /* Non-stop always resumes both branches. */
3b12939d 421 && current_ui->prompt_state == PROMPT_BLOCKED
d83ad864
DB
422 && !(follow_child || detach_fork || sched_multi))
423 {
424 /* The parent stays blocked inside the vfork syscall until the
425 child execs or exits. If we don't let the child run, then
426 the parent stays blocked. If we're telling the parent to run
427 in the foreground, the user will not be able to ctrl-c to get
428 back the terminal, effectively hanging the debug session. */
429 fprintf_filtered (gdb_stderr, _("\
430Can not resume the parent process over vfork in the foreground while\n\
431holding the child stopped. Try \"set detach-on-fork\" or \
432\"set schedule-multiple\".\n"));
d83ad864
DB
433 return 1;
434 }
435
436 if (!follow_child)
437 {
438 /* Detach new forked process? */
439 if (detach_fork)
440 {
d83ad864
DB
441 /* Before detaching from the child, remove all breakpoints
442 from it. If we forked, then this has already been taken
443 care of by infrun.c. If we vforked however, any
444 breakpoint inserted in the parent is visible in the
445 child, even those added while stopped in a vfork
446 catchpoint. This will remove the breakpoints from the
447 parent also, but they'll be reinserted below. */
448 if (has_vforked)
449 {
450 /* Keep breakpoints list in sync. */
00431a78 451 remove_breakpoints_inf (current_inferior ());
d83ad864
DB
452 }
453
f67c0c91 454 if (print_inferior_events)
d83ad864 455 {
8dd06f7a 456 /* Ensure that we have a process ptid. */
e99b03dc 457 ptid_t process_ptid = ptid_t (child_ptid.pid ());
8dd06f7a 458
223ffa71 459 target_terminal::ours_for_output ();
d83ad864 460 fprintf_filtered (gdb_stdlog,
f67c0c91 461 _("[Detaching after %s from child %s]\n"),
6f259a23 462 has_vforked ? "vfork" : "fork",
a068643d 463 target_pid_to_str (process_ptid).c_str ());
d83ad864
DB
464 }
465 }
466 else
467 {
468 struct inferior *parent_inf, *child_inf;
d83ad864
DB
469
470 /* Add process to GDB's tables. */
e99b03dc 471 child_inf = add_inferior (child_ptid.pid ());
d83ad864
DB
472
473 parent_inf = current_inferior ();
474 child_inf->attach_flag = parent_inf->attach_flag;
475 copy_terminal_info (child_inf, parent_inf);
476 child_inf->gdbarch = parent_inf->gdbarch;
477 copy_inferior_target_desc_info (child_inf, parent_inf);
478
5ed8105e 479 scoped_restore_current_pspace_and_thread restore_pspace_thread;
d83ad864 480
79639e11 481 inferior_ptid = child_ptid;
f67c0c91 482 add_thread_silent (inferior_ptid);
2a00d7ce 483 set_current_inferior (child_inf);
d83ad864
DB
484 child_inf->symfile_flags = SYMFILE_NO_READ;
485
486 /* If this is a vfork child, then the address-space is
487 shared with the parent. */
488 if (has_vforked)
489 {
490 child_inf->pspace = parent_inf->pspace;
491 child_inf->aspace = parent_inf->aspace;
492
493 /* The parent will be frozen until the child is done
494 with the shared region. Keep track of the
495 parent. */
496 child_inf->vfork_parent = parent_inf;
497 child_inf->pending_detach = 0;
498 parent_inf->vfork_child = child_inf;
499 parent_inf->pending_detach = 0;
500 }
501 else
502 {
503 child_inf->aspace = new_address_space ();
564b1e3f 504 child_inf->pspace = new program_space (child_inf->aspace);
d83ad864
DB
505 child_inf->removable = 1;
506 set_current_program_space (child_inf->pspace);
507 clone_program_space (child_inf->pspace, parent_inf->pspace);
508
509 /* Let the shared library layer (e.g., solib-svr4) learn
510 about this new process, relocate the cloned exec, pull
511 in shared libraries, and install the solib event
512 breakpoint. If a "cloned-VM" event was propagated
513 better throughout the core, this wouldn't be
514 required. */
515 solib_create_inferior_hook (0);
516 }
d83ad864
DB
517 }
518
519 if (has_vforked)
520 {
521 struct inferior *parent_inf;
522
523 parent_inf = current_inferior ();
524
525 /* If we detached from the child, then we have to be careful
526 to not insert breakpoints in the parent until the child
527 is done with the shared memory region. However, if we're
528 staying attached to the child, then we can and should
529 insert breakpoints, so that we can debug it. A
530 subsequent child exec or exit is enough to know when does
531 the child stops using the parent's address space. */
532 parent_inf->waiting_for_vfork_done = detach_fork;
533 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
534 }
535 }
536 else
537 {
538 /* Follow the child. */
539 struct inferior *parent_inf, *child_inf;
540 struct program_space *parent_pspace;
541
f67c0c91 542 if (print_inferior_events)
d83ad864 543 {
f67c0c91
SDJ
544 std::string parent_pid = target_pid_to_str (parent_ptid);
545 std::string child_pid = target_pid_to_str (child_ptid);
546
223ffa71 547 target_terminal::ours_for_output ();
6f259a23 548 fprintf_filtered (gdb_stdlog,
f67c0c91
SDJ
549 _("[Attaching after %s %s to child %s]\n"),
550 parent_pid.c_str (),
6f259a23 551 has_vforked ? "vfork" : "fork",
f67c0c91 552 child_pid.c_str ());
d83ad864
DB
553 }
554
555 /* Add the new inferior first, so that the target_detach below
556 doesn't unpush the target. */
557
e99b03dc 558 child_inf = add_inferior (child_ptid.pid ());
d83ad864
DB
559
560 parent_inf = current_inferior ();
561 child_inf->attach_flag = parent_inf->attach_flag;
562 copy_terminal_info (child_inf, parent_inf);
563 child_inf->gdbarch = parent_inf->gdbarch;
564 copy_inferior_target_desc_info (child_inf, parent_inf);
565
566 parent_pspace = parent_inf->pspace;
567
568 /* If we're vforking, we want to hold on to the parent until the
569 child exits or execs. At child exec or exit time we can
570 remove the old breakpoints from the parent and detach or
571 resume debugging it. Otherwise, detach the parent now; we'll
572 want to reuse it's program/address spaces, but we can't set
573 them to the child before removing breakpoints from the
574 parent, otherwise, the breakpoints module could decide to
575 remove breakpoints from the wrong process (since they'd be
576 assigned to the same address space). */
577
578 if (has_vforked)
579 {
580 gdb_assert (child_inf->vfork_parent == NULL);
581 gdb_assert (parent_inf->vfork_child == NULL);
582 child_inf->vfork_parent = parent_inf;
583 child_inf->pending_detach = 0;
584 parent_inf->vfork_child = child_inf;
585 parent_inf->pending_detach = detach_fork;
586 parent_inf->waiting_for_vfork_done = 0;
587 }
588 else if (detach_fork)
6f259a23 589 {
f67c0c91 590 if (print_inferior_events)
6f259a23 591 {
8dd06f7a 592 /* Ensure that we have a process ptid. */
e99b03dc 593 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
8dd06f7a 594
223ffa71 595 target_terminal::ours_for_output ();
6f259a23 596 fprintf_filtered (gdb_stdlog,
f67c0c91
SDJ
597 _("[Detaching after fork from "
598 "parent %s]\n"),
a068643d 599 target_pid_to_str (process_ptid).c_str ());
6f259a23
DB
600 }
601
6e1e1966 602 target_detach (parent_inf, 0);
6f259a23 603 }
d83ad864
DB
604
605 /* Note that the detach above makes PARENT_INF dangling. */
606
607 /* Add the child thread to the appropriate lists, and switch to
608 this new thread, before cloning the program space, and
609 informing the solib layer about this new process. */
610
79639e11 611 inferior_ptid = child_ptid;
f67c0c91 612 add_thread_silent (inferior_ptid);
2a00d7ce 613 set_current_inferior (child_inf);
d83ad864
DB
614
615 /* If this is a vfork child, then the address-space is shared
616 with the parent. If we detached from the parent, then we can
617 reuse the parent's program/address spaces. */
618 if (has_vforked || detach_fork)
619 {
620 child_inf->pspace = parent_pspace;
621 child_inf->aspace = child_inf->pspace->aspace;
622 }
623 else
624 {
625 child_inf->aspace = new_address_space ();
564b1e3f 626 child_inf->pspace = new program_space (child_inf->aspace);
d83ad864
DB
627 child_inf->removable = 1;
628 child_inf->symfile_flags = SYMFILE_NO_READ;
629 set_current_program_space (child_inf->pspace);
630 clone_program_space (child_inf->pspace, parent_pspace);
631
632 /* Let the shared library layer (e.g., solib-svr4) learn
633 about this new process, relocate the cloned exec, pull in
634 shared libraries, and install the solib event breakpoint.
635 If a "cloned-VM" event was propagated better throughout
636 the core, this wouldn't be required. */
637 solib_create_inferior_hook (0);
638 }
639 }
640
641 return target_follow_fork (follow_child, detach_fork);
642}
643
e58b0e63
PA
644/* Tell the target to follow the fork we're stopped at. Returns true
645 if the inferior should be resumed; false, if the target for some
646 reason decided it's best not to resume. */
647
6604731b 648static int
4ef3f3be 649follow_fork (void)
c906108c 650{
ea1dd7bc 651 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63
PA
652 int should_resume = 1;
653 struct thread_info *tp;
654
655 /* Copy user stepping state to the new inferior thread. FIXME: the
656 followed fork child thread should have a copy of most of the
4e3990f4
DE
657 parent thread structure's run control related fields, not just these.
658 Initialized to avoid "may be used uninitialized" warnings from gcc. */
659 struct breakpoint *step_resume_breakpoint = NULL;
186c406b 660 struct breakpoint *exception_resume_breakpoint = NULL;
4e3990f4
DE
661 CORE_ADDR step_range_start = 0;
662 CORE_ADDR step_range_end = 0;
663 struct frame_id step_frame_id = { 0 };
8980e177 664 struct thread_fsm *thread_fsm = NULL;
e58b0e63
PA
665
666 if (!non_stop)
667 {
668 ptid_t wait_ptid;
669 struct target_waitstatus wait_status;
670
671 /* Get the last target status returned by target_wait(). */
672 get_last_target_status (&wait_ptid, &wait_status);
673
674 /* If not stopped at a fork event, then there's nothing else to
675 do. */
676 if (wait_status.kind != TARGET_WAITKIND_FORKED
677 && wait_status.kind != TARGET_WAITKIND_VFORKED)
678 return 1;
679
680 /* Check if we switched over from WAIT_PTID, since the event was
681 reported. */
00431a78
PA
682 if (wait_ptid != minus_one_ptid
683 && inferior_ptid != wait_ptid)
e58b0e63
PA
684 {
685 /* We did. Switch back to WAIT_PTID thread, to tell the
686 target to follow it (in either direction). We'll
687 afterwards refuse to resume, and inform the user what
688 happened. */
00431a78
PA
689 thread_info *wait_thread
690 = find_thread_ptid (wait_ptid);
691 switch_to_thread (wait_thread);
e58b0e63
PA
692 should_resume = 0;
693 }
694 }
695
696 tp = inferior_thread ();
697
698 /* If there were any forks/vforks that were caught and are now to be
699 followed, then do so now. */
700 switch (tp->pending_follow.kind)
701 {
702 case TARGET_WAITKIND_FORKED:
703 case TARGET_WAITKIND_VFORKED:
704 {
705 ptid_t parent, child;
706
707 /* If the user did a next/step, etc, over a fork call,
708 preserve the stepping state in the fork child. */
709 if (follow_child && should_resume)
710 {
8358c15c
JK
711 step_resume_breakpoint = clone_momentary_breakpoint
712 (tp->control.step_resume_breakpoint);
16c381f0
JK
713 step_range_start = tp->control.step_range_start;
714 step_range_end = tp->control.step_range_end;
715 step_frame_id = tp->control.step_frame_id;
186c406b
TT
716 exception_resume_breakpoint
717 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
8980e177 718 thread_fsm = tp->thread_fsm;
e58b0e63
PA
719
720 /* For now, delete the parent's sr breakpoint, otherwise,
721 parent/child sr breakpoints are considered duplicates,
722 and the child version will not be installed. Remove
723 this when the breakpoints module becomes aware of
724 inferiors and address spaces. */
725 delete_step_resume_breakpoint (tp);
16c381f0
JK
726 tp->control.step_range_start = 0;
727 tp->control.step_range_end = 0;
728 tp->control.step_frame_id = null_frame_id;
186c406b 729 delete_exception_resume_breakpoint (tp);
8980e177 730 tp->thread_fsm = NULL;
e58b0e63
PA
731 }
732
733 parent = inferior_ptid;
734 child = tp->pending_follow.value.related_pid;
735
d83ad864
DB
736 /* Set up inferior(s) as specified by the caller, and tell the
737 target to do whatever is necessary to follow either parent
738 or child. */
739 if (follow_fork_inferior (follow_child, detach_fork))
e58b0e63
PA
740 {
741 /* Target refused to follow, or there's some other reason
742 we shouldn't resume. */
743 should_resume = 0;
744 }
745 else
746 {
747 /* This pending follow fork event is now handled, one way
748 or another. The previous selected thread may be gone
749 from the lists by now, but if it is still around, need
750 to clear the pending follow request. */
e09875d4 751 tp = find_thread_ptid (parent);
e58b0e63
PA
752 if (tp)
753 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
754
755 /* This makes sure we don't try to apply the "Switched
756 over from WAIT_PID" logic above. */
757 nullify_last_target_wait_ptid ();
758
1777feb0 759 /* If we followed the child, switch to it... */
e58b0e63
PA
760 if (follow_child)
761 {
00431a78
PA
762 thread_info *child_thr = find_thread_ptid (child);
763 switch_to_thread (child_thr);
e58b0e63
PA
764
765 /* ... and preserve the stepping state, in case the
766 user was stepping over the fork call. */
767 if (should_resume)
768 {
769 tp = inferior_thread ();
8358c15c
JK
770 tp->control.step_resume_breakpoint
771 = step_resume_breakpoint;
16c381f0
JK
772 tp->control.step_range_start = step_range_start;
773 tp->control.step_range_end = step_range_end;
774 tp->control.step_frame_id = step_frame_id;
186c406b
TT
775 tp->control.exception_resume_breakpoint
776 = exception_resume_breakpoint;
8980e177 777 tp->thread_fsm = thread_fsm;
e58b0e63
PA
778 }
779 else
780 {
781 /* If we get here, it was because we're trying to
782 resume from a fork catchpoint, but, the user
783 has switched threads away from the thread that
784 forked. In that case, the resume command
785 issued is most likely not applicable to the
786 child, so just warn, and refuse to resume. */
3e43a32a 787 warning (_("Not resuming: switched threads "
fd7dcb94 788 "before following fork child."));
e58b0e63
PA
789 }
790
791 /* Reset breakpoints in the child as appropriate. */
792 follow_inferior_reset_breakpoints ();
793 }
e58b0e63
PA
794 }
795 }
796 break;
797 case TARGET_WAITKIND_SPURIOUS:
798 /* Nothing to follow. */
799 break;
800 default:
801 internal_error (__FILE__, __LINE__,
802 "Unexpected pending_follow.kind %d\n",
803 tp->pending_follow.kind);
804 break;
805 }
c906108c 806
e58b0e63 807 return should_resume;
c906108c
SS
808}
809
d83ad864 810static void
6604731b 811follow_inferior_reset_breakpoints (void)
c906108c 812{
4e1c45ea
PA
813 struct thread_info *tp = inferior_thread ();
814
6604731b
DJ
815 /* Was there a step_resume breakpoint? (There was if the user
816 did a "next" at the fork() call.) If so, explicitly reset its
a1aa2221
LM
817 thread number. Cloned step_resume breakpoints are disabled on
818 creation, so enable it here now that it is associated with the
819 correct thread.
6604731b
DJ
820
821 step_resumes are a form of bp that are made to be per-thread.
822 Since we created the step_resume bp when the parent process
823 was being debugged, and now are switching to the child process,
824 from the breakpoint package's viewpoint, that's a switch of
825 "threads". We must update the bp's notion of which thread
826 it is for, or it'll be ignored when it triggers. */
827
8358c15c 828 if (tp->control.step_resume_breakpoint)
a1aa2221
LM
829 {
830 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
831 tp->control.step_resume_breakpoint->loc->enabled = 1;
832 }
6604731b 833
a1aa2221 834 /* Treat exception_resume breakpoints like step_resume breakpoints. */
186c406b 835 if (tp->control.exception_resume_breakpoint)
a1aa2221
LM
836 {
837 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
838 tp->control.exception_resume_breakpoint->loc->enabled = 1;
839 }
186c406b 840
6604731b
DJ
841 /* Reinsert all breakpoints in the child. The user may have set
842 breakpoints after catching the fork, in which case those
843 were never set in the child, but only in the parent. This makes
844 sure the inserted breakpoints match the breakpoint list. */
845
846 breakpoint_re_set ();
847 insert_breakpoints ();
c906108c 848}
c906108c 849
6c95b8df
PA
850/* The child has exited or execed: resume threads of the parent the
851 user wanted to be executing. */
852
853static int
854proceed_after_vfork_done (struct thread_info *thread,
855 void *arg)
856{
857 int pid = * (int *) arg;
858
00431a78
PA
859 if (thread->ptid.pid () == pid
860 && thread->state == THREAD_RUNNING
861 && !thread->executing
6c95b8df 862 && !thread->stop_requested
a493e3e2 863 && thread->suspend.stop_signal == GDB_SIGNAL_0)
6c95b8df
PA
864 {
865 if (debug_infrun)
866 fprintf_unfiltered (gdb_stdlog,
867 "infrun: resuming vfork parent thread %s\n",
a068643d 868 target_pid_to_str (thread->ptid).c_str ());
6c95b8df 869
00431a78 870 switch_to_thread (thread);
70509625 871 clear_proceed_status (0);
64ce06e4 872 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
6c95b8df
PA
873 }
874
875 return 0;
876}
877
5ed8105e
PA
878/* Save/restore inferior_ptid, current program space and current
879 inferior. Only use this if the current context points at an exited
880 inferior (and therefore there's no current thread to save). */
881class scoped_restore_exited_inferior
882{
883public:
884 scoped_restore_exited_inferior ()
885 : m_saved_ptid (&inferior_ptid)
886 {}
887
888private:
889 scoped_restore_tmpl<ptid_t> m_saved_ptid;
890 scoped_restore_current_program_space m_pspace;
891 scoped_restore_current_inferior m_inferior;
892};
893
6c95b8df
PA
894/* Called whenever we notice an exec or exit event, to handle
895 detaching or resuming a vfork parent. */
896
897static void
898handle_vfork_child_exec_or_exit (int exec)
899{
900 struct inferior *inf = current_inferior ();
901
902 if (inf->vfork_parent)
903 {
904 int resume_parent = -1;
905
906 /* This exec or exit marks the end of the shared memory region
b73715df
TV
907 between the parent and the child. Break the bonds. */
908 inferior *vfork_parent = inf->vfork_parent;
909 inf->vfork_parent->vfork_child = NULL;
910 inf->vfork_parent = NULL;
6c95b8df 911
b73715df
TV
912 /* If the user wanted to detach from the parent, now is the
913 time. */
914 if (vfork_parent->pending_detach)
6c95b8df
PA
915 {
916 struct thread_info *tp;
6c95b8df
PA
917 struct program_space *pspace;
918 struct address_space *aspace;
919
1777feb0 920 /* follow-fork child, detach-on-fork on. */
6c95b8df 921
b73715df 922 vfork_parent->pending_detach = 0;
68c9da30 923
5ed8105e
PA
924 gdb::optional<scoped_restore_exited_inferior>
925 maybe_restore_inferior;
926 gdb::optional<scoped_restore_current_pspace_and_thread>
927 maybe_restore_thread;
928
929 /* If we're handling a child exit, then inferior_ptid points
930 at the inferior's pid, not to a thread. */
f50f4e56 931 if (!exec)
5ed8105e 932 maybe_restore_inferior.emplace ();
f50f4e56 933 else
5ed8105e 934 maybe_restore_thread.emplace ();
6c95b8df
PA
935
936 /* We're letting loose of the parent. */
b73715df 937 tp = any_live_thread_of_inferior (vfork_parent);
00431a78 938 switch_to_thread (tp);
6c95b8df
PA
939
940 /* We're about to detach from the parent, which implicitly
941 removes breakpoints from its address space. There's a
942 catch here: we want to reuse the spaces for the child,
943 but, parent/child are still sharing the pspace at this
944 point, although the exec in reality makes the kernel give
945 the child a fresh set of new pages. The problem here is
946 that the breakpoints module being unaware of this, would
947 likely chose the child process to write to the parent
948 address space. Swapping the child temporarily away from
949 the spaces has the desired effect. Yes, this is "sort
950 of" a hack. */
951
952 pspace = inf->pspace;
953 aspace = inf->aspace;
954 inf->aspace = NULL;
955 inf->pspace = NULL;
956
f67c0c91 957 if (print_inferior_events)
6c95b8df 958 {
a068643d 959 std::string pidstr
b73715df 960 = target_pid_to_str (ptid_t (vfork_parent->pid));
f67c0c91 961
223ffa71 962 target_terminal::ours_for_output ();
6c95b8df
PA
963
964 if (exec)
6f259a23
DB
965 {
966 fprintf_filtered (gdb_stdlog,
f67c0c91 967 _("[Detaching vfork parent %s "
a068643d 968 "after child exec]\n"), pidstr.c_str ());
6f259a23 969 }
6c95b8df 970 else
6f259a23
DB
971 {
972 fprintf_filtered (gdb_stdlog,
f67c0c91 973 _("[Detaching vfork parent %s "
a068643d 974 "after child exit]\n"), pidstr.c_str ());
6f259a23 975 }
6c95b8df
PA
976 }
977
b73715df 978 target_detach (vfork_parent, 0);
6c95b8df
PA
979
980 /* Put it back. */
981 inf->pspace = pspace;
982 inf->aspace = aspace;
6c95b8df
PA
983 }
984 else if (exec)
985 {
986 /* We're staying attached to the parent, so, really give the
987 child a new address space. */
564b1e3f 988 inf->pspace = new program_space (maybe_new_address_space ());
6c95b8df
PA
989 inf->aspace = inf->pspace->aspace;
990 inf->removable = 1;
991 set_current_program_space (inf->pspace);
992
b73715df 993 resume_parent = vfork_parent->pid;
6c95b8df
PA
994 }
995 else
996 {
6c95b8df
PA
997 struct program_space *pspace;
998
999 /* If this is a vfork child exiting, then the pspace and
1000 aspaces were shared with the parent. Since we're
1001 reporting the process exit, we'll be mourning all that is
1002 found in the address space, and switching to null_ptid,
1003 preparing to start a new inferior. But, since we don't
1004 want to clobber the parent's address/program spaces, we
1005 go ahead and create a new one for this exiting
1006 inferior. */
1007
5ed8105e
PA
1008 /* Switch to null_ptid while running clone_program_space, so
1009 that clone_program_space doesn't want to read the
1010 selected frame of a dead process. */
1011 scoped_restore restore_ptid
1012 = make_scoped_restore (&inferior_ptid, null_ptid);
6c95b8df
PA
1013
1014 /* This inferior is dead, so avoid giving the breakpoints
1015 module the option to write through to it (cloning a
1016 program space resets breakpoints). */
1017 inf->aspace = NULL;
1018 inf->pspace = NULL;
564b1e3f 1019 pspace = new program_space (maybe_new_address_space ());
6c95b8df
PA
1020 set_current_program_space (pspace);
1021 inf->removable = 1;
7dcd53a0 1022 inf->symfile_flags = SYMFILE_NO_READ;
b73715df 1023 clone_program_space (pspace, vfork_parent->pspace);
6c95b8df
PA
1024 inf->pspace = pspace;
1025 inf->aspace = pspace->aspace;
1026
b73715df 1027 resume_parent = vfork_parent->pid;
6c95b8df
PA
1028 }
1029
6c95b8df
PA
1030 gdb_assert (current_program_space == inf->pspace);
1031
1032 if (non_stop && resume_parent != -1)
1033 {
1034 /* If the user wanted the parent to be running, let it go
1035 free now. */
5ed8105e 1036 scoped_restore_current_thread restore_thread;
6c95b8df
PA
1037
1038 if (debug_infrun)
3e43a32a
MS
1039 fprintf_unfiltered (gdb_stdlog,
1040 "infrun: resuming vfork parent process %d\n",
6c95b8df
PA
1041 resume_parent);
1042
1043 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
6c95b8df
PA
1044 }
1045 }
1046}
1047
eb6c553b 1048/* Enum strings for "set|show follow-exec-mode". */
6c95b8df
PA
1049
1050static const char follow_exec_mode_new[] = "new";
1051static const char follow_exec_mode_same[] = "same";
40478521 1052static const char *const follow_exec_mode_names[] =
6c95b8df
PA
1053{
1054 follow_exec_mode_new,
1055 follow_exec_mode_same,
1056 NULL,
1057};
1058
1059static const char *follow_exec_mode_string = follow_exec_mode_same;
1060static void
1061show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1062 struct cmd_list_element *c, const char *value)
1063{
1064 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1065}
1066
ecf45d2c 1067/* EXEC_FILE_TARGET is assumed to be non-NULL. */
1adeb98a 1068
c906108c 1069static void
4ca51187 1070follow_exec (ptid_t ptid, const char *exec_file_target)
c906108c 1071{
6c95b8df 1072 struct inferior *inf = current_inferior ();
e99b03dc 1073 int pid = ptid.pid ();
94585166 1074 ptid_t process_ptid;
7a292a7a 1075
65d2b333
PW
1076 /* Switch terminal for any messages produced e.g. by
1077 breakpoint_re_set. */
1078 target_terminal::ours_for_output ();
1079
c906108c
SS
1080 /* This is an exec event that we actually wish to pay attention to.
1081 Refresh our symbol table to the newly exec'd program, remove any
1082 momentary bp's, etc.
1083
1084 If there are breakpoints, they aren't really inserted now,
1085 since the exec() transformed our inferior into a fresh set
1086 of instructions.
1087
1088 We want to preserve symbolic breakpoints on the list, since
1089 we have hopes that they can be reset after the new a.out's
1090 symbol table is read.
1091
1092 However, any "raw" breakpoints must be removed from the list
1093 (e.g., the solib bp's), since their address is probably invalid
1094 now.
1095
1096 And, we DON'T want to call delete_breakpoints() here, since
1097 that may write the bp's "shadow contents" (the instruction
85102364 1098 value that was overwritten with a TRAP instruction). Since
1777feb0 1099 we now have a new a.out, those shadow contents aren't valid. */
6c95b8df
PA
1100
1101 mark_breakpoints_out ();
1102
95e50b27
PA
1103 /* The target reports the exec event to the main thread, even if
1104 some other thread does the exec, and even if the main thread was
1105 stopped or already gone. We may still have non-leader threads of
1106 the process on our list. E.g., on targets that don't have thread
1107 exit events (like remote); or on native Linux in non-stop mode if
1108 there were only two threads in the inferior and the non-leader
1109 one is the one that execs (and nothing forces an update of the
1110 thread list up to here). When debugging remotely, it's best to
1111 avoid extra traffic, when possible, so avoid syncing the thread
1112 list with the target, and instead go ahead and delete all threads
1113 of the process but one that reported the event. Note this must
1114 be done before calling update_breakpoints_after_exec, as
1115 otherwise clearing the threads' resources would reference stale
1116 thread breakpoints -- it may have been one of these threads that
1117 stepped across the exec. We could just clear their stepping
1118 states, but as long as we're iterating, might as well delete
1119 them. Deleting them now rather than at the next user-visible
1120 stop provides a nicer sequence of events for user and MI
1121 notifications. */
08036331 1122 for (thread_info *th : all_threads_safe ())
d7e15655 1123 if (th->ptid.pid () == pid && th->ptid != ptid)
00431a78 1124 delete_thread (th);
95e50b27
PA
1125
1126 /* We also need to clear any left over stale state for the
1127 leader/event thread. E.g., if there was any step-resume
1128 breakpoint or similar, it's gone now. We cannot truly
1129 step-to-next statement through an exec(). */
08036331 1130 thread_info *th = inferior_thread ();
8358c15c 1131 th->control.step_resume_breakpoint = NULL;
186c406b 1132 th->control.exception_resume_breakpoint = NULL;
34b7e8a6 1133 th->control.single_step_breakpoints = NULL;
16c381f0
JK
1134 th->control.step_range_start = 0;
1135 th->control.step_range_end = 0;
c906108c 1136
95e50b27
PA
1137 /* The user may have had the main thread held stopped in the
1138 previous image (e.g., schedlock on, or non-stop). Release
1139 it now. */
a75724bc
PA
1140 th->stop_requested = 0;
1141
95e50b27
PA
1142 update_breakpoints_after_exec ();
1143
1777feb0 1144 /* What is this a.out's name? */
f2907e49 1145 process_ptid = ptid_t (pid);
6c95b8df 1146 printf_unfiltered (_("%s is executing new program: %s\n"),
a068643d 1147 target_pid_to_str (process_ptid).c_str (),
ecf45d2c 1148 exec_file_target);
c906108c
SS
1149
1150 /* We've followed the inferior through an exec. Therefore, the
1777feb0 1151 inferior has essentially been killed & reborn. */
7a292a7a 1152
6ca15a4b 1153 breakpoint_init_inferior (inf_execd);
e85a822c 1154
797bc1cb
TT
1155 gdb::unique_xmalloc_ptr<char> exec_file_host
1156 = exec_file_find (exec_file_target, NULL);
ff862be4 1157
ecf45d2c
SL
1158 /* If we were unable to map the executable target pathname onto a host
1159 pathname, tell the user that. Otherwise GDB's subsequent behavior
1160 is confusing. Maybe it would even be better to stop at this point
1161 so that the user can specify a file manually before continuing. */
1162 if (exec_file_host == NULL)
1163 warning (_("Could not load symbols for executable %s.\n"
1164 "Do you need \"set sysroot\"?"),
1165 exec_file_target);
c906108c 1166
cce9b6bf
PA
1167 /* Reset the shared library package. This ensures that we get a
1168 shlib event when the child reaches "_start", at which point the
1169 dld will have had a chance to initialize the child. */
1170 /* Also, loading a symbol file below may trigger symbol lookups, and
1171 we don't want those to be satisfied by the libraries of the
1172 previous incarnation of this process. */
1173 no_shared_libraries (NULL, 0);
1174
6c95b8df
PA
1175 if (follow_exec_mode_string == follow_exec_mode_new)
1176 {
6c95b8df
PA
1177 /* The user wants to keep the old inferior and program spaces
1178 around. Create a new fresh one, and switch to it. */
1179
35ed81d4
SM
1180 /* Do exit processing for the original inferior before setting the new
1181 inferior's pid. Having two inferiors with the same pid would confuse
1182 find_inferior_p(t)id. Transfer the terminal state and info from the
1183 old to the new inferior. */
1184 inf = add_inferior_with_spaces ();
1185 swap_terminal_info (inf, current_inferior ());
057302ce 1186 exit_inferior_silent (current_inferior ());
17d8546e 1187
94585166 1188 inf->pid = pid;
ecf45d2c 1189 target_follow_exec (inf, exec_file_target);
6c95b8df
PA
1190
1191 set_current_inferior (inf);
94585166 1192 set_current_program_space (inf->pspace);
c4c17fb0 1193 add_thread (ptid);
6c95b8df 1194 }
9107fc8d
PA
1195 else
1196 {
1197 /* The old description may no longer be fit for the new image.
1198 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1199 old description; we'll read a new one below. No need to do
1200 this on "follow-exec-mode new", as the old inferior stays
1201 around (its description is later cleared/refetched on
1202 restart). */
1203 target_clear_description ();
1204 }
6c95b8df
PA
1205
1206 gdb_assert (current_program_space == inf->pspace);
1207
ecf45d2c
SL
1208 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1209 because the proper displacement for a PIE (Position Independent
1210 Executable) main symbol file will only be computed by
1211 solib_create_inferior_hook below. breakpoint_re_set would fail
1212 to insert the breakpoints with the zero displacement. */
797bc1cb 1213 try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);
c906108c 1214
9107fc8d
PA
1215 /* If the target can specify a description, read it. Must do this
1216 after flipping to the new executable (because the target supplied
1217 description must be compatible with the executable's
1218 architecture, and the old executable may e.g., be 32-bit, while
1219 the new one 64-bit), and before anything involving memory or
1220 registers. */
1221 target_find_description ();
1222
268a4a75 1223 solib_create_inferior_hook (0);
c906108c 1224
4efc6507
DE
1225 jit_inferior_created_hook ();
1226
c1e56572
JK
1227 breakpoint_re_set ();
1228
c906108c
SS
1229 /* Reinsert all breakpoints. (Those which were symbolic have
1230 been reset to the proper address in the new a.out, thanks
1777feb0 1231 to symbol_file_command...). */
c906108c
SS
1232 insert_breakpoints ();
1233
1234 /* The next resume of this inferior should bring it to the shlib
1235 startup breakpoints. (If the user had also set bp's on
1236 "main" from the old (parent) process, then they'll auto-
1777feb0 1237 matically get reset there in the new process.). */
c906108c
SS
1238}
1239
c2829269
PA
1240/* The queue of threads that need to do a step-over operation to get
1241 past e.g., a breakpoint. What technique is used to step over the
1242 breakpoint/watchpoint does not matter -- all threads end up in the
1243 same queue, to maintain rough temporal order of execution, in order
1244 to avoid starvation, otherwise, we could e.g., find ourselves
1245 constantly stepping the same couple threads past their breakpoints
1246 over and over, if the single-step finish fast enough. */
1247struct thread_info *step_over_queue_head;
1248
6c4cfb24
PA
1249/* Bit flags indicating what the thread needs to step over. */
1250
8d297bbf 1251enum step_over_what_flag
6c4cfb24
PA
1252 {
1253 /* Step over a breakpoint. */
1254 STEP_OVER_BREAKPOINT = 1,
1255
1256 /* Step past a non-continuable watchpoint, in order to let the
1257 instruction execute so we can evaluate the watchpoint
1258 expression. */
1259 STEP_OVER_WATCHPOINT = 2
1260 };
8d297bbf 1261DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
6c4cfb24 1262
963f9c80 1263/* Info about an instruction that is being stepped over. */
31e77af2
PA
1264
1265struct step_over_info
1266{
963f9c80
PA
1267 /* If we're stepping past a breakpoint, this is the address space
1268 and address of the instruction the breakpoint is set at. We'll
1269 skip inserting all breakpoints here. Valid iff ASPACE is
1270 non-NULL. */
8b86c959 1271 const address_space *aspace;
31e77af2 1272 CORE_ADDR address;
963f9c80
PA
1273
1274 /* The instruction being stepped over triggers a nonsteppable
1275 watchpoint. If true, we'll skip inserting watchpoints. */
1276 int nonsteppable_watchpoint_p;
21edc42f
YQ
1277
1278 /* The thread's global number. */
1279 int thread;
31e77af2
PA
1280};
1281
1282/* The step-over info of the location that is being stepped over.
1283
1284 Note that with async/breakpoint always-inserted mode, a user might
1285 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1286 being stepped over. As setting a new breakpoint inserts all
1287 breakpoints, we need to make sure the breakpoint being stepped over
1288 isn't inserted then. We do that by only clearing the step-over
1289 info when the step-over is actually finished (or aborted).
1290
1291 Presently GDB can only step over one breakpoint at any given time.
1292 Given threads that can't run code in the same address space as the
1293 breakpoint's can't really miss the breakpoint, GDB could be taught
1294 to step-over at most one breakpoint per address space (so this info
1295 could move to the address space object if/when GDB is extended).
1296 The set of breakpoints being stepped over will normally be much
1297 smaller than the set of all breakpoints, so a flag in the
1298 breakpoint location structure would be wasteful. A separate list
1299 also saves complexity and run-time, as otherwise we'd have to go
1300 through all breakpoint locations clearing their flag whenever we
1301 start a new sequence. Similar considerations weigh against storing
1302 this info in the thread object. Plus, not all step overs actually
1303 have breakpoint locations -- e.g., stepping past a single-step
1304 breakpoint, or stepping to complete a non-continuable
1305 watchpoint. */
1306static struct step_over_info step_over_info;
1307
1308/* Record the address of the breakpoint/instruction we're currently
ce0db137
DE
1309 stepping over.
1310 N.B. We record the aspace and address now, instead of say just the thread,
1311 because when we need the info later the thread may be running. */
31e77af2
PA
1312
1313static void
8b86c959 1314set_step_over_info (const address_space *aspace, CORE_ADDR address,
21edc42f
YQ
1315 int nonsteppable_watchpoint_p,
1316 int thread)
31e77af2
PA
1317{
1318 step_over_info.aspace = aspace;
1319 step_over_info.address = address;
963f9c80 1320 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
21edc42f 1321 step_over_info.thread = thread;
31e77af2
PA
1322}
1323
1324/* Called when we're not longer stepping over a breakpoint / an
1325 instruction, so all breakpoints are free to be (re)inserted. */
1326
1327static void
1328clear_step_over_info (void)
1329{
372316f1
PA
1330 if (debug_infrun)
1331 fprintf_unfiltered (gdb_stdlog,
1332 "infrun: clear_step_over_info\n");
31e77af2
PA
1333 step_over_info.aspace = NULL;
1334 step_over_info.address = 0;
963f9c80 1335 step_over_info.nonsteppable_watchpoint_p = 0;
21edc42f 1336 step_over_info.thread = -1;
31e77af2
PA
1337}
1338
7f89fd65 1339/* See infrun.h. */
31e77af2
PA
1340
1341int
1342stepping_past_instruction_at (struct address_space *aspace,
1343 CORE_ADDR address)
1344{
1345 return (step_over_info.aspace != NULL
1346 && breakpoint_address_match (aspace, address,
1347 step_over_info.aspace,
1348 step_over_info.address));
1349}
1350
963f9c80
PA
1351/* See infrun.h. */
1352
21edc42f
YQ
1353int
1354thread_is_stepping_over_breakpoint (int thread)
1355{
1356 return (step_over_info.thread != -1
1357 && thread == step_over_info.thread);
1358}
1359
1360/* See infrun.h. */
1361
963f9c80
PA
1362int
1363stepping_past_nonsteppable_watchpoint (void)
1364{
1365 return step_over_info.nonsteppable_watchpoint_p;
1366}
1367
6cc83d2a
PA
1368/* Returns true if step-over info is valid. */
1369
1370static int
1371step_over_info_valid_p (void)
1372{
963f9c80
PA
1373 return (step_over_info.aspace != NULL
1374 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1375}
1376
c906108c 1377\f
237fc4c9
PA
1378/* Displaced stepping. */
1379
1380/* In non-stop debugging mode, we must take special care to manage
1381 breakpoints properly; in particular, the traditional strategy for
1382 stepping a thread past a breakpoint it has hit is unsuitable.
1383 'Displaced stepping' is a tactic for stepping one thread past a
1384 breakpoint it has hit while ensuring that other threads running
1385 concurrently will hit the breakpoint as they should.
1386
1387 The traditional way to step a thread T off a breakpoint in a
1388 multi-threaded program in all-stop mode is as follows:
1389
1390 a0) Initially, all threads are stopped, and breakpoints are not
1391 inserted.
1392 a1) We single-step T, leaving breakpoints uninserted.
1393 a2) We insert breakpoints, and resume all threads.
1394
1395 In non-stop debugging, however, this strategy is unsuitable: we
1396 don't want to have to stop all threads in the system in order to
1397 continue or step T past a breakpoint. Instead, we use displaced
1398 stepping:
1399
1400 n0) Initially, T is stopped, other threads are running, and
1401 breakpoints are inserted.
1402 n1) We copy the instruction "under" the breakpoint to a separate
1403 location, outside the main code stream, making any adjustments
1404 to the instruction, register, and memory state as directed by
1405 T's architecture.
1406 n2) We single-step T over the instruction at its new location.
1407 n3) We adjust the resulting register and memory state as directed
1408 by T's architecture. This includes resetting T's PC to point
1409 back into the main instruction stream.
1410 n4) We resume T.
1411
1412 This approach depends on the following gdbarch methods:
1413
1414 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1415 indicate where to copy the instruction, and how much space must
1416 be reserved there. We use these in step n1.
1417
1418 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1419 address, and makes any necessary adjustments to the instruction,
1420 register contents, and memory. We use this in step n1.
1421
1422 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1423 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1424 same effect the instruction would have had if we had executed it
1425 at its original address. We use this in step n3.
1426
237fc4c9
PA
1427 The gdbarch_displaced_step_copy_insn and
1428 gdbarch_displaced_step_fixup functions must be written so that
1429 copying an instruction with gdbarch_displaced_step_copy_insn,
1430 single-stepping across the copied instruction, and then applying
1431 gdbarch_displaced_insn_fixup should have the same effects on the
1432 thread's memory and registers as stepping the instruction in place
1433 would have. Exactly which responsibilities fall to the copy and
1434 which fall to the fixup is up to the author of those functions.
1435
1436 See the comments in gdbarch.sh for details.
1437
1438 Note that displaced stepping and software single-step cannot
1439 currently be used in combination, although with some care I think
1440 they could be made to. Software single-step works by placing
1441 breakpoints on all possible subsequent instructions; if the
1442 displaced instruction is a PC-relative jump, those breakpoints
1443 could fall in very strange places --- on pages that aren't
1444 executable, or at addresses that are not proper instruction
1445 boundaries. (We do generally let other threads run while we wait
1446 to hit the software single-step breakpoint, and they might
1447 encounter such a corrupted instruction.) One way to work around
1448 this would be to have gdbarch_displaced_step_copy_insn fully
1449 simulate the effect of PC-relative instructions (and return NULL)
1450 on architectures that use software single-stepping.
1451
1452 In non-stop mode, we can have independent and simultaneous step
1453 requests, so more than one thread may need to simultaneously step
1454 over a breakpoint. The current implementation assumes there is
1455 only one scratch space per process. In this case, we have to
1456 serialize access to the scratch space. If thread A wants to step
1457 over a breakpoint, but we are currently waiting for some other
1458 thread to complete a displaced step, we leave thread A stopped and
1459 place it in the displaced_step_request_queue. Whenever a displaced
1460 step finishes, we pick the next thread in the queue and start a new
1461 displaced step operation on it. See displaced_step_prepare and
1462 displaced_step_fixup for details. */
1463
cfba9872
SM
1464/* Default destructor for displaced_step_closure. */
1465
1466displaced_step_closure::~displaced_step_closure () = default;
1467
fc1cf338
PA
1468/* Get the displaced stepping state of process PID. */
1469
39a36629 1470static displaced_step_inferior_state *
00431a78 1471get_displaced_stepping_state (inferior *inf)
fc1cf338 1472{
d20172fc 1473 return &inf->displaced_step_state;
fc1cf338
PA
1474}
1475
372316f1
PA
1476/* Returns true if any inferior has a thread doing a displaced
1477 step. */
1478
39a36629
SM
1479static bool
1480displaced_step_in_progress_any_inferior ()
372316f1 1481{
d20172fc 1482 for (inferior *i : all_inferiors ())
39a36629 1483 {
d20172fc 1484 if (i->displaced_step_state.step_thread != nullptr)
39a36629
SM
1485 return true;
1486 }
372316f1 1487
39a36629 1488 return false;
372316f1
PA
1489}
1490
c0987663
YQ
1491/* Return true if thread represented by PTID is doing a displaced
1492 step. */
1493
1494static int
00431a78 1495displaced_step_in_progress_thread (thread_info *thread)
c0987663 1496{
00431a78 1497 gdb_assert (thread != NULL);
c0987663 1498
d20172fc 1499 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
c0987663
YQ
1500}
1501
8f572e5c
PA
1502/* Return true if process PID has a thread doing a displaced step. */
1503
1504static int
00431a78 1505displaced_step_in_progress (inferior *inf)
8f572e5c 1506{
d20172fc 1507 return get_displaced_stepping_state (inf)->step_thread != nullptr;
fc1cf338
PA
1508}
1509
a42244db
YQ
1510/* If inferior is in displaced stepping, and ADDR equals to starting address
1511 of copy area, return corresponding displaced_step_closure. Otherwise,
1512 return NULL. */
1513
1514struct displaced_step_closure*
1515get_displaced_step_closure_by_addr (CORE_ADDR addr)
1516{
d20172fc 1517 displaced_step_inferior_state *displaced
00431a78 1518 = get_displaced_stepping_state (current_inferior ());
a42244db
YQ
1519
1520 /* If checking the mode of displaced instruction in copy area. */
d20172fc 1521 if (displaced->step_thread != nullptr
00431a78 1522 && displaced->step_copy == addr)
a42244db
YQ
1523 return displaced->step_closure;
1524
1525 return NULL;
1526}
1527
fc1cf338
PA
1528static void
1529infrun_inferior_exit (struct inferior *inf)
1530{
d20172fc 1531 inf->displaced_step_state.reset ();
fc1cf338 1532}
237fc4c9 1533
fff08868
HZ
1534/* If ON, and the architecture supports it, GDB will use displaced
1535 stepping to step over breakpoints. If OFF, or if the architecture
1536 doesn't support it, GDB will instead use the traditional
1537 hold-and-step approach. If AUTO (which is the default), GDB will
1538 decide which technique to use to step over breakpoints depending on
1539 which of all-stop or non-stop mode is active --- displaced stepping
1540 in non-stop mode; hold-and-step in all-stop mode. */
1541
72d0e2c5 1542static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1543
237fc4c9
PA
1544static void
1545show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1546 struct cmd_list_element *c,
1547 const char *value)
1548{
72d0e2c5 1549 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1550 fprintf_filtered (file,
1551 _("Debugger's willingness to use displaced stepping "
1552 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1553 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1554 else
3e43a32a
MS
1555 fprintf_filtered (file,
1556 _("Debugger's willingness to use displaced stepping "
1557 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1558}
1559
fff08868 1560/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1561 over breakpoints of thread TP. */
fff08868 1562
237fc4c9 1563static int
3fc8eb30 1564use_displaced_stepping (struct thread_info *tp)
237fc4c9 1565{
00431a78 1566 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 1567 struct gdbarch *gdbarch = regcache->arch ();
d20172fc
SM
1568 displaced_step_inferior_state *displaced_state
1569 = get_displaced_stepping_state (tp->inf);
3fc8eb30 1570
fbea99ea
PA
1571 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1572 && target_is_non_stop_p ())
72d0e2c5 1573 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
96429cc8 1574 && gdbarch_displaced_step_copy_insn_p (gdbarch)
3fc8eb30 1575 && find_record_target () == NULL
d20172fc 1576 && !displaced_state->failed_before);
237fc4c9
PA
1577}
1578
1579/* Clean out any stray displaced stepping state. */
1580static void
fc1cf338 1581displaced_step_clear (struct displaced_step_inferior_state *displaced)
237fc4c9
PA
1582{
1583 /* Indicate that there is no cleanup pending. */
00431a78 1584 displaced->step_thread = nullptr;
237fc4c9 1585
cfba9872 1586 delete displaced->step_closure;
6d45d4b4 1587 displaced->step_closure = NULL;
237fc4c9
PA
1588}
1589
9799571e
TT
1590/* A cleanup that wraps displaced_step_clear. */
1591using displaced_step_clear_cleanup
1592 = FORWARD_SCOPE_EXIT (displaced_step_clear);
237fc4c9
PA
1593
1594/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1595void
1596displaced_step_dump_bytes (struct ui_file *file,
1597 const gdb_byte *buf,
1598 size_t len)
1599{
1600 int i;
1601
1602 for (i = 0; i < len; i++)
1603 fprintf_unfiltered (file, "%02x ", buf[i]);
1604 fputs_unfiltered ("\n", file);
1605}
1606
1607/* Prepare to single-step, using displaced stepping.
1608
1609 Note that we cannot use displaced stepping when we have a signal to
1610 deliver. If we have a signal to deliver and an instruction to step
1611 over, then after the step, there will be no indication from the
1612 target whether the thread entered a signal handler or ignored the
1613 signal and stepped over the instruction successfully --- both cases
1614 result in a simple SIGTRAP. In the first case we mustn't do a
1615 fixup, and in the second case we must --- but we can't tell which.
1616 Comments in the code for 'random signals' in handle_inferior_event
1617 explain how we handle this case instead.
1618
1619 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1620 stepped now; 0 if displaced stepping this thread got queued; or -1
1621 if this instruction can't be displaced stepped. */
1622
237fc4c9 1623static int
00431a78 1624displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1625{
00431a78 1626 regcache *regcache = get_thread_regcache (tp);
ac7936df 1627 struct gdbarch *gdbarch = regcache->arch ();
8b86c959 1628 const address_space *aspace = regcache->aspace ();
237fc4c9
PA
1629 CORE_ADDR original, copy;
1630 ULONGEST len;
1631 struct displaced_step_closure *closure;
9e529e1d 1632 int status;
237fc4c9
PA
1633
1634 /* We should never reach this function if the architecture does not
1635 support displaced stepping. */
1636 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1637
c2829269
PA
1638 /* Nor if the thread isn't meant to step over a breakpoint. */
1639 gdb_assert (tp->control.trap_expected);
1640
c1e36e3e
PA
1641 /* Disable range stepping while executing in the scratch pad. We
1642 want a single-step even if executing the displaced instruction in
1643 the scratch buffer lands within the stepping range (e.g., a
1644 jump/branch). */
1645 tp->control.may_range_step = 0;
1646
fc1cf338
PA
1647 /* We have to displaced step one thread at a time, as we only have
1648 access to a single scratch space per inferior. */
237fc4c9 1649
d20172fc
SM
1650 displaced_step_inferior_state *displaced
1651 = get_displaced_stepping_state (tp->inf);
fc1cf338 1652
00431a78 1653 if (displaced->step_thread != nullptr)
237fc4c9
PA
1654 {
1655 /* Already waiting for a displaced step to finish. Defer this
1656 request and place in queue. */
237fc4c9
PA
1657
1658 if (debug_displaced)
1659 fprintf_unfiltered (gdb_stdlog,
c2829269 1660 "displaced: deferring step of %s\n",
a068643d 1661 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1662
c2829269 1663 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1664 return 0;
1665 }
1666 else
1667 {
1668 if (debug_displaced)
1669 fprintf_unfiltered (gdb_stdlog,
1670 "displaced: stepping %s now\n",
a068643d 1671 target_pid_to_str (tp->ptid).c_str ());
237fc4c9
PA
1672 }
1673
fc1cf338 1674 displaced_step_clear (displaced);
237fc4c9 1675
00431a78
PA
1676 scoped_restore_current_thread restore_thread;
1677
1678 switch_to_thread (tp);
ad53cd71 1679
515630c5 1680 original = regcache_read_pc (regcache);
237fc4c9
PA
1681
1682 copy = gdbarch_displaced_step_location (gdbarch);
1683 len = gdbarch_max_insn_length (gdbarch);
1684
d35ae833
PA
1685 if (breakpoint_in_range_p (aspace, copy, len))
1686 {
1687 /* There's a breakpoint set in the scratch pad location range
1688 (which is usually around the entry point). We'd either
1689 install it before resuming, which would overwrite/corrupt the
1690 scratch pad, or if it was already inserted, this displaced
1691 step would overwrite it. The latter is OK in the sense that
1692 we already assume that no thread is going to execute the code
1693 in the scratch pad range (after initial startup) anyway, but
1694 the former is unacceptable. Simply punt and fallback to
1695 stepping over this breakpoint in-line. */
1696 if (debug_displaced)
1697 {
1698 fprintf_unfiltered (gdb_stdlog,
1699 "displaced: breakpoint set in scratch pad. "
1700 "Stepping over breakpoint in-line instead.\n");
1701 }
1702
d35ae833
PA
1703 return -1;
1704 }
1705
237fc4c9 1706 /* Save the original contents of the copy area. */
d20172fc
SM
1707 displaced->step_saved_copy.resize (len);
1708 status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
9e529e1d
JK
1709 if (status != 0)
1710 throw_error (MEMORY_ERROR,
1711 _("Error accessing memory address %s (%s) for "
1712 "displaced-stepping scratch space."),
1713 paddress (gdbarch, copy), safe_strerror (status));
237fc4c9
PA
1714 if (debug_displaced)
1715 {
5af949e3
UW
1716 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1717 paddress (gdbarch, copy));
fc1cf338 1718 displaced_step_dump_bytes (gdb_stdlog,
d20172fc 1719 displaced->step_saved_copy.data (),
fc1cf338 1720 len);
237fc4c9
PA
1721 };
1722
1723 closure = gdbarch_displaced_step_copy_insn (gdbarch,
ad53cd71 1724 original, copy, regcache);
7f03bd92
PA
1725 if (closure == NULL)
1726 {
1727 /* The architecture doesn't know how or want to displaced step
1728 this instruction or instruction sequence. Fallback to
1729 stepping over the breakpoint in-line. */
7f03bd92
PA
1730 return -1;
1731 }
237fc4c9 1732
9f5a595d
UW
1733 /* Save the information we need to fix things up if the step
1734 succeeds. */
00431a78 1735 displaced->step_thread = tp;
fc1cf338
PA
1736 displaced->step_gdbarch = gdbarch;
1737 displaced->step_closure = closure;
1738 displaced->step_original = original;
1739 displaced->step_copy = copy;
9f5a595d 1740
9799571e
TT
1741 {
1742 displaced_step_clear_cleanup cleanup (displaced);
237fc4c9 1743
9799571e
TT
1744 /* Resume execution at the copy. */
1745 regcache_write_pc (regcache, copy);
237fc4c9 1746
9799571e
TT
1747 cleanup.release ();
1748 }
ad53cd71 1749
237fc4c9 1750 if (debug_displaced)
5af949e3
UW
1751 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1752 paddress (gdbarch, copy));
237fc4c9 1753
237fc4c9
PA
1754 return 1;
1755}
1756
3fc8eb30
PA
1757/* Wrapper for displaced_step_prepare_throw that disabled further
1758 attempts at displaced stepping if we get a memory error. */
1759
1760static int
00431a78 1761displaced_step_prepare (thread_info *thread)
3fc8eb30
PA
1762{
1763 int prepared = -1;
1764
a70b8144 1765 try
3fc8eb30 1766 {
00431a78 1767 prepared = displaced_step_prepare_throw (thread);
3fc8eb30 1768 }
230d2906 1769 catch (const gdb_exception_error &ex)
3fc8eb30
PA
1770 {
1771 struct displaced_step_inferior_state *displaced_state;
1772
16b41842
PA
1773 if (ex.error != MEMORY_ERROR
1774 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1775 throw;
3fc8eb30
PA
1776
1777 if (debug_infrun)
1778 {
1779 fprintf_unfiltered (gdb_stdlog,
1780 "infrun: disabling displaced stepping: %s\n",
3d6e9d23 1781 ex.what ());
3fc8eb30
PA
1782 }
1783
1784 /* Be verbose if "set displaced-stepping" is "on", silent if
1785 "auto". */
1786 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1787 {
fd7dcb94 1788 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1789 ex.what ());
3fc8eb30
PA
1790 }
1791
1792 /* Disable further displaced stepping attempts. */
1793 displaced_state
00431a78 1794 = get_displaced_stepping_state (thread->inf);
3fc8eb30
PA
1795 displaced_state->failed_before = 1;
1796 }
3fc8eb30
PA
1797
1798 return prepared;
1799}
1800
237fc4c9 1801static void
3e43a32a
MS
1802write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1803 const gdb_byte *myaddr, int len)
237fc4c9 1804{
2989a365 1805 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
abbb1732 1806
237fc4c9
PA
1807 inferior_ptid = ptid;
1808 write_memory (memaddr, myaddr, len);
237fc4c9
PA
1809}
1810
e2d96639
YQ
1811/* Restore the contents of the copy area for thread PTID. */
1812
1813static void
1814displaced_step_restore (struct displaced_step_inferior_state *displaced,
1815 ptid_t ptid)
1816{
1817 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1818
1819 write_memory_ptid (ptid, displaced->step_copy,
d20172fc 1820 displaced->step_saved_copy.data (), len);
e2d96639
YQ
1821 if (debug_displaced)
1822 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
a068643d 1823 target_pid_to_str (ptid).c_str (),
e2d96639
YQ
1824 paddress (displaced->step_gdbarch,
1825 displaced->step_copy));
1826}
1827
372316f1
PA
1828/* If we displaced stepped an instruction successfully, adjust
1829 registers and memory to yield the same effect the instruction would
1830 have had if we had executed it at its original address, and return
1831 1. If the instruction didn't complete, relocate the PC and return
1832 -1. If the thread wasn't displaced stepping, return 0. */
1833
1834static int
00431a78 1835displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1836{
fc1cf338 1837 struct displaced_step_inferior_state *displaced
00431a78 1838 = get_displaced_stepping_state (event_thread->inf);
372316f1 1839 int ret;
fc1cf338 1840
00431a78
PA
1841 /* Was this event for the thread we displaced? */
1842 if (displaced->step_thread != event_thread)
372316f1 1843 return 0;
237fc4c9 1844
9799571e 1845 displaced_step_clear_cleanup cleanup (displaced);
237fc4c9 1846
00431a78 1847 displaced_step_restore (displaced, displaced->step_thread->ptid);
237fc4c9 1848
cb71640d
PA
1849 /* Fixup may need to read memory/registers. Switch to the thread
1850 that we're fixing up. Also, target_stopped_by_watchpoint checks
1851 the current thread. */
00431a78 1852 switch_to_thread (event_thread);
cb71640d 1853
237fc4c9 1854 /* Did the instruction complete successfully? */
cb71640d
PA
1855 if (signal == GDB_SIGNAL_TRAP
1856 && !(target_stopped_by_watchpoint ()
1857 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1858 || target_have_steppable_watchpoint)))
237fc4c9
PA
1859 {
1860 /* Fix up the resulting state. */
fc1cf338
PA
1861 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1862 displaced->step_closure,
1863 displaced->step_original,
1864 displaced->step_copy,
00431a78 1865 get_thread_regcache (displaced->step_thread));
372316f1 1866 ret = 1;
237fc4c9
PA
1867 }
1868 else
1869 {
1870 /* Since the instruction didn't complete, all we can do is
1871 relocate the PC. */
00431a78 1872 struct regcache *regcache = get_thread_regcache (event_thread);
515630c5 1873 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 1874
fc1cf338 1875 pc = displaced->step_original + (pc - displaced->step_copy);
515630c5 1876 regcache_write_pc (regcache, pc);
372316f1 1877 ret = -1;
237fc4c9
PA
1878 }
1879
372316f1 1880 return ret;
c2829269 1881}
1c5cfe86 1882
4d9d9d04
PA
1883/* Data to be passed around while handling an event. This data is
1884 discarded between events. */
1885struct execution_control_state
1886{
1887 ptid_t ptid;
1888 /* The thread that got the event, if this was a thread event; NULL
1889 otherwise. */
1890 struct thread_info *event_thread;
1891
1892 struct target_waitstatus ws;
1893 int stop_func_filled_in;
1894 CORE_ADDR stop_func_start;
1895 CORE_ADDR stop_func_end;
1896 const char *stop_func_name;
1897 int wait_some_more;
1898
1899 /* True if the event thread hit the single-step breakpoint of
1900 another thread. Thus the event doesn't cause a stop, the thread
1901 needs to be single-stepped past the single-step breakpoint before
1902 we can switch back to the original stepping thread. */
1903 int hit_singlestep_breakpoint;
1904};
1905
1906/* Clear ECS and set it to point at TP. */
c2829269
PA
1907
1908static void
4d9d9d04
PA
1909reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1910{
1911 memset (ecs, 0, sizeof (*ecs));
1912 ecs->event_thread = tp;
1913 ecs->ptid = tp->ptid;
1914}
1915
1916static void keep_going_pass_signal (struct execution_control_state *ecs);
1917static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 1918static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1919static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1920
1921/* Are there any pending step-over requests? If so, run all we can
1922 now and return true. Otherwise, return false. */
1923
1924static int
c2829269
PA
1925start_step_over (void)
1926{
1927 struct thread_info *tp, *next;
1928
372316f1
PA
1929 /* Don't start a new step-over if we already have an in-line
1930 step-over operation ongoing. */
1931 if (step_over_info_valid_p ())
1932 return 0;
1933
c2829269 1934 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 1935 {
4d9d9d04
PA
1936 struct execution_control_state ecss;
1937 struct execution_control_state *ecs = &ecss;
8d297bbf 1938 step_over_what step_what;
372316f1 1939 int must_be_in_line;
c2829269 1940
c65d6b55
PA
1941 gdb_assert (!tp->stop_requested);
1942
c2829269 1943 next = thread_step_over_chain_next (tp);
237fc4c9 1944
c2829269
PA
1945 /* If this inferior already has a displaced step in process,
1946 don't start a new one. */
00431a78 1947 if (displaced_step_in_progress (tp->inf))
c2829269
PA
1948 continue;
1949
372316f1
PA
1950 step_what = thread_still_needs_step_over (tp);
1951 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1952 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1953 && !use_displaced_stepping (tp)));
372316f1
PA
1954
1955 /* We currently stop all threads of all processes to step-over
1956 in-line. If we need to start a new in-line step-over, let
1957 any pending displaced steps finish first. */
1958 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
1959 return 0;
1960
c2829269
PA
1961 thread_step_over_chain_remove (tp);
1962
1963 if (step_over_queue_head == NULL)
1964 {
1965 if (debug_infrun)
1966 fprintf_unfiltered (gdb_stdlog,
1967 "infrun: step-over queue now empty\n");
1968 }
1969
372316f1
PA
1970 if (tp->control.trap_expected
1971 || tp->resumed
1972 || tp->executing)
ad53cd71 1973 {
4d9d9d04
PA
1974 internal_error (__FILE__, __LINE__,
1975 "[%s] has inconsistent state: "
372316f1 1976 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 1977 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 1978 tp->control.trap_expected,
372316f1 1979 tp->resumed,
4d9d9d04 1980 tp->executing);
ad53cd71 1981 }
1c5cfe86 1982
4d9d9d04
PA
1983 if (debug_infrun)
1984 fprintf_unfiltered (gdb_stdlog,
1985 "infrun: resuming [%s] for step-over\n",
a068643d 1986 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
1987
1988 /* keep_going_pass_signal skips the step-over if the breakpoint
1989 is no longer inserted. In all-stop, we want to keep looking
1990 for a thread that needs a step-over instead of resuming TP,
1991 because we wouldn't be able to resume anything else until the
1992 target stops again. In non-stop, the resume always resumes
1993 only TP, so it's OK to let the thread resume freely. */
fbea99ea 1994 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 1995 continue;
8550d3b3 1996
00431a78 1997 switch_to_thread (tp);
4d9d9d04
PA
1998 reset_ecs (ecs, tp);
1999 keep_going_pass_signal (ecs);
1c5cfe86 2000
4d9d9d04
PA
2001 if (!ecs->wait_some_more)
2002 error (_("Command aborted."));
1c5cfe86 2003
372316f1
PA
2004 gdb_assert (tp->resumed);
2005
2006 /* If we started a new in-line step-over, we're done. */
2007 if (step_over_info_valid_p ())
2008 {
2009 gdb_assert (tp->control.trap_expected);
2010 return 1;
2011 }
2012
fbea99ea 2013 if (!target_is_non_stop_p ())
4d9d9d04
PA
2014 {
2015 /* On all-stop, shouldn't have resumed unless we needed a
2016 step over. */
2017 gdb_assert (tp->control.trap_expected
2018 || tp->step_after_step_resume_breakpoint);
2019
2020 /* With remote targets (at least), in all-stop, we can't
2021 issue any further remote commands until the program stops
2022 again. */
2023 return 1;
1c5cfe86 2024 }
c2829269 2025
4d9d9d04
PA
2026 /* Either the thread no longer needed a step-over, or a new
2027 displaced stepping sequence started. Even in the latter
2028 case, continue looking. Maybe we can also start another
2029 displaced step on a thread of other process. */
237fc4c9 2030 }
4d9d9d04
PA
2031
2032 return 0;
237fc4c9
PA
2033}
2034
5231c1fd
PA
2035/* Update global variables holding ptids to hold NEW_PTID if they were
2036 holding OLD_PTID. */
2037static void
2038infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2039{
d7e15655 2040 if (inferior_ptid == old_ptid)
5231c1fd 2041 inferior_ptid = new_ptid;
5231c1fd
PA
2042}
2043
237fc4c9 2044\f
c906108c 2045
53904c9e
AC
2046static const char schedlock_off[] = "off";
2047static const char schedlock_on[] = "on";
2048static const char schedlock_step[] = "step";
f2665db5 2049static const char schedlock_replay[] = "replay";
40478521 2050static const char *const scheduler_enums[] = {
ef346e04
AC
2051 schedlock_off,
2052 schedlock_on,
2053 schedlock_step,
f2665db5 2054 schedlock_replay,
ef346e04
AC
2055 NULL
2056};
f2665db5 2057static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2058static void
2059show_scheduler_mode (struct ui_file *file, int from_tty,
2060 struct cmd_list_element *c, const char *value)
2061{
3e43a32a
MS
2062 fprintf_filtered (file,
2063 _("Mode for locking scheduler "
2064 "during execution is \"%s\".\n"),
920d2a44
AC
2065 value);
2066}
c906108c
SS
2067
2068static void
eb4c3f4a 2069set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2070{
eefe576e
AC
2071 if (!target_can_lock_scheduler)
2072 {
2073 scheduler_mode = schedlock_off;
2074 error (_("Target '%s' cannot support this command."), target_shortname);
2075 }
c906108c
SS
2076}
2077
d4db2f36
PA
2078/* True if execution commands resume all threads of all processes by
2079 default; otherwise, resume only threads of the current inferior
2080 process. */
491144b5 2081bool sched_multi = false;
d4db2f36 2082
2facfe5c
DD
2083/* Try to setup for software single stepping over the specified location.
2084 Return 1 if target_resume() should use hardware single step.
2085
2086 GDBARCH the current gdbarch.
2087 PC the location to step over. */
2088
2089static int
2090maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2091{
2092 int hw_step = 1;
2093
f02253f1 2094 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2095 && gdbarch_software_single_step_p (gdbarch))
2096 hw_step = !insert_single_step_breakpoints (gdbarch);
2097
2facfe5c
DD
2098 return hw_step;
2099}
c906108c 2100
f3263aa4
PA
2101/* See infrun.h. */
2102
09cee04b
PA
2103ptid_t
2104user_visible_resume_ptid (int step)
2105{
f3263aa4 2106 ptid_t resume_ptid;
09cee04b 2107
09cee04b
PA
2108 if (non_stop)
2109 {
2110 /* With non-stop mode on, threads are always handled
2111 individually. */
2112 resume_ptid = inferior_ptid;
2113 }
2114 else if ((scheduler_mode == schedlock_on)
03d46957 2115 || (scheduler_mode == schedlock_step && step))
09cee04b 2116 {
f3263aa4
PA
2117 /* User-settable 'scheduler' mode requires solo thread
2118 resume. */
09cee04b
PA
2119 resume_ptid = inferior_ptid;
2120 }
f2665db5
MM
2121 else if ((scheduler_mode == schedlock_replay)
2122 && target_record_will_replay (minus_one_ptid, execution_direction))
2123 {
2124 /* User-settable 'scheduler' mode requires solo thread resume in replay
2125 mode. */
2126 resume_ptid = inferior_ptid;
2127 }
f3263aa4
PA
2128 else if (!sched_multi && target_supports_multi_process ())
2129 {
2130 /* Resume all threads of the current process (and none of other
2131 processes). */
e99b03dc 2132 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2133 }
2134 else
2135 {
2136 /* Resume all threads of all processes. */
2137 resume_ptid = RESUME_ALL;
2138 }
09cee04b
PA
2139
2140 return resume_ptid;
2141}
2142
fbea99ea
PA
2143/* Return a ptid representing the set of threads that we will resume,
2144 in the perspective of the target, assuming run control handling
2145 does not require leaving some threads stopped (e.g., stepping past
2146 breakpoint). USER_STEP indicates whether we're about to start the
2147 target for a stepping command. */
2148
2149static ptid_t
2150internal_resume_ptid (int user_step)
2151{
2152 /* In non-stop, we always control threads individually. Note that
2153 the target may always work in non-stop mode even with "set
2154 non-stop off", in which case user_visible_resume_ptid could
2155 return a wildcard ptid. */
2156 if (target_is_non_stop_p ())
2157 return inferior_ptid;
2158 else
2159 return user_visible_resume_ptid (user_step);
2160}
2161
64ce06e4
PA
2162/* Wrapper for target_resume, that handles infrun-specific
2163 bookkeeping. */
2164
2165static void
2166do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2167{
2168 struct thread_info *tp = inferior_thread ();
2169
c65d6b55
PA
2170 gdb_assert (!tp->stop_requested);
2171
64ce06e4 2172 /* Install inferior's terminal modes. */
223ffa71 2173 target_terminal::inferior ();
64ce06e4
PA
2174
2175 /* Avoid confusing the next resume, if the next stop/resume
2176 happens to apply to another thread. */
2177 tp->suspend.stop_signal = GDB_SIGNAL_0;
2178
8f572e5c
PA
2179 /* Advise target which signals may be handled silently.
2180
2181 If we have removed breakpoints because we are stepping over one
2182 in-line (in any thread), we need to receive all signals to avoid
2183 accidentally skipping a breakpoint during execution of a signal
2184 handler.
2185
2186 Likewise if we're displaced stepping, otherwise a trap for a
2187 breakpoint in a signal handler might be confused with the
2188 displaced step finishing. We don't make the displaced_step_fixup
2189 step distinguish the cases instead, because:
2190
2191 - a backtrace while stopped in the signal handler would show the
2192 scratch pad as frame older than the signal handler, instead of
2193 the real mainline code.
2194
2195 - when the thread is later resumed, the signal handler would
2196 return to the scratch pad area, which would no longer be
2197 valid. */
2198 if (step_over_info_valid_p ()
00431a78 2199 || displaced_step_in_progress (tp->inf))
adc6a863 2200 target_pass_signals ({});
64ce06e4 2201 else
adc6a863 2202 target_pass_signals (signal_pass);
64ce06e4
PA
2203
2204 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2205
2206 target_commit_resume ();
64ce06e4
PA
2207}
2208
d930703d 2209/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2210 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2211 call 'resume', which handles exceptions. */
c906108c 2212
71d378ae
PA
2213static void
2214resume_1 (enum gdb_signal sig)
c906108c 2215{
515630c5 2216 struct regcache *regcache = get_current_regcache ();
ac7936df 2217 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2218 struct thread_info *tp = inferior_thread ();
515630c5 2219 CORE_ADDR pc = regcache_read_pc (regcache);
8b86c959 2220 const address_space *aspace = regcache->aspace ();
b0f16a3e 2221 ptid_t resume_ptid;
856e7dd6
PA
2222 /* This represents the user's step vs continue request. When
2223 deciding whether "set scheduler-locking step" applies, it's the
2224 user's intention that counts. */
2225 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2226 /* This represents what we'll actually request the target to do.
2227 This can decay from a step to a continue, if e.g., we need to
2228 implement single-stepping with breakpoints (software
2229 single-step). */
6b403daa 2230 int step;
c7e8a53c 2231
c65d6b55 2232 gdb_assert (!tp->stop_requested);
c2829269
PA
2233 gdb_assert (!thread_is_in_step_over_chain (tp));
2234
372316f1
PA
2235 if (tp->suspend.waitstatus_pending_p)
2236 {
2237 if (debug_infrun)
2238 {
23fdd69e
SM
2239 std::string statstr
2240 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2241
372316f1 2242 fprintf_unfiltered (gdb_stdlog,
23fdd69e
SM
2243 "infrun: resume: thread %s has pending wait "
2244 "status %s (currently_stepping=%d).\n",
a068643d
TT
2245 target_pid_to_str (tp->ptid).c_str (),
2246 statstr.c_str (),
372316f1 2247 currently_stepping (tp));
372316f1
PA
2248 }
2249
2250 tp->resumed = 1;
2251
2252 /* FIXME: What should we do if we are supposed to resume this
2253 thread with a signal? Maybe we should maintain a queue of
2254 pending signals to deliver. */
2255 if (sig != GDB_SIGNAL_0)
2256 {
fd7dcb94 2257 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2258 gdb_signal_to_name (sig),
2259 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2260 }
2261
2262 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2263
2264 if (target_can_async_p ())
9516f85a
AB
2265 {
2266 target_async (1);
2267 /* Tell the event loop we have an event to process. */
2268 mark_async_event_handler (infrun_async_inferior_event_token);
2269 }
372316f1
PA
2270 return;
2271 }
2272
2273 tp->stepped_breakpoint = 0;
2274
6b403daa
PA
2275 /* Depends on stepped_breakpoint. */
2276 step = currently_stepping (tp);
2277
74609e71
YQ
2278 if (current_inferior ()->waiting_for_vfork_done)
2279 {
48f9886d
PA
2280 /* Don't try to single-step a vfork parent that is waiting for
2281 the child to get out of the shared memory region (by exec'ing
2282 or exiting). This is particularly important on software
2283 single-step archs, as the child process would trip on the
2284 software single step breakpoint inserted for the parent
2285 process. Since the parent will not actually execute any
2286 instruction until the child is out of the shared region (such
2287 are vfork's semantics), it is safe to simply continue it.
2288 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2289 the parent, and tell it to `keep_going', which automatically
2290 re-sets it stepping. */
74609e71
YQ
2291 if (debug_infrun)
2292 fprintf_unfiltered (gdb_stdlog,
2293 "infrun: resume : clear step\n");
a09dd441 2294 step = 0;
74609e71
YQ
2295 }
2296
527159b7 2297 if (debug_infrun)
237fc4c9 2298 fprintf_unfiltered (gdb_stdlog,
c9737c08 2299 "infrun: resume (step=%d, signal=%s), "
0d9a9a5f 2300 "trap_expected=%d, current thread [%s] at %s\n",
c9737c08
PA
2301 step, gdb_signal_to_symbol_string (sig),
2302 tp->control.trap_expected,
a068643d 2303 target_pid_to_str (inferior_ptid).c_str (),
0d9a9a5f 2304 paddress (gdbarch, pc));
c906108c 2305
c2c6d25f
JM
2306 /* Normally, by the time we reach `resume', the breakpoints are either
2307 removed or inserted, as appropriate. The exception is if we're sitting
2308 at a permanent breakpoint; we need to step over it, but permanent
2309 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2310 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2311 {
af48d08f
PA
2312 if (sig != GDB_SIGNAL_0)
2313 {
2314 /* We have a signal to pass to the inferior. The resume
2315 may, or may not take us to the signal handler. If this
2316 is a step, we'll need to stop in the signal handler, if
2317 there's one, (if the target supports stepping into
2318 handlers), or in the next mainline instruction, if
2319 there's no handler. If this is a continue, we need to be
2320 sure to run the handler with all breakpoints inserted.
2321 In all cases, set a breakpoint at the current address
2322 (where the handler returns to), and once that breakpoint
2323 is hit, resume skipping the permanent breakpoint. If
2324 that breakpoint isn't hit, then we've stepped into the
2325 signal handler (or hit some other event). We'll delete
2326 the step-resume breakpoint then. */
2327
2328 if (debug_infrun)
2329 fprintf_unfiltered (gdb_stdlog,
2330 "infrun: resume: skipping permanent breakpoint, "
2331 "deliver signal first\n");
2332
2333 clear_step_over_info ();
2334 tp->control.trap_expected = 0;
2335
2336 if (tp->control.step_resume_breakpoint == NULL)
2337 {
2338 /* Set a "high-priority" step-resume, as we don't want
2339 user breakpoints at PC to trigger (again) when this
2340 hits. */
2341 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2342 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2343
2344 tp->step_after_step_resume_breakpoint = step;
2345 }
2346
2347 insert_breakpoints ();
2348 }
2349 else
2350 {
2351 /* There's no signal to pass, we can go ahead and skip the
2352 permanent breakpoint manually. */
2353 if (debug_infrun)
2354 fprintf_unfiltered (gdb_stdlog,
2355 "infrun: resume: skipping permanent breakpoint\n");
2356 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2357 /* Update pc to reflect the new address from which we will
2358 execute instructions. */
2359 pc = regcache_read_pc (regcache);
2360
2361 if (step)
2362 {
2363 /* We've already advanced the PC, so the stepping part
2364 is done. Now we need to arrange for a trap to be
2365 reported to handle_inferior_event. Set a breakpoint
2366 at the current PC, and run to it. Don't update
2367 prev_pc, because if we end in
44a1ee51
PA
2368 switch_back_to_stepped_thread, we want the "expected
2369 thread advanced also" branch to be taken. IOW, we
2370 don't want this thread to step further from PC
af48d08f 2371 (overstep). */
1ac806b8 2372 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2373 insert_single_step_breakpoint (gdbarch, aspace, pc);
2374 insert_breakpoints ();
2375
fbea99ea 2376 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2377 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
372316f1 2378 tp->resumed = 1;
af48d08f
PA
2379 return;
2380 }
2381 }
6d350bb5 2382 }
c2c6d25f 2383
c1e36e3e
PA
2384 /* If we have a breakpoint to step over, make sure to do a single
2385 step only. Same if we have software watchpoints. */
2386 if (tp->control.trap_expected || bpstat_should_step ())
2387 tp->control.may_range_step = 0;
2388
237fc4c9
PA
2389 /* If enabled, step over breakpoints by executing a copy of the
2390 instruction at a different address.
2391
2392 We can't use displaced stepping when we have a signal to deliver;
2393 the comments for displaced_step_prepare explain why. The
2394 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2395 signals' explain what we do instead.
2396
2397 We can't use displaced stepping when we are waiting for vfork_done
2398 event, displaced stepping breaks the vfork child similarly as single
2399 step software breakpoint. */
3fc8eb30
PA
2400 if (tp->control.trap_expected
2401 && use_displaced_stepping (tp)
cb71640d 2402 && !step_over_info_valid_p ()
a493e3e2 2403 && sig == GDB_SIGNAL_0
74609e71 2404 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2405 {
00431a78 2406 int prepared = displaced_step_prepare (tp);
fc1cf338 2407
3fc8eb30 2408 if (prepared == 0)
d56b7306 2409 {
4d9d9d04
PA
2410 if (debug_infrun)
2411 fprintf_unfiltered (gdb_stdlog,
2412 "Got placed in step-over queue\n");
2413
2414 tp->control.trap_expected = 0;
d56b7306
VP
2415 return;
2416 }
3fc8eb30
PA
2417 else if (prepared < 0)
2418 {
2419 /* Fallback to stepping over the breakpoint in-line. */
2420
2421 if (target_is_non_stop_p ())
2422 stop_all_threads ();
2423
a01bda52 2424 set_step_over_info (regcache->aspace (),
21edc42f 2425 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2426
2427 step = maybe_software_singlestep (gdbarch, pc);
2428
2429 insert_breakpoints ();
2430 }
2431 else if (prepared > 0)
2432 {
2433 struct displaced_step_inferior_state *displaced;
99e40580 2434
3fc8eb30
PA
2435 /* Update pc to reflect the new address from which we will
2436 execute instructions due to displaced stepping. */
00431a78 2437 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2438
00431a78 2439 displaced = get_displaced_stepping_state (tp->inf);
3fc8eb30
PA
2440 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2441 displaced->step_closure);
2442 }
237fc4c9
PA
2443 }
2444
2facfe5c 2445 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2446 else if (step)
2facfe5c 2447 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2448
30852783
UW
2449 /* Currently, our software single-step implementation leads to different
2450 results than hardware single-stepping in one situation: when stepping
2451 into delivering a signal which has an associated signal handler,
2452 hardware single-step will stop at the first instruction of the handler,
2453 while software single-step will simply skip execution of the handler.
2454
2455 For now, this difference in behavior is accepted since there is no
2456 easy way to actually implement single-stepping into a signal handler
2457 without kernel support.
2458
2459 However, there is one scenario where this difference leads to follow-on
2460 problems: if we're stepping off a breakpoint by removing all breakpoints
2461 and then single-stepping. In this case, the software single-step
2462 behavior means that even if there is a *breakpoint* in the signal
2463 handler, GDB still would not stop.
2464
2465 Fortunately, we can at least fix this particular issue. We detect
2466 here the case where we are about to deliver a signal while software
2467 single-stepping with breakpoints removed. In this situation, we
2468 revert the decisions to remove all breakpoints and insert single-
2469 step breakpoints, and instead we install a step-resume breakpoint
2470 at the current address, deliver the signal without stepping, and
2471 once we arrive back at the step-resume breakpoint, actually step
2472 over the breakpoint we originally wanted to step over. */
34b7e8a6 2473 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2474 && sig != GDB_SIGNAL_0
2475 && step_over_info_valid_p ())
30852783
UW
2476 {
2477 /* If we have nested signals or a pending signal is delivered
2478 immediately after a handler returns, might might already have
2479 a step-resume breakpoint set on the earlier handler. We cannot
2480 set another step-resume breakpoint; just continue on until the
2481 original breakpoint is hit. */
2482 if (tp->control.step_resume_breakpoint == NULL)
2483 {
2c03e5be 2484 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2485 tp->step_after_step_resume_breakpoint = 1;
2486 }
2487
34b7e8a6 2488 delete_single_step_breakpoints (tp);
30852783 2489
31e77af2 2490 clear_step_over_info ();
30852783 2491 tp->control.trap_expected = 0;
31e77af2
PA
2492
2493 insert_breakpoints ();
30852783
UW
2494 }
2495
b0f16a3e
SM
2496 /* If STEP is set, it's a request to use hardware stepping
2497 facilities. But in that case, we should never
2498 use singlestep breakpoint. */
34b7e8a6 2499 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2500
fbea99ea 2501 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2502 if (tp->control.trap_expected)
b0f16a3e
SM
2503 {
2504 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2505 hit, either by single-stepping the thread with the breakpoint
2506 removed, or by displaced stepping, with the breakpoint inserted.
2507 In the former case, we need to single-step only this thread,
2508 and keep others stopped, as they can miss this breakpoint if
2509 allowed to run. That's not really a problem for displaced
2510 stepping, but, we still keep other threads stopped, in case
2511 another thread is also stopped for a breakpoint waiting for
2512 its turn in the displaced stepping queue. */
b0f16a3e
SM
2513 resume_ptid = inferior_ptid;
2514 }
fbea99ea
PA
2515 else
2516 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2517
7f5ef605
PA
2518 if (execution_direction != EXEC_REVERSE
2519 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2520 {
372316f1
PA
2521 /* There are two cases where we currently need to step a
2522 breakpoint instruction when we have a signal to deliver:
2523
2524 - See handle_signal_stop where we handle random signals that
2525 could take out us out of the stepping range. Normally, in
2526 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2527 signal handler with a breakpoint at PC, but there are cases
2528 where we should _always_ single-step, even if we have a
2529 step-resume breakpoint, like when a software watchpoint is
2530 set. Assuming single-stepping and delivering a signal at the
2531 same time would takes us to the signal handler, then we could
2532 have removed the breakpoint at PC to step over it. However,
2533 some hardware step targets (like e.g., Mac OS) can't step
2534 into signal handlers, and for those, we need to leave the
2535 breakpoint at PC inserted, as otherwise if the handler
2536 recurses and executes PC again, it'll miss the breakpoint.
2537 So we leave the breakpoint inserted anyway, but we need to
2538 record that we tried to step a breakpoint instruction, so
372316f1
PA
2539 that adjust_pc_after_break doesn't end up confused.
2540
2541 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2542 in one thread after another thread that was stepping had been
2543 momentarily paused for a step-over. When we re-resume the
2544 stepping thread, it may be resumed from that address with a
2545 breakpoint that hasn't trapped yet. Seen with
2546 gdb.threads/non-stop-fair-events.exp, on targets that don't
2547 do displaced stepping. */
2548
2549 if (debug_infrun)
2550 fprintf_unfiltered (gdb_stdlog,
2551 "infrun: resume: [%s] stepped breakpoint\n",
a068643d 2552 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2553
2554 tp->stepped_breakpoint = 1;
2555
b0f16a3e
SM
2556 /* Most targets can step a breakpoint instruction, thus
2557 executing it normally. But if this one cannot, just
2558 continue and we will hit it anyway. */
7f5ef605 2559 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2560 step = 0;
2561 }
ef5cf84e 2562
b0f16a3e 2563 if (debug_displaced
cb71640d 2564 && tp->control.trap_expected
3fc8eb30 2565 && use_displaced_stepping (tp)
cb71640d 2566 && !step_over_info_valid_p ())
b0f16a3e 2567 {
00431a78 2568 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2569 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2570 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2571 gdb_byte buf[4];
2572
2573 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2574 paddress (resume_gdbarch, actual_pc));
2575 read_memory (actual_pc, buf, sizeof (buf));
2576 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2577 }
237fc4c9 2578
b0f16a3e
SM
2579 if (tp->control.may_range_step)
2580 {
2581 /* If we're resuming a thread with the PC out of the step
2582 range, then we're doing some nested/finer run control
2583 operation, like stepping the thread out of the dynamic
2584 linker or the displaced stepping scratch pad. We
2585 shouldn't have allowed a range step then. */
2586 gdb_assert (pc_in_thread_step_range (pc, tp));
2587 }
c1e36e3e 2588
64ce06e4 2589 do_target_resume (resume_ptid, step, sig);
372316f1 2590 tp->resumed = 1;
c906108c 2591}
71d378ae
PA
2592
2593/* Resume the inferior. SIG is the signal to give the inferior
2594 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2595 rolls back state on error. */
2596
aff4e175 2597static void
71d378ae
PA
2598resume (gdb_signal sig)
2599{
a70b8144 2600 try
71d378ae
PA
2601 {
2602 resume_1 (sig);
2603 }
230d2906 2604 catch (const gdb_exception &ex)
71d378ae
PA
2605 {
2606 /* If resuming is being aborted for any reason, delete any
2607 single-step breakpoint resume_1 may have created, to avoid
2608 confusing the following resumption, and to avoid leaving
2609 single-step breakpoints perturbing other threads, in case
2610 we're running in non-stop mode. */
2611 if (inferior_ptid != null_ptid)
2612 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2613 throw;
71d378ae 2614 }
71d378ae
PA
2615}
2616
c906108c 2617\f
237fc4c9 2618/* Proceeding. */
c906108c 2619
4c2f2a79
PA
2620/* See infrun.h. */
2621
2622/* Counter that tracks number of user visible stops. This can be used
2623 to tell whether a command has proceeded the inferior past the
2624 current location. This allows e.g., inferior function calls in
2625 breakpoint commands to not interrupt the command list. When the
2626 call finishes successfully, the inferior is standing at the same
2627 breakpoint as if nothing happened (and so we don't call
2628 normal_stop). */
2629static ULONGEST current_stop_id;
2630
2631/* See infrun.h. */
2632
2633ULONGEST
2634get_stop_id (void)
2635{
2636 return current_stop_id;
2637}
2638
2639/* Called when we report a user visible stop. */
2640
2641static void
2642new_stop_id (void)
2643{
2644 current_stop_id++;
2645}
2646
c906108c
SS
2647/* Clear out all variables saying what to do when inferior is continued.
2648 First do this, then set the ones you want, then call `proceed'. */
2649
a7212384
UW
2650static void
2651clear_proceed_status_thread (struct thread_info *tp)
c906108c 2652{
a7212384
UW
2653 if (debug_infrun)
2654 fprintf_unfiltered (gdb_stdlog,
2655 "infrun: clear_proceed_status_thread (%s)\n",
a068643d 2656 target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2657
372316f1
PA
2658 /* If we're starting a new sequence, then the previous finished
2659 single-step is no longer relevant. */
2660 if (tp->suspend.waitstatus_pending_p)
2661 {
2662 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2663 {
2664 if (debug_infrun)
2665 fprintf_unfiltered (gdb_stdlog,
2666 "infrun: clear_proceed_status: pending "
2667 "event of %s was a finished step. "
2668 "Discarding.\n",
a068643d 2669 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2670
2671 tp->suspend.waitstatus_pending_p = 0;
2672 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2673 }
2674 else if (debug_infrun)
2675 {
23fdd69e
SM
2676 std::string statstr
2677 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2678
372316f1
PA
2679 fprintf_unfiltered (gdb_stdlog,
2680 "infrun: clear_proceed_status_thread: thread %s "
2681 "has pending wait status %s "
2682 "(currently_stepping=%d).\n",
a068643d
TT
2683 target_pid_to_str (tp->ptid).c_str (),
2684 statstr.c_str (),
372316f1 2685 currently_stepping (tp));
372316f1
PA
2686 }
2687 }
2688
70509625
PA
2689 /* If this signal should not be seen by program, give it zero.
2690 Used for debugging signals. */
2691 if (!signal_pass_state (tp->suspend.stop_signal))
2692 tp->suspend.stop_signal = GDB_SIGNAL_0;
2693
46e3ed7f 2694 delete tp->thread_fsm;
243a9253
PA
2695 tp->thread_fsm = NULL;
2696
16c381f0
JK
2697 tp->control.trap_expected = 0;
2698 tp->control.step_range_start = 0;
2699 tp->control.step_range_end = 0;
c1e36e3e 2700 tp->control.may_range_step = 0;
16c381f0
JK
2701 tp->control.step_frame_id = null_frame_id;
2702 tp->control.step_stack_frame_id = null_frame_id;
2703 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2704 tp->control.step_start_function = NULL;
a7212384 2705 tp->stop_requested = 0;
4e1c45ea 2706
16c381f0 2707 tp->control.stop_step = 0;
32400beb 2708
16c381f0 2709 tp->control.proceed_to_finish = 0;
414c69f7 2710
856e7dd6 2711 tp->control.stepping_command = 0;
17b2616c 2712
a7212384 2713 /* Discard any remaining commands or status from previous stop. */
16c381f0 2714 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2715}
32400beb 2716
a7212384 2717void
70509625 2718clear_proceed_status (int step)
a7212384 2719{
f2665db5
MM
2720 /* With scheduler-locking replay, stop replaying other threads if we're
2721 not replaying the user-visible resume ptid.
2722
2723 This is a convenience feature to not require the user to explicitly
2724 stop replaying the other threads. We're assuming that the user's
2725 intent is to resume tracing the recorded process. */
2726 if (!non_stop && scheduler_mode == schedlock_replay
2727 && target_record_is_replaying (minus_one_ptid)
2728 && !target_record_will_replay (user_visible_resume_ptid (step),
2729 execution_direction))
2730 target_record_stop_replaying ();
2731
08036331 2732 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2733 {
08036331 2734 ptid_t resume_ptid = user_visible_resume_ptid (step);
70509625
PA
2735
2736 /* In all-stop mode, delete the per-thread status of all threads
2737 we're about to resume, implicitly and explicitly. */
08036331
PA
2738 for (thread_info *tp : all_non_exited_threads (resume_ptid))
2739 clear_proceed_status_thread (tp);
6c95b8df
PA
2740 }
2741
d7e15655 2742 if (inferior_ptid != null_ptid)
a7212384
UW
2743 {
2744 struct inferior *inferior;
2745
2746 if (non_stop)
2747 {
6c95b8df
PA
2748 /* If in non-stop mode, only delete the per-thread status of
2749 the current thread. */
a7212384
UW
2750 clear_proceed_status_thread (inferior_thread ());
2751 }
6c95b8df 2752
d6b48e9c 2753 inferior = current_inferior ();
16c381f0 2754 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2755 }
2756
76727919 2757 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2758}
2759
99619bea
PA
2760/* Returns true if TP is still stopped at a breakpoint that needs
2761 stepping-over in order to make progress. If the breakpoint is gone
2762 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2763
2764static int
6c4cfb24 2765thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2766{
2767 if (tp->stepping_over_breakpoint)
2768 {
00431a78 2769 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2770
a01bda52 2771 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2772 regcache_read_pc (regcache))
2773 == ordinary_breakpoint_here)
99619bea
PA
2774 return 1;
2775
2776 tp->stepping_over_breakpoint = 0;
2777 }
2778
2779 return 0;
2780}
2781
6c4cfb24
PA
2782/* Check whether thread TP still needs to start a step-over in order
2783 to make progress when resumed. Returns an bitwise or of enum
2784 step_over_what bits, indicating what needs to be stepped over. */
2785
8d297bbf 2786static step_over_what
6c4cfb24
PA
2787thread_still_needs_step_over (struct thread_info *tp)
2788{
8d297bbf 2789 step_over_what what = 0;
6c4cfb24
PA
2790
2791 if (thread_still_needs_step_over_bp (tp))
2792 what |= STEP_OVER_BREAKPOINT;
2793
2794 if (tp->stepping_over_watchpoint
2795 && !target_have_steppable_watchpoint)
2796 what |= STEP_OVER_WATCHPOINT;
2797
2798 return what;
2799}
2800
483805cf
PA
2801/* Returns true if scheduler locking applies. STEP indicates whether
2802 we're about to do a step/next-like command to a thread. */
2803
2804static int
856e7dd6 2805schedlock_applies (struct thread_info *tp)
483805cf
PA
2806{
2807 return (scheduler_mode == schedlock_on
2808 || (scheduler_mode == schedlock_step
f2665db5
MM
2809 && tp->control.stepping_command)
2810 || (scheduler_mode == schedlock_replay
2811 && target_record_will_replay (minus_one_ptid,
2812 execution_direction)));
483805cf
PA
2813}
2814
c906108c
SS
2815/* Basic routine for continuing the program in various fashions.
2816
2817 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2818 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2819 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2820
2821 You should call clear_proceed_status before calling proceed. */
2822
2823void
64ce06e4 2824proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2825{
e58b0e63
PA
2826 struct regcache *regcache;
2827 struct gdbarch *gdbarch;
e58b0e63 2828 CORE_ADDR pc;
4d9d9d04
PA
2829 ptid_t resume_ptid;
2830 struct execution_control_state ecss;
2831 struct execution_control_state *ecs = &ecss;
4d9d9d04 2832 int started;
c906108c 2833
e58b0e63
PA
2834 /* If we're stopped at a fork/vfork, follow the branch set by the
2835 "set follow-fork-mode" command; otherwise, we'll just proceed
2836 resuming the current thread. */
2837 if (!follow_fork ())
2838 {
2839 /* The target for some reason decided not to resume. */
2840 normal_stop ();
f148b27e
PA
2841 if (target_can_async_p ())
2842 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
e58b0e63
PA
2843 return;
2844 }
2845
842951eb
PA
2846 /* We'll update this if & when we switch to a new thread. */
2847 previous_inferior_ptid = inferior_ptid;
2848
e58b0e63 2849 regcache = get_current_regcache ();
ac7936df 2850 gdbarch = regcache->arch ();
8b86c959
YQ
2851 const address_space *aspace = regcache->aspace ();
2852
e58b0e63 2853 pc = regcache_read_pc (regcache);
08036331 2854 thread_info *cur_thr = inferior_thread ();
e58b0e63 2855
99619bea 2856 /* Fill in with reasonable starting values. */
08036331 2857 init_thread_stepping_state (cur_thr);
99619bea 2858
08036331 2859 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2860
2acceee2 2861 if (addr == (CORE_ADDR) -1)
c906108c 2862 {
08036331 2863 if (pc == cur_thr->suspend.stop_pc
af48d08f 2864 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2865 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2866 /* There is a breakpoint at the address we will resume at,
2867 step one instruction before inserting breakpoints so that
2868 we do not stop right away (and report a second hit at this
b2175913
MS
2869 breakpoint).
2870
2871 Note, we don't do this in reverse, because we won't
2872 actually be executing the breakpoint insn anyway.
2873 We'll be (un-)executing the previous instruction. */
08036331 2874 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
2875 else if (gdbarch_single_step_through_delay_p (gdbarch)
2876 && gdbarch_single_step_through_delay (gdbarch,
2877 get_current_frame ()))
3352ef37
AC
2878 /* We stepped onto an instruction that needs to be stepped
2879 again before re-inserting the breakpoint, do so. */
08036331 2880 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
2881 }
2882 else
2883 {
515630c5 2884 regcache_write_pc (regcache, addr);
c906108c
SS
2885 }
2886
70509625 2887 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 2888 cur_thr->suspend.stop_signal = siggnal;
70509625 2889
08036331 2890 resume_ptid = user_visible_resume_ptid (cur_thr->control.stepping_command);
4d9d9d04
PA
2891
2892 /* If an exception is thrown from this point on, make sure to
2893 propagate GDB's knowledge of the executing state to the
2894 frontend/user running state. */
731f534f 2895 scoped_finish_thread_state finish_state (resume_ptid);
4d9d9d04
PA
2896
2897 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2898 threads (e.g., we might need to set threads stepping over
2899 breakpoints first), from the user/frontend's point of view, all
2900 threads in RESUME_PTID are now running. Unless we're calling an
2901 inferior function, as in that case we pretend the inferior
2902 doesn't run at all. */
08036331 2903 if (!cur_thr->control.in_infcall)
4d9d9d04 2904 set_running (resume_ptid, 1);
17b2616c 2905
527159b7 2906 if (debug_infrun)
8a9de0e4 2907 fprintf_unfiltered (gdb_stdlog,
64ce06e4 2908 "infrun: proceed (addr=%s, signal=%s)\n",
c9737c08 2909 paddress (gdbarch, addr),
64ce06e4 2910 gdb_signal_to_symbol_string (siggnal));
527159b7 2911
4d9d9d04
PA
2912 annotate_starting ();
2913
2914 /* Make sure that output from GDB appears before output from the
2915 inferior. */
2916 gdb_flush (gdb_stdout);
2917
d930703d
PA
2918 /* Since we've marked the inferior running, give it the terminal. A
2919 QUIT/Ctrl-C from here on is forwarded to the target (which can
2920 still detect attempts to unblock a stuck connection with repeated
2921 Ctrl-C from within target_pass_ctrlc). */
2922 target_terminal::inferior ();
2923
4d9d9d04
PA
2924 /* In a multi-threaded task we may select another thread and
2925 then continue or step.
2926
2927 But if a thread that we're resuming had stopped at a breakpoint,
2928 it will immediately cause another breakpoint stop without any
2929 execution (i.e. it will report a breakpoint hit incorrectly). So
2930 we must step over it first.
2931
2932 Look for threads other than the current (TP) that reported a
2933 breakpoint hit and haven't been resumed yet since. */
2934
2935 /* If scheduler locking applies, we can avoid iterating over all
2936 threads. */
08036331 2937 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 2938 {
08036331
PA
2939 for (thread_info *tp : all_non_exited_threads (resume_ptid))
2940 {
f3f8ece4
PA
2941 switch_to_thread_no_regs (tp);
2942
4d9d9d04
PA
2943 /* Ignore the current thread here. It's handled
2944 afterwards. */
08036331 2945 if (tp == cur_thr)
4d9d9d04 2946 continue;
c906108c 2947
4d9d9d04
PA
2948 if (!thread_still_needs_step_over (tp))
2949 continue;
2950
2951 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 2952
99619bea
PA
2953 if (debug_infrun)
2954 fprintf_unfiltered (gdb_stdlog,
2955 "infrun: need to step-over [%s] first\n",
a068643d 2956 target_pid_to_str (tp->ptid).c_str ());
99619bea 2957
4d9d9d04 2958 thread_step_over_chain_enqueue (tp);
2adfaa28 2959 }
f3f8ece4
PA
2960
2961 switch_to_thread (cur_thr);
30852783
UW
2962 }
2963
4d9d9d04
PA
2964 /* Enqueue the current thread last, so that we move all other
2965 threads over their breakpoints first. */
08036331
PA
2966 if (cur_thr->stepping_over_breakpoint)
2967 thread_step_over_chain_enqueue (cur_thr);
30852783 2968
4d9d9d04
PA
2969 /* If the thread isn't started, we'll still need to set its prev_pc,
2970 so that switch_back_to_stepped_thread knows the thread hasn't
2971 advanced. Must do this before resuming any thread, as in
2972 all-stop/remote, once we resume we can't send any other packet
2973 until the target stops again. */
08036331 2974 cur_thr->prev_pc = regcache_read_pc (regcache);
99619bea 2975
a9bc57b9
TT
2976 {
2977 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 2978
a9bc57b9 2979 started = start_step_over ();
c906108c 2980
a9bc57b9
TT
2981 if (step_over_info_valid_p ())
2982 {
2983 /* Either this thread started a new in-line step over, or some
2984 other thread was already doing one. In either case, don't
2985 resume anything else until the step-over is finished. */
2986 }
2987 else if (started && !target_is_non_stop_p ())
2988 {
2989 /* A new displaced stepping sequence was started. In all-stop,
2990 we can't talk to the target anymore until it next stops. */
2991 }
2992 else if (!non_stop && target_is_non_stop_p ())
2993 {
2994 /* In all-stop, but the target is always in non-stop mode.
2995 Start all other threads that are implicitly resumed too. */
08036331 2996 for (thread_info *tp : all_non_exited_threads (resume_ptid))
fbea99ea 2997 {
f3f8ece4
PA
2998 switch_to_thread_no_regs (tp);
2999
fbea99ea
PA
3000 if (tp->resumed)
3001 {
3002 if (debug_infrun)
3003 fprintf_unfiltered (gdb_stdlog,
3004 "infrun: proceed: [%s] resumed\n",
a068643d 3005 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3006 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3007 continue;
3008 }
3009
3010 if (thread_is_in_step_over_chain (tp))
3011 {
3012 if (debug_infrun)
3013 fprintf_unfiltered (gdb_stdlog,
3014 "infrun: proceed: [%s] needs step-over\n",
a068643d 3015 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3016 continue;
3017 }
3018
3019 if (debug_infrun)
3020 fprintf_unfiltered (gdb_stdlog,
3021 "infrun: proceed: resuming %s\n",
a068643d 3022 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3023
3024 reset_ecs (ecs, tp);
00431a78 3025 switch_to_thread (tp);
fbea99ea
PA
3026 keep_going_pass_signal (ecs);
3027 if (!ecs->wait_some_more)
fd7dcb94 3028 error (_("Command aborted."));
fbea99ea 3029 }
a9bc57b9 3030 }
08036331 3031 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3032 {
3033 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3034 reset_ecs (ecs, cur_thr);
3035 switch_to_thread (cur_thr);
a9bc57b9
TT
3036 keep_going_pass_signal (ecs);
3037 if (!ecs->wait_some_more)
3038 error (_("Command aborted."));
3039 }
3040 }
c906108c 3041
85ad3aaf
PA
3042 target_commit_resume ();
3043
731f534f 3044 finish_state.release ();
c906108c 3045
873657b9
PA
3046 /* If we've switched threads above, switch back to the previously
3047 current thread. We don't want the user to see a different
3048 selected thread. */
3049 switch_to_thread (cur_thr);
3050
0b333c5e
PA
3051 /* Tell the event loop to wait for it to stop. If the target
3052 supports asynchronous execution, it'll do this from within
3053 target_resume. */
362646f5 3054 if (!target_can_async_p ())
0b333c5e 3055 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3056}
c906108c
SS
3057\f
3058
3059/* Start remote-debugging of a machine over a serial link. */
96baa820 3060
c906108c 3061void
8621d6a9 3062start_remote (int from_tty)
c906108c 3063{
d6b48e9c 3064 struct inferior *inferior;
d6b48e9c
PA
3065
3066 inferior = current_inferior ();
16c381f0 3067 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3068
1777feb0 3069 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3070 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3071 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3072 nothing is returned (instead of just blocking). Because of this,
3073 targets expecting an immediate response need to, internally, set
3074 things up so that the target_wait() is forced to eventually
1777feb0 3075 timeout. */
6426a772
JM
3076 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3077 differentiate to its caller what the state of the target is after
3078 the initial open has been performed. Here we're assuming that
3079 the target has stopped. It should be possible to eventually have
3080 target_open() return to the caller an indication that the target
3081 is currently running and GDB state should be set to the same as
1777feb0 3082 for an async run. */
e4c8541f 3083 wait_for_inferior ();
8621d6a9
DJ
3084
3085 /* Now that the inferior has stopped, do any bookkeeping like
3086 loading shared libraries. We want to do this before normal_stop,
3087 so that the displayed frame is up to date. */
8b88a78e 3088 post_create_inferior (current_top_target (), from_tty);
8621d6a9 3089
6426a772 3090 normal_stop ();
c906108c
SS
3091}
3092
3093/* Initialize static vars when a new inferior begins. */
3094
3095void
96baa820 3096init_wait_for_inferior (void)
c906108c
SS
3097{
3098 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3099
c906108c
SS
3100 breakpoint_init_inferior (inf_starting);
3101
70509625 3102 clear_proceed_status (0);
9f976b41 3103
ab1ddbcf 3104 nullify_last_target_wait_ptid ();
237fc4c9 3105
842951eb 3106 previous_inferior_ptid = inferior_ptid;
c906108c 3107}
237fc4c9 3108
c906108c 3109\f
488f131b 3110
ec9499be 3111static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3112
568d6575
UW
3113static void handle_step_into_function (struct gdbarch *gdbarch,
3114 struct execution_control_state *ecs);
3115static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3116 struct execution_control_state *ecs);
4f5d7f63 3117static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3118static void check_exception_resume (struct execution_control_state *,
28106bc2 3119 struct frame_info *);
611c83ae 3120
bdc36728 3121static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3122static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3123static void keep_going (struct execution_control_state *ecs);
94c57d6a 3124static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3125static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3126
252fbfc8
PA
3127/* This function is attached as a "thread_stop_requested" observer.
3128 Cleanup local state that assumed the PTID was to be resumed, and
3129 report the stop to the frontend. */
3130
2c0b251b 3131static void
252fbfc8
PA
3132infrun_thread_stop_requested (ptid_t ptid)
3133{
c65d6b55
PA
3134 /* PTID was requested to stop. If the thread was already stopped,
3135 but the user/frontend doesn't know about that yet (e.g., the
3136 thread had been temporarily paused for some step-over), set up
3137 for reporting the stop now. */
08036331
PA
3138 for (thread_info *tp : all_threads (ptid))
3139 {
3140 if (tp->state != THREAD_RUNNING)
3141 continue;
3142 if (tp->executing)
3143 continue;
c65d6b55 3144
08036331
PA
3145 /* Remove matching threads from the step-over queue, so
3146 start_step_over doesn't try to resume them
3147 automatically. */
3148 if (thread_is_in_step_over_chain (tp))
3149 thread_step_over_chain_remove (tp);
c65d6b55 3150
08036331
PA
3151 /* If the thread is stopped, but the user/frontend doesn't
3152 know about that yet, queue a pending event, as if the
3153 thread had just stopped now. Unless the thread already had
3154 a pending event. */
3155 if (!tp->suspend.waitstatus_pending_p)
3156 {
3157 tp->suspend.waitstatus_pending_p = 1;
3158 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3159 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3160 }
c65d6b55 3161
08036331
PA
3162 /* Clear the inline-frame state, since we're re-processing the
3163 stop. */
3164 clear_inline_frame_state (tp->ptid);
c65d6b55 3165
08036331
PA
3166 /* If this thread was paused because some other thread was
3167 doing an inline-step over, let that finish first. Once
3168 that happens, we'll restart all threads and consume pending
3169 stop events then. */
3170 if (step_over_info_valid_p ())
3171 continue;
3172
3173 /* Otherwise we can process the (new) pending event now. Set
3174 it so this pending event is considered by
3175 do_target_wait. */
3176 tp->resumed = 1;
3177 }
252fbfc8
PA
3178}
3179
a07daef3
PA
3180static void
3181infrun_thread_thread_exit (struct thread_info *tp, int silent)
3182{
d7e15655 3183 if (target_last_wait_ptid == tp->ptid)
a07daef3
PA
3184 nullify_last_target_wait_ptid ();
3185}
3186
0cbcdb96
PA
3187/* Delete the step resume, single-step and longjmp/exception resume
3188 breakpoints of TP. */
4e1c45ea 3189
0cbcdb96
PA
3190static void
3191delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3192{
0cbcdb96
PA
3193 delete_step_resume_breakpoint (tp);
3194 delete_exception_resume_breakpoint (tp);
34b7e8a6 3195 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3196}
3197
0cbcdb96
PA
3198/* If the target still has execution, call FUNC for each thread that
3199 just stopped. In all-stop, that's all the non-exited threads; in
3200 non-stop, that's the current thread, only. */
3201
3202typedef void (*for_each_just_stopped_thread_callback_func)
3203 (struct thread_info *tp);
4e1c45ea
PA
3204
3205static void
0cbcdb96 3206for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3207{
d7e15655 3208 if (!target_has_execution || inferior_ptid == null_ptid)
4e1c45ea
PA
3209 return;
3210
fbea99ea 3211 if (target_is_non_stop_p ())
4e1c45ea 3212 {
0cbcdb96
PA
3213 /* If in non-stop mode, only the current thread stopped. */
3214 func (inferior_thread ());
4e1c45ea
PA
3215 }
3216 else
0cbcdb96 3217 {
0cbcdb96 3218 /* In all-stop mode, all threads have stopped. */
08036331
PA
3219 for (thread_info *tp : all_non_exited_threads ())
3220 func (tp);
0cbcdb96
PA
3221 }
3222}
3223
3224/* Delete the step resume and longjmp/exception resume breakpoints of
3225 the threads that just stopped. */
3226
3227static void
3228delete_just_stopped_threads_infrun_breakpoints (void)
3229{
3230 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3231}
3232
3233/* Delete the single-step breakpoints of the threads that just
3234 stopped. */
7c16b83e 3235
34b7e8a6
PA
3236static void
3237delete_just_stopped_threads_single_step_breakpoints (void)
3238{
3239 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3240}
3241
221e1a37 3242/* See infrun.h. */
223698f8 3243
221e1a37 3244void
223698f8
DE
3245print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3246 const struct target_waitstatus *ws)
3247{
23fdd69e 3248 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3249 string_file stb;
223698f8
DE
3250
3251 /* The text is split over several lines because it was getting too long.
3252 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3253 output as a unit; we want only one timestamp printed if debug_timestamp
3254 is set. */
3255
d7e74731 3256 stb.printf ("infrun: target_wait (%d.%ld.%ld",
e99b03dc 3257 waiton_ptid.pid (),
e38504b3 3258 waiton_ptid.lwp (),
cc6bcb54 3259 waiton_ptid.tid ());
e99b03dc 3260 if (waiton_ptid.pid () != -1)
a068643d 3261 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731
PA
3262 stb.printf (", status) =\n");
3263 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
e99b03dc 3264 result_ptid.pid (),
e38504b3 3265 result_ptid.lwp (),
cc6bcb54 3266 result_ptid.tid (),
a068643d 3267 target_pid_to_str (result_ptid).c_str ());
23fdd69e 3268 stb.printf ("infrun: %s\n", status_string.c_str ());
223698f8
DE
3269
3270 /* This uses %s in part to handle %'s in the text, but also to avoid
3271 a gcc error: the format attribute requires a string literal. */
d7e74731 3272 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3273}
3274
372316f1
PA
3275/* Select a thread at random, out of those which are resumed and have
3276 had events. */
3277
3278static struct thread_info *
3279random_pending_event_thread (ptid_t waiton_ptid)
3280{
372316f1 3281 int num_events = 0;
08036331
PA
3282
3283 auto has_event = [] (thread_info *tp)
3284 {
3285 return (tp->resumed
3286 && tp->suspend.waitstatus_pending_p);
3287 };
372316f1
PA
3288
3289 /* First see how many events we have. Count only resumed threads
3290 that have an event pending. */
08036331
PA
3291 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3292 if (has_event (tp))
372316f1
PA
3293 num_events++;
3294
3295 if (num_events == 0)
3296 return NULL;
3297
3298 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3299 int random_selector = (int) ((num_events * (double) rand ())
3300 / (RAND_MAX + 1.0));
372316f1
PA
3301
3302 if (debug_infrun && num_events > 1)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "infrun: Found %d events, selecting #%d\n",
3305 num_events, random_selector);
3306
3307 /* Select the Nth thread that has had an event. */
08036331
PA
3308 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3309 if (has_event (tp))
372316f1 3310 if (random_selector-- == 0)
08036331 3311 return tp;
372316f1 3312
08036331 3313 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3314}
3315
3316/* Wrapper for target_wait that first checks whether threads have
3317 pending statuses to report before actually asking the target for
3318 more events. */
3319
3320static ptid_t
3321do_target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
3322{
3323 ptid_t event_ptid;
3324 struct thread_info *tp;
3325
3326 /* First check if there is a resumed thread with a wait status
3327 pending. */
d7e15655 3328 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1
PA
3329 {
3330 tp = random_pending_event_thread (ptid);
3331 }
3332 else
3333 {
3334 if (debug_infrun)
3335 fprintf_unfiltered (gdb_stdlog,
3336 "infrun: Waiting for specific thread %s.\n",
a068643d 3337 target_pid_to_str (ptid).c_str ());
372316f1
PA
3338
3339 /* We have a specific thread to check. */
3340 tp = find_thread_ptid (ptid);
3341 gdb_assert (tp != NULL);
3342 if (!tp->suspend.waitstatus_pending_p)
3343 tp = NULL;
3344 }
3345
3346 if (tp != NULL
3347 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3348 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3349 {
00431a78 3350 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3351 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3352 CORE_ADDR pc;
3353 int discard = 0;
3354
3355 pc = regcache_read_pc (regcache);
3356
3357 if (pc != tp->suspend.stop_pc)
3358 {
3359 if (debug_infrun)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "infrun: PC of %s changed. was=%s, now=%s\n",
a068643d 3362 target_pid_to_str (tp->ptid).c_str (),
defd2172 3363 paddress (gdbarch, tp->suspend.stop_pc),
372316f1
PA
3364 paddress (gdbarch, pc));
3365 discard = 1;
3366 }
a01bda52 3367 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1
PA
3368 {
3369 if (debug_infrun)
3370 fprintf_unfiltered (gdb_stdlog,
3371 "infrun: previous breakpoint of %s, at %s gone\n",
a068643d 3372 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
3373 paddress (gdbarch, pc));
3374
3375 discard = 1;
3376 }
3377
3378 if (discard)
3379 {
3380 if (debug_infrun)
3381 fprintf_unfiltered (gdb_stdlog,
3382 "infrun: pending event of %s cancelled.\n",
a068643d 3383 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3384
3385 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3386 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3387 }
3388 }
3389
3390 if (tp != NULL)
3391 {
3392 if (debug_infrun)
3393 {
23fdd69e
SM
3394 std::string statstr
3395 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 3396
372316f1
PA
3397 fprintf_unfiltered (gdb_stdlog,
3398 "infrun: Using pending wait status %s for %s.\n",
23fdd69e 3399 statstr.c_str (),
a068643d 3400 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3401 }
3402
3403 /* Now that we've selected our final event LWP, un-adjust its PC
3404 if it was a software breakpoint (and the target doesn't
3405 always adjust the PC itself). */
3406 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3407 && !target_supports_stopped_by_sw_breakpoint ())
3408 {
3409 struct regcache *regcache;
3410 struct gdbarch *gdbarch;
3411 int decr_pc;
3412
00431a78 3413 regcache = get_thread_regcache (tp);
ac7936df 3414 gdbarch = regcache->arch ();
372316f1
PA
3415
3416 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3417 if (decr_pc != 0)
3418 {
3419 CORE_ADDR pc;
3420
3421 pc = regcache_read_pc (regcache);
3422 regcache_write_pc (regcache, pc + decr_pc);
3423 }
3424 }
3425
3426 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3427 *status = tp->suspend.waitstatus;
3428 tp->suspend.waitstatus_pending_p = 0;
3429
3430 /* Wake up the event loop again, until all pending events are
3431 processed. */
3432 if (target_is_async_p ())
3433 mark_async_event_handler (infrun_async_inferior_event_token);
3434 return tp->ptid;
3435 }
3436
3437 /* But if we don't find one, we'll have to wait. */
3438
3439 if (deprecated_target_wait_hook)
3440 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3441 else
3442 event_ptid = target_wait (ptid, status, options);
3443
3444 return event_ptid;
3445}
3446
24291992
PA
3447/* Prepare and stabilize the inferior for detaching it. E.g.,
3448 detaching while a thread is displaced stepping is a recipe for
3449 crashing it, as nothing would readjust the PC out of the scratch
3450 pad. */
3451
3452void
3453prepare_for_detach (void)
3454{
3455 struct inferior *inf = current_inferior ();
f2907e49 3456 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3457
00431a78 3458 displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);
24291992
PA
3459
3460 /* Is any thread of this process displaced stepping? If not,
3461 there's nothing else to do. */
d20172fc 3462 if (displaced->step_thread == nullptr)
24291992
PA
3463 return;
3464
3465 if (debug_infrun)
3466 fprintf_unfiltered (gdb_stdlog,
3467 "displaced-stepping in-process while detaching");
3468
9bcb1f16 3469 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3470
00431a78 3471 while (displaced->step_thread != nullptr)
24291992 3472 {
24291992
PA
3473 struct execution_control_state ecss;
3474 struct execution_control_state *ecs;
3475
3476 ecs = &ecss;
3477 memset (ecs, 0, sizeof (*ecs));
3478
3479 overlay_cache_invalid = 1;
f15cb84a
YQ
3480 /* Flush target cache before starting to handle each event.
3481 Target was running and cache could be stale. This is just a
3482 heuristic. Running threads may modify target memory, but we
3483 don't get any event. */
3484 target_dcache_invalidate ();
24291992 3485
372316f1 3486 ecs->ptid = do_target_wait (pid_ptid, &ecs->ws, 0);
24291992
PA
3487
3488 if (debug_infrun)
3489 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3490
3491 /* If an error happens while handling the event, propagate GDB's
3492 knowledge of the executing state to the frontend/user running
3493 state. */
731f534f 3494 scoped_finish_thread_state finish_state (minus_one_ptid);
24291992
PA
3495
3496 /* Now figure out what to do with the result of the result. */
3497 handle_inferior_event (ecs);
3498
3499 /* No error, don't finish the state yet. */
731f534f 3500 finish_state.release ();
24291992
PA
3501
3502 /* Breakpoints and watchpoints are not installed on the target
3503 at this point, and signals are passed directly to the
3504 inferior, so this must mean the process is gone. */
3505 if (!ecs->wait_some_more)
3506 {
9bcb1f16 3507 restore_detaching.release ();
24291992
PA
3508 error (_("Program exited while detaching"));
3509 }
3510 }
3511
9bcb1f16 3512 restore_detaching.release ();
24291992
PA
3513}
3514
cd0fc7c3 3515/* Wait for control to return from inferior to debugger.
ae123ec6 3516
cd0fc7c3
SS
3517 If inferior gets a signal, we may decide to start it up again
3518 instead of returning. That is why there is a loop in this function.
3519 When this function actually returns it means the inferior
3520 should be left stopped and GDB should read more commands. */
3521
3522void
e4c8541f 3523wait_for_inferior (void)
cd0fc7c3 3524{
527159b7 3525 if (debug_infrun)
ae123ec6 3526 fprintf_unfiltered
e4c8541f 3527 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
527159b7 3528
4c41382a 3529 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3530
e6f5c25b
PA
3531 /* If an error happens while handling the event, propagate GDB's
3532 knowledge of the executing state to the frontend/user running
3533 state. */
731f534f 3534 scoped_finish_thread_state finish_state (minus_one_ptid);
e6f5c25b 3535
c906108c
SS
3536 while (1)
3537 {
ae25568b
PA
3538 struct execution_control_state ecss;
3539 struct execution_control_state *ecs = &ecss;
963f9c80 3540 ptid_t waiton_ptid = minus_one_ptid;
29f49a6a 3541
ae25568b
PA
3542 memset (ecs, 0, sizeof (*ecs));
3543
ec9499be 3544 overlay_cache_invalid = 1;
ec9499be 3545
f15cb84a
YQ
3546 /* Flush target cache before starting to handle each event.
3547 Target was running and cache could be stale. This is just a
3548 heuristic. Running threads may modify target memory, but we
3549 don't get any event. */
3550 target_dcache_invalidate ();
3551
372316f1 3552 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, 0);
c906108c 3553
f00150c9 3554 if (debug_infrun)
223698f8 3555 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
f00150c9 3556
cd0fc7c3
SS
3557 /* Now figure out what to do with the result of the result. */
3558 handle_inferior_event (ecs);
c906108c 3559
cd0fc7c3
SS
3560 if (!ecs->wait_some_more)
3561 break;
3562 }
4e1c45ea 3563
e6f5c25b 3564 /* No error, don't finish the state yet. */
731f534f 3565 finish_state.release ();
cd0fc7c3 3566}
c906108c 3567
d3d4baed
PA
3568/* Cleanup that reinstalls the readline callback handler, if the
3569 target is running in the background. If while handling the target
3570 event something triggered a secondary prompt, like e.g., a
3571 pagination prompt, we'll have removed the callback handler (see
3572 gdb_readline_wrapper_line). Need to do this as we go back to the
3573 event loop, ready to process further input. Note this has no
3574 effect if the handler hasn't actually been removed, because calling
3575 rl_callback_handler_install resets the line buffer, thus losing
3576 input. */
3577
3578static void
d238133d 3579reinstall_readline_callback_handler_cleanup ()
d3d4baed 3580{
3b12939d
PA
3581 struct ui *ui = current_ui;
3582
3583 if (!ui->async)
6c400b59
PA
3584 {
3585 /* We're not going back to the top level event loop yet. Don't
3586 install the readline callback, as it'd prep the terminal,
3587 readline-style (raw, noecho) (e.g., --batch). We'll install
3588 it the next time the prompt is displayed, when we're ready
3589 for input. */
3590 return;
3591 }
3592
3b12939d 3593 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3594 gdb_rl_callback_handler_reinstall ();
3595}
3596
243a9253
PA
3597/* Clean up the FSMs of threads that are now stopped. In non-stop,
3598 that's just the event thread. In all-stop, that's all threads. */
3599
3600static void
3601clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3602{
08036331
PA
3603 if (ecs->event_thread != NULL
3604 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3605 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3606
3607 if (!non_stop)
3608 {
08036331 3609 for (thread_info *thr : all_non_exited_threads ())
243a9253
PA
3610 {
3611 if (thr->thread_fsm == NULL)
3612 continue;
3613 if (thr == ecs->event_thread)
3614 continue;
3615
00431a78 3616 switch_to_thread (thr);
46e3ed7f 3617 thr->thread_fsm->clean_up (thr);
243a9253
PA
3618 }
3619
3620 if (ecs->event_thread != NULL)
00431a78 3621 switch_to_thread (ecs->event_thread);
243a9253
PA
3622 }
3623}
3624
3b12939d
PA
3625/* Helper for all_uis_check_sync_execution_done that works on the
3626 current UI. */
3627
3628static void
3629check_curr_ui_sync_execution_done (void)
3630{
3631 struct ui *ui = current_ui;
3632
3633 if (ui->prompt_state == PROMPT_NEEDED
3634 && ui->async
3635 && !gdb_in_secondary_prompt_p (ui))
3636 {
223ffa71 3637 target_terminal::ours ();
76727919 3638 gdb::observers::sync_execution_done.notify ();
3eb7562a 3639 ui_register_input_event_handler (ui);
3b12939d
PA
3640 }
3641}
3642
3643/* See infrun.h. */
3644
3645void
3646all_uis_check_sync_execution_done (void)
3647{
0e454242 3648 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3649 {
3650 check_curr_ui_sync_execution_done ();
3651 }
3652}
3653
a8836c93
PA
3654/* See infrun.h. */
3655
3656void
3657all_uis_on_sync_execution_starting (void)
3658{
0e454242 3659 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3660 {
3661 if (current_ui->prompt_state == PROMPT_NEEDED)
3662 async_disable_stdin ();
3663 }
3664}
3665
1777feb0 3666/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3667 event loop whenever a change of state is detected on the file
1777feb0
MS
3668 descriptor corresponding to the target. It can be called more than
3669 once to complete a single execution command. In such cases we need
3670 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3671 that this function is called for a single execution command, then
3672 report to the user that the inferior has stopped, and do the
1777feb0 3673 necessary cleanups. */
43ff13b4
JM
3674
3675void
fba45db2 3676fetch_inferior_event (void *client_data)
43ff13b4 3677{
0d1e5fa7 3678 struct execution_control_state ecss;
a474d7c2 3679 struct execution_control_state *ecs = &ecss;
0f641c01 3680 int cmd_done = 0;
963f9c80 3681 ptid_t waiton_ptid = minus_one_ptid;
43ff13b4 3682
0d1e5fa7
PA
3683 memset (ecs, 0, sizeof (*ecs));
3684
c61db772
PA
3685 /* Events are always processed with the main UI as current UI. This
3686 way, warnings, debug output, etc. are always consistently sent to
3687 the main console. */
4b6749b9 3688 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3689
d3d4baed 3690 /* End up with readline processing input, if necessary. */
d238133d
TT
3691 {
3692 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3693
3694 /* We're handling a live event, so make sure we're doing live
3695 debugging. If we're looking at traceframes while the target is
3696 running, we're going to need to get back to that mode after
3697 handling the event. */
3698 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3699 if (non_stop)
3700 {
3701 maybe_restore_traceframe.emplace ();
3702 set_current_traceframe (-1);
3703 }
43ff13b4 3704
873657b9
PA
3705 /* The user/frontend should not notice a thread switch due to
3706 internal events. Make sure we revert to the user selected
3707 thread and frame after handling the event and running any
3708 breakpoint commands. */
3709 scoped_restore_current_thread restore_thread;
d238133d
TT
3710
3711 overlay_cache_invalid = 1;
3712 /* Flush target cache before starting to handle each event. Target
3713 was running and cache could be stale. This is just a heuristic.
3714 Running threads may modify target memory, but we don't get any
3715 event. */
3716 target_dcache_invalidate ();
3717
3718 scoped_restore save_exec_dir
3719 = make_scoped_restore (&execution_direction,
3720 target_execution_direction ());
3721
3722 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws,
3723 target_can_async_p () ? TARGET_WNOHANG : 0);
3724
3725 if (debug_infrun)
3726 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3727
3728 /* If an error happens while handling the event, propagate GDB's
3729 knowledge of the executing state to the frontend/user running
3730 state. */
3731 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
3732 scoped_finish_thread_state finish_state (finish_ptid);
3733
979a0d13 3734 /* Get executed before scoped_restore_current_thread above to apply
d238133d
TT
3735 still for the thread which has thrown the exception. */
3736 auto defer_bpstat_clear
3737 = make_scope_exit (bpstat_clear_actions);
3738 auto defer_delete_threads
3739 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
3740
3741 /* Now figure out what to do with the result of the result. */
3742 handle_inferior_event (ecs);
3743
3744 if (!ecs->wait_some_more)
3745 {
3746 struct inferior *inf = find_inferior_ptid (ecs->ptid);
3747 int should_stop = 1;
3748 struct thread_info *thr = ecs->event_thread;
d6b48e9c 3749
d238133d 3750 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3751
d238133d
TT
3752 if (thr != NULL)
3753 {
3754 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 3755
d238133d 3756 if (thread_fsm != NULL)
46e3ed7f 3757 should_stop = thread_fsm->should_stop (thr);
d238133d 3758 }
243a9253 3759
d238133d
TT
3760 if (!should_stop)
3761 {
3762 keep_going (ecs);
3763 }
3764 else
3765 {
46e3ed7f 3766 bool should_notify_stop = true;
d238133d 3767 int proceeded = 0;
1840d81a 3768
d238133d 3769 clean_up_just_stopped_threads_fsms (ecs);
243a9253 3770
d238133d 3771 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 3772 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 3773
d238133d
TT
3774 if (should_notify_stop)
3775 {
3776 /* We may not find an inferior if this was a process exit. */
3777 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3778 proceeded = normal_stop ();
3779 }
243a9253 3780
d238133d
TT
3781 if (!proceeded)
3782 {
3783 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3784 cmd_done = 1;
3785 }
873657b9
PA
3786
3787 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
3788 previously selected thread is gone. We have two
3789 choices - switch to no thread selected, or restore the
3790 previously selected thread (now exited). We chose the
3791 later, just because that's what GDB used to do. After
3792 this, "info threads" says "The current thread <Thread
3793 ID 2> has terminated." instead of "No thread
3794 selected.". */
3795 if (!non_stop
3796 && cmd_done
3797 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3798 restore_thread.dont_restore ();
d238133d
TT
3799 }
3800 }
4f8d22e3 3801
d238133d
TT
3802 defer_delete_threads.release ();
3803 defer_bpstat_clear.release ();
29f49a6a 3804
d238133d
TT
3805 /* No error, don't finish the thread states yet. */
3806 finish_state.release ();
731f534f 3807
d238133d
TT
3808 /* This scope is used to ensure that readline callbacks are
3809 reinstalled here. */
3810 }
4f8d22e3 3811
3b12939d
PA
3812 /* If a UI was in sync execution mode, and now isn't, restore its
3813 prompt (a synchronous execution command has finished, and we're
3814 ready for input). */
3815 all_uis_check_sync_execution_done ();
0f641c01
PA
3816
3817 if (cmd_done
0f641c01 3818 && exec_done_display_p
00431a78
PA
3819 && (inferior_ptid == null_ptid
3820 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 3821 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
3822}
3823
edb3359d
DJ
3824/* Record the frame and location we're currently stepping through. */
3825void
3826set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3827{
3828 struct thread_info *tp = inferior_thread ();
3829
16c381f0
JK
3830 tp->control.step_frame_id = get_frame_id (frame);
3831 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
3832
3833 tp->current_symtab = sal.symtab;
3834 tp->current_line = sal.line;
3835}
3836
0d1e5fa7
PA
3837/* Clear context switchable stepping state. */
3838
3839void
4e1c45ea 3840init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 3841{
7f5ef605 3842 tss->stepped_breakpoint = 0;
0d1e5fa7 3843 tss->stepping_over_breakpoint = 0;
963f9c80 3844 tss->stepping_over_watchpoint = 0;
0d1e5fa7 3845 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
3846}
3847
ab1ddbcf 3848/* See infrun.h. */
c32c64b7 3849
6efcd9a8 3850void
c32c64b7
DE
3851set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3852{
3853 target_last_wait_ptid = ptid;
3854 target_last_waitstatus = status;
3855}
3856
ab1ddbcf 3857/* See infrun.h. */
e02bc4cc
DS
3858
3859void
ab1ddbcf 3860get_last_target_status (ptid_t *ptid, struct target_waitstatus *status)
e02bc4cc 3861{
ab1ddbcf
PA
3862 if (ptid != nullptr)
3863 *ptid = target_last_wait_ptid;
3864 if (status != nullptr)
3865 *status = target_last_waitstatus;
e02bc4cc
DS
3866}
3867
ab1ddbcf
PA
3868/* See infrun.h. */
3869
ac264b3b
MS
3870void
3871nullify_last_target_wait_ptid (void)
3872{
3873 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 3874 target_last_waitstatus = {};
ac264b3b
MS
3875}
3876
dcf4fbde 3877/* Switch thread contexts. */
dd80620e
MS
3878
3879static void
00431a78 3880context_switch (execution_control_state *ecs)
dd80620e 3881{
00431a78
PA
3882 if (debug_infrun
3883 && ecs->ptid != inferior_ptid
3884 && ecs->event_thread != inferior_thread ())
fd48f117
DJ
3885 {
3886 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
a068643d 3887 target_pid_to_str (inferior_ptid).c_str ());
fd48f117 3888 fprintf_unfiltered (gdb_stdlog, "to %s\n",
a068643d 3889 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
3890 }
3891
00431a78 3892 switch_to_thread (ecs->event_thread);
dd80620e
MS
3893}
3894
d8dd4d5f
PA
3895/* If the target can't tell whether we've hit breakpoints
3896 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
3897 check whether that could have been caused by a breakpoint. If so,
3898 adjust the PC, per gdbarch_decr_pc_after_break. */
3899
4fa8626c 3900static void
d8dd4d5f
PA
3901adjust_pc_after_break (struct thread_info *thread,
3902 struct target_waitstatus *ws)
4fa8626c 3903{
24a73cce
UW
3904 struct regcache *regcache;
3905 struct gdbarch *gdbarch;
118e6252 3906 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 3907
4fa8626c
DJ
3908 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3909 we aren't, just return.
9709f61c
DJ
3910
3911 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
3912 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3913 implemented by software breakpoints should be handled through the normal
3914 breakpoint layer.
8fb3e588 3915
4fa8626c
DJ
3916 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3917 different signals (SIGILL or SIGEMT for instance), but it is less
3918 clear where the PC is pointing afterwards. It may not match
b798847d
UW
3919 gdbarch_decr_pc_after_break. I don't know any specific target that
3920 generates these signals at breakpoints (the code has been in GDB since at
3921 least 1992) so I can not guess how to handle them here.
8fb3e588 3922
e6cf7916
UW
3923 In earlier versions of GDB, a target with
3924 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
3925 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3926 target with both of these set in GDB history, and it seems unlikely to be
3927 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 3928
d8dd4d5f 3929 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
3930 return;
3931
d8dd4d5f 3932 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
3933 return;
3934
4058b839
PA
3935 /* In reverse execution, when a breakpoint is hit, the instruction
3936 under it has already been de-executed. The reported PC always
3937 points at the breakpoint address, so adjusting it further would
3938 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3939 architecture:
3940
3941 B1 0x08000000 : INSN1
3942 B2 0x08000001 : INSN2
3943 0x08000002 : INSN3
3944 PC -> 0x08000003 : INSN4
3945
3946 Say you're stopped at 0x08000003 as above. Reverse continuing
3947 from that point should hit B2 as below. Reading the PC when the
3948 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3949 been de-executed already.
3950
3951 B1 0x08000000 : INSN1
3952 B2 PC -> 0x08000001 : INSN2
3953 0x08000002 : INSN3
3954 0x08000003 : INSN4
3955
3956 We can't apply the same logic as for forward execution, because
3957 we would wrongly adjust the PC to 0x08000000, since there's a
3958 breakpoint at PC - 1. We'd then report a hit on B1, although
3959 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3960 behaviour. */
3961 if (execution_direction == EXEC_REVERSE)
3962 return;
3963
1cf4d951
PA
3964 /* If the target can tell whether the thread hit a SW breakpoint,
3965 trust it. Targets that can tell also adjust the PC
3966 themselves. */
3967 if (target_supports_stopped_by_sw_breakpoint ())
3968 return;
3969
3970 /* Note that relying on whether a breakpoint is planted in memory to
3971 determine this can fail. E.g,. the breakpoint could have been
3972 removed since. Or the thread could have been told to step an
3973 instruction the size of a breakpoint instruction, and only
3974 _after_ was a breakpoint inserted at its address. */
3975
24a73cce
UW
3976 /* If this target does not decrement the PC after breakpoints, then
3977 we have nothing to do. */
00431a78 3978 regcache = get_thread_regcache (thread);
ac7936df 3979 gdbarch = regcache->arch ();
118e6252 3980
527a273a 3981 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 3982 if (decr_pc == 0)
24a73cce
UW
3983 return;
3984
8b86c959 3985 const address_space *aspace = regcache->aspace ();
6c95b8df 3986
8aad930b
AC
3987 /* Find the location where (if we've hit a breakpoint) the
3988 breakpoint would be. */
118e6252 3989 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 3990
1cf4d951
PA
3991 /* If the target can't tell whether a software breakpoint triggered,
3992 fallback to figuring it out based on breakpoints we think were
3993 inserted in the target, and on whether the thread was stepped or
3994 continued. */
3995
1c5cfe86
PA
3996 /* Check whether there actually is a software breakpoint inserted at
3997 that location.
3998
3999 If in non-stop mode, a race condition is possible where we've
4000 removed a breakpoint, but stop events for that breakpoint were
4001 already queued and arrive later. To suppress those spurious
4002 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4003 and retire them after a number of stop events are reported. Note
4004 this is an heuristic and can thus get confused. The real fix is
4005 to get the "stopped by SW BP and needs adjustment" info out of
4006 the target/kernel (and thus never reach here; see above). */
6c95b8df 4007 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4008 || (target_is_non_stop_p ()
4009 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4010 {
07036511 4011 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4012
8213266a 4013 if (record_full_is_used ())
07036511
TT
4014 restore_operation_disable.emplace
4015 (record_full_gdb_operation_disable_set ());
96429cc8 4016
1c0fdd0e
UW
4017 /* When using hardware single-step, a SIGTRAP is reported for both
4018 a completed single-step and a software breakpoint. Need to
4019 differentiate between the two, as the latter needs adjusting
4020 but the former does not.
4021
4022 The SIGTRAP can be due to a completed hardware single-step only if
4023 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4024 - this thread is currently being stepped
4025
4026 If any of these events did not occur, we must have stopped due
4027 to hitting a software breakpoint, and have to back up to the
4028 breakpoint address.
4029
4030 As a special case, we could have hardware single-stepped a
4031 software breakpoint. In this case (prev_pc == breakpoint_pc),
4032 we also need to back up to the breakpoint address. */
4033
d8dd4d5f
PA
4034 if (thread_has_single_step_breakpoints_set (thread)
4035 || !currently_stepping (thread)
4036 || (thread->stepped_breakpoint
4037 && thread->prev_pc == breakpoint_pc))
515630c5 4038 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4039 }
4fa8626c
DJ
4040}
4041
edb3359d
DJ
4042static int
4043stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4044{
4045 for (frame = get_prev_frame (frame);
4046 frame != NULL;
4047 frame = get_prev_frame (frame))
4048 {
4049 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4050 return 1;
4051 if (get_frame_type (frame) != INLINE_FRAME)
4052 break;
4053 }
4054
4055 return 0;
4056}
4057
4a4c04f1
BE
4058/* Look for an inline frame that is marked for skip.
4059 If PREV_FRAME is TRUE start at the previous frame,
4060 otherwise start at the current frame. Stop at the
4061 first non-inline frame, or at the frame where the
4062 step started. */
4063
4064static bool
4065inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4066{
4067 struct frame_info *frame = get_current_frame ();
4068
4069 if (prev_frame)
4070 frame = get_prev_frame (frame);
4071
4072 for (; frame != NULL; frame = get_prev_frame (frame))
4073 {
4074 const char *fn = NULL;
4075 symtab_and_line sal;
4076 struct symbol *sym;
4077
4078 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4079 break;
4080 if (get_frame_type (frame) != INLINE_FRAME)
4081 break;
4082
4083 sal = find_frame_sal (frame);
4084 sym = get_frame_function (frame);
4085
4086 if (sym != NULL)
4087 fn = sym->print_name ();
4088
4089 if (sal.line != 0
4090 && function_name_is_marked_for_skip (fn, sal))
4091 return true;
4092 }
4093
4094 return false;
4095}
4096
c65d6b55
PA
4097/* If the event thread has the stop requested flag set, pretend it
4098 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4099 target_stop). */
4100
4101static bool
4102handle_stop_requested (struct execution_control_state *ecs)
4103{
4104 if (ecs->event_thread->stop_requested)
4105 {
4106 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4107 ecs->ws.value.sig = GDB_SIGNAL_0;
4108 handle_signal_stop (ecs);
4109 return true;
4110 }
4111 return false;
4112}
4113
a96d9b2e
SDJ
4114/* Auxiliary function that handles syscall entry/return events.
4115 It returns 1 if the inferior should keep going (and GDB
4116 should ignore the event), or 0 if the event deserves to be
4117 processed. */
ca2163eb 4118
a96d9b2e 4119static int
ca2163eb 4120handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4121{
ca2163eb 4122 struct regcache *regcache;
ca2163eb
PA
4123 int syscall_number;
4124
00431a78 4125 context_switch (ecs);
ca2163eb 4126
00431a78 4127 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4128 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4129 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4130
a96d9b2e
SDJ
4131 if (catch_syscall_enabled () > 0
4132 && catching_syscall_number (syscall_number) > 0)
4133 {
4134 if (debug_infrun)
4135 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4136 syscall_number);
a96d9b2e 4137
16c381f0 4138 ecs->event_thread->control.stop_bpstat
a01bda52 4139 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4140 ecs->event_thread->suspend.stop_pc,
4141 ecs->event_thread, &ecs->ws);
ab04a2af 4142
c65d6b55
PA
4143 if (handle_stop_requested (ecs))
4144 return 0;
4145
ce12b012 4146 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4147 {
4148 /* Catchpoint hit. */
ca2163eb
PA
4149 return 0;
4150 }
a96d9b2e 4151 }
ca2163eb 4152
c65d6b55
PA
4153 if (handle_stop_requested (ecs))
4154 return 0;
4155
ca2163eb 4156 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4157 keep_going (ecs);
4158 return 1;
a96d9b2e
SDJ
4159}
4160
7e324e48
GB
4161/* Lazily fill in the execution_control_state's stop_func_* fields. */
4162
4163static void
4164fill_in_stop_func (struct gdbarch *gdbarch,
4165 struct execution_control_state *ecs)
4166{
4167 if (!ecs->stop_func_filled_in)
4168 {
98a617f8
KB
4169 const block *block;
4170
7e324e48
GB
4171 /* Don't care about return value; stop_func_start and stop_func_name
4172 will both be 0 if it doesn't work. */
98a617f8
KB
4173 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4174 &ecs->stop_func_name,
4175 &ecs->stop_func_start,
4176 &ecs->stop_func_end,
4177 &block);
4178
4179 /* The call to find_pc_partial_function, above, will set
4180 stop_func_start and stop_func_end to the start and end
4181 of the range containing the stop pc. If this range
4182 contains the entry pc for the block (which is always the
4183 case for contiguous blocks), advance stop_func_start past
4184 the function's start offset and entrypoint. Note that
4185 stop_func_start is NOT advanced when in a range of a
4186 non-contiguous block that does not contain the entry pc. */
4187 if (block != nullptr
4188 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4189 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4190 {
4191 ecs->stop_func_start
4192 += gdbarch_deprecated_function_start_offset (gdbarch);
4193
4194 if (gdbarch_skip_entrypoint_p (gdbarch))
4195 ecs->stop_func_start
4196 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4197 }
591a12a1 4198
7e324e48
GB
4199 ecs->stop_func_filled_in = 1;
4200 }
4201}
4202
4f5d7f63 4203
00431a78 4204/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4205
4206static enum stop_kind
00431a78 4207get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4208{
00431a78 4209 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4f5d7f63
PA
4210
4211 gdb_assert (inf != NULL);
4212 return inf->control.stop_soon;
4213}
4214
372316f1
PA
4215/* Wait for one event. Store the resulting waitstatus in WS, and
4216 return the event ptid. */
4217
4218static ptid_t
4219wait_one (struct target_waitstatus *ws)
4220{
4221 ptid_t event_ptid;
4222 ptid_t wait_ptid = minus_one_ptid;
4223
4224 overlay_cache_invalid = 1;
4225
4226 /* Flush target cache before starting to handle each event.
4227 Target was running and cache could be stale. This is just a
4228 heuristic. Running threads may modify target memory, but we
4229 don't get any event. */
4230 target_dcache_invalidate ();
4231
4232 if (deprecated_target_wait_hook)
4233 event_ptid = deprecated_target_wait_hook (wait_ptid, ws, 0);
4234 else
4235 event_ptid = target_wait (wait_ptid, ws, 0);
4236
4237 if (debug_infrun)
4238 print_target_wait_results (wait_ptid, event_ptid, ws);
4239
4240 return event_ptid;
4241}
4242
4243/* Generate a wrapper for target_stopped_by_REASON that works on PTID
4244 instead of the current thread. */
4245#define THREAD_STOPPED_BY(REASON) \
4246static int \
4247thread_stopped_by_ ## REASON (ptid_t ptid) \
4248{ \
2989a365 4249 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
372316f1
PA
4250 inferior_ptid = ptid; \
4251 \
2989a365 4252 return target_stopped_by_ ## REASON (); \
372316f1
PA
4253}
4254
4255/* Generate thread_stopped_by_watchpoint. */
4256THREAD_STOPPED_BY (watchpoint)
4257/* Generate thread_stopped_by_sw_breakpoint. */
4258THREAD_STOPPED_BY (sw_breakpoint)
4259/* Generate thread_stopped_by_hw_breakpoint. */
4260THREAD_STOPPED_BY (hw_breakpoint)
4261
372316f1
PA
4262/* Save the thread's event and stop reason to process it later. */
4263
4264static void
4265save_waitstatus (struct thread_info *tp, struct target_waitstatus *ws)
4266{
372316f1
PA
4267 if (debug_infrun)
4268 {
23fdd69e 4269 std::string statstr = target_waitstatus_to_string (ws);
372316f1 4270
372316f1
PA
4271 fprintf_unfiltered (gdb_stdlog,
4272 "infrun: saving status %s for %d.%ld.%ld\n",
23fdd69e 4273 statstr.c_str (),
e99b03dc 4274 tp->ptid.pid (),
e38504b3 4275 tp->ptid.lwp (),
cc6bcb54 4276 tp->ptid.tid ());
372316f1
PA
4277 }
4278
4279 /* Record for later. */
4280 tp->suspend.waitstatus = *ws;
4281 tp->suspend.waitstatus_pending_p = 1;
4282
00431a78 4283 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4284 const address_space *aspace = regcache->aspace ();
372316f1
PA
4285
4286 if (ws->kind == TARGET_WAITKIND_STOPPED
4287 && ws->value.sig == GDB_SIGNAL_TRAP)
4288 {
4289 CORE_ADDR pc = regcache_read_pc (regcache);
4290
4291 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4292
4293 if (thread_stopped_by_watchpoint (tp->ptid))
4294 {
4295 tp->suspend.stop_reason
4296 = TARGET_STOPPED_BY_WATCHPOINT;
4297 }
4298 else if (target_supports_stopped_by_sw_breakpoint ()
4299 && thread_stopped_by_sw_breakpoint (tp->ptid))
4300 {
4301 tp->suspend.stop_reason
4302 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4303 }
4304 else if (target_supports_stopped_by_hw_breakpoint ()
4305 && thread_stopped_by_hw_breakpoint (tp->ptid))
4306 {
4307 tp->suspend.stop_reason
4308 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4309 }
4310 else if (!target_supports_stopped_by_hw_breakpoint ()
4311 && hardware_breakpoint_inserted_here_p (aspace,
4312 pc))
4313 {
4314 tp->suspend.stop_reason
4315 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4316 }
4317 else if (!target_supports_stopped_by_sw_breakpoint ()
4318 && software_breakpoint_inserted_here_p (aspace,
4319 pc))
4320 {
4321 tp->suspend.stop_reason
4322 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4323 }
4324 else if (!thread_has_single_step_breakpoints_set (tp)
4325 && currently_stepping (tp))
4326 {
4327 tp->suspend.stop_reason
4328 = TARGET_STOPPED_BY_SINGLE_STEP;
4329 }
4330 }
4331}
4332
6efcd9a8 4333/* See infrun.h. */
372316f1 4334
6efcd9a8 4335void
372316f1
PA
4336stop_all_threads (void)
4337{
4338 /* We may need multiple passes to discover all threads. */
4339 int pass;
4340 int iterations = 0;
372316f1 4341
fbea99ea 4342 gdb_assert (target_is_non_stop_p ());
372316f1
PA
4343
4344 if (debug_infrun)
4345 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
4346
00431a78 4347 scoped_restore_current_thread restore_thread;
372316f1 4348
65706a29 4349 target_thread_events (1);
9885e6bb 4350 SCOPE_EXIT { target_thread_events (0); };
65706a29 4351
372316f1
PA
4352 /* Request threads to stop, and then wait for the stops. Because
4353 threads we already know about can spawn more threads while we're
4354 trying to stop them, and we only learn about new threads when we
4355 update the thread list, do this in a loop, and keep iterating
4356 until two passes find no threads that need to be stopped. */
4357 for (pass = 0; pass < 2; pass++, iterations++)
4358 {
4359 if (debug_infrun)
4360 fprintf_unfiltered (gdb_stdlog,
4361 "infrun: stop_all_threads, pass=%d, "
4362 "iterations=%d\n", pass, iterations);
4363 while (1)
4364 {
4365 ptid_t event_ptid;
4366 struct target_waitstatus ws;
4367 int need_wait = 0;
372316f1
PA
4368
4369 update_thread_list ();
4370
4371 /* Go through all threads looking for threads that we need
4372 to tell the target to stop. */
08036331 4373 for (thread_info *t : all_non_exited_threads ())
372316f1
PA
4374 {
4375 if (t->executing)
4376 {
4377 /* If already stopping, don't request a stop again.
4378 We just haven't seen the notification yet. */
4379 if (!t->stop_requested)
4380 {
4381 if (debug_infrun)
4382 fprintf_unfiltered (gdb_stdlog,
4383 "infrun: %s executing, "
4384 "need stop\n",
a068643d 4385 target_pid_to_str (t->ptid).c_str ());
f3f8ece4 4386 switch_to_thread_no_regs (t);
372316f1
PA
4387 target_stop (t->ptid);
4388 t->stop_requested = 1;
4389 }
4390 else
4391 {
4392 if (debug_infrun)
4393 fprintf_unfiltered (gdb_stdlog,
4394 "infrun: %s executing, "
4395 "already stopping\n",
a068643d 4396 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4397 }
4398
4399 if (t->stop_requested)
4400 need_wait = 1;
4401 }
4402 else
4403 {
4404 if (debug_infrun)
4405 fprintf_unfiltered (gdb_stdlog,
4406 "infrun: %s not executing\n",
a068643d 4407 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4408
4409 /* The thread may be not executing, but still be
4410 resumed with a pending status to process. */
4411 t->resumed = 0;
4412 }
4413 }
4414
4415 if (!need_wait)
4416 break;
4417
4418 /* If we find new threads on the second iteration, restart
4419 over. We want to see two iterations in a row with all
4420 threads stopped. */
4421 if (pass > 0)
4422 pass = -1;
4423
4424 event_ptid = wait_one (&ws);
c29705b7 4425 if (debug_infrun)
372316f1 4426 {
c29705b7
PW
4427 fprintf_unfiltered (gdb_stdlog,
4428 "infrun: stop_all_threads %s %s\n",
4429 target_waitstatus_to_string (&ws).c_str (),
4430 target_pid_to_str (event_ptid).c_str ());
372316f1 4431 }
372316f1 4432
c29705b7
PW
4433 if (ws.kind == TARGET_WAITKIND_NO_RESUMED
4434 || ws.kind == TARGET_WAITKIND_THREAD_EXITED
4435 || ws.kind == TARGET_WAITKIND_EXITED
4436 || ws.kind == TARGET_WAITKIND_SIGNALLED)
4437 {
4438 /* All resumed threads exited
4439 or one thread/process exited/signalled. */
372316f1
PA
4440 }
4441 else
4442 {
08036331 4443 thread_info *t = find_thread_ptid (event_ptid);
372316f1
PA
4444 if (t == NULL)
4445 t = add_thread (event_ptid);
4446
4447 t->stop_requested = 0;
4448 t->executing = 0;
4449 t->resumed = 0;
4450 t->control.may_range_step = 0;
4451
6efcd9a8
PA
4452 /* This may be the first time we see the inferior report
4453 a stop. */
08036331 4454 inferior *inf = find_inferior_ptid (event_ptid);
6efcd9a8
PA
4455 if (inf->needs_setup)
4456 {
4457 switch_to_thread_no_regs (t);
4458 setup_inferior (0);
4459 }
4460
372316f1
PA
4461 if (ws.kind == TARGET_WAITKIND_STOPPED
4462 && ws.value.sig == GDB_SIGNAL_0)
4463 {
4464 /* We caught the event that we intended to catch, so
4465 there's no event pending. */
4466 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4467 t->suspend.waitstatus_pending_p = 0;
4468
00431a78 4469 if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
372316f1
PA
4470 {
4471 /* Add it back to the step-over queue. */
4472 if (debug_infrun)
4473 {
4474 fprintf_unfiltered (gdb_stdlog,
4475 "infrun: displaced-step of %s "
4476 "canceled: adding back to the "
4477 "step-over queue\n",
a068643d 4478 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4479 }
4480 t->control.trap_expected = 0;
4481 thread_step_over_chain_enqueue (t);
4482 }
4483 }
4484 else
4485 {
4486 enum gdb_signal sig;
4487 struct regcache *regcache;
372316f1
PA
4488
4489 if (debug_infrun)
4490 {
23fdd69e 4491 std::string statstr = target_waitstatus_to_string (&ws);
372316f1 4492
372316f1
PA
4493 fprintf_unfiltered (gdb_stdlog,
4494 "infrun: target_wait %s, saving "
4495 "status for %d.%ld.%ld\n",
23fdd69e 4496 statstr.c_str (),
e99b03dc 4497 t->ptid.pid (),
e38504b3 4498 t->ptid.lwp (),
cc6bcb54 4499 t->ptid.tid ());
372316f1
PA
4500 }
4501
4502 /* Record for later. */
4503 save_waitstatus (t, &ws);
4504
4505 sig = (ws.kind == TARGET_WAITKIND_STOPPED
4506 ? ws.value.sig : GDB_SIGNAL_0);
4507
00431a78 4508 if (displaced_step_fixup (t, sig) < 0)
372316f1
PA
4509 {
4510 /* Add it back to the step-over queue. */
4511 t->control.trap_expected = 0;
4512 thread_step_over_chain_enqueue (t);
4513 }
4514
00431a78 4515 regcache = get_thread_regcache (t);
372316f1
PA
4516 t->suspend.stop_pc = regcache_read_pc (regcache);
4517
4518 if (debug_infrun)
4519 {
4520 fprintf_unfiltered (gdb_stdlog,
4521 "infrun: saved stop_pc=%s for %s "
4522 "(currently_stepping=%d)\n",
4523 paddress (target_gdbarch (),
4524 t->suspend.stop_pc),
a068643d 4525 target_pid_to_str (t->ptid).c_str (),
372316f1
PA
4526 currently_stepping (t));
4527 }
4528 }
4529 }
4530 }
4531 }
4532
372316f1
PA
4533 if (debug_infrun)
4534 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
4535}
4536
f4836ba9
PA
4537/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4538
4539static int
4540handle_no_resumed (struct execution_control_state *ecs)
4541{
3b12939d 4542 if (target_can_async_p ())
f4836ba9 4543 {
3b12939d
PA
4544 struct ui *ui;
4545 int any_sync = 0;
f4836ba9 4546
3b12939d
PA
4547 ALL_UIS (ui)
4548 {
4549 if (ui->prompt_state == PROMPT_BLOCKED)
4550 {
4551 any_sync = 1;
4552 break;
4553 }
4554 }
4555 if (!any_sync)
4556 {
4557 /* There were no unwaited-for children left in the target, but,
4558 we're not synchronously waiting for events either. Just
4559 ignore. */
4560
4561 if (debug_infrun)
4562 fprintf_unfiltered (gdb_stdlog,
4563 "infrun: TARGET_WAITKIND_NO_RESUMED "
4564 "(ignoring: bg)\n");
4565 prepare_to_wait (ecs);
4566 return 1;
4567 }
f4836ba9
PA
4568 }
4569
4570 /* Otherwise, if we were running a synchronous execution command, we
4571 may need to cancel it and give the user back the terminal.
4572
4573 In non-stop mode, the target can't tell whether we've already
4574 consumed previous stop events, so it can end up sending us a
4575 no-resumed event like so:
4576
4577 #0 - thread 1 is left stopped
4578
4579 #1 - thread 2 is resumed and hits breakpoint
4580 -> TARGET_WAITKIND_STOPPED
4581
4582 #2 - thread 3 is resumed and exits
4583 this is the last resumed thread, so
4584 -> TARGET_WAITKIND_NO_RESUMED
4585
4586 #3 - gdb processes stop for thread 2 and decides to re-resume
4587 it.
4588
4589 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4590 thread 2 is now resumed, so the event should be ignored.
4591
4592 IOW, if the stop for thread 2 doesn't end a foreground command,
4593 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4594 event. But it could be that the event meant that thread 2 itself
4595 (or whatever other thread was the last resumed thread) exited.
4596
4597 To address this we refresh the thread list and check whether we
4598 have resumed threads _now_. In the example above, this removes
4599 thread 3 from the thread list. If thread 2 was re-resumed, we
4600 ignore this event. If we find no thread resumed, then we cancel
4601 the synchronous command show "no unwaited-for " to the user. */
4602 update_thread_list ();
4603
08036331 4604 for (thread_info *thread : all_non_exited_threads ())
f4836ba9
PA
4605 {
4606 if (thread->executing
4607 || thread->suspend.waitstatus_pending_p)
4608 {
4609 /* There were no unwaited-for children left in the target at
4610 some point, but there are now. Just ignore. */
4611 if (debug_infrun)
4612 fprintf_unfiltered (gdb_stdlog,
4613 "infrun: TARGET_WAITKIND_NO_RESUMED "
4614 "(ignoring: found resumed)\n");
4615 prepare_to_wait (ecs);
4616 return 1;
4617 }
4618 }
4619
4620 /* Note however that we may find no resumed thread because the whole
4621 process exited meanwhile (thus updating the thread list results
4622 in an empty thread list). In this case we know we'll be getting
4623 a process exit event shortly. */
735fc2ca 4624 for (inferior *inf : all_non_exited_inferiors ())
f4836ba9 4625 {
08036331 4626 thread_info *thread = any_live_thread_of_inferior (inf);
f4836ba9
PA
4627 if (thread == NULL)
4628 {
4629 if (debug_infrun)
4630 fprintf_unfiltered (gdb_stdlog,
4631 "infrun: TARGET_WAITKIND_NO_RESUMED "
4632 "(expect process exit)\n");
4633 prepare_to_wait (ecs);
4634 return 1;
4635 }
4636 }
4637
4638 /* Go ahead and report the event. */
4639 return 0;
4640}
4641
05ba8510
PA
4642/* Given an execution control state that has been freshly filled in by
4643 an event from the inferior, figure out what it means and take
4644 appropriate action.
4645
4646 The alternatives are:
4647
22bcd14b 4648 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
4649 debugger.
4650
4651 2) keep_going and return; to wait for the next event (set
4652 ecs->event_thread->stepping_over_breakpoint to 1 to single step
4653 once). */
c906108c 4654
ec9499be 4655static void
595915c1 4656handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 4657{
595915c1
TT
4658 /* Make sure that all temporary struct value objects that were
4659 created during the handling of the event get deleted at the
4660 end. */
4661 scoped_value_mark free_values;
4662
d6b48e9c
PA
4663 enum stop_kind stop_soon;
4664
c29705b7
PW
4665 if (debug_infrun)
4666 fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
4667 target_waitstatus_to_string (&ecs->ws).c_str ());
4668
28736962
PA
4669 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
4670 {
4671 /* We had an event in the inferior, but we are not interested in
4672 handling it at this level. The lower layers have already
4673 done what needs to be done, if anything.
4674
4675 One of the possible circumstances for this is when the
4676 inferior produces output for the console. The inferior has
4677 not stopped, and we are ignoring the event. Another possible
4678 circumstance is any event which the lower level knows will be
4679 reported multiple times without an intervening resume. */
28736962
PA
4680 prepare_to_wait (ecs);
4681 return;
4682 }
4683
65706a29
PA
4684 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
4685 {
65706a29
PA
4686 prepare_to_wait (ecs);
4687 return;
4688 }
4689
0e5bf2a8 4690 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
4691 && handle_no_resumed (ecs))
4692 return;
0e5bf2a8 4693
1777feb0 4694 /* Cache the last pid/waitstatus. */
c32c64b7 4695 set_last_target_status (ecs->ptid, ecs->ws);
e02bc4cc 4696
ca005067 4697 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 4698 stop_stack_dummy = STOP_NONE;
ca005067 4699
0e5bf2a8
PA
4700 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
4701 {
4702 /* No unwaited-for children left. IOW, all resumed children
4703 have exited. */
0e5bf2a8 4704 stop_print_frame = 0;
22bcd14b 4705 stop_waiting (ecs);
0e5bf2a8
PA
4706 return;
4707 }
4708
8c90c137 4709 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 4710 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6
PA
4711 {
4712 ecs->event_thread = find_thread_ptid (ecs->ptid);
4713 /* If it's a new thread, add it to the thread database. */
4714 if (ecs->event_thread == NULL)
4715 ecs->event_thread = add_thread (ecs->ptid);
c1e36e3e
PA
4716
4717 /* Disable range stepping. If the next step request could use a
4718 range, this will be end up re-enabled then. */
4719 ecs->event_thread->control.may_range_step = 0;
359f5fe6 4720 }
88ed393a
JK
4721
4722 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 4723 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
4724
4725 /* Dependent on the current PC value modified by adjust_pc_after_break. */
4726 reinit_frame_cache ();
4727
28736962
PA
4728 breakpoint_retire_moribund ();
4729
2b009048
DJ
4730 /* First, distinguish signals caused by the debugger from signals
4731 that have to do with the program's own actions. Note that
4732 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
4733 on the operating system version. Here we detect when a SIGILL or
4734 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
4735 something similar for SIGSEGV, since a SIGSEGV will be generated
4736 when we're trying to execute a breakpoint instruction on a
4737 non-executable stack. This happens for call dummy breakpoints
4738 for architectures like SPARC that place call dummies on the
4739 stack. */
2b009048 4740 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
4741 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
4742 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
4743 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 4744 {
00431a78 4745 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 4746
a01bda52 4747 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
4748 regcache_read_pc (regcache)))
4749 {
4750 if (debug_infrun)
4751 fprintf_unfiltered (gdb_stdlog,
4752 "infrun: Treating signal as SIGTRAP\n");
a493e3e2 4753 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 4754 }
2b009048
DJ
4755 }
4756
28736962
PA
4757 /* Mark the non-executing threads accordingly. In all-stop, all
4758 threads of all processes are stopped when we get any event
e1316e60 4759 reported. In non-stop mode, only the event thread stops. */
372316f1
PA
4760 {
4761 ptid_t mark_ptid;
4762
fbea99ea 4763 if (!target_is_non_stop_p ())
372316f1
PA
4764 mark_ptid = minus_one_ptid;
4765 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
4766 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
4767 {
4768 /* If we're handling a process exit in non-stop mode, even
4769 though threads haven't been deleted yet, one would think
4770 that there is nothing to do, as threads of the dead process
4771 will be soon deleted, and threads of any other process were
4772 left running. However, on some targets, threads survive a
4773 process exit event. E.g., for the "checkpoint" command,
4774 when the current checkpoint/fork exits, linux-fork.c
4775 automatically switches to another fork from within
4776 target_mourn_inferior, by associating the same
4777 inferior/thread to another fork. We haven't mourned yet at
4778 this point, but we must mark any threads left in the
4779 process as not-executing so that finish_thread_state marks
4780 them stopped (in the user's perspective) if/when we present
4781 the stop to the user. */
e99b03dc 4782 mark_ptid = ptid_t (ecs->ptid.pid ());
372316f1
PA
4783 }
4784 else
4785 mark_ptid = ecs->ptid;
4786
4787 set_executing (mark_ptid, 0);
4788
4789 /* Likewise the resumed flag. */
4790 set_resumed (mark_ptid, 0);
4791 }
8c90c137 4792
488f131b
JB
4793 switch (ecs->ws.kind)
4794 {
4795 case TARGET_WAITKIND_LOADED:
00431a78 4796 context_switch (ecs);
b0f4b84b
DJ
4797 /* Ignore gracefully during startup of the inferior, as it might
4798 be the shell which has just loaded some objects, otherwise
4799 add the symbols for the newly loaded objects. Also ignore at
4800 the beginning of an attach or remote session; we will query
4801 the full list of libraries once the connection is
4802 established. */
4f5d7f63 4803
00431a78 4804 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 4805 if (stop_soon == NO_STOP_QUIETLY)
488f131b 4806 {
edcc5120
TT
4807 struct regcache *regcache;
4808
00431a78 4809 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
4810
4811 handle_solib_event ();
4812
4813 ecs->event_thread->control.stop_bpstat
a01bda52 4814 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4815 ecs->event_thread->suspend.stop_pc,
4816 ecs->event_thread, &ecs->ws);
ab04a2af 4817
c65d6b55
PA
4818 if (handle_stop_requested (ecs))
4819 return;
4820
ce12b012 4821 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
4822 {
4823 /* A catchpoint triggered. */
94c57d6a
PA
4824 process_event_stop_test (ecs);
4825 return;
edcc5120 4826 }
488f131b 4827
b0f4b84b
DJ
4828 /* If requested, stop when the dynamic linker notifies
4829 gdb of events. This allows the user to get control
4830 and place breakpoints in initializer routines for
4831 dynamically loaded objects (among other things). */
a493e3e2 4832 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
4833 if (stop_on_solib_events)
4834 {
55409f9d
DJ
4835 /* Make sure we print "Stopped due to solib-event" in
4836 normal_stop. */
4837 stop_print_frame = 1;
4838
22bcd14b 4839 stop_waiting (ecs);
b0f4b84b
DJ
4840 return;
4841 }
488f131b 4842 }
b0f4b84b
DJ
4843
4844 /* If we are skipping through a shell, or through shared library
4845 loading that we aren't interested in, resume the program. If
5c09a2c5 4846 we're running the program normally, also resume. */
b0f4b84b
DJ
4847 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
4848 {
74960c60
VP
4849 /* Loading of shared libraries might have changed breakpoint
4850 addresses. Make sure new breakpoints are inserted. */
a25a5a45 4851 if (stop_soon == NO_STOP_QUIETLY)
74960c60 4852 insert_breakpoints ();
64ce06e4 4853 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
4854 prepare_to_wait (ecs);
4855 return;
4856 }
4857
5c09a2c5
PA
4858 /* But stop if we're attaching or setting up a remote
4859 connection. */
4860 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4861 || stop_soon == STOP_QUIETLY_REMOTE)
4862 {
4863 if (debug_infrun)
4864 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
22bcd14b 4865 stop_waiting (ecs);
5c09a2c5
PA
4866 return;
4867 }
4868
4869 internal_error (__FILE__, __LINE__,
4870 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 4871
488f131b 4872 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
4873 if (handle_stop_requested (ecs))
4874 return;
00431a78 4875 context_switch (ecs);
64ce06e4 4876 resume (GDB_SIGNAL_0);
488f131b
JB
4877 prepare_to_wait (ecs);
4878 return;
c5aa993b 4879
65706a29 4880 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
4881 if (handle_stop_requested (ecs))
4882 return;
00431a78 4883 context_switch (ecs);
65706a29
PA
4884 if (!switch_back_to_stepped_thread (ecs))
4885 keep_going (ecs);
4886 return;
4887
488f131b 4888 case TARGET_WAITKIND_EXITED:
940c3c06 4889 case TARGET_WAITKIND_SIGNALLED:
fb66883a 4890 inferior_ptid = ecs->ptid;
c9657e70 4891 set_current_inferior (find_inferior_ptid (ecs->ptid));
6c95b8df
PA
4892 set_current_program_space (current_inferior ()->pspace);
4893 handle_vfork_child_exec_or_exit (0);
223ffa71 4894 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 4895
0c557179
SDJ
4896 /* Clearing any previous state of convenience variables. */
4897 clear_exit_convenience_vars ();
4898
940c3c06
PA
4899 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
4900 {
4901 /* Record the exit code in the convenience variable $_exitcode, so
4902 that the user can inspect this again later. */
4903 set_internalvar_integer (lookup_internalvar ("_exitcode"),
4904 (LONGEST) ecs->ws.value.integer);
4905
4906 /* Also record this in the inferior itself. */
4907 current_inferior ()->has_exit_code = 1;
4908 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 4909
98eb56a4
PA
4910 /* Support the --return-child-result option. */
4911 return_child_result_value = ecs->ws.value.integer;
4912
76727919 4913 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
4914 }
4915 else
0c557179 4916 {
00431a78 4917 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
4918
4919 if (gdbarch_gdb_signal_to_target_p (gdbarch))
4920 {
4921 /* Set the value of the internal variable $_exitsignal,
4922 which holds the signal uncaught by the inferior. */
4923 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
4924 gdbarch_gdb_signal_to_target (gdbarch,
4925 ecs->ws.value.sig));
4926 }
4927 else
4928 {
4929 /* We don't have access to the target's method used for
4930 converting between signal numbers (GDB's internal
4931 representation <-> target's representation).
4932 Therefore, we cannot do a good job at displaying this
4933 information to the user. It's better to just warn
4934 her about it (if infrun debugging is enabled), and
4935 give up. */
4936 if (debug_infrun)
4937 fprintf_filtered (gdb_stdlog, _("\
4938Cannot fill $_exitsignal with the correct signal number.\n"));
4939 }
4940
76727919 4941 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 4942 }
8cf64490 4943
488f131b 4944 gdb_flush (gdb_stdout);
bc1e6c81 4945 target_mourn_inferior (inferior_ptid);
488f131b 4946 stop_print_frame = 0;
22bcd14b 4947 stop_waiting (ecs);
488f131b 4948 return;
c5aa993b 4949
488f131b 4950 /* The following are the only cases in which we keep going;
1777feb0 4951 the above cases end in a continue or goto. */
488f131b 4952 case TARGET_WAITKIND_FORKED:
deb3b17b 4953 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
4954 /* Check whether the inferior is displaced stepping. */
4955 {
00431a78 4956 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 4957 struct gdbarch *gdbarch = regcache->arch ();
e2d96639
YQ
4958
4959 /* If checking displaced stepping is supported, and thread
4960 ecs->ptid is displaced stepping. */
00431a78 4961 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639
YQ
4962 {
4963 struct inferior *parent_inf
c9657e70 4964 = find_inferior_ptid (ecs->ptid);
e2d96639
YQ
4965 struct regcache *child_regcache;
4966 CORE_ADDR parent_pc;
4967
4968 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
4969 indicating that the displaced stepping of syscall instruction
4970 has been done. Perform cleanup for parent process here. Note
4971 that this operation also cleans up the child process for vfork,
4972 because their pages are shared. */
00431a78 4973 displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
4974 /* Start a new step-over in another thread if there's one
4975 that needs it. */
4976 start_step_over ();
e2d96639
YQ
4977
4978 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
4979 {
c0987663 4980 struct displaced_step_inferior_state *displaced
00431a78 4981 = get_displaced_stepping_state (parent_inf);
c0987663 4982
e2d96639
YQ
4983 /* Restore scratch pad for child process. */
4984 displaced_step_restore (displaced, ecs->ws.value.related_pid);
4985 }
4986
4987 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
4988 the child's PC is also within the scratchpad. Set the child's PC
4989 to the parent's PC value, which has already been fixed up.
4990 FIXME: we use the parent's aspace here, although we're touching
4991 the child, because the child hasn't been added to the inferior
4992 list yet at this point. */
4993
4994 child_regcache
4995 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
4996 gdbarch,
4997 parent_inf->aspace);
4998 /* Read PC value of parent process. */
4999 parent_pc = regcache_read_pc (regcache);
5000
5001 if (debug_displaced)
5002 fprintf_unfiltered (gdb_stdlog,
5003 "displaced: write child pc from %s to %s\n",
5004 paddress (gdbarch,
5005 regcache_read_pc (child_regcache)),
5006 paddress (gdbarch, parent_pc));
5007
5008 regcache_write_pc (child_regcache, parent_pc);
5009 }
5010 }
5011
00431a78 5012 context_switch (ecs);
5a2901d9 5013
b242c3c2
PA
5014 /* Immediately detach breakpoints from the child before there's
5015 any chance of letting the user delete breakpoints from the
5016 breakpoint lists. If we don't do this early, it's easy to
5017 leave left over traps in the child, vis: "break foo; catch
5018 fork; c; <fork>; del; c; <child calls foo>". We only follow
5019 the fork on the last `continue', and by that time the
5020 breakpoint at "foo" is long gone from the breakpoint table.
5021 If we vforked, then we don't need to unpatch here, since both
5022 parent and child are sharing the same memory pages; we'll
5023 need to unpatch at follow/detach time instead to be certain
5024 that new breakpoints added between catchpoint hit time and
5025 vfork follow are detached. */
5026 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5027 {
b242c3c2
PA
5028 /* This won't actually modify the breakpoint list, but will
5029 physically remove the breakpoints from the child. */
d80ee84f 5030 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5031 }
5032
34b7e8a6 5033 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5034
e58b0e63
PA
5035 /* In case the event is caught by a catchpoint, remember that
5036 the event is to be followed at the next resume of the thread,
5037 and not immediately. */
5038 ecs->event_thread->pending_follow = ecs->ws;
5039
f2ffa92b
PA
5040 ecs->event_thread->suspend.stop_pc
5041 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5042
16c381f0 5043 ecs->event_thread->control.stop_bpstat
a01bda52 5044 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5045 ecs->event_thread->suspend.stop_pc,
5046 ecs->event_thread, &ecs->ws);
675bf4cb 5047
c65d6b55
PA
5048 if (handle_stop_requested (ecs))
5049 return;
5050
ce12b012
PA
5051 /* If no catchpoint triggered for this, then keep going. Note
5052 that we're interested in knowing the bpstat actually causes a
5053 stop, not just if it may explain the signal. Software
5054 watchpoints, for example, always appear in the bpstat. */
5055 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5056 {
e58b0e63 5057 int should_resume;
3e43a32a
MS
5058 int follow_child
5059 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5060
a493e3e2 5061 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63
PA
5062
5063 should_resume = follow_fork ();
5064
00431a78
PA
5065 thread_info *parent = ecs->event_thread;
5066 thread_info *child = find_thread_ptid (ecs->ws.value.related_pid);
6c95b8df 5067
a2077e25
PA
5068 /* At this point, the parent is marked running, and the
5069 child is marked stopped. */
5070
5071 /* If not resuming the parent, mark it stopped. */
5072 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5073 parent->set_running (false);
a2077e25
PA
5074
5075 /* If resuming the child, mark it running. */
5076 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5077 child->set_running (true);
a2077e25 5078
6c95b8df 5079 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5080 if (!detach_fork && (non_stop
5081 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5082 {
5083 if (follow_child)
5084 switch_to_thread (parent);
5085 else
5086 switch_to_thread (child);
5087
5088 ecs->event_thread = inferior_thread ();
5089 ecs->ptid = inferior_ptid;
5090 keep_going (ecs);
5091 }
5092
5093 if (follow_child)
5094 switch_to_thread (child);
5095 else
5096 switch_to_thread (parent);
5097
e58b0e63
PA
5098 ecs->event_thread = inferior_thread ();
5099 ecs->ptid = inferior_ptid;
5100
5101 if (should_resume)
5102 keep_going (ecs);
5103 else
22bcd14b 5104 stop_waiting (ecs);
04e68871
DJ
5105 return;
5106 }
94c57d6a
PA
5107 process_event_stop_test (ecs);
5108 return;
488f131b 5109
6c95b8df
PA
5110 case TARGET_WAITKIND_VFORK_DONE:
5111 /* Done with the shared memory region. Re-insert breakpoints in
5112 the parent, and keep going. */
5113
00431a78 5114 context_switch (ecs);
6c95b8df
PA
5115
5116 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5117 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5118
5119 if (handle_stop_requested (ecs))
5120 return;
5121
6c95b8df
PA
5122 /* This also takes care of reinserting breakpoints in the
5123 previously locked inferior. */
5124 keep_going (ecs);
5125 return;
5126
488f131b 5127 case TARGET_WAITKIND_EXECD:
488f131b 5128
cbd2b4e3
PA
5129 /* Note we can't read registers yet (the stop_pc), because we
5130 don't yet know the inferior's post-exec architecture.
5131 'stop_pc' is explicitly read below instead. */
00431a78 5132 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5133
6c95b8df
PA
5134 /* Do whatever is necessary to the parent branch of the vfork. */
5135 handle_vfork_child_exec_or_exit (1);
5136
795e548f
PA
5137 /* This causes the eventpoints and symbol table to be reset.
5138 Must do this now, before trying to determine whether to
5139 stop. */
71b43ef8 5140 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5141
17d8546e
DB
5142 /* In follow_exec we may have deleted the original thread and
5143 created a new one. Make sure that the event thread is the
5144 execd thread for that case (this is a nop otherwise). */
5145 ecs->event_thread = inferior_thread ();
5146
f2ffa92b
PA
5147 ecs->event_thread->suspend.stop_pc
5148 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5149
16c381f0 5150 ecs->event_thread->control.stop_bpstat
a01bda52 5151 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5152 ecs->event_thread->suspend.stop_pc,
5153 ecs->event_thread, &ecs->ws);
795e548f 5154
71b43ef8
PA
5155 /* Note that this may be referenced from inside
5156 bpstat_stop_status above, through inferior_has_execd. */
5157 xfree (ecs->ws.value.execd_pathname);
5158 ecs->ws.value.execd_pathname = NULL;
5159
c65d6b55
PA
5160 if (handle_stop_requested (ecs))
5161 return;
5162
04e68871 5163 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5164 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5165 {
a493e3e2 5166 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5167 keep_going (ecs);
5168 return;
5169 }
94c57d6a
PA
5170 process_event_stop_test (ecs);
5171 return;
488f131b 5172
b4dc5ffa
MK
5173 /* Be careful not to try to gather much state about a thread
5174 that's in a syscall. It's frequently a losing proposition. */
488f131b 5175 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5176 /* Getting the current syscall number. */
94c57d6a
PA
5177 if (handle_syscall_event (ecs) == 0)
5178 process_event_stop_test (ecs);
5179 return;
c906108c 5180
488f131b
JB
5181 /* Before examining the threads further, step this thread to
5182 get it entirely out of the syscall. (We get notice of the
5183 event when the thread is just on the verge of exiting a
5184 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5185 into user code.) */
488f131b 5186 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5187 if (handle_syscall_event (ecs) == 0)
5188 process_event_stop_test (ecs);
5189 return;
c906108c 5190
488f131b 5191 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5192 handle_signal_stop (ecs);
5193 return;
c906108c 5194
b2175913
MS
5195 case TARGET_WAITKIND_NO_HISTORY:
5196 /* Reverse execution: target ran out of history info. */
eab402df 5197
d1988021 5198 /* Switch to the stopped thread. */
00431a78 5199 context_switch (ecs);
d1988021
MM
5200 if (debug_infrun)
5201 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5202
34b7e8a6 5203 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5204 ecs->event_thread->suspend.stop_pc
5205 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5206
5207 if (handle_stop_requested (ecs))
5208 return;
5209
76727919 5210 gdb::observers::no_history.notify ();
22bcd14b 5211 stop_waiting (ecs);
b2175913 5212 return;
488f131b 5213 }
4f5d7f63
PA
5214}
5215
372316f1
PA
5216/* Restart threads back to what they were trying to do back when we
5217 paused them for an in-line step-over. The EVENT_THREAD thread is
5218 ignored. */
4d9d9d04
PA
5219
5220static void
372316f1
PA
5221restart_threads (struct thread_info *event_thread)
5222{
372316f1
PA
5223 /* In case the instruction just stepped spawned a new thread. */
5224 update_thread_list ();
5225
08036331 5226 for (thread_info *tp : all_non_exited_threads ())
372316f1 5227 {
f3f8ece4
PA
5228 switch_to_thread_no_regs (tp);
5229
372316f1
PA
5230 if (tp == event_thread)
5231 {
5232 if (debug_infrun)
5233 fprintf_unfiltered (gdb_stdlog,
5234 "infrun: restart threads: "
5235 "[%s] is event thread\n",
a068643d 5236 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5237 continue;
5238 }
5239
5240 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5241 {
5242 if (debug_infrun)
5243 fprintf_unfiltered (gdb_stdlog,
5244 "infrun: restart threads: "
5245 "[%s] not meant to be running\n",
a068643d 5246 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5247 continue;
5248 }
5249
5250 if (tp->resumed)
5251 {
5252 if (debug_infrun)
5253 fprintf_unfiltered (gdb_stdlog,
5254 "infrun: restart threads: [%s] resumed\n",
a068643d 5255 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5256 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5257 continue;
5258 }
5259
5260 if (thread_is_in_step_over_chain (tp))
5261 {
5262 if (debug_infrun)
5263 fprintf_unfiltered (gdb_stdlog,
5264 "infrun: restart threads: "
5265 "[%s] needs step-over\n",
a068643d 5266 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5267 gdb_assert (!tp->resumed);
5268 continue;
5269 }
5270
5271
5272 if (tp->suspend.waitstatus_pending_p)
5273 {
5274 if (debug_infrun)
5275 fprintf_unfiltered (gdb_stdlog,
5276 "infrun: restart threads: "
5277 "[%s] has pending status\n",
a068643d 5278 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5279 tp->resumed = 1;
5280 continue;
5281 }
5282
c65d6b55
PA
5283 gdb_assert (!tp->stop_requested);
5284
372316f1
PA
5285 /* If some thread needs to start a step-over at this point, it
5286 should still be in the step-over queue, and thus skipped
5287 above. */
5288 if (thread_still_needs_step_over (tp))
5289 {
5290 internal_error (__FILE__, __LINE__,
5291 "thread [%s] needs a step-over, but not in "
5292 "step-over queue\n",
a068643d 5293 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5294 }
5295
5296 if (currently_stepping (tp))
5297 {
5298 if (debug_infrun)
5299 fprintf_unfiltered (gdb_stdlog,
5300 "infrun: restart threads: [%s] was stepping\n",
a068643d 5301 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5302 keep_going_stepped_thread (tp);
5303 }
5304 else
5305 {
5306 struct execution_control_state ecss;
5307 struct execution_control_state *ecs = &ecss;
5308
5309 if (debug_infrun)
5310 fprintf_unfiltered (gdb_stdlog,
5311 "infrun: restart threads: [%s] continuing\n",
a068643d 5312 target_pid_to_str (tp->ptid).c_str ());
372316f1 5313 reset_ecs (ecs, tp);
00431a78 5314 switch_to_thread (tp);
372316f1
PA
5315 keep_going_pass_signal (ecs);
5316 }
5317 }
5318}
5319
5320/* Callback for iterate_over_threads. Find a resumed thread that has
5321 a pending waitstatus. */
5322
5323static int
5324resumed_thread_with_pending_status (struct thread_info *tp,
5325 void *arg)
5326{
5327 return (tp->resumed
5328 && tp->suspend.waitstatus_pending_p);
5329}
5330
5331/* Called when we get an event that may finish an in-line or
5332 out-of-line (displaced stepping) step-over started previously.
5333 Return true if the event is processed and we should go back to the
5334 event loop; false if the caller should continue processing the
5335 event. */
5336
5337static int
4d9d9d04
PA
5338finish_step_over (struct execution_control_state *ecs)
5339{
372316f1
PA
5340 int had_step_over_info;
5341
00431a78 5342 displaced_step_fixup (ecs->event_thread,
4d9d9d04
PA
5343 ecs->event_thread->suspend.stop_signal);
5344
372316f1
PA
5345 had_step_over_info = step_over_info_valid_p ();
5346
5347 if (had_step_over_info)
4d9d9d04
PA
5348 {
5349 /* If we're stepping over a breakpoint with all threads locked,
5350 then only the thread that was stepped should be reporting
5351 back an event. */
5352 gdb_assert (ecs->event_thread->control.trap_expected);
5353
c65d6b55 5354 clear_step_over_info ();
4d9d9d04
PA
5355 }
5356
fbea99ea 5357 if (!target_is_non_stop_p ())
372316f1 5358 return 0;
4d9d9d04
PA
5359
5360 /* Start a new step-over in another thread if there's one that
5361 needs it. */
5362 start_step_over ();
372316f1
PA
5363
5364 /* If we were stepping over a breakpoint before, and haven't started
5365 a new in-line step-over sequence, then restart all other threads
5366 (except the event thread). We can't do this in all-stop, as then
5367 e.g., we wouldn't be able to issue any other remote packet until
5368 these other threads stop. */
5369 if (had_step_over_info && !step_over_info_valid_p ())
5370 {
5371 struct thread_info *pending;
5372
5373 /* If we only have threads with pending statuses, the restart
5374 below won't restart any thread and so nothing re-inserts the
5375 breakpoint we just stepped over. But we need it inserted
5376 when we later process the pending events, otherwise if
5377 another thread has a pending event for this breakpoint too,
5378 we'd discard its event (because the breakpoint that
5379 originally caused the event was no longer inserted). */
00431a78 5380 context_switch (ecs);
372316f1
PA
5381 insert_breakpoints ();
5382
5383 restart_threads (ecs->event_thread);
5384
5385 /* If we have events pending, go through handle_inferior_event
5386 again, picking up a pending event at random. This avoids
5387 thread starvation. */
5388
5389 /* But not if we just stepped over a watchpoint in order to let
5390 the instruction execute so we can evaluate its expression.
5391 The set of watchpoints that triggered is recorded in the
5392 breakpoint objects themselves (see bp->watchpoint_triggered).
5393 If we processed another event first, that other event could
5394 clobber this info. */
5395 if (ecs->event_thread->stepping_over_watchpoint)
5396 return 0;
5397
5398 pending = iterate_over_threads (resumed_thread_with_pending_status,
5399 NULL);
5400 if (pending != NULL)
5401 {
5402 struct thread_info *tp = ecs->event_thread;
5403 struct regcache *regcache;
5404
5405 if (debug_infrun)
5406 {
5407 fprintf_unfiltered (gdb_stdlog,
5408 "infrun: found resumed threads with "
5409 "pending events, saving status\n");
5410 }
5411
5412 gdb_assert (pending != tp);
5413
5414 /* Record the event thread's event for later. */
5415 save_waitstatus (tp, &ecs->ws);
5416 /* This was cleared early, by handle_inferior_event. Set it
5417 so this pending event is considered by
5418 do_target_wait. */
5419 tp->resumed = 1;
5420
5421 gdb_assert (!tp->executing);
5422
00431a78 5423 regcache = get_thread_regcache (tp);
372316f1
PA
5424 tp->suspend.stop_pc = regcache_read_pc (regcache);
5425
5426 if (debug_infrun)
5427 {
5428 fprintf_unfiltered (gdb_stdlog,
5429 "infrun: saved stop_pc=%s for %s "
5430 "(currently_stepping=%d)\n",
5431 paddress (target_gdbarch (),
5432 tp->suspend.stop_pc),
a068643d 5433 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
5434 currently_stepping (tp));
5435 }
5436
5437 /* This in-line step-over finished; clear this so we won't
5438 start a new one. This is what handle_signal_stop would
5439 do, if we returned false. */
5440 tp->stepping_over_breakpoint = 0;
5441
5442 /* Wake up the event loop again. */
5443 mark_async_event_handler (infrun_async_inferior_event_token);
5444
5445 prepare_to_wait (ecs);
5446 return 1;
5447 }
5448 }
5449
5450 return 0;
4d9d9d04
PA
5451}
5452
4f5d7f63
PA
5453/* Come here when the program has stopped with a signal. */
5454
5455static void
5456handle_signal_stop (struct execution_control_state *ecs)
5457{
5458 struct frame_info *frame;
5459 struct gdbarch *gdbarch;
5460 int stopped_by_watchpoint;
5461 enum stop_kind stop_soon;
5462 int random_signal;
c906108c 5463
f0407826
DE
5464 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5465
c65d6b55
PA
5466 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5467
f0407826
DE
5468 /* Do we need to clean up the state of a thread that has
5469 completed a displaced single-step? (Doing so usually affects
5470 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5471 if (finish_step_over (ecs))
5472 return;
f0407826
DE
5473
5474 /* If we either finished a single-step or hit a breakpoint, but
5475 the user wanted this thread to be stopped, pretend we got a
5476 SIG0 (generic unsignaled stop). */
5477 if (ecs->event_thread->stop_requested
5478 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5479 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5480
f2ffa92b
PA
5481 ecs->event_thread->suspend.stop_pc
5482 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5483
527159b7 5484 if (debug_infrun)
237fc4c9 5485 {
00431a78 5486 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5487 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5488
f3f8ece4 5489 switch_to_thread (ecs->event_thread);
5af949e3
UW
5490
5491 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
b926417a 5492 paddress (reg_gdbarch,
f2ffa92b 5493 ecs->event_thread->suspend.stop_pc));
d92524f1 5494 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5495 {
5496 CORE_ADDR addr;
abbb1732 5497
237fc4c9
PA
5498 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5499
8b88a78e 5500 if (target_stopped_data_address (current_top_target (), &addr))
237fc4c9 5501 fprintf_unfiltered (gdb_stdlog,
5af949e3 5502 "infrun: stopped data address = %s\n",
b926417a 5503 paddress (reg_gdbarch, addr));
237fc4c9
PA
5504 else
5505 fprintf_unfiltered (gdb_stdlog,
5506 "infrun: (no data address available)\n");
5507 }
5508 }
527159b7 5509
36fa8042
PA
5510 /* This is originated from start_remote(), start_inferior() and
5511 shared libraries hook functions. */
00431a78 5512 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5513 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5514 {
00431a78 5515 context_switch (ecs);
36fa8042
PA
5516 if (debug_infrun)
5517 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5518 stop_print_frame = 1;
22bcd14b 5519 stop_waiting (ecs);
36fa8042
PA
5520 return;
5521 }
5522
36fa8042
PA
5523 /* This originates from attach_command(). We need to overwrite
5524 the stop_signal here, because some kernels don't ignore a
5525 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5526 See more comments in inferior.h. On the other hand, if we
5527 get a non-SIGSTOP, report it to the user - assume the backend
5528 will handle the SIGSTOP if it should show up later.
5529
5530 Also consider that the attach is complete when we see a
5531 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5532 target extended-remote report it instead of a SIGSTOP
5533 (e.g. gdbserver). We already rely on SIGTRAP being our
5534 signal, so this is no exception.
5535
5536 Also consider that the attach is complete when we see a
5537 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5538 the target to stop all threads of the inferior, in case the
5539 low level attach operation doesn't stop them implicitly. If
5540 they weren't stopped implicitly, then the stub will report a
5541 GDB_SIGNAL_0, meaning: stopped for no particular reason
5542 other than GDB's request. */
5543 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5544 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5545 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5546 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5547 {
5548 stop_print_frame = 1;
22bcd14b 5549 stop_waiting (ecs);
36fa8042
PA
5550 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5551 return;
5552 }
5553
488f131b 5554 /* See if something interesting happened to the non-current thread. If
b40c7d58 5555 so, then switch to that thread. */
d7e15655 5556 if (ecs->ptid != inferior_ptid)
488f131b 5557 {
527159b7 5558 if (debug_infrun)
8a9de0e4 5559 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
527159b7 5560
00431a78 5561 context_switch (ecs);
c5aa993b 5562
9a4105ab 5563 if (deprecated_context_hook)
00431a78 5564 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5565 }
c906108c 5566
568d6575
UW
5567 /* At this point, get hold of the now-current thread's frame. */
5568 frame = get_current_frame ();
5569 gdbarch = get_frame_arch (frame);
5570
2adfaa28 5571 /* Pull the single step breakpoints out of the target. */
af48d08f 5572 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5573 {
af48d08f 5574 struct regcache *regcache;
af48d08f 5575 CORE_ADDR pc;
2adfaa28 5576
00431a78 5577 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5578 const address_space *aspace = regcache->aspace ();
5579
af48d08f 5580 pc = regcache_read_pc (regcache);
34b7e8a6 5581
af48d08f
PA
5582 /* However, before doing so, if this single-step breakpoint was
5583 actually for another thread, set this thread up for moving
5584 past it. */
5585 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5586 aspace, pc))
5587 {
5588 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28
PA
5589 {
5590 if (debug_infrun)
5591 {
5592 fprintf_unfiltered (gdb_stdlog,
af48d08f 5593 "infrun: [%s] hit another thread's "
34b7e8a6 5594 "single-step breakpoint\n",
a068643d 5595 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5596 }
af48d08f
PA
5597 ecs->hit_singlestep_breakpoint = 1;
5598 }
5599 }
5600 else
5601 {
5602 if (debug_infrun)
5603 {
5604 fprintf_unfiltered (gdb_stdlog,
5605 "infrun: [%s] hit its "
5606 "single-step breakpoint\n",
a068643d 5607 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28
PA
5608 }
5609 }
488f131b 5610 }
af48d08f 5611 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5612
963f9c80
PA
5613 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5614 && ecs->event_thread->control.trap_expected
5615 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5616 stopped_by_watchpoint = 0;
5617 else
5618 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5619
5620 /* If necessary, step over this watchpoint. We'll be back to display
5621 it in a moment. */
5622 if (stopped_by_watchpoint
d92524f1 5623 && (target_have_steppable_watchpoint
568d6575 5624 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5625 {
488f131b
JB
5626 /* At this point, we are stopped at an instruction which has
5627 attempted to write to a piece of memory under control of
5628 a watchpoint. The instruction hasn't actually executed
5629 yet. If we were to evaluate the watchpoint expression
5630 now, we would get the old value, and therefore no change
5631 would seem to have occurred.
5632
5633 In order to make watchpoints work `right', we really need
5634 to complete the memory write, and then evaluate the
d983da9c
DJ
5635 watchpoint expression. We do this by single-stepping the
5636 target.
5637
7f89fd65 5638 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5639 it. For example, the PA can (with some kernel cooperation)
5640 single step over a watchpoint without disabling the watchpoint.
5641
5642 It is far more common to need to disable a watchpoint to step
5643 the inferior over it. If we have non-steppable watchpoints,
5644 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5645 disable all watchpoints.
5646
5647 Any breakpoint at PC must also be stepped over -- if there's
5648 one, it will have already triggered before the watchpoint
5649 triggered, and we either already reported it to the user, or
5650 it didn't cause a stop and we called keep_going. In either
5651 case, if there was a breakpoint at PC, we must be trying to
5652 step past it. */
5653 ecs->event_thread->stepping_over_watchpoint = 1;
5654 keep_going (ecs);
488f131b
JB
5655 return;
5656 }
5657
4e1c45ea 5658 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5659 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5660 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5661 ecs->event_thread->control.stop_step = 0;
488f131b 5662 stop_print_frame = 1;
488f131b 5663 stopped_by_random_signal = 0;
ddfe970e 5664 bpstat stop_chain = NULL;
488f131b 5665
edb3359d
DJ
5666 /* Hide inlined functions starting here, unless we just performed stepi or
5667 nexti. After stepi and nexti, always show the innermost frame (not any
5668 inline function call sites). */
16c381f0 5669 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 5670 {
00431a78
PA
5671 const address_space *aspace
5672 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
5673
5674 /* skip_inline_frames is expensive, so we avoid it if we can
5675 determine that the address is one where functions cannot have
5676 been inlined. This improves performance with inferiors that
5677 load a lot of shared libraries, because the solib event
5678 breakpoint is defined as the address of a function (i.e. not
5679 inline). Note that we have to check the previous PC as well
5680 as the current one to catch cases when we have just
5681 single-stepped off a breakpoint prior to reinstating it.
5682 Note that we're assuming that the code we single-step to is
5683 not inline, but that's not definitive: there's nothing
5684 preventing the event breakpoint function from containing
5685 inlined code, and the single-step ending up there. If the
5686 user had set a breakpoint on that inlined code, the missing
5687 skip_inline_frames call would break things. Fortunately
5688 that's an extremely unlikely scenario. */
f2ffa92b
PA
5689 if (!pc_at_non_inline_function (aspace,
5690 ecs->event_thread->suspend.stop_pc,
5691 &ecs->ws)
a210c238
MR
5692 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5693 && ecs->event_thread->control.trap_expected
5694 && pc_at_non_inline_function (aspace,
5695 ecs->event_thread->prev_pc,
09ac7c10 5696 &ecs->ws)))
1c5a993e 5697 {
f2ffa92b
PA
5698 stop_chain = build_bpstat_chain (aspace,
5699 ecs->event_thread->suspend.stop_pc,
5700 &ecs->ws);
00431a78 5701 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
5702
5703 /* Re-fetch current thread's frame in case that invalidated
5704 the frame cache. */
5705 frame = get_current_frame ();
5706 gdbarch = get_frame_arch (frame);
5707 }
0574c78f 5708 }
edb3359d 5709
a493e3e2 5710 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 5711 && ecs->event_thread->control.trap_expected
568d6575 5712 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 5713 && currently_stepping (ecs->event_thread))
3352ef37 5714 {
b50d7442 5715 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 5716 also on an instruction that needs to be stepped multiple
1777feb0 5717 times before it's been fully executing. E.g., architectures
3352ef37
AC
5718 with a delay slot. It needs to be stepped twice, once for
5719 the instruction and once for the delay slot. */
5720 int step_through_delay
568d6575 5721 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 5722
527159b7 5723 if (debug_infrun && step_through_delay)
8a9de0e4 5724 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
16c381f0
JK
5725 if (ecs->event_thread->control.step_range_end == 0
5726 && step_through_delay)
3352ef37
AC
5727 {
5728 /* The user issued a continue when stopped at a breakpoint.
5729 Set up for another trap and get out of here. */
4e1c45ea 5730 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5731 keep_going (ecs);
5732 return;
5733 }
5734 else if (step_through_delay)
5735 {
5736 /* The user issued a step when stopped at a breakpoint.
5737 Maybe we should stop, maybe we should not - the delay
5738 slot *might* correspond to a line of source. In any
ca67fcb8
VP
5739 case, don't decide that here, just set
5740 ecs->stepping_over_breakpoint, making sure we
5741 single-step again before breakpoints are re-inserted. */
4e1c45ea 5742 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5743 }
5744 }
5745
ab04a2af
TT
5746 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
5747 handles this event. */
5748 ecs->event_thread->control.stop_bpstat
a01bda52 5749 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5750 ecs->event_thread->suspend.stop_pc,
5751 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 5752
ab04a2af
TT
5753 /* Following in case break condition called a
5754 function. */
5755 stop_print_frame = 1;
73dd234f 5756
ab04a2af
TT
5757 /* This is where we handle "moribund" watchpoints. Unlike
5758 software breakpoints traps, hardware watchpoint traps are
5759 always distinguishable from random traps. If no high-level
5760 watchpoint is associated with the reported stop data address
5761 anymore, then the bpstat does not explain the signal ---
5762 simply make sure to ignore it if `stopped_by_watchpoint' is
5763 set. */
5764
5765 if (debug_infrun
5766 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 5767 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 5768 GDB_SIGNAL_TRAP)
ab04a2af
TT
5769 && stopped_by_watchpoint)
5770 fprintf_unfiltered (gdb_stdlog,
5771 "infrun: no user watchpoint explains "
5772 "watchpoint SIGTRAP, ignoring\n");
73dd234f 5773
bac7d97b 5774 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
5775 at one stage in the past included checks for an inferior
5776 function call's call dummy's return breakpoint. The original
5777 comment, that went with the test, read:
03cebad2 5778
ab04a2af
TT
5779 ``End of a stack dummy. Some systems (e.g. Sony news) give
5780 another signal besides SIGTRAP, so check here as well as
5781 above.''
73dd234f 5782
ab04a2af
TT
5783 If someone ever tries to get call dummys on a
5784 non-executable stack to work (where the target would stop
5785 with something like a SIGSEGV), then those tests might need
5786 to be re-instated. Given, however, that the tests were only
5787 enabled when momentary breakpoints were not being used, I
5788 suspect that it won't be the case.
488f131b 5789
ab04a2af
TT
5790 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
5791 be necessary for call dummies on a non-executable stack on
5792 SPARC. */
488f131b 5793
bac7d97b 5794 /* See if the breakpoints module can explain the signal. */
47591c29
PA
5795 random_signal
5796 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
5797 ecs->event_thread->suspend.stop_signal);
bac7d97b 5798
1cf4d951
PA
5799 /* Maybe this was a trap for a software breakpoint that has since
5800 been removed. */
5801 if (random_signal && target_stopped_by_sw_breakpoint ())
5802 {
f2ffa92b
PA
5803 if (program_breakpoint_here_p (gdbarch,
5804 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
5805 {
5806 struct regcache *regcache;
5807 int decr_pc;
5808
5809 /* Re-adjust PC to what the program would see if GDB was not
5810 debugging it. */
00431a78 5811 regcache = get_thread_regcache (ecs->event_thread);
527a273a 5812 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
5813 if (decr_pc != 0)
5814 {
07036511
TT
5815 gdb::optional<scoped_restore_tmpl<int>>
5816 restore_operation_disable;
1cf4d951
PA
5817
5818 if (record_full_is_used ())
07036511
TT
5819 restore_operation_disable.emplace
5820 (record_full_gdb_operation_disable_set ());
1cf4d951 5821
f2ffa92b
PA
5822 regcache_write_pc (regcache,
5823 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
5824 }
5825 }
5826 else
5827 {
5828 /* A delayed software breakpoint event. Ignore the trap. */
5829 if (debug_infrun)
5830 fprintf_unfiltered (gdb_stdlog,
5831 "infrun: delayed software breakpoint "
5832 "trap, ignoring\n");
5833 random_signal = 0;
5834 }
5835 }
5836
5837 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
5838 has since been removed. */
5839 if (random_signal && target_stopped_by_hw_breakpoint ())
5840 {
5841 /* A delayed hardware breakpoint event. Ignore the trap. */
5842 if (debug_infrun)
5843 fprintf_unfiltered (gdb_stdlog,
5844 "infrun: delayed hardware breakpoint/watchpoint "
5845 "trap, ignoring\n");
5846 random_signal = 0;
5847 }
5848
bac7d97b
PA
5849 /* If not, perhaps stepping/nexting can. */
5850 if (random_signal)
5851 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5852 && currently_stepping (ecs->event_thread));
ab04a2af 5853
2adfaa28
PA
5854 /* Perhaps the thread hit a single-step breakpoint of _another_
5855 thread. Single-step breakpoints are transparent to the
5856 breakpoints module. */
5857 if (random_signal)
5858 random_signal = !ecs->hit_singlestep_breakpoint;
5859
bac7d97b
PA
5860 /* No? Perhaps we got a moribund watchpoint. */
5861 if (random_signal)
5862 random_signal = !stopped_by_watchpoint;
ab04a2af 5863
c65d6b55
PA
5864 /* Always stop if the user explicitly requested this thread to
5865 remain stopped. */
5866 if (ecs->event_thread->stop_requested)
5867 {
5868 random_signal = 1;
5869 if (debug_infrun)
5870 fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
5871 }
5872
488f131b
JB
5873 /* For the program's own signals, act according to
5874 the signal handling tables. */
5875
ce12b012 5876 if (random_signal)
488f131b
JB
5877 {
5878 /* Signal not for debugging purposes. */
c9657e70 5879 struct inferior *inf = find_inferior_ptid (ecs->ptid);
c9737c08 5880 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 5881
527159b7 5882 if (debug_infrun)
c9737c08
PA
5883 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
5884 gdb_signal_to_symbol_string (stop_signal));
527159b7 5885
488f131b
JB
5886 stopped_by_random_signal = 1;
5887
252fbfc8
PA
5888 /* Always stop on signals if we're either just gaining control
5889 of the program, or the user explicitly requested this thread
5890 to remain stopped. */
d6b48e9c 5891 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 5892 || ecs->event_thread->stop_requested
24291992 5893 || (!inf->detaching
16c381f0 5894 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 5895 {
22bcd14b 5896 stop_waiting (ecs);
488f131b
JB
5897 return;
5898 }
b57bacec
PA
5899
5900 /* Notify observers the signal has "handle print" set. Note we
5901 returned early above if stopping; normal_stop handles the
5902 printing in that case. */
5903 if (signal_print[ecs->event_thread->suspend.stop_signal])
5904 {
5905 /* The signal table tells us to print about this signal. */
223ffa71 5906 target_terminal::ours_for_output ();
76727919 5907 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 5908 target_terminal::inferior ();
b57bacec 5909 }
488f131b
JB
5910
5911 /* Clear the signal if it should not be passed. */
16c381f0 5912 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 5913 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 5914
f2ffa92b 5915 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 5916 && ecs->event_thread->control.trap_expected
8358c15c 5917 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
5918 {
5919 /* We were just starting a new sequence, attempting to
5920 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 5921 Instead this signal arrives. This signal will take us out
68f53502
AC
5922 of the stepping range so GDB needs to remember to, when
5923 the signal handler returns, resume stepping off that
5924 breakpoint. */
5925 /* To simplify things, "continue" is forced to use the same
5926 code paths as single-step - set a breakpoint at the
5927 signal return address and then, once hit, step off that
5928 breakpoint. */
237fc4c9
PA
5929 if (debug_infrun)
5930 fprintf_unfiltered (gdb_stdlog,
5931 "infrun: signal arrived while stepping over "
5932 "breakpoint\n");
d3169d93 5933
2c03e5be 5934 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 5935 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5936 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5937 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
5938
5939 /* If we were nexting/stepping some other thread, switch to
5940 it, so that we don't continue it, losing control. */
5941 if (!switch_back_to_stepped_thread (ecs))
5942 keep_going (ecs);
9d799f85 5943 return;
68f53502 5944 }
9d799f85 5945
e5f8a7cc 5946 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
5947 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
5948 ecs->event_thread)
e5f8a7cc 5949 || ecs->event_thread->control.step_range_end == 1)
edb3359d 5950 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 5951 ecs->event_thread->control.step_stack_frame_id)
8358c15c 5952 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
5953 {
5954 /* The inferior is about to take a signal that will take it
5955 out of the single step range. Set a breakpoint at the
5956 current PC (which is presumably where the signal handler
5957 will eventually return) and then allow the inferior to
5958 run free.
5959
5960 Note that this is only needed for a signal delivered
5961 while in the single-step range. Nested signals aren't a
5962 problem as they eventually all return. */
237fc4c9
PA
5963 if (debug_infrun)
5964 fprintf_unfiltered (gdb_stdlog,
5965 "infrun: signal may take us out of "
5966 "single-step range\n");
5967
372316f1 5968 clear_step_over_info ();
2c03e5be 5969 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 5970 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5971 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5972 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
5973 keep_going (ecs);
5974 return;
d303a6c7 5975 }
9d799f85 5976
85102364 5977 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
5978 when either there's a nested signal, or when there's a
5979 pending signal enabled just as the signal handler returns
5980 (leaving the inferior at the step-resume-breakpoint without
5981 actually executing it). Either way continue until the
5982 breakpoint is really hit. */
c447ac0b
PA
5983
5984 if (!switch_back_to_stepped_thread (ecs))
5985 {
5986 if (debug_infrun)
5987 fprintf_unfiltered (gdb_stdlog,
5988 "infrun: random signal, keep going\n");
5989
5990 keep_going (ecs);
5991 }
5992 return;
488f131b 5993 }
94c57d6a
PA
5994
5995 process_event_stop_test (ecs);
5996}
5997
5998/* Come here when we've got some debug event / signal we can explain
5999 (IOW, not a random signal), and test whether it should cause a
6000 stop, or whether we should resume the inferior (transparently).
6001 E.g., could be a breakpoint whose condition evaluates false; we
6002 could be still stepping within the line; etc. */
6003
6004static void
6005process_event_stop_test (struct execution_control_state *ecs)
6006{
6007 struct symtab_and_line stop_pc_sal;
6008 struct frame_info *frame;
6009 struct gdbarch *gdbarch;
cdaa5b73
PA
6010 CORE_ADDR jmp_buf_pc;
6011 struct bpstat_what what;
94c57d6a 6012
cdaa5b73 6013 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6014
cdaa5b73
PA
6015 frame = get_current_frame ();
6016 gdbarch = get_frame_arch (frame);
fcf3daef 6017
cdaa5b73 6018 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6019
cdaa5b73
PA
6020 if (what.call_dummy)
6021 {
6022 stop_stack_dummy = what.call_dummy;
6023 }
186c406b 6024
243a9253
PA
6025 /* A few breakpoint types have callbacks associated (e.g.,
6026 bp_jit_event). Run them now. */
6027 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6028
cdaa5b73
PA
6029 /* If we hit an internal event that triggers symbol changes, the
6030 current frame will be invalidated within bpstat_what (e.g., if we
6031 hit an internal solib event). Re-fetch it. */
6032 frame = get_current_frame ();
6033 gdbarch = get_frame_arch (frame);
e2e4d78b 6034
cdaa5b73
PA
6035 switch (what.main_action)
6036 {
6037 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6038 /* If we hit the breakpoint at longjmp while stepping, we
6039 install a momentary breakpoint at the target of the
6040 jmp_buf. */
186c406b 6041
cdaa5b73
PA
6042 if (debug_infrun)
6043 fprintf_unfiltered (gdb_stdlog,
6044 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
186c406b 6045
cdaa5b73 6046 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6047
cdaa5b73
PA
6048 if (what.is_longjmp)
6049 {
6050 struct value *arg_value;
6051
6052 /* If we set the longjmp breakpoint via a SystemTap probe,
6053 then use it to extract the arguments. The destination PC
6054 is the third argument to the probe. */
6055 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6056 if (arg_value)
8fa0c4f8
AA
6057 {
6058 jmp_buf_pc = value_as_address (arg_value);
6059 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6060 }
cdaa5b73
PA
6061 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6062 || !gdbarch_get_longjmp_target (gdbarch,
6063 frame, &jmp_buf_pc))
e2e4d78b 6064 {
cdaa5b73
PA
6065 if (debug_infrun)
6066 fprintf_unfiltered (gdb_stdlog,
6067 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6068 "(!gdbarch_get_longjmp_target)\n");
6069 keep_going (ecs);
6070 return;
e2e4d78b 6071 }
e2e4d78b 6072
cdaa5b73
PA
6073 /* Insert a breakpoint at resume address. */
6074 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6075 }
6076 else
6077 check_exception_resume (ecs, frame);
6078 keep_going (ecs);
6079 return;
e81a37f7 6080
cdaa5b73
PA
6081 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6082 {
6083 struct frame_info *init_frame;
e81a37f7 6084
cdaa5b73 6085 /* There are several cases to consider.
c906108c 6086
cdaa5b73
PA
6087 1. The initiating frame no longer exists. In this case we
6088 must stop, because the exception or longjmp has gone too
6089 far.
2c03e5be 6090
cdaa5b73
PA
6091 2. The initiating frame exists, and is the same as the
6092 current frame. We stop, because the exception or longjmp
6093 has been caught.
2c03e5be 6094
cdaa5b73
PA
6095 3. The initiating frame exists and is different from the
6096 current frame. This means the exception or longjmp has
6097 been caught beneath the initiating frame, so keep going.
c906108c 6098
cdaa5b73
PA
6099 4. longjmp breakpoint has been placed just to protect
6100 against stale dummy frames and user is not interested in
6101 stopping around longjmps. */
c5aa993b 6102
cdaa5b73
PA
6103 if (debug_infrun)
6104 fprintf_unfiltered (gdb_stdlog,
6105 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
c5aa993b 6106
cdaa5b73
PA
6107 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6108 != NULL);
6109 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6110
cdaa5b73
PA
6111 if (what.is_longjmp)
6112 {
b67a2c6f 6113 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6114
cdaa5b73 6115 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6116 {
cdaa5b73
PA
6117 /* Case 4. */
6118 keep_going (ecs);
6119 return;
e5ef252a 6120 }
cdaa5b73 6121 }
c5aa993b 6122
cdaa5b73 6123 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6124
cdaa5b73
PA
6125 if (init_frame)
6126 {
6127 struct frame_id current_id
6128 = get_frame_id (get_current_frame ());
6129 if (frame_id_eq (current_id,
6130 ecs->event_thread->initiating_frame))
6131 {
6132 /* Case 2. Fall through. */
6133 }
6134 else
6135 {
6136 /* Case 3. */
6137 keep_going (ecs);
6138 return;
6139 }
68f53502 6140 }
488f131b 6141
cdaa5b73
PA
6142 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6143 exists. */
6144 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6145
bdc36728 6146 end_stepping_range (ecs);
cdaa5b73
PA
6147 }
6148 return;
e5ef252a 6149
cdaa5b73
PA
6150 case BPSTAT_WHAT_SINGLE:
6151 if (debug_infrun)
6152 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6153 ecs->event_thread->stepping_over_breakpoint = 1;
6154 /* Still need to check other stuff, at least the case where we
6155 are stepping and step out of the right range. */
6156 break;
e5ef252a 6157
cdaa5b73
PA
6158 case BPSTAT_WHAT_STEP_RESUME:
6159 if (debug_infrun)
6160 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
e5ef252a 6161
cdaa5b73
PA
6162 delete_step_resume_breakpoint (ecs->event_thread);
6163 if (ecs->event_thread->control.proceed_to_finish
6164 && execution_direction == EXEC_REVERSE)
6165 {
6166 struct thread_info *tp = ecs->event_thread;
6167
6168 /* We are finishing a function in reverse, and just hit the
6169 step-resume breakpoint at the start address of the
6170 function, and we're almost there -- just need to back up
6171 by one more single-step, which should take us back to the
6172 function call. */
6173 tp->control.step_range_start = tp->control.step_range_end = 1;
6174 keep_going (ecs);
e5ef252a 6175 return;
cdaa5b73
PA
6176 }
6177 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6178 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6179 && execution_direction == EXEC_REVERSE)
6180 {
6181 /* We are stepping over a function call in reverse, and just
6182 hit the step-resume breakpoint at the start address of
6183 the function. Go back to single-stepping, which should
6184 take us back to the function call. */
6185 ecs->event_thread->stepping_over_breakpoint = 1;
6186 keep_going (ecs);
6187 return;
6188 }
6189 break;
e5ef252a 6190
cdaa5b73
PA
6191 case BPSTAT_WHAT_STOP_NOISY:
6192 if (debug_infrun)
6193 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6194 stop_print_frame = 1;
e5ef252a 6195
99619bea
PA
6196 /* Assume the thread stopped for a breapoint. We'll still check
6197 whether a/the breakpoint is there when the thread is next
6198 resumed. */
6199 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6200
22bcd14b 6201 stop_waiting (ecs);
cdaa5b73 6202 return;
e5ef252a 6203
cdaa5b73
PA
6204 case BPSTAT_WHAT_STOP_SILENT:
6205 if (debug_infrun)
6206 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6207 stop_print_frame = 0;
e5ef252a 6208
99619bea
PA
6209 /* Assume the thread stopped for a breapoint. We'll still check
6210 whether a/the breakpoint is there when the thread is next
6211 resumed. */
6212 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6213 stop_waiting (ecs);
cdaa5b73
PA
6214 return;
6215
6216 case BPSTAT_WHAT_HP_STEP_RESUME:
6217 if (debug_infrun)
6218 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6219
6220 delete_step_resume_breakpoint (ecs->event_thread);
6221 if (ecs->event_thread->step_after_step_resume_breakpoint)
6222 {
6223 /* Back when the step-resume breakpoint was inserted, we
6224 were trying to single-step off a breakpoint. Go back to
6225 doing that. */
6226 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6227 ecs->event_thread->stepping_over_breakpoint = 1;
6228 keep_going (ecs);
6229 return;
e5ef252a 6230 }
cdaa5b73
PA
6231 break;
6232
6233 case BPSTAT_WHAT_KEEP_CHECKING:
6234 break;
e5ef252a 6235 }
c906108c 6236
af48d08f
PA
6237 /* If we stepped a permanent breakpoint and we had a high priority
6238 step-resume breakpoint for the address we stepped, but we didn't
6239 hit it, then we must have stepped into the signal handler. The
6240 step-resume was only necessary to catch the case of _not_
6241 stepping into the handler, so delete it, and fall through to
6242 checking whether the step finished. */
6243 if (ecs->event_thread->stepped_breakpoint)
6244 {
6245 struct breakpoint *sr_bp
6246 = ecs->event_thread->control.step_resume_breakpoint;
6247
8d707a12
PA
6248 if (sr_bp != NULL
6249 && sr_bp->loc->permanent
af48d08f
PA
6250 && sr_bp->type == bp_hp_step_resume
6251 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6252 {
6253 if (debug_infrun)
6254 fprintf_unfiltered (gdb_stdlog,
6255 "infrun: stepped permanent breakpoint, stopped in "
6256 "handler\n");
6257 delete_step_resume_breakpoint (ecs->event_thread);
6258 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6259 }
6260 }
6261
cdaa5b73
PA
6262 /* We come here if we hit a breakpoint but should not stop for it.
6263 Possibly we also were stepping and should stop for that. So fall
6264 through and test for stepping. But, if not stepping, do not
6265 stop. */
c906108c 6266
a7212384
UW
6267 /* In all-stop mode, if we're currently stepping but have stopped in
6268 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6269 if (switch_back_to_stepped_thread (ecs))
6270 return;
776f04fa 6271
8358c15c 6272 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6273 {
527159b7 6274 if (debug_infrun)
d3169d93
DJ
6275 fprintf_unfiltered (gdb_stdlog,
6276 "infrun: step-resume breakpoint is inserted\n");
527159b7 6277
488f131b
JB
6278 /* Having a step-resume breakpoint overrides anything
6279 else having to do with stepping commands until
6280 that breakpoint is reached. */
488f131b
JB
6281 keep_going (ecs);
6282 return;
6283 }
c5aa993b 6284
16c381f0 6285 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6286 {
527159b7 6287 if (debug_infrun)
8a9de0e4 6288 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
488f131b 6289 /* Likewise if we aren't even stepping. */
488f131b
JB
6290 keep_going (ecs);
6291 return;
6292 }
c5aa993b 6293
4b7703ad
JB
6294 /* Re-fetch current thread's frame in case the code above caused
6295 the frame cache to be re-initialized, making our FRAME variable
6296 a dangling pointer. */
6297 frame = get_current_frame ();
628fe4e4 6298 gdbarch = get_frame_arch (frame);
7e324e48 6299 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6300
488f131b 6301 /* If stepping through a line, keep going if still within it.
c906108c 6302
488f131b
JB
6303 Note that step_range_end is the address of the first instruction
6304 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6305 within it!
6306
6307 Note also that during reverse execution, we may be stepping
6308 through a function epilogue and therefore must detect when
6309 the current-frame changes in the middle of a line. */
6310
f2ffa92b
PA
6311 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6312 ecs->event_thread)
31410e84 6313 && (execution_direction != EXEC_REVERSE
388a8562 6314 || frame_id_eq (get_frame_id (frame),
16c381f0 6315 ecs->event_thread->control.step_frame_id)))
488f131b 6316 {
527159b7 6317 if (debug_infrun)
5af949e3
UW
6318 fprintf_unfiltered
6319 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
16c381f0
JK
6320 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6321 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6322
c1e36e3e
PA
6323 /* Tentatively re-enable range stepping; `resume' disables it if
6324 necessary (e.g., if we're stepping over a breakpoint or we
6325 have software watchpoints). */
6326 ecs->event_thread->control.may_range_step = 1;
6327
b2175913
MS
6328 /* When stepping backward, stop at beginning of line range
6329 (unless it's the function entry point, in which case
6330 keep going back to the call point). */
f2ffa92b 6331 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6332 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6333 && stop_pc != ecs->stop_func_start
6334 && execution_direction == EXEC_REVERSE)
bdc36728 6335 end_stepping_range (ecs);
b2175913
MS
6336 else
6337 keep_going (ecs);
6338
488f131b
JB
6339 return;
6340 }
c5aa993b 6341
488f131b 6342 /* We stepped out of the stepping range. */
c906108c 6343
488f131b 6344 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6345 loader dynamic symbol resolution code...
6346
6347 EXEC_FORWARD: we keep on single stepping until we exit the run
6348 time loader code and reach the callee's address.
6349
6350 EXEC_REVERSE: we've already executed the callee (backward), and
6351 the runtime loader code is handled just like any other
6352 undebuggable function call. Now we need only keep stepping
6353 backward through the trampoline code, and that's handled further
6354 down, so there is nothing for us to do here. */
6355
6356 if (execution_direction != EXEC_REVERSE
16c381f0 6357 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6358 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6359 {
4c8c40e6 6360 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6361 gdbarch_skip_solib_resolver (gdbarch,
6362 ecs->event_thread->suspend.stop_pc);
c906108c 6363
527159b7 6364 if (debug_infrun)
3e43a32a
MS
6365 fprintf_unfiltered (gdb_stdlog,
6366 "infrun: stepped into dynsym resolve code\n");
527159b7 6367
488f131b
JB
6368 if (pc_after_resolver)
6369 {
6370 /* Set up a step-resume breakpoint at the address
6371 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6372 symtab_and_line sr_sal;
488f131b 6373 sr_sal.pc = pc_after_resolver;
6c95b8df 6374 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6375
a6d9a66e
UW
6376 insert_step_resume_breakpoint_at_sal (gdbarch,
6377 sr_sal, null_frame_id);
c5aa993b 6378 }
c906108c 6379
488f131b
JB
6380 keep_going (ecs);
6381 return;
6382 }
c906108c 6383
1d509aa6
MM
6384 /* Step through an indirect branch thunk. */
6385 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6386 && gdbarch_in_indirect_branch_thunk (gdbarch,
6387 ecs->event_thread->suspend.stop_pc))
1d509aa6
MM
6388 {
6389 if (debug_infrun)
6390 fprintf_unfiltered (gdb_stdlog,
6391 "infrun: stepped into indirect branch thunk\n");
6392 keep_going (ecs);
6393 return;
6394 }
6395
16c381f0
JK
6396 if (ecs->event_thread->control.step_range_end != 1
6397 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6398 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6399 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6400 {
527159b7 6401 if (debug_infrun)
3e43a32a
MS
6402 fprintf_unfiltered (gdb_stdlog,
6403 "infrun: stepped into signal trampoline\n");
42edda50 6404 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6405 a signal trampoline (either by a signal being delivered or by
6406 the signal handler returning). Just single-step until the
6407 inferior leaves the trampoline (either by calling the handler
6408 or returning). */
488f131b
JB
6409 keep_going (ecs);
6410 return;
6411 }
c906108c 6412
14132e89
MR
6413 /* If we're in the return path from a shared library trampoline,
6414 we want to proceed through the trampoline when stepping. */
6415 /* macro/2012-04-25: This needs to come before the subroutine
6416 call check below as on some targets return trampolines look
6417 like subroutine calls (MIPS16 return thunks). */
6418 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6419 ecs->event_thread->suspend.stop_pc,
6420 ecs->stop_func_name)
14132e89
MR
6421 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6422 {
6423 /* Determine where this trampoline returns. */
f2ffa92b
PA
6424 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6425 CORE_ADDR real_stop_pc
6426 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89
MR
6427
6428 if (debug_infrun)
6429 fprintf_unfiltered (gdb_stdlog,
6430 "infrun: stepped into solib return tramp\n");
6431
6432 /* Only proceed through if we know where it's going. */
6433 if (real_stop_pc)
6434 {
6435 /* And put the step-breakpoint there and go until there. */
51abb421 6436 symtab_and_line sr_sal;
14132e89
MR
6437 sr_sal.pc = real_stop_pc;
6438 sr_sal.section = find_pc_overlay (sr_sal.pc);
6439 sr_sal.pspace = get_frame_program_space (frame);
6440
6441 /* Do not specify what the fp should be when we stop since
6442 on some machines the prologue is where the new fp value
6443 is established. */
6444 insert_step_resume_breakpoint_at_sal (gdbarch,
6445 sr_sal, null_frame_id);
6446
6447 /* Restart without fiddling with the step ranges or
6448 other state. */
6449 keep_going (ecs);
6450 return;
6451 }
6452 }
6453
c17eaafe
DJ
6454 /* Check for subroutine calls. The check for the current frame
6455 equalling the step ID is not necessary - the check of the
6456 previous frame's ID is sufficient - but it is a common case and
6457 cheaper than checking the previous frame's ID.
14e60db5
DJ
6458
6459 NOTE: frame_id_eq will never report two invalid frame IDs as
6460 being equal, so to get into this block, both the current and
6461 previous frame must have valid frame IDs. */
005ca36a
JB
6462 /* The outer_frame_id check is a heuristic to detect stepping
6463 through startup code. If we step over an instruction which
6464 sets the stack pointer from an invalid value to a valid value,
6465 we may detect that as a subroutine call from the mythical
6466 "outermost" function. This could be fixed by marking
6467 outermost frames as !stack_p,code_p,special_p. Then the
6468 initial outermost frame, before sp was valid, would
ce6cca6d 6469 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6470 for more. */
edb3359d 6471 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6472 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6473 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6474 ecs->event_thread->control.step_stack_frame_id)
6475 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6476 outer_frame_id)
885eeb5b 6477 || (ecs->event_thread->control.step_start_function
f2ffa92b 6478 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6479 {
f2ffa92b 6480 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6481 CORE_ADDR real_stop_pc;
8fb3e588 6482
527159b7 6483 if (debug_infrun)
8a9de0e4 6484 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
527159b7 6485
b7a084be 6486 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6487 {
6488 /* I presume that step_over_calls is only 0 when we're
6489 supposed to be stepping at the assembly language level
6490 ("stepi"). Just stop. */
388a8562 6491 /* And this works the same backward as frontward. MVS */
bdc36728 6492 end_stepping_range (ecs);
95918acb
AC
6493 return;
6494 }
8fb3e588 6495
388a8562
MS
6496 /* Reverse stepping through solib trampolines. */
6497
6498 if (execution_direction == EXEC_REVERSE
16c381f0 6499 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6500 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6501 || (ecs->stop_func_start == 0
6502 && in_solib_dynsym_resolve_code (stop_pc))))
6503 {
6504 /* Any solib trampoline code can be handled in reverse
6505 by simply continuing to single-step. We have already
6506 executed the solib function (backwards), and a few
6507 steps will take us back through the trampoline to the
6508 caller. */
6509 keep_going (ecs);
6510 return;
6511 }
6512
16c381f0 6513 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6514 {
b2175913
MS
6515 /* We're doing a "next".
6516
6517 Normal (forward) execution: set a breakpoint at the
6518 callee's return address (the address at which the caller
6519 will resume).
6520
6521 Reverse (backward) execution. set the step-resume
6522 breakpoint at the start of the function that we just
6523 stepped into (backwards), and continue to there. When we
6130d0b7 6524 get there, we'll need to single-step back to the caller. */
b2175913
MS
6525
6526 if (execution_direction == EXEC_REVERSE)
6527 {
acf9414f
JK
6528 /* If we're already at the start of the function, we've either
6529 just stepped backward into a single instruction function,
6530 or stepped back out of a signal handler to the first instruction
6531 of the function. Just keep going, which will single-step back
6532 to the caller. */
58c48e72 6533 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6534 {
acf9414f 6535 /* Normal function call return (static or dynamic). */
51abb421 6536 symtab_and_line sr_sal;
acf9414f
JK
6537 sr_sal.pc = ecs->stop_func_start;
6538 sr_sal.pspace = get_frame_program_space (frame);
6539 insert_step_resume_breakpoint_at_sal (gdbarch,
6540 sr_sal, null_frame_id);
6541 }
b2175913
MS
6542 }
6543 else
568d6575 6544 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6545
8567c30f
AC
6546 keep_going (ecs);
6547 return;
6548 }
a53c66de 6549
95918acb 6550 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6551 calling routine and the real function), locate the real
6552 function. That's what tells us (a) whether we want to step
6553 into it at all, and (b) what prologue we want to run to the
6554 end of, if we do step into it. */
568d6575 6555 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6556 if (real_stop_pc == 0)
568d6575 6557 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6558 if (real_stop_pc != 0)
6559 ecs->stop_func_start = real_stop_pc;
8fb3e588 6560
db5f024e 6561 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6562 {
51abb421 6563 symtab_and_line sr_sal;
1b2bfbb9 6564 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6565 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6566
a6d9a66e
UW
6567 insert_step_resume_breakpoint_at_sal (gdbarch,
6568 sr_sal, null_frame_id);
8fb3e588
AC
6569 keep_going (ecs);
6570 return;
1b2bfbb9
RC
6571 }
6572
95918acb 6573 /* If we have line number information for the function we are
1bfeeb0f
JL
6574 thinking of stepping into and the function isn't on the skip
6575 list, step into it.
95918acb 6576
8fb3e588
AC
6577 If there are several symtabs at that PC (e.g. with include
6578 files), just want to know whether *any* of them have line
6579 numbers. find_pc_line handles this. */
95918acb
AC
6580 {
6581 struct symtab_and_line tmp_sal;
8fb3e588 6582
95918acb 6583 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6584 if (tmp_sal.line != 0
85817405 6585 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6586 tmp_sal)
6587 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6588 {
b2175913 6589 if (execution_direction == EXEC_REVERSE)
568d6575 6590 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6591 else
568d6575 6592 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6593 return;
6594 }
6595 }
6596
6597 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6598 set, we stop the step so that the user has a chance to switch
6599 in assembly mode. */
16c381f0 6600 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6601 && step_stop_if_no_debug)
95918acb 6602 {
bdc36728 6603 end_stepping_range (ecs);
95918acb
AC
6604 return;
6605 }
6606
b2175913
MS
6607 if (execution_direction == EXEC_REVERSE)
6608 {
acf9414f
JK
6609 /* If we're already at the start of the function, we've either just
6610 stepped backward into a single instruction function without line
6611 number info, or stepped back out of a signal handler to the first
6612 instruction of the function without line number info. Just keep
6613 going, which will single-step back to the caller. */
6614 if (ecs->stop_func_start != stop_pc)
6615 {
6616 /* Set a breakpoint at callee's start address.
6617 From there we can step once and be back in the caller. */
51abb421 6618 symtab_and_line sr_sal;
acf9414f
JK
6619 sr_sal.pc = ecs->stop_func_start;
6620 sr_sal.pspace = get_frame_program_space (frame);
6621 insert_step_resume_breakpoint_at_sal (gdbarch,
6622 sr_sal, null_frame_id);
6623 }
b2175913
MS
6624 }
6625 else
6626 /* Set a breakpoint at callee's return address (the address
6627 at which the caller will resume). */
568d6575 6628 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6629
95918acb 6630 keep_going (ecs);
488f131b 6631 return;
488f131b 6632 }
c906108c 6633
fdd654f3
MS
6634 /* Reverse stepping through solib trampolines. */
6635
6636 if (execution_direction == EXEC_REVERSE
16c381f0 6637 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 6638 {
f2ffa92b
PA
6639 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6640
fdd654f3
MS
6641 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6642 || (ecs->stop_func_start == 0
6643 && in_solib_dynsym_resolve_code (stop_pc)))
6644 {
6645 /* Any solib trampoline code can be handled in reverse
6646 by simply continuing to single-step. We have already
6647 executed the solib function (backwards), and a few
6648 steps will take us back through the trampoline to the
6649 caller. */
6650 keep_going (ecs);
6651 return;
6652 }
6653 else if (in_solib_dynsym_resolve_code (stop_pc))
6654 {
6655 /* Stepped backward into the solib dynsym resolver.
6656 Set a breakpoint at its start and continue, then
6657 one more step will take us out. */
51abb421 6658 symtab_and_line sr_sal;
fdd654f3 6659 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6660 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6661 insert_step_resume_breakpoint_at_sal (gdbarch,
6662 sr_sal, null_frame_id);
6663 keep_going (ecs);
6664 return;
6665 }
6666 }
6667
f2ffa92b 6668 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 6669
1b2bfbb9
RC
6670 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6671 the trampoline processing logic, however, there are some trampolines
6672 that have no names, so we should do trampoline handling first. */
16c381f0 6673 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6674 && ecs->stop_func_name == NULL
2afb61aa 6675 && stop_pc_sal.line == 0)
1b2bfbb9 6676 {
527159b7 6677 if (debug_infrun)
3e43a32a
MS
6678 fprintf_unfiltered (gdb_stdlog,
6679 "infrun: stepped into undebuggable function\n");
527159b7 6680
1b2bfbb9 6681 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
6682 undebuggable function (where there is no debugging information
6683 and no line number corresponding to the address where the
1b2bfbb9
RC
6684 inferior stopped). Since we want to skip this kind of code,
6685 we keep going until the inferior returns from this
14e60db5
DJ
6686 function - unless the user has asked us not to (via
6687 set step-mode) or we no longer know how to get back
6688 to the call site. */
6689 if (step_stop_if_no_debug
c7ce8faa 6690 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6691 {
6692 /* If we have no line number and the step-stop-if-no-debug
6693 is set, we stop the step so that the user has a chance to
6694 switch in assembly mode. */
bdc36728 6695 end_stepping_range (ecs);
1b2bfbb9
RC
6696 return;
6697 }
6698 else
6699 {
6700 /* Set a breakpoint at callee's return address (the address
6701 at which the caller will resume). */
568d6575 6702 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6703 keep_going (ecs);
6704 return;
6705 }
6706 }
6707
16c381f0 6708 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6709 {
6710 /* It is stepi or nexti. We always want to stop stepping after
6711 one instruction. */
527159b7 6712 if (debug_infrun)
8a9de0e4 6713 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
bdc36728 6714 end_stepping_range (ecs);
1b2bfbb9
RC
6715 return;
6716 }
6717
2afb61aa 6718 if (stop_pc_sal.line == 0)
488f131b
JB
6719 {
6720 /* We have no line number information. That means to stop
6721 stepping (does this always happen right after one instruction,
6722 when we do "s" in a function with no line numbers,
6723 or can this happen as a result of a return or longjmp?). */
527159b7 6724 if (debug_infrun)
8a9de0e4 6725 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
bdc36728 6726 end_stepping_range (ecs);
488f131b
JB
6727 return;
6728 }
c906108c 6729
edb3359d
DJ
6730 /* Look for "calls" to inlined functions, part one. If the inline
6731 frame machinery detected some skipped call sites, we have entered
6732 a new inline function. */
6733
6734 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6735 ecs->event_thread->control.step_frame_id)
00431a78 6736 && inline_skipped_frames (ecs->event_thread))
edb3359d 6737 {
edb3359d
DJ
6738 if (debug_infrun)
6739 fprintf_unfiltered (gdb_stdlog,
6740 "infrun: stepped into inlined function\n");
6741
51abb421 6742 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 6743
16c381f0 6744 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
6745 {
6746 /* For "step", we're going to stop. But if the call site
6747 for this inlined function is on the same source line as
6748 we were previously stepping, go down into the function
6749 first. Otherwise stop at the call site. */
6750
6751 if (call_sal.line == ecs->event_thread->current_line
6752 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
6753 {
6754 step_into_inline_frame (ecs->event_thread);
6755 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
6756 {
6757 keep_going (ecs);
6758 return;
6759 }
6760 }
edb3359d 6761
bdc36728 6762 end_stepping_range (ecs);
edb3359d
DJ
6763 return;
6764 }
6765 else
6766 {
6767 /* For "next", we should stop at the call site if it is on a
6768 different source line. Otherwise continue through the
6769 inlined function. */
6770 if (call_sal.line == ecs->event_thread->current_line
6771 && call_sal.symtab == ecs->event_thread->current_symtab)
6772 keep_going (ecs);
6773 else
bdc36728 6774 end_stepping_range (ecs);
edb3359d
DJ
6775 return;
6776 }
6777 }
6778
6779 /* Look for "calls" to inlined functions, part two. If we are still
6780 in the same real function we were stepping through, but we have
6781 to go further up to find the exact frame ID, we are stepping
6782 through a more inlined call beyond its call site. */
6783
6784 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
6785 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6786 ecs->event_thread->control.step_frame_id)
edb3359d 6787 && stepped_in_from (get_current_frame (),
16c381f0 6788 ecs->event_thread->control.step_frame_id))
edb3359d
DJ
6789 {
6790 if (debug_infrun)
6791 fprintf_unfiltered (gdb_stdlog,
6792 "infrun: stepping through inlined function\n");
6793
4a4c04f1
BE
6794 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
6795 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
6796 keep_going (ecs);
6797 else
bdc36728 6798 end_stepping_range (ecs);
edb3359d
DJ
6799 return;
6800 }
6801
f2ffa92b 6802 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
6803 && (ecs->event_thread->current_line != stop_pc_sal.line
6804 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b
JB
6805 {
6806 /* We are at the start of a different line. So stop. Note that
6807 we don't stop if we step into the middle of a different line.
6808 That is said to make things like for (;;) statements work
6809 better. */
527159b7 6810 if (debug_infrun)
3e43a32a
MS
6811 fprintf_unfiltered (gdb_stdlog,
6812 "infrun: stepped to a different line\n");
bdc36728 6813 end_stepping_range (ecs);
488f131b
JB
6814 return;
6815 }
c906108c 6816
488f131b 6817 /* We aren't done stepping.
c906108c 6818
488f131b
JB
6819 Optimize by setting the stepping range to the line.
6820 (We might not be in the original line, but if we entered a
6821 new line in mid-statement, we continue stepping. This makes
6822 things like for(;;) statements work better.) */
c906108c 6823
16c381f0
JK
6824 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
6825 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 6826 ecs->event_thread->control.may_range_step = 1;
edb3359d 6827 set_step_info (frame, stop_pc_sal);
488f131b 6828
527159b7 6829 if (debug_infrun)
8a9de0e4 6830 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
488f131b 6831 keep_going (ecs);
104c1213
JM
6832}
6833
c447ac0b
PA
6834/* In all-stop mode, if we're currently stepping but have stopped in
6835 some other thread, we may need to switch back to the stepped
6836 thread. Returns true we set the inferior running, false if we left
6837 it stopped (and the event needs further processing). */
6838
6839static int
6840switch_back_to_stepped_thread (struct execution_control_state *ecs)
6841{
fbea99ea 6842 if (!target_is_non_stop_p ())
c447ac0b 6843 {
99619bea
PA
6844 struct thread_info *stepping_thread;
6845
6846 /* If any thread is blocked on some internal breakpoint, and we
6847 simply need to step over that breakpoint to get it going
6848 again, do that first. */
6849
6850 /* However, if we see an event for the stepping thread, then we
6851 know all other threads have been moved past their breakpoints
6852 already. Let the caller check whether the step is finished,
6853 etc., before deciding to move it past a breakpoint. */
6854 if (ecs->event_thread->control.step_range_end != 0)
6855 return 0;
6856
6857 /* Check if the current thread is blocked on an incomplete
6858 step-over, interrupted by a random signal. */
6859 if (ecs->event_thread->control.trap_expected
6860 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 6861 {
99619bea
PA
6862 if (debug_infrun)
6863 {
6864 fprintf_unfiltered (gdb_stdlog,
6865 "infrun: need to finish step-over of [%s]\n",
a068643d 6866 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea
PA
6867 }
6868 keep_going (ecs);
6869 return 1;
6870 }
2adfaa28 6871
99619bea
PA
6872 /* Check if the current thread is blocked by a single-step
6873 breakpoint of another thread. */
6874 if (ecs->hit_singlestep_breakpoint)
6875 {
6876 if (debug_infrun)
6877 {
6878 fprintf_unfiltered (gdb_stdlog,
6879 "infrun: need to step [%s] over single-step "
6880 "breakpoint\n",
a068643d 6881 target_pid_to_str (ecs->ptid).c_str ());
99619bea
PA
6882 }
6883 keep_going (ecs);
6884 return 1;
6885 }
6886
4d9d9d04
PA
6887 /* If this thread needs yet another step-over (e.g., stepping
6888 through a delay slot), do it first before moving on to
6889 another thread. */
6890 if (thread_still_needs_step_over (ecs->event_thread))
6891 {
6892 if (debug_infrun)
6893 {
6894 fprintf_unfiltered (gdb_stdlog,
6895 "infrun: thread [%s] still needs step-over\n",
a068643d 6896 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04
PA
6897 }
6898 keep_going (ecs);
6899 return 1;
6900 }
70509625 6901
483805cf
PA
6902 /* If scheduler locking applies even if not stepping, there's no
6903 need to walk over threads. Above we've checked whether the
6904 current thread is stepping. If some other thread not the
6905 event thread is stepping, then it must be that scheduler
6906 locking is not in effect. */
856e7dd6 6907 if (schedlock_applies (ecs->event_thread))
483805cf
PA
6908 return 0;
6909
4d9d9d04
PA
6910 /* Otherwise, we no longer expect a trap in the current thread.
6911 Clear the trap_expected flag before switching back -- this is
6912 what keep_going does as well, if we call it. */
6913 ecs->event_thread->control.trap_expected = 0;
6914
6915 /* Likewise, clear the signal if it should not be passed. */
6916 if (!signal_program[ecs->event_thread->suspend.stop_signal])
6917 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6918
6919 /* Do all pending step-overs before actually proceeding with
483805cf 6920 step/next/etc. */
4d9d9d04
PA
6921 if (start_step_over ())
6922 {
6923 prepare_to_wait (ecs);
6924 return 1;
6925 }
6926
6927 /* Look for the stepping/nexting thread. */
483805cf 6928 stepping_thread = NULL;
4d9d9d04 6929
08036331 6930 for (thread_info *tp : all_non_exited_threads ())
483805cf 6931 {
f3f8ece4
PA
6932 switch_to_thread_no_regs (tp);
6933
fbea99ea
PA
6934 /* Ignore threads of processes the caller is not
6935 resuming. */
483805cf 6936 if (!sched_multi
e99b03dc 6937 && tp->ptid.pid () != ecs->ptid.pid ())
483805cf
PA
6938 continue;
6939
6940 /* When stepping over a breakpoint, we lock all threads
6941 except the one that needs to move past the breakpoint.
6942 If a non-event thread has this set, the "incomplete
6943 step-over" check above should have caught it earlier. */
372316f1
PA
6944 if (tp->control.trap_expected)
6945 {
6946 internal_error (__FILE__, __LINE__,
6947 "[%s] has inconsistent state: "
6948 "trap_expected=%d\n",
a068643d 6949 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
6950 tp->control.trap_expected);
6951 }
483805cf
PA
6952
6953 /* Did we find the stepping thread? */
6954 if (tp->control.step_range_end)
6955 {
6956 /* Yep. There should only one though. */
6957 gdb_assert (stepping_thread == NULL);
6958
6959 /* The event thread is handled at the top, before we
6960 enter this loop. */
6961 gdb_assert (tp != ecs->event_thread);
6962
6963 /* If some thread other than the event thread is
6964 stepping, then scheduler locking can't be in effect,
6965 otherwise we wouldn't have resumed the current event
6966 thread in the first place. */
856e7dd6 6967 gdb_assert (!schedlock_applies (tp));
483805cf
PA
6968
6969 stepping_thread = tp;
6970 }
99619bea
PA
6971 }
6972
483805cf 6973 if (stepping_thread != NULL)
99619bea 6974 {
c447ac0b
PA
6975 if (debug_infrun)
6976 fprintf_unfiltered (gdb_stdlog,
6977 "infrun: switching back to stepped thread\n");
6978
2ac7589c
PA
6979 if (keep_going_stepped_thread (stepping_thread))
6980 {
6981 prepare_to_wait (ecs);
6982 return 1;
6983 }
6984 }
f3f8ece4
PA
6985
6986 switch_to_thread (ecs->event_thread);
2ac7589c 6987 }
2adfaa28 6988
2ac7589c
PA
6989 return 0;
6990}
2adfaa28 6991
2ac7589c
PA
6992/* Set a previously stepped thread back to stepping. Returns true on
6993 success, false if the resume is not possible (e.g., the thread
6994 vanished). */
6995
6996static int
6997keep_going_stepped_thread (struct thread_info *tp)
6998{
6999 struct frame_info *frame;
2ac7589c
PA
7000 struct execution_control_state ecss;
7001 struct execution_control_state *ecs = &ecss;
2adfaa28 7002
2ac7589c
PA
7003 /* If the stepping thread exited, then don't try to switch back and
7004 resume it, which could fail in several different ways depending
7005 on the target. Instead, just keep going.
2adfaa28 7006
2ac7589c
PA
7007 We can find a stepping dead thread in the thread list in two
7008 cases:
2adfaa28 7009
2ac7589c
PA
7010 - The target supports thread exit events, and when the target
7011 tries to delete the thread from the thread list, inferior_ptid
7012 pointed at the exiting thread. In such case, calling
7013 delete_thread does not really remove the thread from the list;
7014 instead, the thread is left listed, with 'exited' state.
64ce06e4 7015
2ac7589c
PA
7016 - The target's debug interface does not support thread exit
7017 events, and so we have no idea whatsoever if the previously
7018 stepping thread is still alive. For that reason, we need to
7019 synchronously query the target now. */
2adfaa28 7020
00431a78 7021 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c
PA
7022 {
7023 if (debug_infrun)
7024 fprintf_unfiltered (gdb_stdlog,
7025 "infrun: not resuming previously "
7026 "stepped thread, it has vanished\n");
7027
00431a78 7028 delete_thread (tp);
2ac7589c 7029 return 0;
c447ac0b 7030 }
2ac7589c
PA
7031
7032 if (debug_infrun)
7033 fprintf_unfiltered (gdb_stdlog,
7034 "infrun: resuming previously stepped thread\n");
7035
7036 reset_ecs (ecs, tp);
00431a78 7037 switch_to_thread (tp);
2ac7589c 7038
f2ffa92b 7039 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7040 frame = get_current_frame ();
2ac7589c
PA
7041
7042 /* If the PC of the thread we were trying to single-step has
7043 changed, then that thread has trapped or been signaled, but the
7044 event has not been reported to GDB yet. Re-poll the target
7045 looking for this particular thread's event (i.e. temporarily
7046 enable schedlock) by:
7047
7048 - setting a break at the current PC
7049 - resuming that particular thread, only (by setting trap
7050 expected)
7051
7052 This prevents us continuously moving the single-step breakpoint
7053 forward, one instruction at a time, overstepping. */
7054
f2ffa92b 7055 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7056 {
7057 ptid_t resume_ptid;
7058
7059 if (debug_infrun)
7060 fprintf_unfiltered (gdb_stdlog,
7061 "infrun: expected thread advanced also (%s -> %s)\n",
7062 paddress (target_gdbarch (), tp->prev_pc),
f2ffa92b 7063 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7064
7065 /* Clear the info of the previous step-over, as it's no longer
7066 valid (if the thread was trying to step over a breakpoint, it
7067 has already succeeded). It's what keep_going would do too,
7068 if we called it. Do this before trying to insert the sss
7069 breakpoint, otherwise if we were previously trying to step
7070 over this exact address in another thread, the breakpoint is
7071 skipped. */
7072 clear_step_over_info ();
7073 tp->control.trap_expected = 0;
7074
7075 insert_single_step_breakpoint (get_frame_arch (frame),
7076 get_frame_address_space (frame),
f2ffa92b 7077 tp->suspend.stop_pc);
2ac7589c 7078
372316f1 7079 tp->resumed = 1;
fbea99ea 7080 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7081 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7082 }
7083 else
7084 {
7085 if (debug_infrun)
7086 fprintf_unfiltered (gdb_stdlog,
7087 "infrun: expected thread still hasn't advanced\n");
7088
7089 keep_going_pass_signal (ecs);
7090 }
7091 return 1;
c447ac0b
PA
7092}
7093
8b061563
PA
7094/* Is thread TP in the middle of (software or hardware)
7095 single-stepping? (Note the result of this function must never be
7096 passed directly as target_resume's STEP parameter.) */
104c1213 7097
a289b8f6 7098static int
b3444185 7099currently_stepping (struct thread_info *tp)
a7212384 7100{
8358c15c
JK
7101 return ((tp->control.step_range_end
7102 && tp->control.step_resume_breakpoint == NULL)
7103 || tp->control.trap_expected
af48d08f 7104 || tp->stepped_breakpoint
8358c15c 7105 || bpstat_should_step ());
a7212384
UW
7106}
7107
b2175913
MS
7108/* Inferior has stepped into a subroutine call with source code that
7109 we should not step over. Do step to the first line of code in
7110 it. */
c2c6d25f
JM
7111
7112static void
568d6575
UW
7113handle_step_into_function (struct gdbarch *gdbarch,
7114 struct execution_control_state *ecs)
c2c6d25f 7115{
7e324e48
GB
7116 fill_in_stop_func (gdbarch, ecs);
7117
f2ffa92b
PA
7118 compunit_symtab *cust
7119 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7120 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7121 ecs->stop_func_start
7122 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7123
51abb421 7124 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7125 /* Use the step_resume_break to step until the end of the prologue,
7126 even if that involves jumps (as it seems to on the vax under
7127 4.2). */
7128 /* If the prologue ends in the middle of a source line, continue to
7129 the end of that source line (if it is still within the function).
7130 Otherwise, just go to end of prologue. */
2afb61aa
PA
7131 if (stop_func_sal.end
7132 && stop_func_sal.pc != ecs->stop_func_start
7133 && stop_func_sal.end < ecs->stop_func_end)
7134 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7135
2dbd5e30
KB
7136 /* Architectures which require breakpoint adjustment might not be able
7137 to place a breakpoint at the computed address. If so, the test
7138 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7139 ecs->stop_func_start to an address at which a breakpoint may be
7140 legitimately placed.
8fb3e588 7141
2dbd5e30
KB
7142 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7143 made, GDB will enter an infinite loop when stepping through
7144 optimized code consisting of VLIW instructions which contain
7145 subinstructions corresponding to different source lines. On
7146 FR-V, it's not permitted to place a breakpoint on any but the
7147 first subinstruction of a VLIW instruction. When a breakpoint is
7148 set, GDB will adjust the breakpoint address to the beginning of
7149 the VLIW instruction. Thus, we need to make the corresponding
7150 adjustment here when computing the stop address. */
8fb3e588 7151
568d6575 7152 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7153 {
7154 ecs->stop_func_start
568d6575 7155 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7156 ecs->stop_func_start);
2dbd5e30
KB
7157 }
7158
f2ffa92b 7159 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7160 {
7161 /* We are already there: stop now. */
bdc36728 7162 end_stepping_range (ecs);
c2c6d25f
JM
7163 return;
7164 }
7165 else
7166 {
7167 /* Put the step-breakpoint there and go until there. */
51abb421 7168 symtab_and_line sr_sal;
c2c6d25f
JM
7169 sr_sal.pc = ecs->stop_func_start;
7170 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7171 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7172
c2c6d25f 7173 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7174 some machines the prologue is where the new fp value is
7175 established. */
a6d9a66e 7176 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7177
7178 /* And make sure stepping stops right away then. */
16c381f0
JK
7179 ecs->event_thread->control.step_range_end
7180 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7181 }
7182 keep_going (ecs);
7183}
d4f3574e 7184
b2175913
MS
7185/* Inferior has stepped backward into a subroutine call with source
7186 code that we should not step over. Do step to the beginning of the
7187 last line of code in it. */
7188
7189static void
568d6575
UW
7190handle_step_into_function_backward (struct gdbarch *gdbarch,
7191 struct execution_control_state *ecs)
b2175913 7192{
43f3e411 7193 struct compunit_symtab *cust;
167e4384 7194 struct symtab_and_line stop_func_sal;
b2175913 7195
7e324e48
GB
7196 fill_in_stop_func (gdbarch, ecs);
7197
f2ffa92b 7198 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7199 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7200 ecs->stop_func_start
7201 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7202
f2ffa92b 7203 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7204
7205 /* OK, we're just going to keep stepping here. */
f2ffa92b 7206 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7207 {
7208 /* We're there already. Just stop stepping now. */
bdc36728 7209 end_stepping_range (ecs);
b2175913
MS
7210 }
7211 else
7212 {
7213 /* Else just reset the step range and keep going.
7214 No step-resume breakpoint, they don't work for
7215 epilogues, which can have multiple entry paths. */
16c381f0
JK
7216 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7217 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7218 keep_going (ecs);
7219 }
7220 return;
7221}
7222
d3169d93 7223/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7224 This is used to both functions and to skip over code. */
7225
7226static void
2c03e5be
PA
7227insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7228 struct symtab_and_line sr_sal,
7229 struct frame_id sr_id,
7230 enum bptype sr_type)
44cbf7b5 7231{
611c83ae
PA
7232 /* There should never be more than one step-resume or longjmp-resume
7233 breakpoint per thread, so we should never be setting a new
44cbf7b5 7234 step_resume_breakpoint when one is already active. */
8358c15c 7235 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7236 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93
DJ
7237
7238 if (debug_infrun)
7239 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7240 "infrun: inserting step-resume breakpoint at %s\n",
7241 paddress (gdbarch, sr_sal.pc));
d3169d93 7242
8358c15c 7243 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7244 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7245}
7246
9da8c2a0 7247void
2c03e5be
PA
7248insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7249 struct symtab_and_line sr_sal,
7250 struct frame_id sr_id)
7251{
7252 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7253 sr_sal, sr_id,
7254 bp_step_resume);
44cbf7b5 7255}
7ce450bd 7256
2c03e5be
PA
7257/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7258 This is used to skip a potential signal handler.
7ce450bd 7259
14e60db5
DJ
7260 This is called with the interrupted function's frame. The signal
7261 handler, when it returns, will resume the interrupted function at
7262 RETURN_FRAME.pc. */
d303a6c7
AC
7263
7264static void
2c03e5be 7265insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7266{
f4c1edd8 7267 gdb_assert (return_frame != NULL);
d303a6c7 7268
51abb421
PA
7269 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7270
7271 symtab_and_line sr_sal;
568d6575 7272 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7273 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7274 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7275
2c03e5be
PA
7276 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7277 get_stack_frame_id (return_frame),
7278 bp_hp_step_resume);
d303a6c7
AC
7279}
7280
2c03e5be
PA
7281/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7282 is used to skip a function after stepping into it (for "next" or if
7283 the called function has no debugging information).
14e60db5
DJ
7284
7285 The current function has almost always been reached by single
7286 stepping a call or return instruction. NEXT_FRAME belongs to the
7287 current function, and the breakpoint will be set at the caller's
7288 resume address.
7289
7290 This is a separate function rather than reusing
2c03e5be 7291 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7292 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7293 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7294
7295static void
7296insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7297{
14e60db5
DJ
7298 /* We shouldn't have gotten here if we don't know where the call site
7299 is. */
c7ce8faa 7300 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7301
51abb421 7302 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7303
51abb421 7304 symtab_and_line sr_sal;
c7ce8faa
DJ
7305 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7306 frame_unwind_caller_pc (next_frame));
14e60db5 7307 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7308 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7309
a6d9a66e 7310 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7311 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7312}
7313
611c83ae
PA
7314/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7315 new breakpoint at the target of a jmp_buf. The handling of
7316 longjmp-resume uses the same mechanisms used for handling
7317 "step-resume" breakpoints. */
7318
7319static void
a6d9a66e 7320insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7321{
e81a37f7
TT
7322 /* There should never be more than one longjmp-resume breakpoint per
7323 thread, so we should never be setting a new
611c83ae 7324 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7325 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae
PA
7326
7327 if (debug_infrun)
7328 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7329 "infrun: inserting longjmp-resume breakpoint at %s\n",
7330 paddress (gdbarch, pc));
611c83ae 7331
e81a37f7 7332 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7333 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7334}
7335
186c406b
TT
7336/* Insert an exception resume breakpoint. TP is the thread throwing
7337 the exception. The block B is the block of the unwinder debug hook
7338 function. FRAME is the frame corresponding to the call to this
7339 function. SYM is the symbol of the function argument holding the
7340 target PC of the exception. */
7341
7342static void
7343insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7344 const struct block *b,
186c406b
TT
7345 struct frame_info *frame,
7346 struct symbol *sym)
7347{
a70b8144 7348 try
186c406b 7349 {
63e43d3a 7350 struct block_symbol vsym;
186c406b
TT
7351 struct value *value;
7352 CORE_ADDR handler;
7353 struct breakpoint *bp;
7354
987012b8 7355 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7356 b, VAR_DOMAIN);
63e43d3a 7357 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7358 /* If the value was optimized out, revert to the old behavior. */
7359 if (! value_optimized_out (value))
7360 {
7361 handler = value_as_address (value);
7362
7363 if (debug_infrun)
7364 fprintf_unfiltered (gdb_stdlog,
7365 "infrun: exception resume at %lx\n",
7366 (unsigned long) handler);
7367
7368 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7369 handler,
7370 bp_exception_resume).release ();
c70a6932
JK
7371
7372 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7373 frame = NULL;
7374
5d5658a1 7375 bp->thread = tp->global_num;
186c406b
TT
7376 inferior_thread ()->control.exception_resume_breakpoint = bp;
7377 }
7378 }
230d2906 7379 catch (const gdb_exception_error &e)
492d29ea
PA
7380 {
7381 /* We want to ignore errors here. */
7382 }
186c406b
TT
7383}
7384
28106bc2
SDJ
7385/* A helper for check_exception_resume that sets an
7386 exception-breakpoint based on a SystemTap probe. */
7387
7388static void
7389insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7390 const struct bound_probe *probe,
28106bc2
SDJ
7391 struct frame_info *frame)
7392{
7393 struct value *arg_value;
7394 CORE_ADDR handler;
7395 struct breakpoint *bp;
7396
7397 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7398 if (!arg_value)
7399 return;
7400
7401 handler = value_as_address (arg_value);
7402
7403 if (debug_infrun)
7404 fprintf_unfiltered (gdb_stdlog,
7405 "infrun: exception resume at %s\n",
6bac7473 7406 paddress (get_objfile_arch (probe->objfile),
28106bc2
SDJ
7407 handler));
7408
7409 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7410 handler, bp_exception_resume).release ();
5d5658a1 7411 bp->thread = tp->global_num;
28106bc2
SDJ
7412 inferior_thread ()->control.exception_resume_breakpoint = bp;
7413}
7414
186c406b
TT
7415/* This is called when an exception has been intercepted. Check to
7416 see whether the exception's destination is of interest, and if so,
7417 set an exception resume breakpoint there. */
7418
7419static void
7420check_exception_resume (struct execution_control_state *ecs,
28106bc2 7421 struct frame_info *frame)
186c406b 7422{
729662a5 7423 struct bound_probe probe;
28106bc2
SDJ
7424 struct symbol *func;
7425
7426 /* First see if this exception unwinding breakpoint was set via a
7427 SystemTap probe point. If so, the probe has two arguments: the
7428 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7429 set a breakpoint there. */
6bac7473 7430 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7431 if (probe.prob)
28106bc2 7432 {
729662a5 7433 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7434 return;
7435 }
7436
7437 func = get_frame_function (frame);
7438 if (!func)
7439 return;
186c406b 7440
a70b8144 7441 try
186c406b 7442 {
3977b71f 7443 const struct block *b;
8157b174 7444 struct block_iterator iter;
186c406b
TT
7445 struct symbol *sym;
7446 int argno = 0;
7447
7448 /* The exception breakpoint is a thread-specific breakpoint on
7449 the unwinder's debug hook, declared as:
7450
7451 void _Unwind_DebugHook (void *cfa, void *handler);
7452
7453 The CFA argument indicates the frame to which control is
7454 about to be transferred. HANDLER is the destination PC.
7455
7456 We ignore the CFA and set a temporary breakpoint at HANDLER.
7457 This is not extremely efficient but it avoids issues in gdb
7458 with computing the DWARF CFA, and it also works even in weird
7459 cases such as throwing an exception from inside a signal
7460 handler. */
7461
7462 b = SYMBOL_BLOCK_VALUE (func);
7463 ALL_BLOCK_SYMBOLS (b, iter, sym)
7464 {
7465 if (!SYMBOL_IS_ARGUMENT (sym))
7466 continue;
7467
7468 if (argno == 0)
7469 ++argno;
7470 else
7471 {
7472 insert_exception_resume_breakpoint (ecs->event_thread,
7473 b, frame, sym);
7474 break;
7475 }
7476 }
7477 }
230d2906 7478 catch (const gdb_exception_error &e)
492d29ea
PA
7479 {
7480 }
186c406b
TT
7481}
7482
104c1213 7483static void
22bcd14b 7484stop_waiting (struct execution_control_state *ecs)
104c1213 7485{
527159b7 7486 if (debug_infrun)
22bcd14b 7487 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
527159b7 7488
cd0fc7c3
SS
7489 /* Let callers know we don't want to wait for the inferior anymore. */
7490 ecs->wait_some_more = 0;
fbea99ea
PA
7491
7492 /* If all-stop, but the target is always in non-stop mode, stop all
7493 threads now that we're presenting the stop to the user. */
7494 if (!non_stop && target_is_non_stop_p ())
7495 stop_all_threads ();
cd0fc7c3
SS
7496}
7497
4d9d9d04
PA
7498/* Like keep_going, but passes the signal to the inferior, even if the
7499 signal is set to nopass. */
d4f3574e
SS
7500
7501static void
4d9d9d04 7502keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7503{
d7e15655 7504 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7505 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7506
d4f3574e 7507 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7508 ecs->event_thread->prev_pc
00431a78 7509 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
d4f3574e 7510
4d9d9d04 7511 if (ecs->event_thread->control.trap_expected)
d4f3574e 7512 {
4d9d9d04
PA
7513 struct thread_info *tp = ecs->event_thread;
7514
7515 if (debug_infrun)
7516 fprintf_unfiltered (gdb_stdlog,
7517 "infrun: %s has trap_expected set, "
7518 "resuming to collect trap\n",
a068643d 7519 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7520
a9ba6bae
PA
7521 /* We haven't yet gotten our trap, and either: intercepted a
7522 non-signal event (e.g., a fork); or took a signal which we
7523 are supposed to pass through to the inferior. Simply
7524 continue. */
64ce06e4 7525 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7526 }
372316f1
PA
7527 else if (step_over_info_valid_p ())
7528 {
7529 /* Another thread is stepping over a breakpoint in-line. If
7530 this thread needs a step-over too, queue the request. In
7531 either case, this resume must be deferred for later. */
7532 struct thread_info *tp = ecs->event_thread;
7533
7534 if (ecs->hit_singlestep_breakpoint
7535 || thread_still_needs_step_over (tp))
7536 {
7537 if (debug_infrun)
7538 fprintf_unfiltered (gdb_stdlog,
7539 "infrun: step-over already in progress: "
7540 "step-over for %s deferred\n",
a068643d 7541 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
7542 thread_step_over_chain_enqueue (tp);
7543 }
7544 else
7545 {
7546 if (debug_infrun)
7547 fprintf_unfiltered (gdb_stdlog,
7548 "infrun: step-over in progress: "
7549 "resume of %s deferred\n",
a068643d 7550 target_pid_to_str (tp->ptid).c_str ());
372316f1 7551 }
372316f1 7552 }
d4f3574e
SS
7553 else
7554 {
31e77af2 7555 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7556 int remove_bp;
7557 int remove_wps;
8d297bbf 7558 step_over_what step_what;
31e77af2 7559
d4f3574e 7560 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7561 anyway (if we got a signal, the user asked it be passed to
7562 the child)
7563 -- or --
7564 We got our expected trap, but decided we should resume from
7565 it.
d4f3574e 7566
a9ba6bae 7567 We're going to run this baby now!
d4f3574e 7568
c36b740a
VP
7569 Note that insert_breakpoints won't try to re-insert
7570 already inserted breakpoints. Therefore, we don't
7571 care if breakpoints were already inserted, or not. */
a9ba6bae 7572
31e77af2
PA
7573 /* If we need to step over a breakpoint, and we're not using
7574 displaced stepping to do so, insert all breakpoints
7575 (watchpoints, etc.) but the one we're stepping over, step one
7576 instruction, and then re-insert the breakpoint when that step
7577 is finished. */
963f9c80 7578
6c4cfb24
PA
7579 step_what = thread_still_needs_step_over (ecs->event_thread);
7580
963f9c80 7581 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7582 || (step_what & STEP_OVER_BREAKPOINT));
7583 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7584
cb71640d
PA
7585 /* We can't use displaced stepping if we need to step past a
7586 watchpoint. The instruction copied to the scratch pad would
7587 still trigger the watchpoint. */
7588 if (remove_bp
3fc8eb30 7589 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7590 {
a01bda52 7591 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7592 regcache_read_pc (regcache), remove_wps,
7593 ecs->event_thread->global_num);
45e8c884 7594 }
963f9c80 7595 else if (remove_wps)
21edc42f 7596 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7597
7598 /* If we now need to do an in-line step-over, we need to stop
7599 all other threads. Note this must be done before
7600 insert_breakpoints below, because that removes the breakpoint
7601 we're about to step over, otherwise other threads could miss
7602 it. */
fbea99ea 7603 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7604 stop_all_threads ();
abbb1732 7605
31e77af2 7606 /* Stop stepping if inserting breakpoints fails. */
a70b8144 7607 try
31e77af2
PA
7608 {
7609 insert_breakpoints ();
7610 }
230d2906 7611 catch (const gdb_exception_error &e)
31e77af2
PA
7612 {
7613 exception_print (gdb_stderr, e);
22bcd14b 7614 stop_waiting (ecs);
bdf2a94a 7615 clear_step_over_info ();
31e77af2 7616 return;
d4f3574e
SS
7617 }
7618
963f9c80 7619 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7620
64ce06e4 7621 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7622 }
7623
488f131b 7624 prepare_to_wait (ecs);
d4f3574e
SS
7625}
7626
4d9d9d04
PA
7627/* Called when we should continue running the inferior, because the
7628 current event doesn't cause a user visible stop. This does the
7629 resuming part; waiting for the next event is done elsewhere. */
7630
7631static void
7632keep_going (struct execution_control_state *ecs)
7633{
7634 if (ecs->event_thread->control.trap_expected
7635 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7636 ecs->event_thread->control.trap_expected = 0;
7637
7638 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7639 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7640 keep_going_pass_signal (ecs);
7641}
7642
104c1213
JM
7643/* This function normally comes after a resume, before
7644 handle_inferior_event exits. It takes care of any last bits of
7645 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7646
104c1213
JM
7647static void
7648prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7649{
527159b7 7650 if (debug_infrun)
8a9de0e4 7651 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
104c1213 7652
104c1213 7653 ecs->wait_some_more = 1;
0b333c5e
PA
7654
7655 if (!target_is_async_p ())
7656 mark_infrun_async_event_handler ();
c906108c 7657}
11cf8741 7658
fd664c91 7659/* We are done with the step range of a step/next/si/ni command.
b57bacec 7660 Called once for each n of a "step n" operation. */
fd664c91
PA
7661
7662static void
bdc36728 7663end_stepping_range (struct execution_control_state *ecs)
fd664c91 7664{
bdc36728 7665 ecs->event_thread->control.stop_step = 1;
bdc36728 7666 stop_waiting (ecs);
fd664c91
PA
7667}
7668
33d62d64
JK
7669/* Several print_*_reason functions to print why the inferior has stopped.
7670 We always print something when the inferior exits, or receives a signal.
7671 The rest of the cases are dealt with later on in normal_stop and
7672 print_it_typical. Ideally there should be a call to one of these
7673 print_*_reason functions functions from handle_inferior_event each time
22bcd14b 7674 stop_waiting is called.
33d62d64 7675
fd664c91
PA
7676 Note that we don't call these directly, instead we delegate that to
7677 the interpreters, through observers. Interpreters then call these
7678 with whatever uiout is right. */
33d62d64 7679
fd664c91
PA
7680void
7681print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7682{
fd664c91 7683 /* For CLI-like interpreters, print nothing. */
33d62d64 7684
112e8700 7685 if (uiout->is_mi_like_p ())
fd664c91 7686 {
112e8700 7687 uiout->field_string ("reason",
fd664c91
PA
7688 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7689 }
7690}
33d62d64 7691
fd664c91
PA
7692void
7693print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7694{
33d62d64 7695 annotate_signalled ();
112e8700
SM
7696 if (uiout->is_mi_like_p ())
7697 uiout->field_string
7698 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7699 uiout->text ("\nProgram terminated with signal ");
33d62d64 7700 annotate_signal_name ();
112e8700 7701 uiout->field_string ("signal-name",
2ea28649 7702 gdb_signal_to_name (siggnal));
33d62d64 7703 annotate_signal_name_end ();
112e8700 7704 uiout->text (", ");
33d62d64 7705 annotate_signal_string ();
112e8700 7706 uiout->field_string ("signal-meaning",
2ea28649 7707 gdb_signal_to_string (siggnal));
33d62d64 7708 annotate_signal_string_end ();
112e8700
SM
7709 uiout->text (".\n");
7710 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7711}
7712
fd664c91
PA
7713void
7714print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7715{
fda326dd 7716 struct inferior *inf = current_inferior ();
a068643d 7717 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 7718
33d62d64
JK
7719 annotate_exited (exitstatus);
7720 if (exitstatus)
7721 {
112e8700
SM
7722 if (uiout->is_mi_like_p ())
7723 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
7724 std::string exit_code_str
7725 = string_printf ("0%o", (unsigned int) exitstatus);
7726 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
7727 plongest (inf->num), pidstr.c_str (),
7728 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
7729 }
7730 else
11cf8741 7731 {
112e8700
SM
7732 if (uiout->is_mi_like_p ())
7733 uiout->field_string
7734 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
7735 uiout->message ("[Inferior %s (%s) exited normally]\n",
7736 plongest (inf->num), pidstr.c_str ());
33d62d64 7737 }
33d62d64
JK
7738}
7739
012b3a21
WT
7740/* Some targets/architectures can do extra processing/display of
7741 segmentation faults. E.g., Intel MPX boundary faults.
7742 Call the architecture dependent function to handle the fault. */
7743
7744static void
7745handle_segmentation_fault (struct ui_out *uiout)
7746{
7747 struct regcache *regcache = get_current_regcache ();
ac7936df 7748 struct gdbarch *gdbarch = regcache->arch ();
012b3a21
WT
7749
7750 if (gdbarch_handle_segmentation_fault_p (gdbarch))
7751 gdbarch_handle_segmentation_fault (gdbarch, uiout);
7752}
7753
fd664c91
PA
7754void
7755print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 7756{
f303dbd6
PA
7757 struct thread_info *thr = inferior_thread ();
7758
33d62d64
JK
7759 annotate_signal ();
7760
112e8700 7761 if (uiout->is_mi_like_p ())
f303dbd6
PA
7762 ;
7763 else if (show_thread_that_caused_stop ())
33d62d64 7764 {
f303dbd6 7765 const char *name;
33d62d64 7766
112e8700 7767 uiout->text ("\nThread ");
33eca680 7768 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
7769
7770 name = thr->name != NULL ? thr->name : target_thread_name (thr);
7771 if (name != NULL)
7772 {
112e8700 7773 uiout->text (" \"");
33eca680 7774 uiout->field_string ("name", name);
112e8700 7775 uiout->text ("\"");
f303dbd6 7776 }
33d62d64 7777 }
f303dbd6 7778 else
112e8700 7779 uiout->text ("\nProgram");
f303dbd6 7780
112e8700
SM
7781 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
7782 uiout->text (" stopped");
33d62d64
JK
7783 else
7784 {
112e8700 7785 uiout->text (" received signal ");
8b93c638 7786 annotate_signal_name ();
112e8700
SM
7787 if (uiout->is_mi_like_p ())
7788 uiout->field_string
7789 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
7790 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 7791 annotate_signal_name_end ();
112e8700 7792 uiout->text (", ");
8b93c638 7793 annotate_signal_string ();
112e8700 7794 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
7795
7796 if (siggnal == GDB_SIGNAL_SEGV)
7797 handle_segmentation_fault (uiout);
7798
8b93c638 7799 annotate_signal_string_end ();
33d62d64 7800 }
112e8700 7801 uiout->text (".\n");
33d62d64 7802}
252fbfc8 7803
fd664c91
PA
7804void
7805print_no_history_reason (struct ui_out *uiout)
33d62d64 7806{
112e8700 7807 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 7808}
43ff13b4 7809
0c7e1a46
PA
7810/* Print current location without a level number, if we have changed
7811 functions or hit a breakpoint. Print source line if we have one.
7812 bpstat_print contains the logic deciding in detail what to print,
7813 based on the event(s) that just occurred. */
7814
243a9253
PA
7815static void
7816print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
7817{
7818 int bpstat_ret;
f486487f 7819 enum print_what source_flag;
0c7e1a46
PA
7820 int do_frame_printing = 1;
7821 struct thread_info *tp = inferior_thread ();
7822
7823 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
7824 switch (bpstat_ret)
7825 {
7826 case PRINT_UNKNOWN:
7827 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
7828 should) carry around the function and does (or should) use
7829 that when doing a frame comparison. */
7830 if (tp->control.stop_step
7831 && frame_id_eq (tp->control.step_frame_id,
7832 get_frame_id (get_current_frame ()))
f2ffa92b
PA
7833 && (tp->control.step_start_function
7834 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
7835 {
7836 /* Finished step, just print source line. */
7837 source_flag = SRC_LINE;
7838 }
7839 else
7840 {
7841 /* Print location and source line. */
7842 source_flag = SRC_AND_LOC;
7843 }
7844 break;
7845 case PRINT_SRC_AND_LOC:
7846 /* Print location and source line. */
7847 source_flag = SRC_AND_LOC;
7848 break;
7849 case PRINT_SRC_ONLY:
7850 source_flag = SRC_LINE;
7851 break;
7852 case PRINT_NOTHING:
7853 /* Something bogus. */
7854 source_flag = SRC_LINE;
7855 do_frame_printing = 0;
7856 break;
7857 default:
7858 internal_error (__FILE__, __LINE__, _("Unknown value."));
7859 }
7860
7861 /* The behavior of this routine with respect to the source
7862 flag is:
7863 SRC_LINE: Print only source line
7864 LOCATION: Print only location
7865 SRC_AND_LOC: Print location and source line. */
7866 if (do_frame_printing)
7867 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
7868}
7869
243a9253
PA
7870/* See infrun.h. */
7871
7872void
4c7d57e7 7873print_stop_event (struct ui_out *uiout, bool displays)
243a9253 7874{
243a9253 7875 struct target_waitstatus last;
243a9253
PA
7876 struct thread_info *tp;
7877
ab1ddbcf 7878 get_last_target_status (nullptr, &last);
243a9253 7879
67ad9399
TT
7880 {
7881 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 7882
67ad9399 7883 print_stop_location (&last);
243a9253 7884
67ad9399 7885 /* Display the auto-display expressions. */
4c7d57e7
TT
7886 if (displays)
7887 do_displays ();
67ad9399 7888 }
243a9253
PA
7889
7890 tp = inferior_thread ();
7891 if (tp->thread_fsm != NULL
46e3ed7f 7892 && tp->thread_fsm->finished_p ())
243a9253
PA
7893 {
7894 struct return_value_info *rv;
7895
46e3ed7f 7896 rv = tp->thread_fsm->return_value ();
243a9253
PA
7897 if (rv != NULL)
7898 print_return_value (uiout, rv);
7899 }
0c7e1a46
PA
7900}
7901
388a7084
PA
7902/* See infrun.h. */
7903
7904void
7905maybe_remove_breakpoints (void)
7906{
7907 if (!breakpoints_should_be_inserted_now () && target_has_execution)
7908 {
7909 if (remove_breakpoints ())
7910 {
223ffa71 7911 target_terminal::ours_for_output ();
388a7084
PA
7912 printf_filtered (_("Cannot remove breakpoints because "
7913 "program is no longer writable.\nFurther "
7914 "execution is probably impossible.\n"));
7915 }
7916 }
7917}
7918
4c2f2a79
PA
7919/* The execution context that just caused a normal stop. */
7920
7921struct stop_context
7922{
2d844eaf
TT
7923 stop_context ();
7924 ~stop_context ();
7925
7926 DISABLE_COPY_AND_ASSIGN (stop_context);
7927
7928 bool changed () const;
7929
4c2f2a79
PA
7930 /* The stop ID. */
7931 ULONGEST stop_id;
c906108c 7932
4c2f2a79 7933 /* The event PTID. */
c906108c 7934
4c2f2a79
PA
7935 ptid_t ptid;
7936
7937 /* If stopp for a thread event, this is the thread that caused the
7938 stop. */
7939 struct thread_info *thread;
7940
7941 /* The inferior that caused the stop. */
7942 int inf_num;
7943};
7944
2d844eaf 7945/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
7946 takes a strong reference to the thread. */
7947
2d844eaf 7948stop_context::stop_context ()
4c2f2a79 7949{
2d844eaf
TT
7950 stop_id = get_stop_id ();
7951 ptid = inferior_ptid;
7952 inf_num = current_inferior ()->num;
4c2f2a79 7953
d7e15655 7954 if (inferior_ptid != null_ptid)
4c2f2a79
PA
7955 {
7956 /* Take a strong reference so that the thread can't be deleted
7957 yet. */
2d844eaf
TT
7958 thread = inferior_thread ();
7959 thread->incref ();
4c2f2a79
PA
7960 }
7961 else
2d844eaf 7962 thread = NULL;
4c2f2a79
PA
7963}
7964
7965/* Release a stop context previously created with save_stop_context.
7966 Releases the strong reference to the thread as well. */
7967
2d844eaf 7968stop_context::~stop_context ()
4c2f2a79 7969{
2d844eaf
TT
7970 if (thread != NULL)
7971 thread->decref ();
4c2f2a79
PA
7972}
7973
7974/* Return true if the current context no longer matches the saved stop
7975 context. */
7976
2d844eaf
TT
7977bool
7978stop_context::changed () const
7979{
7980 if (ptid != inferior_ptid)
7981 return true;
7982 if (inf_num != current_inferior ()->num)
7983 return true;
7984 if (thread != NULL && thread->state != THREAD_STOPPED)
7985 return true;
7986 if (get_stop_id () != stop_id)
7987 return true;
7988 return false;
4c2f2a79
PA
7989}
7990
7991/* See infrun.h. */
7992
7993int
96baa820 7994normal_stop (void)
c906108c 7995{
73b65bb0 7996 struct target_waitstatus last;
73b65bb0 7997
ab1ddbcf 7998 get_last_target_status (nullptr, &last);
73b65bb0 7999
4c2f2a79
PA
8000 new_stop_id ();
8001
29f49a6a
PA
8002 /* If an exception is thrown from this point on, make sure to
8003 propagate GDB's knowledge of the executing state to the
8004 frontend/user running state. A QUIT is an easy exception to see
8005 here, so do this before any filtered output. */
731f534f
PA
8006
8007 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8008
c35b1492 8009 if (!non_stop)
731f534f 8010 maybe_finish_thread_state.emplace (minus_one_ptid);
e1316e60
PA
8011 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8012 || last.kind == TARGET_WAITKIND_EXITED)
8013 {
8014 /* On some targets, we may still have live threads in the
8015 inferior when we get a process exit event. E.g., for
8016 "checkpoint", when the current checkpoint/fork exits,
8017 linux-fork.c automatically switches to another fork from
8018 within target_mourn_inferior. */
731f534f
PA
8019 if (inferior_ptid != null_ptid)
8020 maybe_finish_thread_state.emplace (ptid_t (inferior_ptid.pid ()));
e1316e60
PA
8021 }
8022 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
731f534f 8023 maybe_finish_thread_state.emplace (inferior_ptid);
29f49a6a 8024
b57bacec
PA
8025 /* As we're presenting a stop, and potentially removing breakpoints,
8026 update the thread list so we can tell whether there are threads
8027 running on the target. With target remote, for example, we can
8028 only learn about new threads when we explicitly update the thread
8029 list. Do this before notifying the interpreters about signal
8030 stops, end of stepping ranges, etc., so that the "new thread"
8031 output is emitted before e.g., "Program received signal FOO",
8032 instead of after. */
8033 update_thread_list ();
8034
8035 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8036 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8037
c906108c
SS
8038 /* As with the notification of thread events, we want to delay
8039 notifying the user that we've switched thread context until
8040 the inferior actually stops.
8041
73b65bb0
DJ
8042 There's no point in saying anything if the inferior has exited.
8043 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8044 "received a signal".
8045
8046 Also skip saying anything in non-stop mode. In that mode, as we
8047 don't want GDB to switch threads behind the user's back, to avoid
8048 races where the user is typing a command to apply to thread x,
8049 but GDB switches to thread y before the user finishes entering
8050 the command, fetch_inferior_event installs a cleanup to restore
8051 the current thread back to the thread the user had selected right
8052 after this event is handled, so we're not really switching, only
8053 informing of a stop. */
4f8d22e3 8054 if (!non_stop
731f534f 8055 && previous_inferior_ptid != inferior_ptid
73b65bb0
DJ
8056 && target_has_execution
8057 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8058 && last.kind != TARGET_WAITKIND_EXITED
8059 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8060 {
0e454242 8061 SWITCH_THRU_ALL_UIS ()
3b12939d 8062 {
223ffa71 8063 target_terminal::ours_for_output ();
3b12939d 8064 printf_filtered (_("[Switching to %s]\n"),
a068643d 8065 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8066 annotate_thread_changed ();
8067 }
39f77062 8068 previous_inferior_ptid = inferior_ptid;
c906108c 8069 }
c906108c 8070
0e5bf2a8
PA
8071 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8072 {
0e454242 8073 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8074 if (current_ui->prompt_state == PROMPT_BLOCKED)
8075 {
223ffa71 8076 target_terminal::ours_for_output ();
3b12939d
PA
8077 printf_filtered (_("No unwaited-for children left.\n"));
8078 }
0e5bf2a8
PA
8079 }
8080
b57bacec 8081 /* Note: this depends on the update_thread_list call above. */
388a7084 8082 maybe_remove_breakpoints ();
c906108c 8083
c906108c
SS
8084 /* If an auto-display called a function and that got a signal,
8085 delete that auto-display to avoid an infinite recursion. */
8086
8087 if (stopped_by_random_signal)
8088 disable_current_display ();
8089
0e454242 8090 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8091 {
8092 async_enable_stdin ();
8093 }
c906108c 8094
388a7084 8095 /* Let the user/frontend see the threads as stopped. */
731f534f 8096 maybe_finish_thread_state.reset ();
388a7084
PA
8097
8098 /* Select innermost stack frame - i.e., current frame is frame 0,
8099 and current location is based on that. Handle the case where the
8100 dummy call is returning after being stopped. E.g. the dummy call
8101 previously hit a breakpoint. (If the dummy call returns
8102 normally, we won't reach here.) Do this before the stop hook is
8103 run, so that it doesn't get to see the temporary dummy frame,
8104 which is not where we'll present the stop. */
8105 if (has_stack_frames ())
8106 {
8107 if (stop_stack_dummy == STOP_STACK_DUMMY)
8108 {
8109 /* Pop the empty frame that contains the stack dummy. This
8110 also restores inferior state prior to the call (struct
8111 infcall_suspend_state). */
8112 struct frame_info *frame = get_current_frame ();
8113
8114 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8115 frame_pop (frame);
8116 /* frame_pop calls reinit_frame_cache as the last thing it
8117 does which means there's now no selected frame. */
8118 }
8119
8120 select_frame (get_current_frame ());
8121
8122 /* Set the current source location. */
8123 set_current_sal_from_frame (get_current_frame ());
8124 }
dd7e2d2b
PA
8125
8126 /* Look up the hook_stop and run it (CLI internally handles problem
8127 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8128 if (stop_command != NULL)
8129 {
2d844eaf 8130 stop_context saved_context;
4c2f2a79 8131
a70b8144 8132 try
bf469271
PA
8133 {
8134 execute_cmd_pre_hook (stop_command);
8135 }
230d2906 8136 catch (const gdb_exception &ex)
bf469271
PA
8137 {
8138 exception_fprintf (gdb_stderr, ex,
8139 "Error while running hook_stop:\n");
8140 }
4c2f2a79
PA
8141
8142 /* If the stop hook resumes the target, then there's no point in
8143 trying to notify about the previous stop; its context is
8144 gone. Likewise if the command switches thread or inferior --
8145 the observers would print a stop for the wrong
8146 thread/inferior. */
2d844eaf
TT
8147 if (saved_context.changed ())
8148 return 1;
4c2f2a79 8149 }
dd7e2d2b 8150
388a7084
PA
8151 /* Notify observers about the stop. This is where the interpreters
8152 print the stop event. */
d7e15655 8153 if (inferior_ptid != null_ptid)
76727919 8154 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8155 stop_print_frame);
8156 else
76727919 8157 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8158
243a9253
PA
8159 annotate_stopped ();
8160
48844aa6
PA
8161 if (target_has_execution)
8162 {
8163 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8164 && last.kind != TARGET_WAITKIND_EXITED
8165 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8166 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8167 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8168 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8169 }
6c95b8df
PA
8170
8171 /* Try to get rid of automatically added inferiors that are no
8172 longer needed. Keeping those around slows down things linearly.
8173 Note that this never removes the current inferior. */
8174 prune_inferiors ();
4c2f2a79
PA
8175
8176 return 0;
c906108c 8177}
c906108c 8178\f
c5aa993b 8179int
96baa820 8180signal_stop_state (int signo)
c906108c 8181{
d6b48e9c 8182 return signal_stop[signo];
c906108c
SS
8183}
8184
c5aa993b 8185int
96baa820 8186signal_print_state (int signo)
c906108c
SS
8187{
8188 return signal_print[signo];
8189}
8190
c5aa993b 8191int
96baa820 8192signal_pass_state (int signo)
c906108c
SS
8193{
8194 return signal_program[signo];
8195}
8196
2455069d
UW
8197static void
8198signal_cache_update (int signo)
8199{
8200 if (signo == -1)
8201 {
a493e3e2 8202 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8203 signal_cache_update (signo);
8204
8205 return;
8206 }
8207
8208 signal_pass[signo] = (signal_stop[signo] == 0
8209 && signal_print[signo] == 0
ab04a2af
TT
8210 && signal_program[signo] == 1
8211 && signal_catch[signo] == 0);
2455069d
UW
8212}
8213
488f131b 8214int
7bda5e4a 8215signal_stop_update (int signo, int state)
d4f3574e
SS
8216{
8217 int ret = signal_stop[signo];
abbb1732 8218
d4f3574e 8219 signal_stop[signo] = state;
2455069d 8220 signal_cache_update (signo);
d4f3574e
SS
8221 return ret;
8222}
8223
488f131b 8224int
7bda5e4a 8225signal_print_update (int signo, int state)
d4f3574e
SS
8226{
8227 int ret = signal_print[signo];
abbb1732 8228
d4f3574e 8229 signal_print[signo] = state;
2455069d 8230 signal_cache_update (signo);
d4f3574e
SS
8231 return ret;
8232}
8233
488f131b 8234int
7bda5e4a 8235signal_pass_update (int signo, int state)
d4f3574e
SS
8236{
8237 int ret = signal_program[signo];
abbb1732 8238
d4f3574e 8239 signal_program[signo] = state;
2455069d 8240 signal_cache_update (signo);
d4f3574e
SS
8241 return ret;
8242}
8243
ab04a2af
TT
8244/* Update the global 'signal_catch' from INFO and notify the
8245 target. */
8246
8247void
8248signal_catch_update (const unsigned int *info)
8249{
8250 int i;
8251
8252 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8253 signal_catch[i] = info[i] > 0;
8254 signal_cache_update (-1);
adc6a863 8255 target_pass_signals (signal_pass);
ab04a2af
TT
8256}
8257
c906108c 8258static void
96baa820 8259sig_print_header (void)
c906108c 8260{
3e43a32a
MS
8261 printf_filtered (_("Signal Stop\tPrint\tPass "
8262 "to program\tDescription\n"));
c906108c
SS
8263}
8264
8265static void
2ea28649 8266sig_print_info (enum gdb_signal oursig)
c906108c 8267{
2ea28649 8268 const char *name = gdb_signal_to_name (oursig);
c906108c 8269 int name_padding = 13 - strlen (name);
96baa820 8270
c906108c
SS
8271 if (name_padding <= 0)
8272 name_padding = 0;
8273
8274 printf_filtered ("%s", name);
488f131b 8275 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8276 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8277 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8278 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8279 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8280}
8281
8282/* Specify how various signals in the inferior should be handled. */
8283
8284static void
0b39b52e 8285handle_command (const char *args, int from_tty)
c906108c 8286{
c906108c 8287 int digits, wordlen;
b926417a 8288 int sigfirst, siglast;
2ea28649 8289 enum gdb_signal oursig;
c906108c 8290 int allsigs;
c906108c
SS
8291
8292 if (args == NULL)
8293 {
e2e0b3e5 8294 error_no_arg (_("signal to handle"));
c906108c
SS
8295 }
8296
1777feb0 8297 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8298
adc6a863
PA
8299 const size_t nsigs = GDB_SIGNAL_LAST;
8300 unsigned char sigs[nsigs] {};
c906108c 8301
1777feb0 8302 /* Break the command line up into args. */
c906108c 8303
773a1edc 8304 gdb_argv built_argv (args);
c906108c
SS
8305
8306 /* Walk through the args, looking for signal oursigs, signal names, and
8307 actions. Signal numbers and signal names may be interspersed with
8308 actions, with the actions being performed for all signals cumulatively
1777feb0 8309 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8310
773a1edc 8311 for (char *arg : built_argv)
c906108c 8312 {
773a1edc
TT
8313 wordlen = strlen (arg);
8314 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8315 {;
8316 }
8317 allsigs = 0;
8318 sigfirst = siglast = -1;
8319
773a1edc 8320 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8321 {
8322 /* Apply action to all signals except those used by the
1777feb0 8323 debugger. Silently skip those. */
c906108c
SS
8324 allsigs = 1;
8325 sigfirst = 0;
8326 siglast = nsigs - 1;
8327 }
773a1edc 8328 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8329 {
8330 SET_SIGS (nsigs, sigs, signal_stop);
8331 SET_SIGS (nsigs, sigs, signal_print);
8332 }
773a1edc 8333 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8334 {
8335 UNSET_SIGS (nsigs, sigs, signal_program);
8336 }
773a1edc 8337 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8338 {
8339 SET_SIGS (nsigs, sigs, signal_print);
8340 }
773a1edc 8341 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8342 {
8343 SET_SIGS (nsigs, sigs, signal_program);
8344 }
773a1edc 8345 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8346 {
8347 UNSET_SIGS (nsigs, sigs, signal_stop);
8348 }
773a1edc 8349 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8350 {
8351 SET_SIGS (nsigs, sigs, signal_program);
8352 }
773a1edc 8353 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8354 {
8355 UNSET_SIGS (nsigs, sigs, signal_print);
8356 UNSET_SIGS (nsigs, sigs, signal_stop);
8357 }
773a1edc 8358 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8359 {
8360 UNSET_SIGS (nsigs, sigs, signal_program);
8361 }
8362 else if (digits > 0)
8363 {
8364 /* It is numeric. The numeric signal refers to our own
8365 internal signal numbering from target.h, not to host/target
8366 signal number. This is a feature; users really should be
8367 using symbolic names anyway, and the common ones like
8368 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8369
8370 sigfirst = siglast = (int)
773a1edc
TT
8371 gdb_signal_from_command (atoi (arg));
8372 if (arg[digits] == '-')
c906108c
SS
8373 {
8374 siglast = (int)
773a1edc 8375 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8376 }
8377 if (sigfirst > siglast)
8378 {
1777feb0 8379 /* Bet he didn't figure we'd think of this case... */
b926417a 8380 std::swap (sigfirst, siglast);
c906108c
SS
8381 }
8382 }
8383 else
8384 {
773a1edc 8385 oursig = gdb_signal_from_name (arg);
a493e3e2 8386 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8387 {
8388 sigfirst = siglast = (int) oursig;
8389 }
8390 else
8391 {
8392 /* Not a number and not a recognized flag word => complain. */
773a1edc 8393 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8394 }
8395 }
8396
8397 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8398 which signals to apply actions to. */
c906108c 8399
b926417a 8400 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8401 {
2ea28649 8402 switch ((enum gdb_signal) signum)
c906108c 8403 {
a493e3e2
PA
8404 case GDB_SIGNAL_TRAP:
8405 case GDB_SIGNAL_INT:
c906108c
SS
8406 if (!allsigs && !sigs[signum])
8407 {
9e2f0ad4 8408 if (query (_("%s is used by the debugger.\n\
3e43a32a 8409Are you sure you want to change it? "),
2ea28649 8410 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8411 {
8412 sigs[signum] = 1;
8413 }
8414 else
c119e040 8415 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8416 }
8417 break;
a493e3e2
PA
8418 case GDB_SIGNAL_0:
8419 case GDB_SIGNAL_DEFAULT:
8420 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8421 /* Make sure that "all" doesn't print these. */
8422 break;
8423 default:
8424 sigs[signum] = 1;
8425 break;
8426 }
8427 }
c906108c
SS
8428 }
8429
b926417a 8430 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8431 if (sigs[signum])
8432 {
2455069d 8433 signal_cache_update (-1);
adc6a863
PA
8434 target_pass_signals (signal_pass);
8435 target_program_signals (signal_program);
c906108c 8436
3a031f65
PA
8437 if (from_tty)
8438 {
8439 /* Show the results. */
8440 sig_print_header ();
8441 for (; signum < nsigs; signum++)
8442 if (sigs[signum])
aead7601 8443 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8444 }
8445
8446 break;
8447 }
c906108c
SS
8448}
8449
de0bea00
MF
8450/* Complete the "handle" command. */
8451
eb3ff9a5 8452static void
de0bea00 8453handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8454 completion_tracker &tracker,
6f937416 8455 const char *text, const char *word)
de0bea00 8456{
de0bea00
MF
8457 static const char * const keywords[] =
8458 {
8459 "all",
8460 "stop",
8461 "ignore",
8462 "print",
8463 "pass",
8464 "nostop",
8465 "noignore",
8466 "noprint",
8467 "nopass",
8468 NULL,
8469 };
8470
eb3ff9a5
PA
8471 signal_completer (ignore, tracker, text, word);
8472 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8473}
8474
2ea28649
PA
8475enum gdb_signal
8476gdb_signal_from_command (int num)
ed01b82c
PA
8477{
8478 if (num >= 1 && num <= 15)
2ea28649 8479 return (enum gdb_signal) num;
ed01b82c
PA
8480 error (_("Only signals 1-15 are valid as numeric signals.\n\
8481Use \"info signals\" for a list of symbolic signals."));
8482}
8483
c906108c
SS
8484/* Print current contents of the tables set by the handle command.
8485 It is possible we should just be printing signals actually used
8486 by the current target (but for things to work right when switching
8487 targets, all signals should be in the signal tables). */
8488
8489static void
1d12d88f 8490info_signals_command (const char *signum_exp, int from_tty)
c906108c 8491{
2ea28649 8492 enum gdb_signal oursig;
abbb1732 8493
c906108c
SS
8494 sig_print_header ();
8495
8496 if (signum_exp)
8497 {
8498 /* First see if this is a symbol name. */
2ea28649 8499 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8500 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8501 {
8502 /* No, try numeric. */
8503 oursig =
2ea28649 8504 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8505 }
8506 sig_print_info (oursig);
8507 return;
8508 }
8509
8510 printf_filtered ("\n");
8511 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8512 for (oursig = GDB_SIGNAL_FIRST;
8513 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8514 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8515 {
8516 QUIT;
8517
a493e3e2
PA
8518 if (oursig != GDB_SIGNAL_UNKNOWN
8519 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8520 sig_print_info (oursig);
8521 }
8522
3e43a32a
MS
8523 printf_filtered (_("\nUse the \"handle\" command "
8524 "to change these tables.\n"));
c906108c 8525}
4aa995e1
PA
8526
8527/* The $_siginfo convenience variable is a bit special. We don't know
8528 for sure the type of the value until we actually have a chance to
7a9dd1b2 8529 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8530 also dependent on which thread you have selected.
8531
8532 1. making $_siginfo be an internalvar that creates a new value on
8533 access.
8534
8535 2. making the value of $_siginfo be an lval_computed value. */
8536
8537/* This function implements the lval_computed support for reading a
8538 $_siginfo value. */
8539
8540static void
8541siginfo_value_read (struct value *v)
8542{
8543 LONGEST transferred;
8544
a911d87a
PA
8545 /* If we can access registers, so can we access $_siginfo. Likewise
8546 vice versa. */
8547 validate_registers_access ();
c709acd1 8548
4aa995e1 8549 transferred =
8b88a78e 8550 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8551 NULL,
8552 value_contents_all_raw (v),
8553 value_offset (v),
8554 TYPE_LENGTH (value_type (v)));
8555
8556 if (transferred != TYPE_LENGTH (value_type (v)))
8557 error (_("Unable to read siginfo"));
8558}
8559
8560/* This function implements the lval_computed support for writing a
8561 $_siginfo value. */
8562
8563static void
8564siginfo_value_write (struct value *v, struct value *fromval)
8565{
8566 LONGEST transferred;
8567
a911d87a
PA
8568 /* If we can access registers, so can we access $_siginfo. Likewise
8569 vice versa. */
8570 validate_registers_access ();
c709acd1 8571
8b88a78e 8572 transferred = target_write (current_top_target (),
4aa995e1
PA
8573 TARGET_OBJECT_SIGNAL_INFO,
8574 NULL,
8575 value_contents_all_raw (fromval),
8576 value_offset (v),
8577 TYPE_LENGTH (value_type (fromval)));
8578
8579 if (transferred != TYPE_LENGTH (value_type (fromval)))
8580 error (_("Unable to write siginfo"));
8581}
8582
c8f2448a 8583static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8584 {
8585 siginfo_value_read,
8586 siginfo_value_write
8587 };
8588
8589/* Return a new value with the correct type for the siginfo object of
78267919
UW
8590 the current thread using architecture GDBARCH. Return a void value
8591 if there's no object available. */
4aa995e1 8592
2c0b251b 8593static struct value *
22d2b532
SDJ
8594siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8595 void *ignore)
4aa995e1 8596{
4aa995e1 8597 if (target_has_stack
d7e15655 8598 && inferior_ptid != null_ptid
78267919 8599 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8600 {
78267919 8601 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8602
78267919 8603 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8604 }
8605
78267919 8606 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8607}
8608
c906108c 8609\f
16c381f0
JK
8610/* infcall_suspend_state contains state about the program itself like its
8611 registers and any signal it received when it last stopped.
8612 This state must be restored regardless of how the inferior function call
8613 ends (either successfully, or after it hits a breakpoint or signal)
8614 if the program is to properly continue where it left off. */
8615
6bf78e29 8616class infcall_suspend_state
7a292a7a 8617{
6bf78e29
AB
8618public:
8619 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
8620 once the inferior function call has finished. */
8621 infcall_suspend_state (struct gdbarch *gdbarch,
8622 const struct thread_info *tp,
8623 struct regcache *regcache)
8624 : m_thread_suspend (tp->suspend),
8625 m_registers (new readonly_detached_regcache (*regcache))
8626 {
8627 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
8628
8629 if (gdbarch_get_siginfo_type_p (gdbarch))
8630 {
8631 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8632 size_t len = TYPE_LENGTH (type);
8633
8634 siginfo_data.reset ((gdb_byte *) xmalloc (len));
8635
8636 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8637 siginfo_data.get (), 0, len) != len)
8638 {
8639 /* Errors ignored. */
8640 siginfo_data.reset (nullptr);
8641 }
8642 }
8643
8644 if (siginfo_data)
8645 {
8646 m_siginfo_gdbarch = gdbarch;
8647 m_siginfo_data = std::move (siginfo_data);
8648 }
8649 }
8650
8651 /* Return a pointer to the stored register state. */
16c381f0 8652
6bf78e29
AB
8653 readonly_detached_regcache *registers () const
8654 {
8655 return m_registers.get ();
8656 }
8657
8658 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
8659
8660 void restore (struct gdbarch *gdbarch,
8661 struct thread_info *tp,
8662 struct regcache *regcache) const
8663 {
8664 tp->suspend = m_thread_suspend;
8665
8666 if (m_siginfo_gdbarch == gdbarch)
8667 {
8668 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8669
8670 /* Errors ignored. */
8671 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8672 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
8673 }
8674
8675 /* The inferior can be gone if the user types "print exit(0)"
8676 (and perhaps other times). */
8677 if (target_has_execution)
8678 /* NB: The register write goes through to the target. */
8679 regcache->restore (registers ());
8680 }
8681
8682private:
8683 /* How the current thread stopped before the inferior function call was
8684 executed. */
8685 struct thread_suspend_state m_thread_suspend;
8686
8687 /* The registers before the inferior function call was executed. */
8688 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 8689
35515841 8690 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 8691 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
8692
8693 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
8694 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
8695 content would be invalid. */
6bf78e29 8696 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
8697};
8698
cb524840
TT
8699infcall_suspend_state_up
8700save_infcall_suspend_state ()
b89667eb 8701{
b89667eb 8702 struct thread_info *tp = inferior_thread ();
1736ad11 8703 struct regcache *regcache = get_current_regcache ();
ac7936df 8704 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 8705
6bf78e29
AB
8706 infcall_suspend_state_up inf_state
8707 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 8708
6bf78e29
AB
8709 /* Having saved the current state, adjust the thread state, discarding
8710 any stop signal information. The stop signal is not useful when
8711 starting an inferior function call, and run_inferior_call will not use
8712 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 8713 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8714
b89667eb
DE
8715 return inf_state;
8716}
8717
8718/* Restore inferior session state to INF_STATE. */
8719
8720void
16c381f0 8721restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8722{
8723 struct thread_info *tp = inferior_thread ();
1736ad11 8724 struct regcache *regcache = get_current_regcache ();
ac7936df 8725 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 8726
6bf78e29 8727 inf_state->restore (gdbarch, tp, regcache);
16c381f0 8728 discard_infcall_suspend_state (inf_state);
b89667eb
DE
8729}
8730
b89667eb 8731void
16c381f0 8732discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 8733{
dd848631 8734 delete inf_state;
b89667eb
DE
8735}
8736
daf6667d 8737readonly_detached_regcache *
16c381f0 8738get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 8739{
6bf78e29 8740 return inf_state->registers ();
b89667eb
DE
8741}
8742
16c381f0
JK
8743/* infcall_control_state contains state regarding gdb's control of the
8744 inferior itself like stepping control. It also contains session state like
8745 the user's currently selected frame. */
b89667eb 8746
16c381f0 8747struct infcall_control_state
b89667eb 8748{
16c381f0
JK
8749 struct thread_control_state thread_control;
8750 struct inferior_control_state inferior_control;
d82142e2
JK
8751
8752 /* Other fields: */
ee841dd8
TT
8753 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
8754 int stopped_by_random_signal = 0;
7a292a7a 8755
b89667eb 8756 /* ID if the selected frame when the inferior function call was made. */
ee841dd8 8757 struct frame_id selected_frame_id {};
7a292a7a
SS
8758};
8759
c906108c 8760/* Save all of the information associated with the inferior<==>gdb
b89667eb 8761 connection. */
c906108c 8762
cb524840
TT
8763infcall_control_state_up
8764save_infcall_control_state ()
c906108c 8765{
cb524840 8766 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 8767 struct thread_info *tp = inferior_thread ();
d6b48e9c 8768 struct inferior *inf = current_inferior ();
7a292a7a 8769
16c381f0
JK
8770 inf_status->thread_control = tp->control;
8771 inf_status->inferior_control = inf->control;
d82142e2 8772
8358c15c 8773 tp->control.step_resume_breakpoint = NULL;
5b79abe7 8774 tp->control.exception_resume_breakpoint = NULL;
8358c15c 8775
16c381f0
JK
8776 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
8777 chain. If caller's caller is walking the chain, they'll be happier if we
8778 hand them back the original chain when restore_infcall_control_state is
8779 called. */
8780 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
8781
8782 /* Other fields: */
8783 inf_status->stop_stack_dummy = stop_stack_dummy;
8784 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 8785
206415a3 8786 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 8787
7a292a7a 8788 return inf_status;
c906108c
SS
8789}
8790
bf469271
PA
8791static void
8792restore_selected_frame (const frame_id &fid)
c906108c 8793{
bf469271 8794 frame_info *frame = frame_find_by_id (fid);
c906108c 8795
aa0cd9c1
AC
8796 /* If inf_status->selected_frame_id is NULL, there was no previously
8797 selected frame. */
101dcfbe 8798 if (frame == NULL)
c906108c 8799 {
8a3fe4f8 8800 warning (_("Unable to restore previously selected frame."));
bf469271 8801 return;
c906108c
SS
8802 }
8803
0f7d239c 8804 select_frame (frame);
c906108c
SS
8805}
8806
b89667eb
DE
8807/* Restore inferior session state to INF_STATUS. */
8808
c906108c 8809void
16c381f0 8810restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 8811{
4e1c45ea 8812 struct thread_info *tp = inferior_thread ();
d6b48e9c 8813 struct inferior *inf = current_inferior ();
4e1c45ea 8814
8358c15c
JK
8815 if (tp->control.step_resume_breakpoint)
8816 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
8817
5b79abe7
TT
8818 if (tp->control.exception_resume_breakpoint)
8819 tp->control.exception_resume_breakpoint->disposition
8820 = disp_del_at_next_stop;
8821
d82142e2 8822 /* Handle the bpstat_copy of the chain. */
16c381f0 8823 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 8824
16c381f0
JK
8825 tp->control = inf_status->thread_control;
8826 inf->control = inf_status->inferior_control;
d82142e2
JK
8827
8828 /* Other fields: */
8829 stop_stack_dummy = inf_status->stop_stack_dummy;
8830 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 8831
b89667eb 8832 if (target_has_stack)
c906108c 8833 {
bf469271 8834 /* The point of the try/catch is that if the stack is clobbered,
101dcfbe
AC
8835 walking the stack might encounter a garbage pointer and
8836 error() trying to dereference it. */
a70b8144 8837 try
bf469271
PA
8838 {
8839 restore_selected_frame (inf_status->selected_frame_id);
8840 }
230d2906 8841 catch (const gdb_exception_error &ex)
bf469271
PA
8842 {
8843 exception_fprintf (gdb_stderr, ex,
8844 "Unable to restore previously selected frame:\n");
8845 /* Error in restoring the selected frame. Select the
8846 innermost frame. */
8847 select_frame (get_current_frame ());
8848 }
c906108c 8849 }
c906108c 8850
ee841dd8 8851 delete inf_status;
7a292a7a 8852}
c906108c
SS
8853
8854void
16c381f0 8855discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 8856{
8358c15c
JK
8857 if (inf_status->thread_control.step_resume_breakpoint)
8858 inf_status->thread_control.step_resume_breakpoint->disposition
8859 = disp_del_at_next_stop;
8860
5b79abe7
TT
8861 if (inf_status->thread_control.exception_resume_breakpoint)
8862 inf_status->thread_control.exception_resume_breakpoint->disposition
8863 = disp_del_at_next_stop;
8864
1777feb0 8865 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 8866 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 8867
ee841dd8 8868 delete inf_status;
7a292a7a 8869}
b89667eb 8870\f
7f89fd65 8871/* See infrun.h. */
0c557179
SDJ
8872
8873void
8874clear_exit_convenience_vars (void)
8875{
8876 clear_internalvar (lookup_internalvar ("_exitsignal"));
8877 clear_internalvar (lookup_internalvar ("_exitcode"));
8878}
c5aa993b 8879\f
488f131b 8880
b2175913
MS
8881/* User interface for reverse debugging:
8882 Set exec-direction / show exec-direction commands
8883 (returns error unless target implements to_set_exec_direction method). */
8884
170742de 8885enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
8886static const char exec_forward[] = "forward";
8887static const char exec_reverse[] = "reverse";
8888static const char *exec_direction = exec_forward;
40478521 8889static const char *const exec_direction_names[] = {
b2175913
MS
8890 exec_forward,
8891 exec_reverse,
8892 NULL
8893};
8894
8895static void
eb4c3f4a 8896set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
8897 struct cmd_list_element *cmd)
8898{
8899 if (target_can_execute_reverse)
8900 {
8901 if (!strcmp (exec_direction, exec_forward))
8902 execution_direction = EXEC_FORWARD;
8903 else if (!strcmp (exec_direction, exec_reverse))
8904 execution_direction = EXEC_REVERSE;
8905 }
8bbed405
MS
8906 else
8907 {
8908 exec_direction = exec_forward;
8909 error (_("Target does not support this operation."));
8910 }
b2175913
MS
8911}
8912
8913static void
8914show_exec_direction_func (struct ui_file *out, int from_tty,
8915 struct cmd_list_element *cmd, const char *value)
8916{
8917 switch (execution_direction) {
8918 case EXEC_FORWARD:
8919 fprintf_filtered (out, _("Forward.\n"));
8920 break;
8921 case EXEC_REVERSE:
8922 fprintf_filtered (out, _("Reverse.\n"));
8923 break;
b2175913 8924 default:
d8b34453
PA
8925 internal_error (__FILE__, __LINE__,
8926 _("bogus execution_direction value: %d"),
8927 (int) execution_direction);
b2175913
MS
8928 }
8929}
8930
d4db2f36
PA
8931static void
8932show_schedule_multiple (struct ui_file *file, int from_tty,
8933 struct cmd_list_element *c, const char *value)
8934{
3e43a32a
MS
8935 fprintf_filtered (file, _("Resuming the execution of threads "
8936 "of all processes is %s.\n"), value);
d4db2f36 8937}
ad52ddc6 8938
22d2b532
SDJ
8939/* Implementation of `siginfo' variable. */
8940
8941static const struct internalvar_funcs siginfo_funcs =
8942{
8943 siginfo_make_value,
8944 NULL,
8945 NULL
8946};
8947
372316f1
PA
8948/* Callback for infrun's target events source. This is marked when a
8949 thread has a pending status to process. */
8950
8951static void
8952infrun_async_inferior_event_handler (gdb_client_data data)
8953{
372316f1
PA
8954 inferior_event_handler (INF_REG_EVENT, NULL);
8955}
8956
c906108c 8957void
96baa820 8958_initialize_infrun (void)
c906108c 8959{
de0bea00 8960 struct cmd_list_element *c;
c906108c 8961
372316f1
PA
8962 /* Register extra event sources in the event loop. */
8963 infrun_async_inferior_event_token
8964 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
8965
11db9430 8966 add_info ("signals", info_signals_command, _("\
1bedd215
AC
8967What debugger does when program gets various signals.\n\
8968Specify a signal as argument to print info on that signal only."));
c906108c
SS
8969 add_info_alias ("handle", "signals", 0);
8970
de0bea00 8971 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 8972Specify how to handle signals.\n\
486c7739 8973Usage: handle SIGNAL [ACTIONS]\n\
c906108c 8974Args are signals and actions to apply to those signals.\n\
dfbd5e7b 8975If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
8976will be displayed instead.\n\
8977\n\
c906108c
SS
8978Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
8979from 1-15 are allowed for compatibility with old versions of GDB.\n\
8980Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
8981The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 8982used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 8983\n\
1bedd215 8984Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
8985\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
8986Stop means reenter debugger if this signal happens (implies print).\n\
8987Print means print a message if this signal happens.\n\
8988Pass means let program see this signal; otherwise program doesn't know.\n\
8989Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
8990Pass and Stop may be combined.\n\
8991\n\
8992Multiple signals may be specified. Signal numbers and signal names\n\
8993may be interspersed with actions, with the actions being performed for\n\
8994all signals cumulatively specified."));
de0bea00 8995 set_cmd_completer (c, handle_completer);
486c7739 8996
c906108c 8997 if (!dbx_commands)
1a966eab
AC
8998 stop_command = add_cmd ("stop", class_obscure,
8999 not_just_help_class_command, _("\
9000There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9001This allows you to set a list of commands to be run each time execution\n\
1a966eab 9002of the program stops."), &cmdlist);
c906108c 9003
ccce17b0 9004 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
9005Set inferior debugging."), _("\
9006Show inferior debugging."), _("\
9007When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
9008 NULL,
9009 show_debug_infrun,
9010 &setdebuglist, &showdebuglist);
527159b7 9011
3e43a32a
MS
9012 add_setshow_boolean_cmd ("displaced", class_maintenance,
9013 &debug_displaced, _("\
237fc4c9
PA
9014Set displaced stepping debugging."), _("\
9015Show displaced stepping debugging."), _("\
9016When non-zero, displaced stepping specific debugging is enabled."),
9017 NULL,
9018 show_debug_displaced,
9019 &setdebuglist, &showdebuglist);
9020
ad52ddc6
PA
9021 add_setshow_boolean_cmd ("non-stop", no_class,
9022 &non_stop_1, _("\
9023Set whether gdb controls the inferior in non-stop mode."), _("\
9024Show whether gdb controls the inferior in non-stop mode."), _("\
9025When debugging a multi-threaded program and this setting is\n\
9026off (the default, also called all-stop mode), when one thread stops\n\
9027(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9028all other threads in the program while you interact with the thread of\n\
9029interest. When you continue or step a thread, you can allow the other\n\
9030threads to run, or have them remain stopped, but while you inspect any\n\
9031thread's state, all threads stop.\n\
9032\n\
9033In non-stop mode, when one thread stops, other threads can continue\n\
9034to run freely. You'll be able to step each thread independently,\n\
9035leave it stopped or free to run as needed."),
9036 set_non_stop,
9037 show_non_stop,
9038 &setlist,
9039 &showlist);
9040
adc6a863 9041 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
9042 {
9043 signal_stop[i] = 1;
9044 signal_print[i] = 1;
9045 signal_program[i] = 1;
ab04a2af 9046 signal_catch[i] = 0;
c906108c
SS
9047 }
9048
4d9d9d04
PA
9049 /* Signals caused by debugger's own actions should not be given to
9050 the program afterwards.
9051
9052 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9053 explicitly specifies that it should be delivered to the target
9054 program. Typically, that would occur when a user is debugging a
9055 target monitor on a simulator: the target monitor sets a
9056 breakpoint; the simulator encounters this breakpoint and halts
9057 the simulation handing control to GDB; GDB, noting that the stop
9058 address doesn't map to any known breakpoint, returns control back
9059 to the simulator; the simulator then delivers the hardware
9060 equivalent of a GDB_SIGNAL_TRAP to the program being
9061 debugged. */
a493e3e2
PA
9062 signal_program[GDB_SIGNAL_TRAP] = 0;
9063 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9064
9065 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9066 signal_stop[GDB_SIGNAL_ALRM] = 0;
9067 signal_print[GDB_SIGNAL_ALRM] = 0;
9068 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9069 signal_print[GDB_SIGNAL_VTALRM] = 0;
9070 signal_stop[GDB_SIGNAL_PROF] = 0;
9071 signal_print[GDB_SIGNAL_PROF] = 0;
9072 signal_stop[GDB_SIGNAL_CHLD] = 0;
9073 signal_print[GDB_SIGNAL_CHLD] = 0;
9074 signal_stop[GDB_SIGNAL_IO] = 0;
9075 signal_print[GDB_SIGNAL_IO] = 0;
9076 signal_stop[GDB_SIGNAL_POLL] = 0;
9077 signal_print[GDB_SIGNAL_POLL] = 0;
9078 signal_stop[GDB_SIGNAL_URG] = 0;
9079 signal_print[GDB_SIGNAL_URG] = 0;
9080 signal_stop[GDB_SIGNAL_WINCH] = 0;
9081 signal_print[GDB_SIGNAL_WINCH] = 0;
9082 signal_stop[GDB_SIGNAL_PRIO] = 0;
9083 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9084
cd0fc7c3
SS
9085 /* These signals are used internally by user-level thread
9086 implementations. (See signal(5) on Solaris.) Like the above
9087 signals, a healthy program receives and handles them as part of
9088 its normal operation. */
a493e3e2
PA
9089 signal_stop[GDB_SIGNAL_LWP] = 0;
9090 signal_print[GDB_SIGNAL_LWP] = 0;
9091 signal_stop[GDB_SIGNAL_WAITING] = 0;
9092 signal_print[GDB_SIGNAL_WAITING] = 0;
9093 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9094 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9095 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9096 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9097
2455069d
UW
9098 /* Update cached state. */
9099 signal_cache_update (-1);
9100
85c07804
AC
9101 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9102 &stop_on_solib_events, _("\
9103Set stopping for shared library events."), _("\
9104Show stopping for shared library events."), _("\
c906108c
SS
9105If nonzero, gdb will give control to the user when the dynamic linker\n\
9106notifies gdb of shared library events. The most common event of interest\n\
85c07804 9107to the user would be loading/unloading of a new library."),
f9e14852 9108 set_stop_on_solib_events,
920d2a44 9109 show_stop_on_solib_events,
85c07804 9110 &setlist, &showlist);
c906108c 9111
7ab04401
AC
9112 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9113 follow_fork_mode_kind_names,
9114 &follow_fork_mode_string, _("\
9115Set debugger response to a program call of fork or vfork."), _("\
9116Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9117A fork or vfork creates a new process. follow-fork-mode can be:\n\
9118 parent - the original process is debugged after a fork\n\
9119 child - the new process is debugged after a fork\n\
ea1dd7bc 9120The unfollowed process will continue to run.\n\
7ab04401
AC
9121By default, the debugger will follow the parent process."),
9122 NULL,
920d2a44 9123 show_follow_fork_mode_string,
7ab04401
AC
9124 &setlist, &showlist);
9125
6c95b8df
PA
9126 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9127 follow_exec_mode_names,
9128 &follow_exec_mode_string, _("\
9129Set debugger response to a program call of exec."), _("\
9130Show debugger response to a program call of exec."), _("\
9131An exec call replaces the program image of a process.\n\
9132\n\
9133follow-exec-mode can be:\n\
9134\n\
cce7e648 9135 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9136to this new inferior. The program the process was running before\n\
9137the exec call can be restarted afterwards by restarting the original\n\
9138inferior.\n\
9139\n\
9140 same - the debugger keeps the process bound to the same inferior.\n\
9141The new executable image replaces the previous executable loaded in\n\
9142the inferior. Restarting the inferior after the exec call restarts\n\
9143the executable the process was running after the exec call.\n\
9144\n\
9145By default, the debugger will use the same inferior."),
9146 NULL,
9147 show_follow_exec_mode_string,
9148 &setlist, &showlist);
9149
7ab04401
AC
9150 add_setshow_enum_cmd ("scheduler-locking", class_run,
9151 scheduler_enums, &scheduler_mode, _("\
9152Set mode for locking scheduler during execution."), _("\
9153Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9154off == no locking (threads may preempt at any time)\n\
9155on == full locking (no thread except the current thread may run)\n\
9156 This applies to both normal execution and replay mode.\n\
9157step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9158 In this mode, other threads may run during other commands.\n\
9159 This applies to both normal execution and replay mode.\n\
9160replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9161 set_schedlock_func, /* traps on target vector */
920d2a44 9162 show_scheduler_mode,
7ab04401 9163 &setlist, &showlist);
5fbbeb29 9164
d4db2f36
PA
9165 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9166Set mode for resuming threads of all processes."), _("\
9167Show mode for resuming threads of all processes."), _("\
9168When on, execution commands (such as 'continue' or 'next') resume all\n\
9169threads of all processes. When off (which is the default), execution\n\
9170commands only resume the threads of the current process. The set of\n\
9171threads that are resumed is further refined by the scheduler-locking\n\
9172mode (see help set scheduler-locking)."),
9173 NULL,
9174 show_schedule_multiple,
9175 &setlist, &showlist);
9176
5bf193a2
AC
9177 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9178Set mode of the step operation."), _("\
9179Show mode of the step operation."), _("\
9180When set, doing a step over a function without debug line information\n\
9181will stop at the first instruction of that function. Otherwise, the\n\
9182function is skipped and the step command stops at a different source line."),
9183 NULL,
920d2a44 9184 show_step_stop_if_no_debug,
5bf193a2 9185 &setlist, &showlist);
ca6724c1 9186
72d0e2c5
YQ
9187 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9188 &can_use_displaced_stepping, _("\
237fc4c9
PA
9189Set debugger's willingness to use displaced stepping."), _("\
9190Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9191If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9192supported by the target architecture. If off, gdb will not use displaced\n\
9193stepping to step over breakpoints, even if such is supported by the target\n\
9194architecture. If auto (which is the default), gdb will use displaced stepping\n\
9195if the target architecture supports it and non-stop mode is active, but will not\n\
9196use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9197 NULL,
9198 show_can_use_displaced_stepping,
9199 &setlist, &showlist);
237fc4c9 9200
b2175913
MS
9201 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9202 &exec_direction, _("Set direction of execution.\n\
9203Options are 'forward' or 'reverse'."),
9204 _("Show direction of execution (forward/reverse)."),
9205 _("Tells gdb whether to execute forward or backward."),
9206 set_exec_direction_func, show_exec_direction_func,
9207 &setlist, &showlist);
9208
6c95b8df
PA
9209 /* Set/show detach-on-fork: user-settable mode. */
9210
9211 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9212Set whether gdb will detach the child of a fork."), _("\
9213Show whether gdb will detach the child of a fork."), _("\
9214Tells gdb whether to detach the child of a fork."),
9215 NULL, NULL, &setlist, &showlist);
9216
03583c20
UW
9217 /* Set/show disable address space randomization mode. */
9218
9219 add_setshow_boolean_cmd ("disable-randomization", class_support,
9220 &disable_randomization, _("\
9221Set disabling of debuggee's virtual address space randomization."), _("\
9222Show disabling of debuggee's virtual address space randomization."), _("\
9223When this mode is on (which is the default), randomization of the virtual\n\
9224address space is disabled. Standalone programs run with the randomization\n\
9225enabled by default on some platforms."),
9226 &set_disable_randomization,
9227 &show_disable_randomization,
9228 &setlist, &showlist);
9229
ca6724c1 9230 /* ptid initializations */
ca6724c1
KB
9231 inferior_ptid = null_ptid;
9232 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9233
76727919
TT
9234 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
9235 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
9236 gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
9237 gdb::observers::inferior_exit.attach (infrun_inferior_exit);
4aa995e1
PA
9238
9239 /* Explicitly create without lookup, since that tries to create a
9240 value with a void typed value, and when we get here, gdbarch
9241 isn't initialized yet. At this point, we're quite sure there
9242 isn't another convenience variable of the same name. */
22d2b532 9243 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9244
9245 add_setshow_boolean_cmd ("observer", no_class,
9246 &observer_mode_1, _("\
9247Set whether gdb controls the inferior in observer mode."), _("\
9248Show whether gdb controls the inferior in observer mode."), _("\
9249In observer mode, GDB can get data from the inferior, but not\n\
9250affect its execution. Registers and memory may not be changed,\n\
9251breakpoints may not be set, and the program cannot be interrupted\n\
9252or signalled."),
9253 set_observer_mode,
9254 show_observer_mode,
9255 &setlist,
9256 &showlist);
c906108c 9257}
This page took 2.833794 seconds and 4 git commands to generate.