gdb: stop trying to prepare displaced steps for an inferior when it returns _UNAVAILABLE
[deliverable/binutils-gdb.git] / gdb / infrun.c
/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2020 Free Software Foundation, Inc.
   Copyright (C) 2019-2020 Advanced Micro Devices, Inc. All rights reserved.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "utils.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static int follow_fork (void);

static int follow_fork_inferior (int follow_child, int detach_fork);

static void follow_inferior_reset_breakpoints (void);

static int currently_stepping (struct thread_info *tp);

void nullify_last_target_wait_ptid (void);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: infrun_async(%d)\n",
                            enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}
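/* Illustrative only (not part of the original file): a minimal sketch of
   how this hook is meant to be driven.  The call sites shown here are
   hypothetical; in reality the target/event-loop code decides when
   asynchronous inferior events can arrive.  */
#if 0
static void
example_toggle_infrun_async (void)
{
  infrun_async (1);  /* Events may now arrive; poke the event loop so any
                        already-pending event gets processed.  */
  /* ... run while async events are possible ...  */
  infrun_async (0);  /* Quiescent again; stop checking for events.  */
}
#endif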
/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_displaced = false;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}

unsigned int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}


/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)
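/* Illustrative only (not part of the original file): a minimal sketch of
   how the SET_SIGS/UNSET_SIGS helpers are typically used -- flipping the
   signal tables above for a set of signals selected by a caller-provided
   flag array, then letting the target see the updated SIGNAL_PROGRAM
   table.  The helper name and its SIGS argument are hypothetical.  */
#if 0
static void
example_stop_and_nopass_signals (const unsigned char sigs[GDB_SIGNAL_LAST])
{
  /* Mark every selected signal as one we stop for and print...  */
  SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_stop);
  SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_print);
  /* ... and as one that is no longer passed to the program.  */
  UNSET_SIGS (GDB_SIGNAL_LAST, sigs, signal_program);

  /* Push the new SIGNAL_PROGRAM table down to the target.  */
  update_signals_program_target ();
}
#endif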
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the pid/waitstatus of the last event
   returned by target_wait()/deprecated_target_wait_hook().  This
   information is returned by get_last_target_status().  */
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}


/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static int
follow_fork_inferior (int follow_child, int detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          inferior_ptid = child_ptid;
          add_thread_silent (inferior_ptid);
          set_current_inferior (child_inf);
          child_inf->symfile_flags = SYMFILE_NO_READ;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        {
          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (parent_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after fork from "
                                  "parent %s]\n"),
                                target_pid_to_str (process_ptid).c_str ());
            }

          target_detach (parent_inf, 0);
        }

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = child_ptid;
      add_thread_silent (inferior_ptid);
      set_current_inferior (child_inf);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }
    }

  return target_follow_fork (follow_child, detach_fork);
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  int should_resume = 1;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && inferior_ptid != wait_ptid)
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread
            = find_thread_ptid (wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = 0;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previous selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: resuming vfork parent thread %s\n",
                            target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Save/restore inferior_ptid, current program space and current
   inferior.  Only use this if the current context points at an exited
   inferior (and therefore there's no current thread to save).  */
class scoped_restore_exited_inferior
{
public:
  scoped_restore_exited_inferior ()
    : m_saved_ptid (&inferior_ptid)
  {}

private:
  scoped_restore_tmpl<ptid_t> m_saved_ptid;
  scoped_restore_current_program_space m_pspace;
  scoped_restore_current_inferior m_inferior;
};
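/* Illustrative only (not part of the original file): a minimal sketch of
   the intended use of the RAII class above.  When the current inferior
   has already exited there is no thread to save, so this class is used
   instead of scoped_restore_current_pspace_and_thread before temporarily
   switching context; everything is restored when the object goes out of
   scope.  The helper name is hypothetical.  */
#if 0
static void
example_peek_at_other_inferior (inferior *other)
{
  scoped_restore_exited_inferior restore_exited;

  /* Temporarily make OTHER the current inferior/program space.  */
  set_current_inferior (other);
  set_current_program_space (other->pspace);

  /* ... inspect OTHER here ...  */

  /* On return, inferior_ptid, the current program space and the
     current inferior are restored automatically.  */
}
#endif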
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct thread_info *tp;
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          gdb::optional<scoped_restore_exited_inferior>
            maybe_restore_inferior;
          gdb::optional<scoped_restore_current_pspace_and_thread>
            maybe_restore_thread;

          /* If we're handling a child exit, then inferior_ptid points
             at the inferior's pid, not to a thread.  */
          if (!exec)
            maybe_restore_inferior.emplace ();
          else
            maybe_restore_thread.emplace ();

          /* We're letting loose of the parent.  */
          tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module, being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          struct program_space *pspace;

          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to null_ptid while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore restore_ptid
            = make_scoped_restore (&inferior_ptid, null_ptid);

          /* This inferior is dead, so avoid giving the breakpoints
             module the option to write through to it (cloning a
             program space resets breakpoints).  */
          inf->aspace = NULL;
          inf->pspace = NULL;
          pspace = new program_space (maybe_new_address_space ());
          set_current_program_space (pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (pspace, vfork_parent->pspace);
          inf->pspace = pspace;
          inf->aspace = pspace->aspace;

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: resuming vfork parent process %d\n",
                                resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but the one that reported the event.  Note this
     must be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      set_current_inferior (inf);
      set_current_program_space (inf->pspace);
      add_thread (ptid);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *global_thread_step_over_chain_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The thread's global number.  */
  int thread;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
                        "infrun: clear_step_over_info\n");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}

/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}
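/* Illustrative only (not part of the original file): a rough sketch of
   how a breakpoint-insertion path elsewhere in GDB can consult the
   step-over info above to avoid re-inserting the very location that a
   thread is currently being stepped past.  The helper name and its
   LOC_ASPACE/LOC_ADDRESS parameters are hypothetical placeholders, not
   real GDB API.  */
#if 0
static int
example_should_skip_insertion (struct address_space *loc_aspace,
                               CORE_ADDR loc_address)
{
  /* While a step-over is in flight, the location being stepped past
     must stay uninserted; every other location can be (re)inserted as
     usual.  */
  if (stepping_past_instruction_at (loc_aspace, loc_address))
    return 1;

  return 0;
}
#endif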
/* See infrun.h.  */

int
thread_is_stepping_over_breakpoint (int thread)
{
  return (step_over_info.thread != -1
          && thread == step_over_info.thread);
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static int
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}


/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_fixup for details.  */
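/* Illustrative only (not part of the original file): a highly simplified
   sketch of the n1..n4 sequence described above, using the gdbarch
   methods named in the comment.  Signatures are abbreviated and error
   handling, scratch-space bookkeeping and event processing are omitted;
   see displaced_step_prepare / displaced_step_fixup for the real
   logic.  */
#if 0
static void
example_displaced_step_once (struct regcache *regs, struct gdbarch *gdbarch)
{
  CORE_ADDR original = regcache_read_pc (regs);

  /* n1: pick a scratch location (gdbarch_max_insn_length bytes are
     reserved there) and copy the displaced instruction to it, letting
     the architecture adjust it as needed.  */
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  auto closure
    = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regs);

  /* n2: single-step the thread at the scratch location.  */
  regcache_write_pc (regs, copy);
  target_resume (inferior_ptid, 1, GDB_SIGNAL_0);

  /* ... wait for the single-step to report back ...  */

  /* n3: let the architecture fix up registers/memory so the net effect
     matches executing the instruction at ORIGINAL; this also moves the
     PC back into the main instruction stream.  */
  gdbarch_displaced_step_fixup (gdbarch, closure.get (), original, copy, regs);

  /* n4: the thread can now be resumed normally.  */
}
#endif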
/* Get the displaced stepping state of inferior INF.  */

static displaced_step_inferior_state *
get_displaced_stepping_state (inferior *inf)
{
  return &inf->displaced_step_state;
}

/* Get the displaced stepping state of thread THREAD.  */

static displaced_step_thread_state *
get_displaced_stepping_state (thread_info *thread)
{
  return &thread->displaced_step_state;
}

/* Return true if the given thread is doing a displaced step.  */

static bool
displaced_step_in_progress (thread_info *thread)
{
  gdb_assert (thread != NULL);

  return get_displaced_stepping_state (thread)->in_progress ();
}

/* Return true if any thread of this inferior is doing a displaced step.  */

static bool
displaced_step_in_progress (inferior *inf)
{
  for (thread_info *thread : inf->non_exited_threads ())
    {
      if (displaced_step_in_progress (thread))
        return true;
    }

  return false;
}

/* Return true if any thread is doing a displaced step.  */

static bool
displaced_step_in_progress_any_thread ()
{
  for (thread_info *thread : all_non_exited_threads ())
    {
      if (displaced_step_in_progress (thread))
        return true;
    }

  return false;
}
a42244db 1523/* If the inferior is displaced stepping and ADDR equals the starting address
4a3a374c 1524 of the copy area, return the corresponding displaced_step_copy_insn_closure. Otherwise,
a42244db
YQ
1525 return NULL. */
1526
4a3a374c
SM
1527struct displaced_step_copy_insn_closure *
1528get_displaced_step_copy_insn_closure_by_addr (CORE_ADDR addr)
a42244db 1529{
b93d82bc
SM
1530// FIXME: implement me (only needed on ARM).
1531// displaced_step_inferior_state *displaced
1532// = get_displaced_stepping_state (current_inferior ());
1533//
1534// /* If checking the mode of displaced instruction in copy area. */
1535// if (displaced->step_thread != nullptr
1536// && displaced->step_copy == addr)
1537// return displaced->step_closure.get ();
1538//
a42244db
YQ
1539 return NULL;
1540}
1541
fc1cf338
PA
1542static void
1543infrun_inferior_exit (struct inferior *inf)
1544{
d20172fc 1545 inf->displaced_step_state.reset ();
fc1cf338 1546}
237fc4c9 1547
fff08868
HZ
1548/* If ON, and the architecture supports it, GDB will use displaced
1549 stepping to step over breakpoints. If OFF, or if the architecture
1550 doesn't support it, GDB will instead use the traditional
1551 hold-and-step approach. If AUTO (which is the default), GDB will
1552 decide which technique to use to step over breakpoints depending on
80e899b3 1553 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1554
72d0e2c5 1555static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1556
237fc4c9
PA
1557static void
1558show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1559 struct cmd_list_element *c,
1560 const char *value)
1561{
72d0e2c5 1562 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1563 fprintf_filtered (file,
1564 _("Debugger's willingness to use displaced stepping "
1565 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1566 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1567 else
3e43a32a
MS
1568 fprintf_filtered (file,
1569 _("Debugger's willingness to use displaced stepping "
1570 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1571}
1572
80e899b3
SM
1573/* Return true if the gdbarch implements the required methods to use
1574 displaced stepping. */
1575
1576static bool
1577gdbarch_supports_displaced_stepping (gdbarch *arch)
1578{
b93d82bc
SM
1579 /* Only check for the presence of copy_insn. Other required methods
1580 are checked by the gdbarch validation to be provided if copy_insn is
1581 provided. */
80e899b3
SM
1582 return gdbarch_displaced_step_copy_insn_p (arch);
1583}
1584
fff08868 1585/* Return true if displaced stepping can/should be used to step
3fc8eb30 1586 over breakpoints of thread TP. */
fff08868 1587
80e899b3
SM
1588static bool
1589use_displaced_stepping (thread_info *tp)
237fc4c9 1590{
80e899b3
SM
1591 /* If the user disabled it explicitly, don't use displaced stepping. */
1592 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1593 return false;
1594
1595 /* If "auto", only use displaced stepping if the target operates in a non-stop
1596 way. */
1597 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1598 && !target_is_non_stop_p ())
1599 return false;
1600
1601 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1602
1603 /* If the architecture doesn't implement displaced stepping, don't use
1604 it. */
1605 if (!gdbarch_supports_displaced_stepping (gdbarch))
1606 return false;
1607
1608 /* If recording, don't use displaced stepping. */
1609 if (find_record_target () != nullptr)
1610 return false;
1611
d20172fc
SM
1612 displaced_step_inferior_state *displaced_state
1613 = get_displaced_stepping_state (tp->inf);
3fc8eb30 1614
80e899b3
SM
1615 /* If displaced stepping failed before for this inferior, don't bother trying
1616 again. */
1617 if (displaced_state->failed_before)
1618 return false;
1619
1620 return true;
237fc4c9
PA
1621}
1622
b93d82bc 1623/* Simple function wrapper around displaced_step_thread_state::reset. */
b8bfbca5 1624
237fc4c9 1625static void
b93d82bc 1626displaced_step_reset (displaced_step_thread_state *displaced)
237fc4c9 1627{
b8bfbca5 1628 displaced->reset ();
237fc4c9
PA
1629}
1630
b8bfbca5
SM
1631/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1632 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1633
1634using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9
PA
1635
1636/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1637void
1638displaced_step_dump_bytes (struct ui_file *file,
1639 const gdb_byte *buf,
1640 size_t len)
1641{
1642 int i;
1643
1644 for (i = 0; i < len; i++)
1645 fprintf_unfiltered (file, "%02x ", buf[i]);
1646 fputs_unfiltered ("\n", file);
1647}
1648
1649/* Prepare to single-step, using displaced stepping.
1650
1651 Note that we cannot use displaced stepping when we have a signal to
1652 deliver. If we have a signal to deliver and an instruction to step
1653 over, then after the step, there will be no indication from the
1654 target whether the thread entered a signal handler or ignored the
1655 signal and stepped over the instruction successfully --- both cases
1656 result in a simple SIGTRAP. In the first case we mustn't do a
1657 fixup, and in the second case we must --- but we can't tell which.
1658 Comments in the code for 'random signals' in handle_inferior_event
1659 explain how we handle this case instead.
1660
1661 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
7f03bd92
PA
1662 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1663 if the request got deferred to the queue; or DISPLACED_STEP_PREPARE_STATUS_ERROR if this instruction can't be displaced stepped. */
1664
b93d82bc 1665static displaced_step_prepare_status
00431a78 1666displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1667{
00431a78 1668 regcache *regcache = get_thread_regcache (tp);
ac7936df 1669 struct gdbarch *gdbarch = regcache->arch ();
b93d82bc
SM
1670 displaced_step_thread_state *thread_disp_step_state
1671 = get_displaced_stepping_state (tp);
237fc4c9
PA
1672
1673 /* We should never reach this function if the architecture does not
1674 support displaced stepping. */
80e899b3 1675 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1676
c2829269
PA
1677 /* Nor if the thread isn't meant to step over a breakpoint. */
1678 gdb_assert (tp->control.trap_expected);
1679
c1e36e3e
PA
1680 /* Disable range stepping while executing in the scratch pad. We
1681 want a single-step even if executing the displaced instruction in
1682 the scratch buffer lands within the stepping range (e.g., a
1683 jump/branch). */
1684 tp->control.may_range_step = 0;
1685
b93d82bc
SM
1686 /* We are about to start a displaced step for this thread, if one is already
1687 in progress, we goofed up somewhere. */
1688 gdb_assert (!thread_disp_step_state->in_progress ());
237fc4c9 1689
b93d82bc 1690 scoped_restore_current_thread restore_thread;
fc1cf338 1691
b93d82bc
SM
1692 switch_to_thread (tp);
1693
1694 CORE_ADDR original_pc = regcache_read_pc (regcache);
237fc4c9 1695
b93d82bc
SM
1696 displaced_step_prepare_status status =
1697 gdbarch_displaced_step_prepare (gdbarch, tp);
1698
1699 if (status == DISPLACED_STEP_PREPARE_STATUS_ERROR)
1700 {
237fc4c9
PA
1701 if (debug_displaced)
1702 fprintf_unfiltered (gdb_stdlog,
b93d82bc 1703 "displaced: failed to prepare (%s)",
a068643d 1704 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1705
b93d82bc 1706 return DISPLACED_STEP_PREPARE_STATUS_ERROR;
237fc4c9 1707 }
b93d82bc 1708 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
237fc4c9 1709 {
b93d82bc
SM
1710 /* Not enough displaced stepping resources available, defer this
1711 request by placing it in the queue. */
1712
237fc4c9
PA
1713 if (debug_displaced)
1714 fprintf_unfiltered (gdb_stdlog,
b93d82bc
SM
1715 "displaced: not enough resources available, "
1716 "deferring step of %s\n",
a068643d 1717 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1718
b93d82bc 1719 global_thread_step_over_chain_enqueue (tp);
23c6b653 1720 tp->inf->displaced_step_state.unavailable = true;
d35ae833 1721
b93d82bc 1722 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
d35ae833
PA
1723 }
1724
b93d82bc
SM
1725 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1726
1727// FIXME: Should probably be replicated in the arch implementation now.
1728//
1729// if (breakpoint_in_range_p (aspace, copy, len))
1730// {
1731// /* There's a breakpoint set in the scratch pad location range
1732// (which is usually around the entry point). We'd either
1733// install it before resuming, which would overwrite/corrupt the
1734// scratch pad, or if it was already inserted, this displaced
1735// step would overwrite it. The latter is OK in the sense that
1736// we already assume that no thread is going to execute the code
1737// in the scratch pad range (after initial startup) anyway, but
1738// the former is unacceptable. Simply punt and fallback to
1739// stepping over this breakpoint in-line. */
1740// if (debug_displaced)
1741// {
1742// fprintf_unfiltered (gdb_stdlog,
1743// "displaced: breakpoint set in scratch pad. "
1744// "Stepping over breakpoint in-line instead.\n");
1745// }
1746//
1747// gdb_assert (false);
1748// gdbarch_displaced_step_release_location (gdbarch, copy);
1749//
1750// return -1;
1751// }
237fc4c9 1752
9f5a595d
UW
1753 /* Save the information we need to fix things up if the step
1754 succeeds. */
b93d82bc 1755 thread_disp_step_state->set (gdbarch);
237fc4c9 1756
b93d82bc
SM
1757 // FIXME: get it from _prepare?
1758 CORE_ADDR displaced_pc = 0;
ad53cd71 1759
237fc4c9 1760 if (debug_displaced)
b93d82bc
SM
1761 fprintf_unfiltered (gdb_stdlog,
1762 "displaced: prepared successfully thread=%s, "
1763 "original_pc=%s, displaced_pc=%s\n",
1764 target_pid_to_str (tp->ptid).c_str (),
1765 paddress (gdbarch, original_pc),
1766 paddress (gdbarch, displaced_pc));
237fc4c9 1767
b93d82bc 1768 return DISPLACED_STEP_PREPARE_STATUS_OK;
237fc4c9
PA
1769}
1770
3fc8eb30
PA
1771/* Wrapper for displaced_step_prepare_throw that disables further
1772 attempts at displaced stepping if we get a memory error. */
1773
b93d82bc 1774static displaced_step_prepare_status
00431a78 1775displaced_step_prepare (thread_info *thread)
3fc8eb30 1776{
b93d82bc
SM
1777 displaced_step_prepare_status status
1778 = DISPLACED_STEP_PREPARE_STATUS_ERROR;
3fc8eb30 1779
a70b8144 1780 try
3fc8eb30 1781 {
b93d82bc 1782 status = displaced_step_prepare_throw (thread);
3fc8eb30 1783 }
230d2906 1784 catch (const gdb_exception_error &ex)
3fc8eb30
PA
1785 {
1786 struct displaced_step_inferior_state *displaced_state;
1787
16b41842
PA
1788 if (ex.error != MEMORY_ERROR
1789 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1790 throw;
3fc8eb30
PA
1791
1792 if (debug_infrun)
1793 {
1794 fprintf_unfiltered (gdb_stdlog,
1795 "infrun: disabling displaced stepping: %s\n",
3d6e9d23 1796 ex.what ());
3fc8eb30
PA
1797 }
1798
1799 /* Be verbose if "set displaced-stepping" is "on", silent if
1800 "auto". */
1801 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1802 {
fd7dcb94 1803 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1804 ex.what ());
3fc8eb30
PA
1805 }
1806
1807 /* Disable further displaced stepping attempts. */
1808 displaced_state
00431a78 1809 = get_displaced_stepping_state (thread->inf);
3fc8eb30
PA
1810 displaced_state->failed_before = 1;
1811 }
3fc8eb30 1812
b93d82bc 1813 return status;
e2d96639
YQ
1814}
1815
372316f1
PA
1816/* If we displaced stepped an instruction successfully, adjust
1817 registers and memory to yield the same effect the instruction would
1818 have had if we had executed it at its original address, and return
1819 1. If the instruction didn't complete, relocate the PC and return
1820 -1. If the thread wasn't displaced stepping, return 0. */
1821
1822static int
b93d82bc 1823displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1824{
b93d82bc
SM
1825 displaced_step_thread_state *displaced
1826 = get_displaced_stepping_state (event_thread);
fc1cf338 1827
b93d82bc
SM
1828 /* Was this thread performing a displaced step? */
1829 if (!displaced->in_progress ())
372316f1 1830 return 0;
237fc4c9 1831
b8bfbca5 1832 displaced_step_reset_cleanup cleanup (displaced);
237fc4c9 1833
cb71640d
PA
1834 /* Fixup may need to read memory/registers. Switch to the thread
1835 that we're fixing up. Also, target_stopped_by_watchpoint checks
b93d82bc
SM
1836 the current thread, and displaced_step_restore performs ptid-dependent
1837 memory accesses using current_inferior() and current_top_target(). */
00431a78 1838 switch_to_thread (event_thread);
cb71640d 1839
b93d82bc
SM
1840 /* Do the fixup, and release the resources acquired to do the displaced
1841 step. */
1842 displaced_step_finish_status finish_status =
1843 gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1844 event_thread, signal);
237fc4c9 1845
b93d82bc
SM
1846 if (finish_status == DISPLACED_STEP_FINISH_STATUS_OK)
1847 return 1;
1848 else
1849 return -1;
c2829269 1850}
1c5cfe86 1851
4d9d9d04
PA
1852/* Data to be passed around while handling an event. This data is
1853 discarded between events. */
1854struct execution_control_state
1855{
1856 ptid_t ptid;
1857 /* The thread that got the event, if this was a thread event; NULL
1858 otherwise. */
1859 struct thread_info *event_thread;
1860
1861 struct target_waitstatus ws;
1862 int stop_func_filled_in;
1863 CORE_ADDR stop_func_start;
1864 CORE_ADDR stop_func_end;
1865 const char *stop_func_name;
1866 int wait_some_more;
1867
1868 /* True if the event thread hit the single-step breakpoint of
1869 another thread. Thus the event doesn't cause a stop, the thread
1870 needs to be single-stepped past the single-step breakpoint before
1871 we can switch back to the original stepping thread. */
1872 int hit_singlestep_breakpoint;
1873};
1874
1875/* Clear ECS and set it to point at TP. */
c2829269
PA
1876
1877static void
4d9d9d04
PA
1878reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1879{
1880 memset (ecs, 0, sizeof (*ecs));
1881 ecs->event_thread = tp;
1882 ecs->ptid = tp->ptid;
1883}
1884
1885static void keep_going_pass_signal (struct execution_control_state *ecs);
1886static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 1887static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1888static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1889
1890/* Are there any pending step-over requests? If so, run all we can
1891 now and return true. Otherwise, return false. */
1892
1893static int
c2829269
PA
1894start_step_over (void)
1895{
1896 struct thread_info *tp, *next;
b93d82bc 1897 int started = 0;
c2829269 1898
372316f1
PA
1899 /* Don't start a new step-over if we already have an in-line
1900 step-over operation ongoing. */
1901 if (step_over_info_valid_p ())
b93d82bc
SM
1902 return started;
1903
1904 /* Steal the global thread step over chain. */
1905 thread_info *threads_to_step = global_thread_step_over_chain_head;
1906 global_thread_step_over_chain_head = NULL;
372316f1 1907
b93d82bc
SM
1908 if (debug_infrun)
1909 fprintf_unfiltered (gdb_stdlog,
1910 "infrun: stealing list of %d threads to step from global queue\n",
1911 thread_step_over_chain_length (threads_to_step));
1912
23c6b653
SM
1913 for (inferior *inf : all_inferiors ())
1914 inf->displaced_step_state.unavailable = false;
1915
b93d82bc 1916 for (tp = threads_to_step; tp != NULL; tp = next)
237fc4c9 1917 {
4d9d9d04
PA
1918 struct execution_control_state ecss;
1919 struct execution_control_state *ecs = &ecss;
8d297bbf 1920 step_over_what step_what;
372316f1 1921 int must_be_in_line;
c2829269 1922
c65d6b55
PA
1923 gdb_assert (!tp->stop_requested);
1924
b93d82bc 1925 next = thread_step_over_chain_next (threads_to_step, tp);
c2829269 1926
372316f1
PA
1927 step_what = thread_still_needs_step_over (tp);
1928 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1929 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1930 && !use_displaced_stepping (tp)));
372316f1
PA
1931
1932 /* We currently stop all threads of all processes to step-over
1933 in-line. If we need to start a new in-line step-over, let
1934 any pending displaced steps finish first. */
b93d82bc
SM
1935 if (must_be_in_line && displaced_step_in_progress_any_thread ())
1936 continue;
c2829269 1937
b93d82bc 1938 thread_step_over_chain_remove (&threads_to_step, tp);
c2829269 1939
372316f1
PA
1940 if (tp->control.trap_expected
1941 || tp->resumed
1942 || tp->executing)
ad53cd71 1943 {
4d9d9d04
PA
1944 internal_error (__FILE__, __LINE__,
1945 "[%s] has inconsistent state: "
372316f1 1946 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 1947 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 1948 tp->control.trap_expected,
372316f1 1949 tp->resumed,
4d9d9d04 1950 tp->executing);
ad53cd71 1951 }
1c5cfe86 1952
4d9d9d04
PA
1953 if (debug_infrun)
1954 fprintf_unfiltered (gdb_stdlog,
1955 "infrun: resuming [%s] for step-over\n",
a068643d 1956 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
1957
1958 /* keep_going_pass_signal skips the step-over if the breakpoint
1959 is no longer inserted. In all-stop, we want to keep looking
1960 for a thread that needs a step-over instead of resuming TP,
1961 because we wouldn't be able to resume anything else until the
1962 target stops again. In non-stop, the resume always resumes
1963 only TP, so it's OK to let the thread resume freely. */
fbea99ea 1964 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 1965 continue;
8550d3b3 1966
23c6b653
SM
1967 if (tp->inf->displaced_step_state.unavailable)
1968 {
1969 global_thread_step_over_chain_enqueue (tp);
1970 continue;
1971 }
1972
00431a78 1973 switch_to_thread (tp);
4d9d9d04
PA
1974 reset_ecs (ecs, tp);
1975 keep_going_pass_signal (ecs);
1c5cfe86 1976
4d9d9d04
PA
1977 if (!ecs->wait_some_more)
1978 error (_("Command aborted."));
1c5cfe86 1979
b93d82bc
SM
1980 /* If the thread's step over could not be initiated, it was re-added
1981 to the global step over chain. */
1982 if (tp->resumed)
1983 {
1984 if (debug_infrun)
1985 fprintf_unfiltered (gdb_stdlog, "infrun: start_step_over: [%s] was resumed.\n",
1986 target_pid_to_str (tp->ptid).c_str ());
1987 gdb_assert (!thread_is_in_step_over_chain (tp));
1988 }
1989 else
1990 {
1991 if (debug_infrun)
1992 fprintf_unfiltered (gdb_stdlog, "infrun: start_step_over: [%s] was NOT resumed.\n",
1993 target_pid_to_str (tp->ptid).c_str ());
1994 gdb_assert (thread_is_in_step_over_chain (tp));
1995
1996 }
372316f1
PA
1997
1998 /* If we started a new in-line step-over, we're done. */
1999 if (step_over_info_valid_p ())
2000 {
2001 gdb_assert (tp->control.trap_expected);
b93d82bc
SM
2002 started = 1;
2003 break;
372316f1
PA
2004 }
2005
fbea99ea 2006 if (!target_is_non_stop_p ())
4d9d9d04
PA
2007 {
2008 /* On all-stop, shouldn't have resumed unless we needed a
2009 step over. */
2010 gdb_assert (tp->control.trap_expected
2011 || tp->step_after_step_resume_breakpoint);
2012
2013 /* With remote targets (at least), in all-stop, we can't
2014 issue any further remote commands until the program stops
2015 again. */
b93d82bc
SM
2016 started = 1;
2017 break;
1c5cfe86 2018 }
c2829269 2019
4d9d9d04
PA
2020 /* Either the thread no longer needed a step-over, or a new
2021 displaced stepping sequence started. Even in the latter
2022 case, continue looking. Maybe we can also start another
2023 displaced step on a thread of other process. */
237fc4c9 2024 }
4d9d9d04 2025
b93d82bc
SM
2026 /* If there are threads left in the THREADS_TO_STEP list, but we have
2027 detected that we can't start anything more, put these threads back
2028 on the global list. */
2029 if (threads_to_step == NULL)
2030 {
2031 if (debug_infrun)
2032 fprintf_unfiltered (gdb_stdlog,
2033 "infrun: step-over queue now empty\n");
2034 }
2035 else
2036 {
2037 if (debug_infrun)
2038 fprintf_unfiltered (gdb_stdlog,
2039 "infrun: putting back %d threads to step in global queue\n",
2040 thread_step_over_chain_length (threads_to_step));
2041 while (threads_to_step != nullptr)
2042 {
2043 thread_info *thread = threads_to_step;
2044
2045 /* Remove from that list. */
2046 thread_step_over_chain_remove (&threads_to_step, thread);
2047
2048 /* Add to global list. */
2049 global_thread_step_over_chain_enqueue (thread);
2050
2051 }
2052 }
2053
2054 return started;
237fc4c9
PA
2055}
2056
5231c1fd
PA
2057/* Update global variables holding ptids to hold NEW_PTID if they were
2058 holding OLD_PTID. */
2059static void
2060infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2061{
d7e15655 2062 if (inferior_ptid == old_ptid)
5231c1fd 2063 inferior_ptid = new_ptid;
5231c1fd
PA
2064}
2065
237fc4c9 2066\f
c906108c 2067
53904c9e
AC
2068static const char schedlock_off[] = "off";
2069static const char schedlock_on[] = "on";
2070static const char schedlock_step[] = "step";
f2665db5 2071static const char schedlock_replay[] = "replay";
40478521 2072static const char *const scheduler_enums[] = {
ef346e04
AC
2073 schedlock_off,
2074 schedlock_on,
2075 schedlock_step,
f2665db5 2076 schedlock_replay,
ef346e04
AC
2077 NULL
2078};
f2665db5 2079static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2080static void
2081show_scheduler_mode (struct ui_file *file, int from_tty,
2082 struct cmd_list_element *c, const char *value)
2083{
3e43a32a
MS
2084 fprintf_filtered (file,
2085 _("Mode for locking scheduler "
2086 "during execution is \"%s\".\n"),
920d2a44
AC
2087 value);
2088}
c906108c
SS
2089
2090static void
eb4c3f4a 2091set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2092{
eefe576e
AC
2093 if (!target_can_lock_scheduler)
2094 {
2095 scheduler_mode = schedlock_off;
2096 error (_("Target '%s' cannot support this command."), target_shortname);
2097 }
c906108c
SS
2098}
2099
d4db2f36
PA
2100/* True if execution commands resume all threads of all processes by
2101 default; otherwise, resume only threads of the current inferior
2102 process. */
491144b5 2103bool sched_multi = false;
d4db2f36 2104
2facfe5c
DD
2105/* Try to setup for software single stepping over the specified location.
2106 Return 1 if target_resume() should use hardware single step.
2107
2108 GDBARCH the current gdbarch.
2109 PC the location to step over. */
2110
2111static int
2112maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2113{
2114 int hw_step = 1;
2115
f02253f1 2116 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2117 && gdbarch_software_single_step_p (gdbarch))
2118 hw_step = !insert_single_step_breakpoints (gdbarch);
2119
2facfe5c
DD
2120 return hw_step;
2121}
c906108c 2122
f3263aa4
PA
2123/* See infrun.h. */
2124
09cee04b
PA
2125ptid_t
2126user_visible_resume_ptid (int step)
2127{
f3263aa4 2128 ptid_t resume_ptid;
09cee04b 2129
09cee04b
PA
2130 if (non_stop)
2131 {
2132 /* With non-stop mode on, threads are always handled
2133 individually. */
2134 resume_ptid = inferior_ptid;
2135 }
2136 else if ((scheduler_mode == schedlock_on)
03d46957 2137 || (scheduler_mode == schedlock_step && step))
09cee04b 2138 {
f3263aa4
PA
2139 /* User-settable 'scheduler' mode requires solo thread
2140 resume. */
09cee04b
PA
2141 resume_ptid = inferior_ptid;
2142 }
f2665db5
MM
2143 else if ((scheduler_mode == schedlock_replay)
2144 && target_record_will_replay (minus_one_ptid, execution_direction))
2145 {
2146 /* User-settable 'scheduler' mode requires solo thread resume in replay
2147 mode. */
2148 resume_ptid = inferior_ptid;
2149 }
f3263aa4
PA
2150 else if (!sched_multi && target_supports_multi_process ())
2151 {
2152 /* Resume all threads of the current process (and none of other
2153 processes). */
e99b03dc 2154 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2155 }
2156 else
2157 {
2158 /* Resume all threads of all processes. */
2159 resume_ptid = RESUME_ALL;
2160 }
09cee04b
PA
2161
2162 return resume_ptid;
2163}
2164
fbea99ea
PA
2165/* Return a ptid representing the set of threads that we will resume,
2166 in the perspective of the target, assuming run control handling
2167 does not require leaving some threads stopped (e.g., stepping past
2168 breakpoint). USER_STEP indicates whether we're about to start the
2169 target for a stepping command. */
2170
2171static ptid_t
2172internal_resume_ptid (int user_step)
2173{
2174 /* In non-stop, we always control threads individually. Note that
2175 the target may always work in non-stop mode even with "set
2176 non-stop off", in which case user_visible_resume_ptid could
2177 return a wildcard ptid. */
2178 if (target_is_non_stop_p ())
2179 return inferior_ptid;
2180 else
2181 return user_visible_resume_ptid (user_step);
2182}
2183
64ce06e4
PA
2184/* Wrapper for target_resume, that handles infrun-specific
2185 bookkeeping. */
2186
2187static void
2188do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2189{
2190 struct thread_info *tp = inferior_thread ();
2191
c65d6b55
PA
2192 gdb_assert (!tp->stop_requested);
2193
64ce06e4 2194 /* Install inferior's terminal modes. */
223ffa71 2195 target_terminal::inferior ();
64ce06e4
PA
2196
2197 /* Avoid confusing the next resume, if the next stop/resume
2198 happens to apply to another thread. */
2199 tp->suspend.stop_signal = GDB_SIGNAL_0;
2200
8f572e5c
PA
2201 /* Advise target which signals may be handled silently.
2202
2203 If we have removed breakpoints because we are stepping over one
2204 in-line (in any thread), we need to receive all signals to avoid
2205 accidentally skipping a breakpoint during execution of a signal
2206 handler.
2207
2208 Likewise if we're displaced stepping, otherwise a trap for a
2209 breakpoint in a signal handler might be confused with the
2210 displaced step finishing. We don't make the displaced_step_fixup
2211 step distinguish the cases instead, because:
2212
2213 - a backtrace while stopped in the signal handler would show the
2214 scratch pad as frame older than the signal handler, instead of
2215 the real mainline code.
2216
2217 - when the thread is later resumed, the signal handler would
2218 return to the scratch pad area, which would no longer be
2219 valid. */
2220 if (step_over_info_valid_p ()
00431a78 2221 || displaced_step_in_progress (tp->inf))
adc6a863 2222 target_pass_signals ({});
64ce06e4 2223 else
adc6a863 2224 target_pass_signals (signal_pass);
64ce06e4
PA
2225
2226 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2227
2228 target_commit_resume ();
64ce06e4
PA
2229}
2230
d930703d 2231/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2232 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2233 call 'resume', which handles exceptions. */
c906108c 2234
71d378ae
PA
2235static void
2236resume_1 (enum gdb_signal sig)
c906108c 2237{
515630c5 2238 struct regcache *regcache = get_current_regcache ();
ac7936df 2239 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2240 struct thread_info *tp = inferior_thread ();
515630c5 2241 CORE_ADDR pc = regcache_read_pc (regcache);
8b86c959 2242 const address_space *aspace = regcache->aspace ();
b0f16a3e 2243 ptid_t resume_ptid;
856e7dd6
PA
2244 /* This represents the user's step vs continue request. When
2245 deciding whether "set scheduler-locking step" applies, it's the
2246 user's intention that counts. */
2247 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2248 /* This represents what we'll actually request the target to do.
2249 This can decay from a step to a continue, if e.g., we need to
2250 implement single-stepping with breakpoints (software
2251 single-step). */
6b403daa 2252 int step;
c7e8a53c 2253
c65d6b55 2254 gdb_assert (!tp->stop_requested);
c2829269
PA
2255 gdb_assert (!thread_is_in_step_over_chain (tp));
2256
372316f1
PA
2257 if (tp->suspend.waitstatus_pending_p)
2258 {
2259 if (debug_infrun)
2260 {
23fdd69e
SM
2261 std::string statstr
2262 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2263
372316f1 2264 fprintf_unfiltered (gdb_stdlog,
23fdd69e
SM
2265 "infrun: resume: thread %s has pending wait "
2266 "status %s (currently_stepping=%d).\n",
a068643d
TT
2267 target_pid_to_str (tp->ptid).c_str (),
2268 statstr.c_str (),
372316f1 2269 currently_stepping (tp));
372316f1
PA
2270 }
2271
2272 tp->resumed = 1;
2273
2274 /* FIXME: What should we do if we are supposed to resume this
2275 thread with a signal? Maybe we should maintain a queue of
2276 pending signals to deliver. */
2277 if (sig != GDB_SIGNAL_0)
2278 {
fd7dcb94 2279 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2280 gdb_signal_to_name (sig),
2281 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2282 }
2283
2284 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2285
2286 if (target_can_async_p ())
9516f85a
AB
2287 {
2288 target_async (1);
2289 /* Tell the event loop we have an event to process. */
2290 mark_async_event_handler (infrun_async_inferior_event_token);
2291 }
372316f1
PA
2292 return;
2293 }
2294
2295 tp->stepped_breakpoint = 0;
2296
6b403daa
PA
2297 /* Depends on stepped_breakpoint. */
2298 step = currently_stepping (tp);
2299
74609e71
YQ
2300 if (current_inferior ()->waiting_for_vfork_done)
2301 {
48f9886d
PA
2302 /* Don't try to single-step a vfork parent that is waiting for
2303 the child to get out of the shared memory region (by exec'ing
2304 or exiting). This is particularly important on software
2305 single-step archs, as the child process would trip on the
2306 software single step breakpoint inserted for the parent
2307 process. Since the parent will not actually execute any
2308 instruction until the child is out of the shared region (such
2309 are vfork's semantics), it is safe to simply continue it.
2310 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2311 the parent, and tell it to `keep_going', which automatically
2312 re-sets it to stepping. */
74609e71
YQ
2313 if (debug_infrun)
2314 fprintf_unfiltered (gdb_stdlog,
2315 "infrun: resume : clear step\n");
a09dd441 2316 step = 0;
74609e71
YQ
2317 }
2318
527159b7 2319 if (debug_infrun)
237fc4c9 2320 fprintf_unfiltered (gdb_stdlog,
c9737c08 2321 "infrun: resume (step=%d, signal=%s), "
0d9a9a5f 2322 "trap_expected=%d, current thread [%s] at %s\n",
c9737c08
PA
2323 step, gdb_signal_to_symbol_string (sig),
2324 tp->control.trap_expected,
a068643d 2325 target_pid_to_str (inferior_ptid).c_str (),
0d9a9a5f 2326 paddress (gdbarch, pc));
c906108c 2327
c2c6d25f
JM
2328 /* Normally, by the time we reach `resume', the breakpoints are either
2329 removed or inserted, as appropriate. The exception is if we're sitting
2330 at a permanent breakpoint; we need to step over it, but permanent
2331 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2332 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2333 {
af48d08f
PA
2334 if (sig != GDB_SIGNAL_0)
2335 {
2336 /* We have a signal to pass to the inferior. The resume
2337 may, or may not take us to the signal handler. If this
2338 is a step, we'll need to stop in the signal handler, if
2339 there's one, (if the target supports stepping into
2340 handlers), or in the next mainline instruction, if
2341 there's no handler. If this is a continue, we need to be
2342 sure to run the handler with all breakpoints inserted.
2343 In all cases, set a breakpoint at the current address
2344 (where the handler returns to), and once that breakpoint
2345 is hit, resume skipping the permanent breakpoint. If
2346 that breakpoint isn't hit, then we've stepped into the
2347 signal handler (or hit some other event). We'll delete
2348 the step-resume breakpoint then. */
2349
2350 if (debug_infrun)
2351 fprintf_unfiltered (gdb_stdlog,
2352 "infrun: resume: skipping permanent breakpoint, "
2353 "deliver signal first\n");
2354
2355 clear_step_over_info ();
2356 tp->control.trap_expected = 0;
2357
2358 if (tp->control.step_resume_breakpoint == NULL)
2359 {
2360 /* Set a "high-priority" step-resume, as we don't want
2361 user breakpoints at PC to trigger (again) when this
2362 hits. */
2363 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2364 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2365
2366 tp->step_after_step_resume_breakpoint = step;
2367 }
2368
2369 insert_breakpoints ();
2370 }
2371 else
2372 {
2373 /* There's no signal to pass, we can go ahead and skip the
2374 permanent breakpoint manually. */
2375 if (debug_infrun)
2376 fprintf_unfiltered (gdb_stdlog,
2377 "infrun: resume: skipping permanent breakpoint\n");
2378 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2379 /* Update pc to reflect the new address from which we will
2380 execute instructions. */
2381 pc = regcache_read_pc (regcache);
2382
2383 if (step)
2384 {
2385 /* We've already advanced the PC, so the stepping part
2386 is done. Now we need to arrange for a trap to be
2387 reported to handle_inferior_event. Set a breakpoint
2388 at the current PC, and run to it. Don't update
2389 prev_pc, because if we end in
44a1ee51
PA
2390 switch_back_to_stepped_thread, we want the "expected
2391 thread advanced also" branch to be taken. IOW, we
2392 don't want this thread to step further from PC
af48d08f 2393 (overstep). */
1ac806b8 2394 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2395 insert_single_step_breakpoint (gdbarch, aspace, pc);
2396 insert_breakpoints ();
2397
fbea99ea 2398 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2399 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
372316f1 2400 tp->resumed = 1;
af48d08f
PA
2401 return;
2402 }
2403 }
6d350bb5 2404 }
c2c6d25f 2405
c1e36e3e
PA
2406 /* If we have a breakpoint to step over, make sure to do a single
2407 step only. Same if we have software watchpoints. */
2408 if (tp->control.trap_expected || bpstat_should_step ())
2409 tp->control.may_range_step = 0;
2410
237fc4c9
PA
2411 /* If enabled, step over breakpoints by executing a copy of the
2412 instruction at a different address.
2413
2414 We can't use displaced stepping when we have a signal to deliver;
2415 the comments for displaced_step_prepare explain why. The
2416 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2417 signals' explain what we do instead.
2418
2419 We can't use displaced stepping when we are waiting for a vfork_done
2420 event; displaced stepping breaks the vfork child much like a software
2421 single-step breakpoint would. */
3fc8eb30
PA
2422 if (tp->control.trap_expected
2423 && use_displaced_stepping (tp)
cb71640d 2424 && !step_over_info_valid_p ()
a493e3e2 2425 && sig == GDB_SIGNAL_0
74609e71 2426 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2427 {
b93d82bc
SM
2428 displaced_step_prepare_status prepare_status
2429 = displaced_step_prepare (tp);
fc1cf338 2430
b93d82bc 2431 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
d56b7306 2432 {
4d9d9d04
PA
2433 if (debug_infrun)
2434 fprintf_unfiltered (gdb_stdlog,
2435 "Got placed in step-over queue\n");
2436
2437 tp->control.trap_expected = 0;
d56b7306
VP
2438 return;
2439 }
b93d82bc 2440 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_ERROR)
3fc8eb30
PA
2441 {
2442 /* Fallback to stepping over the breakpoint in-line. */
2443
2444 if (target_is_non_stop_p ())
2445 stop_all_threads ();
2446
a01bda52 2447 set_step_over_info (regcache->aspace (),
21edc42f 2448 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2449
2450 step = maybe_software_singlestep (gdbarch, pc);
2451
2452 insert_breakpoints ();
2453 }
b93d82bc 2454 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
3fc8eb30 2455 {
b93d82bc 2456 step = gdbarch_displaced_step_hw_singlestep (gdbarch, NULL);
3fc8eb30 2457 }
b93d82bc
SM
2458 else
2459 gdb_assert_not_reached ("invalid displaced_step_prepare_status value");
237fc4c9
PA
2460 }
2461
2facfe5c 2462 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2463 else if (step)
2facfe5c 2464 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2465
30852783
UW
2466 /* Currently, our software single-step implementation leads to different
2467 results than hardware single-stepping in one situation: when stepping
2468 into delivering a signal which has an associated signal handler,
2469 hardware single-step will stop at the first instruction of the handler,
2470 while software single-step will simply skip execution of the handler.
2471
2472 For now, this difference in behavior is accepted since there is no
2473 easy way to actually implement single-stepping into a signal handler
2474 without kernel support.
2475
2476 However, there is one scenario where this difference leads to follow-on
2477 problems: if we're stepping off a breakpoint by removing all breakpoints
2478 and then single-stepping. In this case, the software single-step
2479 behavior means that even if there is a *breakpoint* in the signal
2480 handler, GDB still would not stop.
2481
2482 Fortunately, we can at least fix this particular issue. We detect
2483 here the case where we are about to deliver a signal while software
2484 single-stepping with breakpoints removed. In this situation, we
2485 revert the decisions to remove all breakpoints and insert single-
2486 step breakpoints, and instead we install a step-resume breakpoint
2487 at the current address, deliver the signal without stepping, and
2488 once we arrive back at the step-resume breakpoint, actually step
2489 over the breakpoint we originally wanted to step over. */
34b7e8a6 2490 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2491 && sig != GDB_SIGNAL_0
2492 && step_over_info_valid_p ())
30852783
UW
2493 {
2494 /* If we have nested signals or a pending signal is delivered
2495 immediately after a handler returns, we might already have
2496 a step-resume breakpoint set on the earlier handler. We cannot
2497 set another step-resume breakpoint; just continue on until the
2498 original breakpoint is hit. */
2499 if (tp->control.step_resume_breakpoint == NULL)
2500 {
2c03e5be 2501 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2502 tp->step_after_step_resume_breakpoint = 1;
2503 }
2504
34b7e8a6 2505 delete_single_step_breakpoints (tp);
30852783 2506
31e77af2 2507 clear_step_over_info ();
30852783 2508 tp->control.trap_expected = 0;
31e77af2
PA
2509
2510 insert_breakpoints ();
30852783
UW
2511 }
2512
b0f16a3e
SM
2513 /* If STEP is set, it's a request to use hardware stepping
2514 facilities. But in that case, we should never
2515 use singlestep breakpoint. */
34b7e8a6 2516 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2517
fbea99ea 2518 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2519 if (tp->control.trap_expected)
b0f16a3e
SM
2520 {
2521 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2522 hit, either by single-stepping the thread with the breakpoint
2523 removed, or by displaced stepping, with the breakpoint inserted.
2524 In the former case, we need to single-step only this thread,
2525 and keep others stopped, as they can miss this breakpoint if
2526 allowed to run. That's not really a problem for displaced
2527 stepping, but, we still keep other threads stopped, in case
2528 another thread is also stopped for a breakpoint waiting for
2529 its turn in the displaced stepping queue. */
b0f16a3e
SM
2530 resume_ptid = inferior_ptid;
2531 }
fbea99ea
PA
2532 else
2533 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2534
7f5ef605
PA
2535 if (execution_direction != EXEC_REVERSE
2536 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2537 {
372316f1
PA
2538 /* There are two cases where we currently need to step a
2539 breakpoint instruction when we have a signal to deliver:
2540
2541 - See handle_signal_stop where we handle random signals that
2542 could take out us out of the stepping range. Normally, in
2543 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2544 signal handler with a breakpoint at PC, but there are cases
2545 where we should _always_ single-step, even if we have a
2546 step-resume breakpoint, like when a software watchpoint is
2547 set. Assuming single-stepping and delivering a signal at the
2548 same time would take us to the signal handler, then we could
2549 have removed the breakpoint at PC to step over it. However,
2550 some hardware step targets (like e.g., Mac OS) can't step
2551 into signal handlers, and for those, we need to leave the
2552 breakpoint at PC inserted, as otherwise if the handler
2553 recurses and executes PC again, it'll miss the breakpoint.
2554 So we leave the breakpoint inserted anyway, but we need to
2555 record that we tried to step a breakpoint instruction, so
372316f1
PA
2556 that adjust_pc_after_break doesn't end up confused.
2557
2558 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2559 in one thread after another thread that was stepping had been
2560 momentarily paused for a step-over. When we re-resume the
2561 stepping thread, it may be resumed from that address with a
2562 breakpoint that hasn't trapped yet. Seen with
2563 gdb.threads/non-stop-fair-events.exp, on targets that don't
2564 do displaced stepping. */
2565
2566 if (debug_infrun)
2567 fprintf_unfiltered (gdb_stdlog,
2568 "infrun: resume: [%s] stepped breakpoint\n",
a068643d 2569 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2570
2571 tp->stepped_breakpoint = 1;
2572
b0f16a3e
SM
2573 /* Most targets can step a breakpoint instruction, thus
2574 executing it normally. But if this one cannot, just
2575 continue and we will hit it anyway. */
7f5ef605 2576 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2577 step = 0;
2578 }
ef5cf84e 2579
b0f16a3e 2580 if (debug_displaced
cb71640d 2581 && tp->control.trap_expected
3fc8eb30 2582 && use_displaced_stepping (tp)
cb71640d 2583 && !step_over_info_valid_p ())
b0f16a3e 2584 {
00431a78 2585 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2586 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2587 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2588 gdb_byte buf[4];
2589
2590 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2591 paddress (resume_gdbarch, actual_pc));
2592 read_memory (actual_pc, buf, sizeof (buf));
2593 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2594 }
237fc4c9 2595
b0f16a3e
SM
2596 if (tp->control.may_range_step)
2597 {
2598 /* If we're resuming a thread with the PC out of the step
2599 range, then we're doing some nested/finer run control
2600 operation, like stepping the thread out of the dynamic
2601 linker or the displaced stepping scratch pad. We
2602 shouldn't have allowed a range step then. */
2603 gdb_assert (pc_in_thread_step_range (pc, tp));
2604 }
c1e36e3e 2605
64ce06e4 2606 do_target_resume (resume_ptid, step, sig);
372316f1 2607 tp->resumed = 1;
c906108c 2608}
71d378ae
PA
2609
2610/* Resume the inferior. SIG is the signal to give the inferior
2611 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2612 rolls back state on error. */
2613
aff4e175 2614static void
71d378ae
PA
2615resume (gdb_signal sig)
2616{
a70b8144 2617 try
71d378ae
PA
2618 {
2619 resume_1 (sig);
2620 }
230d2906 2621 catch (const gdb_exception &ex)
71d378ae
PA
2622 {
2623 /* If resuming is being aborted for any reason, delete any
2624 single-step breakpoint resume_1 may have created, to avoid
2625 confusing the following resumption, and to avoid leaving
2626 single-step breakpoints perturbing other threads, in case
2627 we're running in non-stop mode. */
2628 if (inferior_ptid != null_ptid)
2629 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2630 throw;
71d378ae 2631 }
71d378ae
PA
2632}
2633
c906108c 2634\f
237fc4c9 2635/* Proceeding. */
c906108c 2636
4c2f2a79
PA
2637/* See infrun.h. */
2638
2639/* Counter that tracks number of user visible stops. This can be used
2640 to tell whether a command has proceeded the inferior past the
2641 current location. This allows e.g., inferior function calls in
2642 breakpoint commands to not interrupt the command list. When the
2643 call finishes successfully, the inferior is standing at the same
2644 breakpoint as if nothing happened (and so we don't call
2645 normal_stop). */
2646static ULONGEST current_stop_id;
2647
2648/* See infrun.h. */
2649
2650ULONGEST
2651get_stop_id (void)
2652{
2653 return current_stop_id;
2654}
2655
2656/* Called when we report a user visible stop. */
2657
2658static void
2659new_stop_id (void)
2660{
2661 current_stop_id++;
2662}
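/* Illustrative sketch only (not built): how a caller might use the stop
   id to tell whether the inferior was resumed and reported another
   user-visible stop, e.g. around an inferior function call.  The helper
   name is made up for this example.  */
#if 0
static bool
inferior_stopped_again_sketch (void)
{
  ULONGEST stop_id_before = get_stop_id ();

  /* ... something here may proceed the inferior and stop it again ... */

  /* A different stop id means normal_stop reported a new stop since we
     sampled it above.  */
  return get_stop_id () != stop_id_before;
}
#endif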
2663
c906108c
SS
2664/* Clear out all variables saying what to do when inferior is continued.
2665 First do this, then set the ones you want, then call `proceed'. */
2666
a7212384
UW
2667static void
2668clear_proceed_status_thread (struct thread_info *tp)
c906108c 2669{
a7212384
UW
2670 if (debug_infrun)
2671 fprintf_unfiltered (gdb_stdlog,
2672 "infrun: clear_proceed_status_thread (%s)\n",
a068643d 2673 target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2674
372316f1
PA
2675 /* If we're starting a new sequence, then the previous finished
2676 single-step is no longer relevant. */
2677 if (tp->suspend.waitstatus_pending_p)
2678 {
2679 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2680 {
2681 if (debug_infrun)
2682 fprintf_unfiltered (gdb_stdlog,
2683 "infrun: clear_proceed_status: pending "
2684 "event of %s was a finished step. "
2685 "Discarding.\n",
a068643d 2686 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2687
2688 tp->suspend.waitstatus_pending_p = 0;
2689 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2690 }
2691 else if (debug_infrun)
2692 {
23fdd69e
SM
2693 std::string statstr
2694 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2695
372316f1
PA
2696 fprintf_unfiltered (gdb_stdlog,
2697 "infrun: clear_proceed_status_thread: thread %s "
2698 "has pending wait status %s "
2699 "(currently_stepping=%d).\n",
a068643d
TT
2700 target_pid_to_str (tp->ptid).c_str (),
2701 statstr.c_str (),
372316f1 2702 currently_stepping (tp));
372316f1
PA
2703 }
2704 }
2705
70509625
PA
2706 /* If this signal should not be seen by program, give it zero.
2707 Used for debugging signals. */
2708 if (!signal_pass_state (tp->suspend.stop_signal))
2709 tp->suspend.stop_signal = GDB_SIGNAL_0;
2710
46e3ed7f 2711 delete tp->thread_fsm;
243a9253
PA
2712 tp->thread_fsm = NULL;
2713
16c381f0
JK
2714 tp->control.trap_expected = 0;
2715 tp->control.step_range_start = 0;
2716 tp->control.step_range_end = 0;
c1e36e3e 2717 tp->control.may_range_step = 0;
16c381f0
JK
2718 tp->control.step_frame_id = null_frame_id;
2719 tp->control.step_stack_frame_id = null_frame_id;
2720 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2721 tp->control.step_start_function = NULL;
a7212384 2722 tp->stop_requested = 0;
4e1c45ea 2723
16c381f0 2724 tp->control.stop_step = 0;
32400beb 2725
16c381f0 2726 tp->control.proceed_to_finish = 0;
414c69f7 2727
856e7dd6 2728 tp->control.stepping_command = 0;
17b2616c 2729
a7212384 2730 /* Discard any remaining commands or status from previous stop. */
16c381f0 2731 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2732}
32400beb 2733
a7212384 2734void
70509625 2735clear_proceed_status (int step)
a7212384 2736{
f2665db5
MM
2737 /* With scheduler-locking replay, stop replaying other threads if we're
2738 not replaying the user-visible resume ptid.
2739
2740 This is a convenience feature to not require the user to explicitly
2741 stop replaying the other threads. We're assuming that the user's
2742 intent is to resume tracing the recorded process. */
2743 if (!non_stop && scheduler_mode == schedlock_replay
2744 && target_record_is_replaying (minus_one_ptid)
2745 && !target_record_will_replay (user_visible_resume_ptid (step),
2746 execution_direction))
2747 target_record_stop_replaying ();
2748
08036331 2749 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2750 {
08036331 2751 ptid_t resume_ptid = user_visible_resume_ptid (step);
70509625
PA
2752
2753 /* In all-stop mode, delete the per-thread status of all threads
2754 we're about to resume, implicitly and explicitly. */
08036331
PA
2755 for (thread_info *tp : all_non_exited_threads (resume_ptid))
2756 clear_proceed_status_thread (tp);
6c95b8df
PA
2757 }
2758
d7e15655 2759 if (inferior_ptid != null_ptid)
a7212384
UW
2760 {
2761 struct inferior *inferior;
2762
2763 if (non_stop)
2764 {
6c95b8df
PA
2765 /* If in non-stop mode, only delete the per-thread status of
2766 the current thread. */
a7212384
UW
2767 clear_proceed_status_thread (inferior_thread ());
2768 }
6c95b8df 2769
d6b48e9c 2770 inferior = current_inferior ();
16c381f0 2771 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2772 }
2773
76727919 2774 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2775}
2776
99619bea
PA
2777/* Returns true if TP is still stopped at a breakpoint that needs
2778 stepping-over in order to make progress. If the breakpoint is gone
2779 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2780
2781static int
6c4cfb24 2782thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2783{
2784 if (tp->stepping_over_breakpoint)
2785 {
00431a78 2786 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2787
a01bda52 2788 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2789 regcache_read_pc (regcache))
2790 == ordinary_breakpoint_here)
99619bea
PA
2791 return 1;
2792
2793 tp->stepping_over_breakpoint = 0;
2794 }
2795
2796 return 0;
2797}
2798
6c4cfb24
PA
2799/* Check whether thread TP still needs to start a step-over in order
2800 to make progress when resumed. Returns a bitwise OR of enum
2801 step_over_what bits, indicating what needs to be stepped over. */
2802
8d297bbf 2803static step_over_what
6c4cfb24
PA
2804thread_still_needs_step_over (struct thread_info *tp)
2805{
8d297bbf 2806 step_over_what what = 0;
6c4cfb24
PA
2807
2808 if (thread_still_needs_step_over_bp (tp))
2809 what |= STEP_OVER_BREAKPOINT;
2810
2811 if (tp->stepping_over_watchpoint
2812 && !target_have_steppable_watchpoint)
2813 what |= STEP_OVER_WATCHPOINT;
2814
2815 return what;
2816}
2817
483805cf
PA
2818/* Returns true if scheduler locking applies to thread TP, taking into
2819 account whether TP is about to do a step/next-like command. */
2820
2821static int
856e7dd6 2822schedlock_applies (struct thread_info *tp)
483805cf
PA
2823{
2824 return (scheduler_mode == schedlock_on
2825 || (scheduler_mode == schedlock_step
f2665db5
MM
2826 && tp->control.stepping_command)
2827 || (scheduler_mode == schedlock_replay
2828 && target_record_will_replay (minus_one_ptid,
2829 execution_direction)));
483805cf
PA
2830}
2831
c906108c
SS
2832/* Basic routine for continuing the program in various fashions.
2833
2834 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2835 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2836 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2837
2838 You should call clear_proceed_status before calling proceed. */
2839
2840void
64ce06e4 2841proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2842{
e58b0e63
PA
2843 struct regcache *regcache;
2844 struct gdbarch *gdbarch;
e58b0e63 2845 CORE_ADDR pc;
4d9d9d04
PA
2846 ptid_t resume_ptid;
2847 struct execution_control_state ecss;
2848 struct execution_control_state *ecs = &ecss;
4d9d9d04 2849 int started;
c906108c 2850
e58b0e63
PA
2851 /* If we're stopped at a fork/vfork, follow the branch set by the
2852 "set follow-fork-mode" command; otherwise, we'll just proceed
2853 resuming the current thread. */
2854 if (!follow_fork ())
2855 {
2856 /* The target for some reason decided not to resume. */
2857 normal_stop ();
f148b27e
PA
2858 if (target_can_async_p ())
2859 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
e58b0e63
PA
2860 return;
2861 }
2862
842951eb
PA
2863 /* We'll update this if & when we switch to a new thread. */
2864 previous_inferior_ptid = inferior_ptid;
2865
e58b0e63 2866 regcache = get_current_regcache ();
ac7936df 2867 gdbarch = regcache->arch ();
8b86c959
YQ
2868 const address_space *aspace = regcache->aspace ();
2869
e58b0e63 2870 pc = regcache_read_pc (regcache);
08036331 2871 thread_info *cur_thr = inferior_thread ();
e58b0e63 2872
99619bea 2873 /* Fill in with reasonable starting values. */
08036331 2874 init_thread_stepping_state (cur_thr);
99619bea 2875
08036331 2876 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2877
2acceee2 2878 if (addr == (CORE_ADDR) -1)
c906108c 2879 {
08036331 2880 if (pc == cur_thr->suspend.stop_pc
af48d08f 2881 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2882 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2883 /* There is a breakpoint at the address we will resume at,
2884 step one instruction before inserting breakpoints so that
2885 we do not stop right away (and report a second hit at this
b2175913
MS
2886 breakpoint).
2887
2888 Note, we don't do this in reverse, because we won't
2889 actually be executing the breakpoint insn anyway.
2890 We'll be (un-)executing the previous instruction. */
08036331 2891 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
2892 else if (gdbarch_single_step_through_delay_p (gdbarch)
2893 && gdbarch_single_step_through_delay (gdbarch,
2894 get_current_frame ()))
3352ef37
AC
2895 /* We stepped onto an instruction that needs to be stepped
2896 again before re-inserting the breakpoint, do so. */
08036331 2897 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
2898 }
2899 else
2900 {
515630c5 2901 regcache_write_pc (regcache, addr);
c906108c
SS
2902 }
2903
70509625 2904 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 2905 cur_thr->suspend.stop_signal = siggnal;
70509625 2906
08036331 2907 resume_ptid = user_visible_resume_ptid (cur_thr->control.stepping_command);
4d9d9d04
PA
2908
2909 /* If an exception is thrown from this point on, make sure to
2910 propagate GDB's knowledge of the executing state to the
2911 frontend/user running state. */
731f534f 2912 scoped_finish_thread_state finish_state (resume_ptid);
4d9d9d04
PA
2913
2914 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2915 threads (e.g., we might need to set threads stepping over
2916 breakpoints first), from the user/frontend's point of view, all
2917 threads in RESUME_PTID are now running. Unless we're calling an
2918 inferior function, as in that case we pretend the inferior
2919 doesn't run at all. */
08036331 2920 if (!cur_thr->control.in_infcall)
4d9d9d04 2921 set_running (resume_ptid, 1);
17b2616c 2922
527159b7 2923 if (debug_infrun)
8a9de0e4 2924 fprintf_unfiltered (gdb_stdlog,
64ce06e4 2925 "infrun: proceed (addr=%s, signal=%s)\n",
c9737c08 2926 paddress (gdbarch, addr),
64ce06e4 2927 gdb_signal_to_symbol_string (siggnal));
527159b7 2928
4d9d9d04
PA
2929 annotate_starting ();
2930
2931 /* Make sure that output from GDB appears before output from the
2932 inferior. */
2933 gdb_flush (gdb_stdout);
2934
d930703d
PA
2935 /* Since we've marked the inferior running, give it the terminal. A
2936 QUIT/Ctrl-C from here on is forwarded to the target (which can
2937 still detect attempts to unblock a stuck connection with repeated
2938 Ctrl-C from within target_pass_ctrlc). */
2939 target_terminal::inferior ();
2940
4d9d9d04
PA
2941 /* In a multi-threaded task we may select another thread and
2942 then continue or step.
2943
2944 But if a thread that we're resuming had stopped at a breakpoint,
2945 it will immediately cause another breakpoint stop without any
2946 execution (i.e. it will report a breakpoint hit incorrectly). So
2947 we must step over it first.
2948
2949 Look for threads other than the current (TP) that reported a
2950 breakpoint hit and haven't been resumed yet since. */
2951
2952 /* If scheduler locking applies, we can avoid iterating over all
2953 threads. */
08036331 2954 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 2955 {
08036331
PA
2956 for (thread_info *tp : all_non_exited_threads (resume_ptid))
2957 {
4d9d9d04
PA
2958 /* Ignore the current thread here. It's handled
2959 afterwards. */
08036331 2960 if (tp == cur_thr)
4d9d9d04 2961 continue;
c906108c 2962
4d9d9d04
PA
2963 if (!thread_still_needs_step_over (tp))
2964 continue;
2965
2966 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 2967
99619bea
PA
2968 if (debug_infrun)
2969 fprintf_unfiltered (gdb_stdlog,
2970 "infrun: need to step-over [%s] first\n",
a068643d 2971 target_pid_to_str (tp->ptid).c_str ());
99619bea 2972
66716e78 2973 global_thread_step_over_chain_enqueue (tp);
2adfaa28 2974 }
30852783
UW
2975 }
2976
4d9d9d04
PA
2977 /* Enqueue the current thread last, so that we move all other
2978 threads over their breakpoints first. */
08036331 2979 if (cur_thr->stepping_over_breakpoint)
66716e78 2980 global_thread_step_over_chain_enqueue (cur_thr);
30852783 2981
4d9d9d04
PA
2982 /* If the thread isn't started, we'll still need to set its prev_pc,
2983 so that switch_back_to_stepped_thread knows the thread hasn't
2984 advanced. Must do this before resuming any thread, as in
2985 all-stop/remote, once we resume we can't send any other packet
2986 until the target stops again. */
08036331 2987 cur_thr->prev_pc = regcache_read_pc (regcache);
99619bea 2988
a9bc57b9
TT
2989 {
2990 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 2991
a9bc57b9 2992 started = start_step_over ();
c906108c 2993
a9bc57b9
TT
2994 if (step_over_info_valid_p ())
2995 {
2996 /* Either this thread started a new in-line step over, or some
2997 other thread was already doing one. In either case, don't
2998 resume anything else until the step-over is finished. */
2999 }
3000 else if (started && !target_is_non_stop_p ())
3001 {
3002 /* A new displaced stepping sequence was started. In all-stop,
3003 we can't talk to the target anymore until it next stops. */
3004 }
3005 else if (!non_stop && target_is_non_stop_p ())
3006 {
3007 /* In all-stop, but the target is always in non-stop mode.
3008 Start all other threads that are implicitly resumed too. */
08036331 3009 for (thread_info *tp : all_non_exited_threads (resume_ptid))
fbea99ea 3010 {
fbea99ea
PA
3011 if (tp->resumed)
3012 {
3013 if (debug_infrun)
3014 fprintf_unfiltered (gdb_stdlog,
3015 "infrun: proceed: [%s] resumed\n",
a068643d 3016 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3017 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3018 continue;
3019 }
3020
3021 if (thread_is_in_step_over_chain (tp))
3022 {
3023 if (debug_infrun)
3024 fprintf_unfiltered (gdb_stdlog,
3025 "infrun: proceed: [%s] needs step-over\n",
a068643d 3026 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3027 continue;
3028 }
3029
3030 if (debug_infrun)
3031 fprintf_unfiltered (gdb_stdlog,
3032 "infrun: proceed: resuming %s\n",
a068643d 3033 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3034
3035 reset_ecs (ecs, tp);
00431a78 3036 switch_to_thread (tp);
fbea99ea
PA
3037 keep_going_pass_signal (ecs);
3038 if (!ecs->wait_some_more)
fd7dcb94 3039 error (_("Command aborted."));
fbea99ea 3040 }
a9bc57b9 3041 }
08036331 3042 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3043 {
3044 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3045 reset_ecs (ecs, cur_thr);
3046 switch_to_thread (cur_thr);
a9bc57b9
TT
3047 keep_going_pass_signal (ecs);
3048 if (!ecs->wait_some_more)
3049 error (_("Command aborted."));
3050 }
3051 }
c906108c 3052
85ad3aaf
PA
3053 target_commit_resume ();
3054
731f534f 3055 finish_state.release ();
c906108c 3056
0b333c5e
PA
3057 /* Tell the event loop to wait for it to stop. If the target
3058 supports asynchronous execution, it'll do this from within
3059 target_resume. */
362646f5 3060 if (!target_can_async_p ())
0b333c5e 3061 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3062}
c906108c
SS
3063\f
3064
3065/* Start remote-debugging of a machine over a serial link. */
96baa820 3066
c906108c 3067void
8621d6a9 3068start_remote (int from_tty)
c906108c 3069{
d6b48e9c 3070 struct inferior *inferior;
d6b48e9c
PA
3071
3072 inferior = current_inferior ();
16c381f0 3073 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3074
1777feb0 3075 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3076 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3077 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3078 nothing is returned (instead of just blocking). Because of this,
3079 targets expecting an immediate response need to, internally, set
3080 things up so that the target_wait() is forced to eventually
1777feb0 3081 timeout. */
6426a772
JM
3082 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3083 differentiate to its caller what the state of the target is after
3084 the initial open has been performed. Here we're assuming that
3085 the target has stopped. It should be possible to eventually have
3086 target_open() return to the caller an indication that the target
3087 is currently running and GDB state should be set to the same as
1777feb0 3088 for an async run. */
e4c8541f 3089 wait_for_inferior ();
8621d6a9
DJ
3090
3091 /* Now that the inferior has stopped, do any bookkeeping like
3092 loading shared libraries. We want to do this before normal_stop,
3093 so that the displayed frame is up to date. */
8b88a78e 3094 post_create_inferior (current_top_target (), from_tty);
8621d6a9 3095
6426a772 3096 normal_stop ();
c906108c
SS
3097}
3098
3099/* Initialize static vars when a new inferior begins. */
3100
3101void
96baa820 3102init_wait_for_inferior (void)
c906108c
SS
3103{
3104 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3105
c906108c
SS
3106 breakpoint_init_inferior (inf_starting);
3107
70509625 3108 clear_proceed_status (0);
9f976b41 3109
ca005067 3110 target_last_wait_ptid = minus_one_ptid;
237fc4c9 3111
842951eb 3112 previous_inferior_ptid = inferior_ptid;
c906108c 3113}
237fc4c9 3114
c906108c 3115\f
488f131b 3116
ec9499be 3117static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3118
568d6575
UW
3119static void handle_step_into_function (struct gdbarch *gdbarch,
3120 struct execution_control_state *ecs);
3121static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3122 struct execution_control_state *ecs);
4f5d7f63 3123static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3124static void check_exception_resume (struct execution_control_state *,
28106bc2 3125 struct frame_info *);
611c83ae 3126
bdc36728 3127static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3128static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3129static void keep_going (struct execution_control_state *ecs);
94c57d6a 3130static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3131static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3132
252fbfc8
PA
3133/* This function is attached as a "thread_stop_requested" observer.
3134 Cleanup local state that assumed the PTID was to be resumed, and
3135 report the stop to the frontend. */
3136
2c0b251b 3137static void
252fbfc8
PA
3138infrun_thread_stop_requested (ptid_t ptid)
3139{
c65d6b55
PA
3140 /* PTID was requested to stop. If the thread was already stopped,
3141 but the user/frontend doesn't know about that yet (e.g., the
3142 thread had been temporarily paused for some step-over), set up
3143 for reporting the stop now. */
08036331
PA
3144 for (thread_info *tp : all_threads (ptid))
3145 {
3146 if (tp->state != THREAD_RUNNING)
3147 continue;
3148 if (tp->executing)
3149 continue;
c65d6b55 3150
08036331
PA
3151 /* Remove matching threads from the step-over queue, so
3152 start_step_over doesn't try to resume them
3153 automatically. */
3154 if (thread_is_in_step_over_chain (tp))
66716e78 3155 global_thread_step_over_chain_remove (tp);
c65d6b55 3156
08036331
PA
3157 /* If the thread is stopped, but the user/frontend doesn't
3158 know about that yet, queue a pending event, as if the
3159 thread had just stopped now. Unless the thread already had
3160 a pending event. */
3161 if (!tp->suspend.waitstatus_pending_p)
3162 {
3163 tp->suspend.waitstatus_pending_p = 1;
3164 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3165 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3166 }
c65d6b55 3167
08036331
PA
3168 /* Clear the inline-frame state, since we're re-processing the
3169 stop. */
3170 clear_inline_frame_state (tp->ptid);
c65d6b55 3171
08036331
PA
3172 /* If this thread was paused because some other thread was
3173 doing an inline-step over, let that finish first. Once
3174 that happens, we'll restart all threads and consume pending
3175 stop events then. */
3176 if (step_over_info_valid_p ())
3177 continue;
3178
3179 /* Otherwise we can process the (new) pending event now. Set
3180 it so this pending event is considered by
3181 do_target_wait. */
3182 tp->resumed = 1;
3183 }
252fbfc8
PA
3184}
3185
a07daef3
PA
3186static void
3187infrun_thread_thread_exit (struct thread_info *tp, int silent)
3188{
d7e15655 3189 if (target_last_wait_ptid == tp->ptid)
a07daef3
PA
3190 nullify_last_target_wait_ptid ();
3191}
3192
0cbcdb96
PA
3193/* Delete the step resume, single-step and longjmp/exception resume
3194 breakpoints of TP. */
4e1c45ea 3195
0cbcdb96
PA
3196static void
3197delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3198{
0cbcdb96
PA
3199 delete_step_resume_breakpoint (tp);
3200 delete_exception_resume_breakpoint (tp);
34b7e8a6 3201 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3202}
3203
0cbcdb96
PA
3204/* If the target still has execution, call FUNC for each thread that
3205 just stopped. In all-stop, that's all the non-exited threads; in
3206 non-stop, that's the current thread, only. */
3207
3208typedef void (*for_each_just_stopped_thread_callback_func)
3209 (struct thread_info *tp);
4e1c45ea
PA
3210
3211static void
0cbcdb96 3212for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3213{
d7e15655 3214 if (!target_has_execution || inferior_ptid == null_ptid)
4e1c45ea
PA
3215 return;
3216
fbea99ea 3217 if (target_is_non_stop_p ())
4e1c45ea 3218 {
0cbcdb96
PA
3219 /* If in non-stop mode, only the current thread stopped. */
3220 func (inferior_thread ());
4e1c45ea
PA
3221 }
3222 else
0cbcdb96 3223 {
0cbcdb96 3224 /* In all-stop mode, all threads have stopped. */
08036331
PA
3225 for (thread_info *tp : all_non_exited_threads ())
3226 func (tp);
0cbcdb96
PA
3227 }
3228}
3229
3230/* Delete the step resume and longjmp/exception resume breakpoints of
3231 the threads that just stopped. */
3232
3233static void
3234delete_just_stopped_threads_infrun_breakpoints (void)
3235{
3236 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3237}
3238
3239/* Delete the single-step breakpoints of the threads that just
3240 stopped. */
7c16b83e 3241
34b7e8a6
PA
3242static void
3243delete_just_stopped_threads_single_step_breakpoints (void)
3244{
3245 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3246}
3247
221e1a37 3248/* See infrun.h. */
223698f8 3249
221e1a37 3250void
223698f8
DE
3251print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3252 const struct target_waitstatus *ws)
3253{
23fdd69e 3254 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3255 string_file stb;
223698f8
DE
3256
3257 /* The text is split over several lines because it was getting too long.
3258 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3259 output as a unit; we want only one timestamp printed if debug_timestamp
3260 is set. */
3261
d7e74731 3262 stb.printf ("infrun: target_wait (%d.%ld.%ld",
e99b03dc 3263 waiton_ptid.pid (),
e38504b3 3264 waiton_ptid.lwp (),
cc6bcb54 3265 waiton_ptid.tid ());
e99b03dc 3266 if (waiton_ptid.pid () != -1)
a068643d 3267 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731
PA
3268 stb.printf (", status) =\n");
3269 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
e99b03dc 3270 result_ptid.pid (),
e38504b3 3271 result_ptid.lwp (),
cc6bcb54 3272 result_ptid.tid (),
a068643d 3273 target_pid_to_str (result_ptid).c_str ());
23fdd69e 3274 stb.printf ("infrun: %s\n", status_string.c_str ());
223698f8
DE
3275
3276 /* This uses %s in part to handle %'s in the text, but also to avoid
3277 a gcc error: the format attribute requires a string literal. */
d7e74731 3278 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3279}
3280
372316f1
PA
3281/* Select a thread at random, out of those which are resumed and have
3282 had events. */
3283
3284static struct thread_info *
3285random_pending_event_thread (ptid_t waiton_ptid)
3286{
372316f1 3287 int num_events = 0;
08036331
PA
3288
3289 auto has_event = [] (thread_info *tp)
3290 {
3291 return (tp->resumed
3292 && tp->suspend.waitstatus_pending_p);
3293 };
372316f1
PA
3294
3295 /* First see how many events we have. Count only resumed threads
3296 that have an event pending. */
08036331
PA
3297 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3298 if (has_event (tp))
372316f1
PA
3299 num_events++;
3300
3301 if (num_events == 0)
3302 return NULL;
3303
3304 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3305 int random_selector = (int) ((num_events * (double) rand ())
3306 / (RAND_MAX + 1.0));
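  /* Editor's illustrative note (not in the original source): with rand ()
     uniform over [0, RAND_MAX], the expression above yields an integer in
     [0, num_events - 1]; rand () == 0 selects the first eligible thread in
     the loop below and rand () == RAND_MAX selects the last one.  */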
372316f1
PA
3307
3308 if (debug_infrun && num_events > 1)
3309 fprintf_unfiltered (gdb_stdlog,
3310 "infrun: Found %d events, selecting #%d\n",
3311 num_events, random_selector);
3312
3313 /* Select the Nth thread that has had an event. */
08036331
PA
3314 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3315 if (has_event (tp))
372316f1 3316 if (random_selector-- == 0)
08036331 3317 return tp;
372316f1 3318
08036331 3319 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3320}
3321
3322/* Wrapper for target_wait that first checks whether threads have
3323 pending statuses to report before actually asking the target for
3324 more events. */
3325
3326static ptid_t
3327do_target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
3328{
3329 ptid_t event_ptid;
3330 struct thread_info *tp;
3331
3332 /* First check if there is a resumed thread with a wait status
3333 pending. */
d7e15655 3334 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1
PA
3335 {
3336 tp = random_pending_event_thread (ptid);
3337 }
3338 else
3339 {
3340 if (debug_infrun)
3341 fprintf_unfiltered (gdb_stdlog,
3342 "infrun: Waiting for specific thread %s.\n",
a068643d 3343 target_pid_to_str (ptid).c_str ());
372316f1
PA
3344
3345 /* We have a specific thread to check. */
3346 tp = find_thread_ptid (ptid);
3347 gdb_assert (tp != NULL);
3348 if (!tp->suspend.waitstatus_pending_p)
3349 tp = NULL;
3350 }
3351
3352 if (tp != NULL
3353 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3354 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3355 {
00431a78 3356 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3357 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3358 CORE_ADDR pc;
3359 int discard = 0;
3360
3361 pc = regcache_read_pc (regcache);
3362
3363 if (pc != tp->suspend.stop_pc)
3364 {
3365 if (debug_infrun)
3366 fprintf_unfiltered (gdb_stdlog,
3367 "infrun: PC of %s changed. was=%s, now=%s\n",
a068643d 3368 target_pid_to_str (tp->ptid).c_str (),
defd2172 3369 paddress (gdbarch, tp->suspend.stop_pc),
372316f1
PA
3370 paddress (gdbarch, pc));
3371 discard = 1;
3372 }
a01bda52 3373 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1
PA
3374 {
3375 if (debug_infrun)
3376 fprintf_unfiltered (gdb_stdlog,
3377 "infrun: previous breakpoint of %s, at %s gone\n",
a068643d 3378 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
3379 paddress (gdbarch, pc));
3380
3381 discard = 1;
3382 }
3383
3384 if (discard)
3385 {
3386 if (debug_infrun)
3387 fprintf_unfiltered (gdb_stdlog,
3388 "infrun: pending event of %s cancelled.\n",
a068643d 3389 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3390
3391 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3392 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3393 }
3394 }
3395
3396 if (tp != NULL)
3397 {
3398 if (debug_infrun)
3399 {
23fdd69e
SM
3400 std::string statstr
3401 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 3402
372316f1
PA
3403 fprintf_unfiltered (gdb_stdlog,
3404 "infrun: Using pending wait status %s for %s.\n",
23fdd69e 3405 statstr.c_str (),
a068643d 3406 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3407 }
3408
3409 /* Now that we've selected our final event LWP, un-adjust its PC
3410 if it was a software breakpoint (and the target doesn't
3411 always adjust the PC itself). */
3412 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3413 && !target_supports_stopped_by_sw_breakpoint ())
3414 {
3415 struct regcache *regcache;
3416 struct gdbarch *gdbarch;
3417 int decr_pc;
3418
00431a78 3419 regcache = get_thread_regcache (tp);
ac7936df 3420 gdbarch = regcache->arch ();
372316f1
PA
3421
3422 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3423 if (decr_pc != 0)
3424 {
3425 CORE_ADDR pc;
3426
3427 pc = regcache_read_pc (regcache);
3428 regcache_write_pc (regcache, pc + decr_pc);
3429 }
3430 }
3431
3432 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3433 *status = tp->suspend.waitstatus;
3434 tp->suspend.waitstatus_pending_p = 0;
3435
3436 /* Wake up the event loop again, until all pending events are
3437 processed. */
3438 if (target_is_async_p ())
3439 mark_async_event_handler (infrun_async_inferior_event_token);
3440 return tp->ptid;
3441 }
3442
3443 /* But if we don't find one, we'll have to wait. */
3444
3445 if (deprecated_target_wait_hook)
3446 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3447 else
3448 event_ptid = target_wait (ptid, status, options);
3449
3450 return event_ptid;
3451}
3452
24291992
PA
3453/* Prepare and stabilize the inferior for detaching it. E.g.,
3454 detaching while a thread is displaced stepping is a recipe for
3455 crashing it, as nothing would readjust the PC out of the scratch
3456 pad. */
3457
3458void
3459prepare_for_detach (void)
3460{
3461 struct inferior *inf = current_inferior ();
f2907e49 3462 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3463
b93d82bc 3464 // displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);
24291992
PA
3465
3466 /* Is any thread of this process displaced stepping? If not,
3467 there's nothing else to do. */
b93d82bc 3468 if (!displaced_step_in_progress (inf))
24291992
PA
3469 return;
3470
3471 if (debug_infrun)
3472 fprintf_unfiltered (gdb_stdlog,
3473 "displaced-stepping in-process while detaching");
3474
9bcb1f16 3475 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3476
b93d82bc
SM
3477 // FIXME
3478 while (false)
24291992 3479 {
24291992
PA
3480 struct execution_control_state ecss;
3481 struct execution_control_state *ecs;
3482
3483 ecs = &ecss;
3484 memset (ecs, 0, sizeof (*ecs));
3485
3486 overlay_cache_invalid = 1;
f15cb84a
YQ
3487 /* Flush target cache before starting to handle each event.
3488 Target was running and cache could be stale. This is just a
3489 heuristic. Running threads may modify target memory, but we
3490 don't get any event. */
3491 target_dcache_invalidate ();
24291992 3492
372316f1 3493 ecs->ptid = do_target_wait (pid_ptid, &ecs->ws, 0);
24291992
PA
3494
3495 if (debug_infrun)
3496 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3497
3498 /* If an error happens while handling the event, propagate GDB's
3499 knowledge of the executing state to the frontend/user running
3500 state. */
731f534f 3501 scoped_finish_thread_state finish_state (minus_one_ptid);
24291992
PA
3502
3503 /* Now figure out what to do with the result of the wait. */
3504 handle_inferior_event (ecs);
3505
3506 /* No error, don't finish the state yet. */
731f534f 3507 finish_state.release ();
24291992
PA
3508
3509 /* Breakpoints and watchpoints are not installed on the target
3510 at this point, and signals are passed directly to the
3511 inferior, so this must mean the process is gone. */
3512 if (!ecs->wait_some_more)
3513 {
9bcb1f16 3514 restore_detaching.release ();
24291992
PA
3515 error (_("Program exited while detaching"));
3516 }
3517 }
3518
9bcb1f16 3519 restore_detaching.release ();
24291992
PA
3520}
3521
cd0fc7c3 3522/* Wait for control to return from inferior to debugger.
ae123ec6 3523
cd0fc7c3
SS
3524 If inferior gets a signal, we may decide to start it up again
3525 instead of returning. That is why there is a loop in this function.
3526 When this function actually returns it means the inferior
3527 should be left stopped and GDB should read more commands. */
3528
3529void
e4c8541f 3530wait_for_inferior (void)
cd0fc7c3 3531{
527159b7 3532 if (debug_infrun)
ae123ec6 3533 fprintf_unfiltered
e4c8541f 3534 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
527159b7 3535
4c41382a 3536 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3537
e6f5c25b
PA
3538 /* If an error happens while handling the event, propagate GDB's
3539 knowledge of the executing state to the frontend/user running
3540 state. */
731f534f 3541 scoped_finish_thread_state finish_state (minus_one_ptid);
e6f5c25b 3542
c906108c
SS
3543 while (1)
3544 {
ae25568b
PA
3545 struct execution_control_state ecss;
3546 struct execution_control_state *ecs = &ecss;
963f9c80 3547 ptid_t waiton_ptid = minus_one_ptid;
29f49a6a 3548
ae25568b
PA
3549 memset (ecs, 0, sizeof (*ecs));
3550
ec9499be 3551 overlay_cache_invalid = 1;
ec9499be 3552
f15cb84a
YQ
3553 /* Flush target cache before starting to handle each event.
3554 Target was running and cache could be stale. This is just a
3555 heuristic. Running threads may modify target memory, but we
3556 don't get any event. */
3557 target_dcache_invalidate ();
3558
372316f1 3559 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, 0);
c906108c 3560
f00150c9 3561 if (debug_infrun)
223698f8 3562 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
f00150c9 3563
cd0fc7c3
SS
3564 /* Now figure out what to do with the result of the wait. */
3565 handle_inferior_event (ecs);
c906108c 3566
cd0fc7c3
SS
3567 if (!ecs->wait_some_more)
3568 break;
3569 }
4e1c45ea 3570
e6f5c25b 3571 /* No error, don't finish the state yet. */
731f534f 3572 finish_state.release ();
cd0fc7c3 3573}
c906108c 3574
d3d4baed
PA
3575/* Cleanup that reinstalls the readline callback handler, if the
3576 target is running in the background. If while handling the target
3577 event something triggered a secondary prompt, like e.g., a
3578 pagination prompt, we'll have removed the callback handler (see
3579 gdb_readline_wrapper_line). Need to do this as we go back to the
3580 event loop, ready to process further input. Note this has no
3581 effect if the handler hasn't actually been removed, because calling
3582 rl_callback_handler_install resets the line buffer, thus losing
3583 input. */
3584
3585static void
d238133d 3586reinstall_readline_callback_handler_cleanup ()
d3d4baed 3587{
3b12939d
PA
3588 struct ui *ui = current_ui;
3589
3590 if (!ui->async)
6c400b59
PA
3591 {
3592 /* We're not going back to the top level event loop yet. Don't
3593 install the readline callback, as it'd prep the terminal,
3594 readline-style (raw, noecho) (e.g., --batch). We'll install
3595 it the next time the prompt is displayed, when we're ready
3596 for input. */
3597 return;
3598 }
3599
3b12939d 3600 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3601 gdb_rl_callback_handler_reinstall ();
3602}
3603
243a9253
PA
3604/* Clean up the FSMs of threads that are now stopped. In non-stop,
3605 that's just the event thread. In all-stop, that's all threads. */
3606
3607static void
3608clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3609{
08036331
PA
3610 if (ecs->event_thread != NULL
3611 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3612 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3613
3614 if (!non_stop)
3615 {
08036331 3616 for (thread_info *thr : all_non_exited_threads ())
243a9253
PA
3617 {
3618 if (thr->thread_fsm == NULL)
3619 continue;
3620 if (thr == ecs->event_thread)
3621 continue;
3622
00431a78 3623 switch_to_thread (thr);
46e3ed7f 3624 thr->thread_fsm->clean_up (thr);
243a9253
PA
3625 }
3626
3627 if (ecs->event_thread != NULL)
00431a78 3628 switch_to_thread (ecs->event_thread);
243a9253
PA
3629 }
3630}
3631
3b12939d
PA
3632/* Helper for all_uis_check_sync_execution_done that works on the
3633 current UI. */
3634
3635static void
3636check_curr_ui_sync_execution_done (void)
3637{
3638 struct ui *ui = current_ui;
3639
3640 if (ui->prompt_state == PROMPT_NEEDED
3641 && ui->async
3642 && !gdb_in_secondary_prompt_p (ui))
3643 {
223ffa71 3644 target_terminal::ours ();
76727919 3645 gdb::observers::sync_execution_done.notify ();
3eb7562a 3646 ui_register_input_event_handler (ui);
3b12939d
PA
3647 }
3648}
3649
3650/* See infrun.h. */
3651
3652void
3653all_uis_check_sync_execution_done (void)
3654{
0e454242 3655 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3656 {
3657 check_curr_ui_sync_execution_done ();
3658 }
3659}
3660
a8836c93
PA
3661/* See infrun.h. */
3662
3663void
3664all_uis_on_sync_execution_starting (void)
3665{
0e454242 3666 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3667 {
3668 if (current_ui->prompt_state == PROMPT_NEEDED)
3669 async_disable_stdin ();
3670 }
3671}
3672
1777feb0 3673/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3674 event loop whenever a change of state is detected on the file
1777feb0
MS
3675 descriptor corresponding to the target. It can be called more than
3676 once to complete a single execution command. In such cases we need
3677 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3678 that this function is called for a single execution command, then
3679 report to the user that the inferior has stopped, and do the
1777feb0 3680 necessary cleanups. */
43ff13b4
JM
3681
3682void
fba45db2 3683fetch_inferior_event (void *client_data)
43ff13b4 3684{
0d1e5fa7 3685 struct execution_control_state ecss;
a474d7c2 3686 struct execution_control_state *ecs = &ecss;
0f641c01 3687 int cmd_done = 0;
963f9c80 3688 ptid_t waiton_ptid = minus_one_ptid;
43ff13b4 3689
0d1e5fa7
PA
3690 memset (ecs, 0, sizeof (*ecs));
3691
c61db772
PA
3692 /* Events are always processed with the main UI as current UI. This
3693 way, warnings, debug output, etc. are always consistently sent to
3694 the main console. */
4b6749b9 3695 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3696
d3d4baed 3697 /* End up with readline processing input, if necessary. */
d238133d
TT
3698 {
3699 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3700
3701 /* We're handling a live event, so make sure we're doing live
3702 debugging. If we're looking at traceframes while the target is
3703 running, we're going to need to get back to that mode after
3704 handling the event. */
3705 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3706 if (non_stop)
3707 {
3708 maybe_restore_traceframe.emplace ();
3709 set_current_traceframe (-1);
3710 }
43ff13b4 3711
d238133d
TT
3712 gdb::optional<scoped_restore_current_thread> maybe_restore_thread;
3713
3714 if (non_stop)
3715 /* In non-stop mode, the user/frontend should not notice a thread
3716 switch due to internal events. Make sure we revert to the
3717 user selected thread and frame after handling the event and
3718 running any breakpoint commands. */
3719 maybe_restore_thread.emplace ();
3720
3721 overlay_cache_invalid = 1;
3722 /* Flush target cache before starting to handle each event. Target
3723 was running and cache could be stale. This is just a heuristic.
3724 Running threads may modify target memory, but we don't get any
3725 event. */
3726 target_dcache_invalidate ();
3727
3728 scoped_restore save_exec_dir
3729 = make_scoped_restore (&execution_direction,
3730 target_execution_direction ());
3731
3732 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws,
3733 target_can_async_p () ? TARGET_WNOHANG : 0);
3734
3735 if (debug_infrun)
3736 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3737
3738 /* If an error happens while handling the event, propagate GDB's
3739 knowledge of the executing state to the frontend/user running
3740 state. */
3741 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
3742 scoped_finish_thread_state finish_state (finish_ptid);
3743
979a0d13 3744 /* These scope exits run before the scoped_restore_current_thread above,
d238133d
TT
3745 so they still apply to the thread that threw the exception. */
3746 auto defer_bpstat_clear
3747 = make_scope_exit (bpstat_clear_actions);
3748 auto defer_delete_threads
3749 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
3750
3751 /* Now figure out what to do with the result of the wait. */
3752 handle_inferior_event (ecs);
3753
3754 if (!ecs->wait_some_more)
3755 {
3756 struct inferior *inf = find_inferior_ptid (ecs->ptid);
3757 int should_stop = 1;
3758 struct thread_info *thr = ecs->event_thread;
d6b48e9c 3759
d238133d 3760 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3761
d238133d
TT
3762 if (thr != NULL)
3763 {
3764 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 3765
d238133d 3766 if (thread_fsm != NULL)
46e3ed7f 3767 should_stop = thread_fsm->should_stop (thr);
d238133d 3768 }
243a9253 3769
d238133d
TT
3770 if (!should_stop)
3771 {
3772 keep_going (ecs);
3773 }
3774 else
3775 {
46e3ed7f 3776 bool should_notify_stop = true;
d238133d 3777 int proceeded = 0;
1840d81a 3778
d238133d 3779 clean_up_just_stopped_threads_fsms (ecs);
243a9253 3780
d238133d 3781 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 3782 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 3783
d238133d
TT
3784 if (should_notify_stop)
3785 {
3786 /* We may not find an inferior if this was a process exit. */
3787 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3788 proceeded = normal_stop ();
3789 }
243a9253 3790
d238133d
TT
3791 if (!proceeded)
3792 {
3793 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3794 cmd_done = 1;
3795 }
3796 }
3797 }
4f8d22e3 3798
d238133d
TT
3799 defer_delete_threads.release ();
3800 defer_bpstat_clear.release ();
29f49a6a 3801
d238133d
TT
3802 /* No error, don't finish the thread states yet. */
3803 finish_state.release ();
731f534f 3804
d238133d
TT
3805 /* This scope is used to ensure that readline callbacks are
3806 reinstalled here. */
3807 }
4f8d22e3 3808
3b12939d
PA
3809 /* If a UI was in sync execution mode, and now isn't, restore its
3810 prompt (a synchronous execution command has finished, and we're
3811 ready for input). */
3812 all_uis_check_sync_execution_done ();
0f641c01
PA
3813
3814 if (cmd_done
0f641c01 3815 && exec_done_display_p
00431a78
PA
3816 && (inferior_ptid == null_ptid
3817 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 3818 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
3819}
3820
edb3359d
DJ
3821/* Record the frame and location we're currently stepping through. */
3822void
3823set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3824{
3825 struct thread_info *tp = inferior_thread ();
3826
16c381f0
JK
3827 tp->control.step_frame_id = get_frame_id (frame);
3828 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
3829
3830 tp->current_symtab = sal.symtab;
3831 tp->current_line = sal.line;
3832}
3833
0d1e5fa7
PA
3834/* Clear context switchable stepping state. */
3835
3836void
4e1c45ea 3837init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 3838{
7f5ef605 3839 tss->stepped_breakpoint = 0;
0d1e5fa7 3840 tss->stepping_over_breakpoint = 0;
963f9c80 3841 tss->stepping_over_watchpoint = 0;
0d1e5fa7 3842 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
3843}
3844
c32c64b7
DE
3845/* Set the cached copy of the last ptid/waitstatus. */
3846
6efcd9a8 3847void
c32c64b7
DE
3848set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3849{
3850 target_last_wait_ptid = ptid;
3851 target_last_waitstatus = status;
3852}
3853
e02bc4cc 3854/* Return the cached copy of the last pid/waitstatus returned by
9a4105ab
AC
3855 target_wait()/deprecated_target_wait_hook(). The data is actually
3856 cached by handle_inferior_event(), which gets called immediately
3857 after target_wait()/deprecated_target_wait_hook(). */
e02bc4cc
DS
3858
3859void
488f131b 3860get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
e02bc4cc 3861{
39f77062 3862 *ptidp = target_last_wait_ptid;
e02bc4cc
DS
3863 *status = target_last_waitstatus;
3864}
3865
ac264b3b
MS
3866void
3867nullify_last_target_wait_ptid (void)
3868{
3869 target_last_wait_ptid = minus_one_ptid;
3870}
3871
dcf4fbde 3872/* Switch thread contexts. */
dd80620e
MS
3873
3874static void
00431a78 3875context_switch (execution_control_state *ecs)
dd80620e 3876{
00431a78
PA
3877 if (debug_infrun
3878 && ecs->ptid != inferior_ptid
3879 && ecs->event_thread != inferior_thread ())
fd48f117
DJ
3880 {
3881 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
a068643d 3882 target_pid_to_str (inferior_ptid).c_str ());
fd48f117 3883 fprintf_unfiltered (gdb_stdlog, "to %s\n",
a068643d 3884 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
3885 }
3886
00431a78 3887 switch_to_thread (ecs->event_thread);
dd80620e
MS
3888}
3889
d8dd4d5f
PA
3890/* If the target can't tell whether we've hit breakpoints
3891 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
3892 check whether that could have been caused by a breakpoint. If so,
3893 adjust the PC, per gdbarch_decr_pc_after_break. */
3894
4fa8626c 3895static void
d8dd4d5f
PA
3896adjust_pc_after_break (struct thread_info *thread,
3897 struct target_waitstatus *ws)
4fa8626c 3898{
24a73cce
UW
3899 struct regcache *regcache;
3900 struct gdbarch *gdbarch;
118e6252 3901 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 3902
4fa8626c
DJ
3903 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3904 we aren't, just return.
9709f61c
DJ
3905
3906 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
3907 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3908 implemented by software breakpoints should be handled through the normal
3909 breakpoint layer.
8fb3e588 3910
4fa8626c
DJ
3911 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3912 different signals (SIGILL or SIGEMT for instance), but it is less
3913 clear where the PC is pointing afterwards. It may not match
b798847d
UW
3914 gdbarch_decr_pc_after_break. I don't know any specific target that
3915 generates these signals at breakpoints (the code has been in GDB since at
3916 least 1992) so I can not guess how to handle them here.
8fb3e588 3917
e6cf7916
UW
3918 In earlier versions of GDB, a target with
3919 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
3920 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3921 target with both of these set in GDB history, and it seems unlikely to be
3922 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 3923
d8dd4d5f 3924 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
3925 return;
3926
d8dd4d5f 3927 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
3928 return;
3929
4058b839
PA
3930 /* In reverse execution, when a breakpoint is hit, the instruction
3931 under it has already been de-executed. The reported PC always
3932 points at the breakpoint address, so adjusting it further would
3933 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3934 architecture:
3935
3936 B1 0x08000000 : INSN1
3937 B2 0x08000001 : INSN2
3938 0x08000002 : INSN3
3939 PC -> 0x08000003 : INSN4
3940
3941 Say you're stopped at 0x08000003 as above. Reverse continuing
3942 from that point should hit B2 as below. Reading the PC when the
3943 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3944 been de-executed already.
3945
3946 B1 0x08000000 : INSN1
3947 B2 PC -> 0x08000001 : INSN2
3948 0x08000002 : INSN3
3949 0x08000003 : INSN4
3950
3951 We can't apply the same logic as for forward execution, because
3952 we would wrongly adjust the PC to 0x08000000, since there's a
3953 breakpoint at PC - 1. We'd then report a hit on B1, although
3954 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3955 behaviour. */
3956 if (execution_direction == EXEC_REVERSE)
3957 return;
3958
1cf4d951
PA
3959 /* If the target can tell whether the thread hit a SW breakpoint,
3960 trust it. Targets that can tell also adjust the PC
3961 themselves. */
3962 if (target_supports_stopped_by_sw_breakpoint ())
3963 return;
3964
3965 /* Note that relying on whether a breakpoint is planted in memory to
3966 determine this can fail. E.g., the breakpoint could have been
3967 removed since. Or the thread could have been told to step an
3968 instruction the size of a breakpoint instruction, and only
3969 _after_ was a breakpoint inserted at its address. */
3970
24a73cce
UW
3971 /* If this target does not decrement the PC after breakpoints, then
3972 we have nothing to do. */
00431a78 3973 regcache = get_thread_regcache (thread);
ac7936df 3974 gdbarch = regcache->arch ();
118e6252 3975
527a273a 3976 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 3977 if (decr_pc == 0)
24a73cce
UW
3978 return;
3979
8b86c959 3980 const address_space *aspace = regcache->aspace ();
6c95b8df 3981
8aad930b
AC
3982 /* Find the location where (if we've hit a breakpoint) the
3983 breakpoint would be. */
118e6252 3984 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
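  /* Worked example (editor's illustration, not in the original source):
     on i386, where a software breakpoint is the one-byte int3 and
     gdbarch_decr_pc_after_break returns 1, a trap for a breakpoint planted
     at 0x08048000 is reported with PC == 0x08048001, so breakpoint_pc
     above computes back to 0x08048000.  */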
8aad930b 3985
1cf4d951
PA
3986 /* If the target can't tell whether a software breakpoint triggered,
3987 fall back to figuring it out based on breakpoints we think were
3988 inserted in the target, and on whether the thread was stepped or
3989 continued. */
3990
1c5cfe86
PA
3991 /* Check whether there actually is a software breakpoint inserted at
3992 that location.
3993
3994 If in non-stop mode, a race condition is possible where we've
3995 removed a breakpoint, but stop events for that breakpoint were
3996 already queued and arrive later. To suppress those spurious
3997 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
3998 and retire them after a number of stop events are reported. Note
3999 this is a heuristic and can thus get confused. The real fix is
4000 to get the "stopped by SW BP and needs adjustment" info out of
4001 the target/kernel (and thus never reach here; see above). */
6c95b8df 4002 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4003 || (target_is_non_stop_p ()
4004 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4005 {
07036511 4006 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4007
8213266a 4008 if (record_full_is_used ())
07036511
TT
4009 restore_operation_disable.emplace
4010 (record_full_gdb_operation_disable_set ());
96429cc8 4011
1c0fdd0e
UW
4012 /* When using hardware single-step, a SIGTRAP is reported for both
4013 a completed single-step and a software breakpoint. Need to
4014 differentiate between the two, as the latter needs adjusting
4015 but the former does not.
4016
4017 The SIGTRAP can be due to a completed hardware single-step only if
4018 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4019 - this thread is currently being stepped
4020
4021 If any of these events did not occur, we must have stopped due
4022 to hitting a software breakpoint, and have to back up to the
4023 breakpoint address.
4024
4025 As a special case, we could have hardware single-stepped a
4026 software breakpoint. In this case (prev_pc == breakpoint_pc),
4027 we also need to back up to the breakpoint address. */
4028
d8dd4d5f
PA
4029 if (thread_has_single_step_breakpoints_set (thread)
4030 || !currently_stepping (thread)
4031 || (thread->stepped_breakpoint
4032 && thread->prev_pc == breakpoint_pc))
515630c5 4033 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4034 }
4fa8626c
DJ
4035}
4036
edb3359d
DJ
4037static int
4038stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4039{
4040 for (frame = get_prev_frame (frame);
4041 frame != NULL;
4042 frame = get_prev_frame (frame))
4043 {
4044 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4045 return 1;
4046 if (get_frame_type (frame) != INLINE_FRAME)
4047 break;
4048 }
4049
4050 return 0;
4051}
4052
c65d6b55
PA
4053/* If the event thread has the stop requested flag set, pretend it
4054 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4055 target_stop). */
4056
4057static bool
4058handle_stop_requested (struct execution_control_state *ecs)
4059{
4060 if (ecs->event_thread->stop_requested)
4061 {
4062 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4063 ecs->ws.value.sig = GDB_SIGNAL_0;
4064 handle_signal_stop (ecs);
4065 return true;
4066 }
4067 return false;
4068}
4069
a96d9b2e
SDJ
4070/* Auxiliary function that handles syscall entry/return events.
4071 It returns 1 if the inferior should keep going (and GDB
4072 should ignore the event), or 0 if the event deserves to be
4073 processed. */
ca2163eb 4074
a96d9b2e 4075static int
ca2163eb 4076handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4077{
ca2163eb 4078 struct regcache *regcache;
ca2163eb
PA
4079 int syscall_number;
4080
00431a78 4081 context_switch (ecs);
ca2163eb 4082
00431a78 4083 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4084 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4085 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4086
a96d9b2e
SDJ
4087 if (catch_syscall_enabled () > 0
4088 && catching_syscall_number (syscall_number) > 0)
4089 {
4090 if (debug_infrun)
4091 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4092 syscall_number);
a96d9b2e 4093
16c381f0 4094 ecs->event_thread->control.stop_bpstat
a01bda52 4095 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4096 ecs->event_thread->suspend.stop_pc,
4097 ecs->event_thread, &ecs->ws);
ab04a2af 4098
c65d6b55
PA
4099 if (handle_stop_requested (ecs))
4100 return 0;
4101
ce12b012 4102 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4103 {
4104 /* Catchpoint hit. */
ca2163eb
PA
4105 return 0;
4106 }
a96d9b2e 4107 }
ca2163eb 4108
c65d6b55
PA
4109 if (handle_stop_requested (ecs))
4110 return 0;
4111
ca2163eb 4112 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4113 keep_going (ecs);
4114 return 1;
a96d9b2e
SDJ
4115}
4116
7e324e48
GB
4117/* Lazily fill in the execution_control_state's stop_func_* fields. */
4118
4119static void
4120fill_in_stop_func (struct gdbarch *gdbarch,
4121 struct execution_control_state *ecs)
4122{
4123 if (!ecs->stop_func_filled_in)
4124 {
98a617f8
KB
4125 const block *block;
4126
7e324e48
GB
4127 /* Don't care about return value; stop_func_start and stop_func_name
4128 will both be 0 if it doesn't work. */
98a617f8
KB
4129 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4130 &ecs->stop_func_name,
4131 &ecs->stop_func_start,
4132 &ecs->stop_func_end,
4133 &block);
4134
4135 /* The call to find_pc_partial_function, above, will set
4136 stop_func_start and stop_func_end to the start and end
4137 of the range containing the stop pc. If this range
4138 contains the entry pc for the block (which is always the
4139 case for contiguous blocks), advance stop_func_start past
4140 the function's start offset and entrypoint. Note that
4141 stop_func_start is NOT advanced when in a range of a
4142 non-contiguous block that does not contain the entry pc. */
4143 if (block != nullptr
4144 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4145 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4146 {
4147 ecs->stop_func_start
4148 += gdbarch_deprecated_function_start_offset (gdbarch);
4149
4150 if (gdbarch_skip_entrypoint_p (gdbarch))
4151 ecs->stop_func_start
4152 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4153 }
591a12a1 4154
7e324e48
GB
4155 ecs->stop_func_filled_in = 1;
4156 }
4157}
4158
4f5d7f63 4159
00431a78 4160/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4161
4162static enum stop_kind
00431a78 4163get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4164{
00431a78 4165 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4f5d7f63
PA
4166
4167 gdb_assert (inf != NULL);
4168 return inf->control.stop_soon;
4169}
4170
372316f1
PA
4171/* Wait for one event. Store the resulting waitstatus in WS, and
4172 return the event ptid. */
4173
4174static ptid_t
4175wait_one (struct target_waitstatus *ws)
4176{
4177 ptid_t event_ptid;
4178 ptid_t wait_ptid = minus_one_ptid;
4179
4180 overlay_cache_invalid = 1;
4181
4182 /* Flush target cache before starting to handle each event.
4183 Target was running and cache could be stale. This is just a
4184 heuristic. Running threads may modify target memory, but we
4185 don't get any event. */
4186 target_dcache_invalidate ();
4187
4188 if (deprecated_target_wait_hook)
4189 event_ptid = deprecated_target_wait_hook (wait_ptid, ws, 0);
4190 else
4191 event_ptid = target_wait (wait_ptid, ws, 0);
4192
4193 if (debug_infrun)
4194 print_target_wait_results (wait_ptid, event_ptid, ws);
4195
4196 return event_ptid;
4197}
4198
4199/* Generate a wrapper for target_stopped_by_REASON that works on PTID
4200 instead of the current thread. */
4201#define THREAD_STOPPED_BY(REASON) \
4202static int \
4203thread_stopped_by_ ## REASON (ptid_t ptid) \
4204{ \
2989a365 4205 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
372316f1
PA
4206 inferior_ptid = ptid; \
4207 \
2989a365 4208 return target_stopped_by_ ## REASON (); \
372316f1
PA
4209}
4210
4211/* Generate thread_stopped_by_watchpoint. */
4212THREAD_STOPPED_BY (watchpoint)
4213/* Generate thread_stopped_by_sw_breakpoint. */
4214THREAD_STOPPED_BY (sw_breakpoint)
4215/* Generate thread_stopped_by_hw_breakpoint. */
4216THREAD_STOPPED_BY (hw_breakpoint)
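/* For illustration (editor's note, not in the original source), the first
   invocation above expands to roughly:

   static int
   thread_stopped_by_watchpoint (ptid_t ptid)
   {
     scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
     inferior_ptid = ptid;

     return target_stopped_by_watchpoint ();
   }  */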
4217
372316f1
PA
4218/* Save the thread's event and stop reason to process it later. */
4219
4220static void
4221save_waitstatus (struct thread_info *tp, struct target_waitstatus *ws)
4222{
372316f1
PA
4223 if (debug_infrun)
4224 {
23fdd69e 4225 std::string statstr = target_waitstatus_to_string (ws);
372316f1 4226
372316f1
PA
4227 fprintf_unfiltered (gdb_stdlog,
4228 "infrun: saving status %s for %d.%ld.%ld\n",
23fdd69e 4229 statstr.c_str (),
e99b03dc 4230 tp->ptid.pid (),
e38504b3 4231 tp->ptid.lwp (),
cc6bcb54 4232 tp->ptid.tid ());
372316f1
PA
4233 }
4234
4235 /* Record for later. */
4236 tp->suspend.waitstatus = *ws;
4237 tp->suspend.waitstatus_pending_p = 1;
4238
00431a78 4239 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4240 const address_space *aspace = regcache->aspace ();
372316f1
PA
4241
4242 if (ws->kind == TARGET_WAITKIND_STOPPED
4243 && ws->value.sig == GDB_SIGNAL_TRAP)
4244 {
4245 CORE_ADDR pc = regcache_read_pc (regcache);
4246
4247 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4248
4249 if (thread_stopped_by_watchpoint (tp->ptid))
4250 {
4251 tp->suspend.stop_reason
4252 = TARGET_STOPPED_BY_WATCHPOINT;
4253 }
4254 else if (target_supports_stopped_by_sw_breakpoint ()
4255 && thread_stopped_by_sw_breakpoint (tp->ptid))
4256 {
4257 tp->suspend.stop_reason
4258 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4259 }
4260 else if (target_supports_stopped_by_hw_breakpoint ()
4261 && thread_stopped_by_hw_breakpoint (tp->ptid))
4262 {
4263 tp->suspend.stop_reason
4264 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4265 }
4266 else if (!target_supports_stopped_by_hw_breakpoint ()
4267 && hardware_breakpoint_inserted_here_p (aspace,
4268 pc))
4269 {
4270 tp->suspend.stop_reason
4271 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4272 }
4273 else if (!target_supports_stopped_by_sw_breakpoint ()
4274 && software_breakpoint_inserted_here_p (aspace,
4275 pc))
4276 {
4277 tp->suspend.stop_reason
4278 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4279 }
4280 else if (!thread_has_single_step_breakpoints_set (tp)
4281 && currently_stepping (tp))
4282 {
4283 tp->suspend.stop_reason
4284 = TARGET_STOPPED_BY_SINGLE_STEP;
4285 }
4286 }
4287}
4288
6efcd9a8 4289/* See infrun.h. */
372316f1 4290
6efcd9a8 4291void
372316f1
PA
4292stop_all_threads (void)
4293{
4294 /* We may need multiple passes to discover all threads. */
4295 int pass;
4296 int iterations = 0;
372316f1 4297
fbea99ea 4298 gdb_assert (target_is_non_stop_p ());
372316f1
PA
4299
4300 if (debug_infrun)
4301 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
4302
00431a78 4303 scoped_restore_current_thread restore_thread;
372316f1 4304
65706a29 4305 target_thread_events (1);
9885e6bb 4306 SCOPE_EXIT { target_thread_events (0); };
65706a29 4307
372316f1
PA
4308 /* Request threads to stop, and then wait for the stops. Because
4309 threads we already know about can spawn more threads while we're
4310 trying to stop them, and we only learn about new threads when we
4311 update the thread list, do this in a loop, and keep iterating
4312 until two passes find no threads that need to be stopped. */
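  /* Illustrative scenario (editor's note, not in the original source):
     thread A is asked to stop but spawns thread B before actually
     stopping.  B only becomes known after the update_thread_list call
     below, so a single pass could miss it; requiring two consecutive
     passes that find nothing left to stop closes that window.  */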
4313 for (pass = 0; pass < 2; pass++, iterations++)
4314 {
4315 if (debug_infrun)
4316 fprintf_unfiltered (gdb_stdlog,
4317 "infrun: stop_all_threads, pass=%d, "
4318 "iterations=%d\n", pass, iterations);
4319 while (1)
4320 {
4321 ptid_t event_ptid;
4322 struct target_waitstatus ws;
4323 int need_wait = 0;
372316f1
PA
4324
4325 update_thread_list ();
4326
4327 /* Go through all threads looking for threads that we need
4328 to tell the target to stop. */
08036331 4329 for (thread_info *t : all_non_exited_threads ())
372316f1
PA
4330 {
4331 if (t->executing)
4332 {
4333 /* If already stopping, don't request a stop again.
4334 We just haven't seen the notification yet. */
4335 if (!t->stop_requested)
4336 {
4337 if (debug_infrun)
4338 fprintf_unfiltered (gdb_stdlog,
4339 "infrun: %s executing, "
4340 "need stop\n",
a068643d 4341 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4342 target_stop (t->ptid);
4343 t->stop_requested = 1;
4344 }
4345 else
4346 {
4347 if (debug_infrun)
4348 fprintf_unfiltered (gdb_stdlog,
4349 "infrun: %s executing, "
4350 "already stopping\n",
a068643d 4351 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4352 }
4353
4354 if (t->stop_requested)
4355 need_wait = 1;
4356 }
4357 else
4358 {
4359 if (debug_infrun)
4360 fprintf_unfiltered (gdb_stdlog,
4361 "infrun: %s not executing\n",
a068643d 4362 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4363
4364 /* The thread may be not executing, but still be
4365 resumed with a pending status to process. */
4366 t->resumed = 0;
4367 }
4368 }
4369
4370 if (!need_wait)
4371 break;
4372
4373 /* If we find new threads on the second iteration, restart
4374 over. We want to see two iterations in a row with all
4375 threads stopped. */
4376 if (pass > 0)
4377 pass = -1;
4378
4379 event_ptid = wait_one (&ws);
c29705b7 4380 if (debug_infrun)
372316f1 4381 {
c29705b7
PW
4382 fprintf_unfiltered (gdb_stdlog,
4383 "infrun: stop_all_threads %s %s\n",
4384 target_waitstatus_to_string (&ws).c_str (),
4385 target_pid_to_str (event_ptid).c_str ());
372316f1 4386 }
372316f1 4387
c29705b7
PW
4388 if (ws.kind == TARGET_WAITKIND_NO_RESUMED
4389 || ws.kind == TARGET_WAITKIND_THREAD_EXITED
4390 || ws.kind == TARGET_WAITKIND_EXITED
4391 || ws.kind == TARGET_WAITKIND_SIGNALLED)
4392 {
4393 /* All resumed threads exited
4394 or one thread/process exited/signalled. */
372316f1
PA
4395 }
4396 else
4397 {
08036331 4398 thread_info *t = find_thread_ptid (event_ptid);
372316f1
PA
4399 if (t == NULL)
4400 t = add_thread (event_ptid);
4401
4402 t->stop_requested = 0;
4403 t->executing = 0;
4404 t->resumed = 0;
4405 t->control.may_range_step = 0;
4406
6efcd9a8
PA
4407 /* This may be the first time we see the inferior report
4408 a stop. */
08036331 4409 inferior *inf = find_inferior_ptid (event_ptid);
6efcd9a8
PA
4410 if (inf->needs_setup)
4411 {
4412 switch_to_thread_no_regs (t);
4413 setup_inferior (0);
4414 }
4415
372316f1
PA
4416 if (ws.kind == TARGET_WAITKIND_STOPPED
4417 && ws.value.sig == GDB_SIGNAL_0)
4418 {
4419 /* We caught the event that we intended to catch, so
4420 there's no event pending. */
4421 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4422 t->suspend.waitstatus_pending_p = 0;
4423
b93d82bc 4424 if (displaced_step_finish (t, GDB_SIGNAL_0) < 0)
372316f1
PA
4425 {
4426 /* Add it back to the step-over queue. */
4427 if (debug_infrun)
4428 {
4429 fprintf_unfiltered (gdb_stdlog,
4430 "infrun: displaced-step of %s "
4431 "canceled: adding back to the "
4432 "step-over queue\n",
a068643d 4433 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4434 }
4435 t->control.trap_expected = 0;
66716e78 4436 global_thread_step_over_chain_enqueue (t);
372316f1
PA
4437 }
4438 }
4439 else
4440 {
4441 enum gdb_signal sig;
4442 struct regcache *regcache;
372316f1
PA
4443
4444 if (debug_infrun)
4445 {
23fdd69e 4446 std::string statstr = target_waitstatus_to_string (&ws);
372316f1 4447
372316f1
PA
4448 fprintf_unfiltered (gdb_stdlog,
4449 "infrun: target_wait %s, saving "
4450 "status for %d.%ld.%ld\n",
23fdd69e 4451 statstr.c_str (),
e99b03dc 4452 t->ptid.pid (),
e38504b3 4453 t->ptid.lwp (),
cc6bcb54 4454 t->ptid.tid ());
372316f1
PA
4455 }
4456
4457 /* Record for later. */
4458 save_waitstatus (t, &ws);
4459
4460 sig = (ws.kind == TARGET_WAITKIND_STOPPED
4461 ? ws.value.sig : GDB_SIGNAL_0);
4462
b93d82bc 4463 if (displaced_step_finish (t, sig) < 0)
372316f1
PA
4464 {
4465 /* Add it back to the step-over queue. */
4466 t->control.trap_expected = 0;
66716e78 4467 global_thread_step_over_chain_enqueue (t);
372316f1
PA
4468 }
4469
00431a78 4470 regcache = get_thread_regcache (t);
372316f1
PA
4471 t->suspend.stop_pc = regcache_read_pc (regcache);
4472
4473 if (debug_infrun)
4474 {
4475 fprintf_unfiltered (gdb_stdlog,
4476 "infrun: saved stop_pc=%s for %s "
4477 "(currently_stepping=%d)\n",
4478 paddress (target_gdbarch (),
4479 t->suspend.stop_pc),
a068643d 4480 target_pid_to_str (t->ptid).c_str (),
372316f1
PA
4481 currently_stepping (t));
4482 }
4483 }
4484 }
4485 }
4486 }
4487
372316f1
PA
4488 if (debug_infrun)
4489 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
4490}
4491
f4836ba9
PA
4492/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4493
4494static int
4495handle_no_resumed (struct execution_control_state *ecs)
4496{
3b12939d 4497 if (target_can_async_p ())
f4836ba9 4498 {
3b12939d
PA
4499 struct ui *ui;
4500 int any_sync = 0;
f4836ba9 4501
3b12939d
PA
4502 ALL_UIS (ui)
4503 {
4504 if (ui->prompt_state == PROMPT_BLOCKED)
4505 {
4506 any_sync = 1;
4507 break;
4508 }
4509 }
4510 if (!any_sync)
4511 {
4512 /* There were no unwaited-for children left in the target, but,
4513 we're not synchronously waiting for events either. Just
4514 ignore. */
4515
4516 if (debug_infrun)
4517 fprintf_unfiltered (gdb_stdlog,
4518 "infrun: TARGET_WAITKIND_NO_RESUMED "
4519 "(ignoring: bg)\n");
4520 prepare_to_wait (ecs);
4521 return 1;
4522 }
f4836ba9
PA
4523 }
4524
4525 /* Otherwise, if we were running a synchronous execution command, we
4526 may need to cancel it and give the user back the terminal.
4527
4528 In non-stop mode, the target can't tell whether we've already
4529 consumed previous stop events, so it can end up sending us a
4530 no-resumed event like so:
4531
4532 #0 - thread 1 is left stopped
4533
4534 #1 - thread 2 is resumed and hits breakpoint
4535 -> TARGET_WAITKIND_STOPPED
4536
4537 #2 - thread 3 is resumed and exits
4538 this is the last resumed thread, so
4539 -> TARGET_WAITKIND_NO_RESUMED
4540
4541 #3 - gdb processes stop for thread 2 and decides to re-resume
4542 it.
4543
4544 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4545 thread 2 is now resumed, so the event should be ignored.
4546
4547 IOW, if the stop for thread 2 doesn't end a foreground command,
4548 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4549 event. But it could be that the event meant that thread 2 itself
4550 (or whatever other thread was the last resumed thread) exited.
4551
4552 To address this we refresh the thread list and check whether we
4553 have resumed threads _now_. In the example above, this removes
4554 thread 3 from the thread list. If thread 2 was re-resumed, we
4555 ignore this event. If we find no thread resumed, then we cancel
4556 the synchronous command and show "no unwaited-for " to the user. */
4557 update_thread_list ();
4558
08036331 4559 for (thread_info *thread : all_non_exited_threads ())
f4836ba9
PA
4560 {
4561 if (thread->executing
4562 || thread->suspend.waitstatus_pending_p)
4563 {
4564 /* There were no unwaited-for children left in the target at
4565 some point, but there are now. Just ignore. */
4566 if (debug_infrun)
4567 fprintf_unfiltered (gdb_stdlog,
4568 "infrun: TARGET_WAITKIND_NO_RESUMED "
4569 "(ignoring: found resumed)\n");
4570 prepare_to_wait (ecs);
4571 return 1;
4572 }
4573 }
4574
4575 /* Note however that we may find no resumed thread because the whole
4576 process exited meanwhile (thus updating the thread list results
4577 in an empty thread list). In this case we know we'll be getting
4578 a process exit event shortly. */
08036331 4579 for (inferior *inf : all_inferiors ())
f4836ba9
PA
4580 {
4581 if (inf->pid == 0)
4582 continue;
4583
08036331 4584 thread_info *thread = any_live_thread_of_inferior (inf);
f4836ba9
PA
4585 if (thread == NULL)
4586 {
4587 if (debug_infrun)
4588 fprintf_unfiltered (gdb_stdlog,
4589 "infrun: TARGET_WAITKIND_NO_RESUMED "
4590 "(expect process exit)\n");
4591 prepare_to_wait (ecs);
4592 return 1;
4593 }
4594 }
4595
4596 /* Go ahead and report the event. */
4597 return 0;
4598}
4599
05ba8510
PA
4600/* Given an execution control state that has been freshly filled in by
4601 an event from the inferior, figure out what it means and take
4602 appropriate action.
4603
4604 The alternatives are:
4605
22bcd14b 4606 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
4607 debugger.
4608
4609 2) keep_going and return; to wait for the next event (set
4610 ecs->event_thread->stepping_over_breakpoint to 1 to single step
4611 once). */
c906108c 4612
ec9499be 4613static void
595915c1 4614handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 4615{
595915c1
TT
4616 /* Make sure that all temporary struct value objects that were
4617 created during the handling of the event get deleted at the
4618 end. */
4619 scoped_value_mark free_values;
4620
d6b48e9c
PA
4621 enum stop_kind stop_soon;
4622
c29705b7
PW
4623 if (debug_infrun)
4624 fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
4625 target_waitstatus_to_string (&ecs->ws).c_str ());
4626
28736962
PA
4627 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
4628 {
4629 /* We had an event in the inferior, but we are not interested in
4630 handling it at this level. The lower layers have already
4631 done what needs to be done, if anything.
4632
4633 One of the possible circumstances for this is when the
4634 inferior produces output for the console. The inferior has
4635 not stopped, and we are ignoring the event. Another possible
4636 circumstance is any event which the lower level knows will be
4637 reported multiple times without an intervening resume. */
28736962
PA
4638 prepare_to_wait (ecs);
4639 return;
4640 }
4641
65706a29
PA
4642 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
4643 {
65706a29
PA
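/* A thread exited.  There is nothing to handle at this level;
   just keep waiting for further events.  */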
4644 prepare_to_wait (ecs);
4645 return;
4646 }
4647
0e5bf2a8 4648 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
4649 && handle_no_resumed (ecs))
4650 return;
0e5bf2a8 4651
1777feb0 4652 /* Cache the last pid/waitstatus. */
c32c64b7 4653 set_last_target_status (ecs->ptid, ecs->ws);
e02bc4cc 4654
ca005067 4655 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 4656 stop_stack_dummy = STOP_NONE;
ca005067 4657
0e5bf2a8
PA
4658 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
4659 {
4660 /* No unwaited-for children left. IOW, all resumed children
4661 have exited. */
0e5bf2a8 4662 stop_print_frame = 0;
22bcd14b 4663 stop_waiting (ecs);
0e5bf2a8
PA
4664 return;
4665 }
4666
8c90c137 4667 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 4668 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6
PA
4669 {
4670 ecs->event_thread = find_thread_ptid (ecs->ptid);
4671 /* If it's a new thread, add it to the thread database. */
4672 if (ecs->event_thread == NULL)
4673 ecs->event_thread = add_thread (ecs->ptid);
c1e36e3e
PA
4674
4675 /* Disable range stepping. If the next step request could use a
4676 range, it will end up re-enabled then. */
4677 ecs->event_thread->control.may_range_step = 0;
359f5fe6 4678 }
88ed393a
JK
4679
4680 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 4681 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
4682
4683 /* Dependent on the current PC value modified by adjust_pc_after_break. */
4684 reinit_frame_cache ();
4685
28736962
PA
4686 breakpoint_retire_moribund ();
4687
2b009048
DJ
4688 /* First, distinguish signals caused by the debugger from signals
4689 that have to do with the program's own actions. Note that
4690 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
4691 on the operating system version. Here we detect when a SIGILL or
4692 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
4693 something similar for SIGSEGV, since a SIGSEGV will be generated
4694 when we're trying to execute a breakpoint instruction on a
4695 non-executable stack. This happens for call dummy breakpoints
4696 for architectures like SPARC that place call dummies on the
4697 stack. */
2b009048 4698 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
4699 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
4700 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
4701 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 4702 {
00431a78 4703 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 4704
a01bda52 4705 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
4706 regcache_read_pc (regcache)))
4707 {
4708 if (debug_infrun)
4709 fprintf_unfiltered (gdb_stdlog,
4710 "infrun: Treating signal as SIGTRAP\n");
a493e3e2 4711 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 4712 }
2b009048
DJ
4713 }
4714
28736962
PA
4715 /* Mark the non-executing threads accordingly. In all-stop, all
4716 threads of all processes are stopped when we get any event
e1316e60 4717 reported. In non-stop mode, only the event thread stops. */
372316f1
PA
4718 {
4719 ptid_t mark_ptid;
4720
fbea99ea 4721 if (!target_is_non_stop_p ())
372316f1
PA
4722 mark_ptid = minus_one_ptid;
4723 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
4724 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
4725 {
4726 /* If we're handling a process exit in non-stop mode, even
4727 though threads haven't been deleted yet, one would think
4728 that there is nothing to do, as threads of the dead process
4729 will be soon deleted, and threads of any other process were
4730 left running. However, on some targets, threads survive a
4731 process exit event. E.g., for the "checkpoint" command,
4732 when the current checkpoint/fork exits, linux-fork.c
4733 automatically switches to another fork from within
4734 target_mourn_inferior, by associating the same
4735 inferior/thread to another fork. We haven't mourned yet at
4736 this point, but we must mark any threads left in the
4737 process as not-executing so that finish_thread_state marks
4738 them stopped (in the user's perspective) if/when we present
4739 the stop to the user. */
e99b03dc 4740 mark_ptid = ptid_t (ecs->ptid.pid ());
372316f1
PA
4741 }
4742 else
4743 mark_ptid = ecs->ptid;
4744
4745 set_executing (mark_ptid, 0);
4746
4747 /* Likewise the resumed flag. */
4748 set_resumed (mark_ptid, 0);
4749 }
8c90c137 4750
488f131b
JB
4751 switch (ecs->ws.kind)
4752 {
4753 case TARGET_WAITKIND_LOADED:
00431a78 4754 context_switch (ecs);
b0f4b84b
DJ
4755 /* Ignore gracefully during startup of the inferior, as it might
4756 be the shell which has just loaded some objects, otherwise
4757 add the symbols for the newly loaded objects. Also ignore at
4758 the beginning of an attach or remote session; we will query
4759 the full list of libraries once the connection is
4760 established. */
4f5d7f63 4761
00431a78 4762 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 4763 if (stop_soon == NO_STOP_QUIETLY)
488f131b 4764 {
edcc5120
TT
4765 struct regcache *regcache;
4766
00431a78 4767 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
4768
4769 handle_solib_event ();
4770
4771 ecs->event_thread->control.stop_bpstat
a01bda52 4772 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4773 ecs->event_thread->suspend.stop_pc,
4774 ecs->event_thread, &ecs->ws);
ab04a2af 4775
c65d6b55
PA
4776 if (handle_stop_requested (ecs))
4777 return;
4778
ce12b012 4779 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
4780 {
4781 /* A catchpoint triggered. */
94c57d6a
PA
4782 process_event_stop_test (ecs);
4783 return;
edcc5120 4784 }
488f131b 4785
b0f4b84b
DJ
4786 /* If requested, stop when the dynamic linker notifies
4787 gdb of events. This allows the user to get control
4788 and place breakpoints in initializer routines for
4789 dynamically loaded objects (among other things). */
a493e3e2 4790 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
4791 if (stop_on_solib_events)
4792 {
55409f9d
DJ
4793 /* Make sure we print "Stopped due to solib-event" in
4794 normal_stop. */
4795 stop_print_frame = 1;
4796
22bcd14b 4797 stop_waiting (ecs);
b0f4b84b
DJ
4798 return;
4799 }
488f131b 4800 }
b0f4b84b
DJ
4801
4802 /* If we are skipping through a shell, or through shared library
4803 loading that we aren't interested in, resume the program. If
5c09a2c5 4804 we're running the program normally, also resume. */
b0f4b84b
DJ
4805 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
4806 {
74960c60
VP
4807 /* Loading of shared libraries might have changed breakpoint
4808 addresses. Make sure new breakpoints are inserted. */
a25a5a45 4809 if (stop_soon == NO_STOP_QUIETLY)
74960c60 4810 insert_breakpoints ();
64ce06e4 4811 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
4812 prepare_to_wait (ecs);
4813 return;
4814 }
4815
5c09a2c5
PA
4816 /* But stop if we're attaching or setting up a remote
4817 connection. */
4818 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4819 || stop_soon == STOP_QUIETLY_REMOTE)
4820 {
4821 if (debug_infrun)
4822 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
22bcd14b 4823 stop_waiting (ecs);
5c09a2c5
PA
4824 return;
4825 }
4826
4827 internal_error (__FILE__, __LINE__,
4828 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 4829
488f131b 4830 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
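/* Nothing interesting happened.  Unless the user asked this
   thread to stop, just resume it with no signal and keep
   waiting.  */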
4831 if (handle_stop_requested (ecs))
4832 return;
00431a78 4833 context_switch (ecs);
64ce06e4 4834 resume (GDB_SIGNAL_0);
488f131b
JB
4835 prepare_to_wait (ecs);
4836 return;
c5aa993b 4837
65706a29 4838 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
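/* A new thread appeared.  If we were stepping some other thread,
   switch back to it; otherwise keep the event thread going.  */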
4839 if (handle_stop_requested (ecs))
4840 return;
00431a78 4841 context_switch (ecs);
65706a29
PA
4842 if (!switch_back_to_stepped_thread (ecs))
4843 keep_going (ecs);
4844 return;
4845
488f131b 4846 case TARGET_WAITKIND_EXITED:
940c3c06 4847 case TARGET_WAITKIND_SIGNALLED:
fb66883a 4848 inferior_ptid = ecs->ptid;
c9657e70 4849 set_current_inferior (find_inferior_ptid (ecs->ptid));
6c95b8df
PA
4850 set_current_program_space (current_inferior ()->pspace);
4851 handle_vfork_child_exec_or_exit (0);
223ffa71 4852 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 4853
0c557179
SDJ
4854 /* Clear any previous state of the exit convenience variables. */
4855 clear_exit_convenience_vars ();
4856
940c3c06
PA
4857 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
4858 {
4859 /* Record the exit code in the convenience variable $_exitcode, so
4860 that the user can inspect this again later. */
4861 set_internalvar_integer (lookup_internalvar ("_exitcode"),
4862 (LONGEST) ecs->ws.value.integer);
4863
4864 /* Also record this in the inferior itself. */
4865 current_inferior ()->has_exit_code = 1;
4866 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 4867
98eb56a4
PA
4868 /* Support the --return-child-result option. */
4869 return_child_result_value = ecs->ws.value.integer;
4870
76727919 4871 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
4872 }
4873 else
0c557179 4874 {
00431a78 4875 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
4876
4877 if (gdbarch_gdb_signal_to_target_p (gdbarch))
4878 {
4879 /* Set the value of the internal variable $_exitsignal,
4880 which holds the signal uncaught by the inferior. */
4881 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
4882 gdbarch_gdb_signal_to_target (gdbarch,
4883 ecs->ws.value.sig));
4884 }
4885 else
4886 {
4887 /* We don't have access to the target's method used for
4888 converting between signal numbers (GDB's internal
4889 representation <-> target's representation).
4890 Therefore, we cannot do a good job of displaying this
4891 information to the user. It's better to just warn
4892 her about it (if infrun debugging is enabled), and
4893 give up. */
4894 if (debug_infrun)
4895 fprintf_filtered (gdb_stdlog, _("\
4896Cannot fill $_exitsignal with the correct signal number.\n"));
4897 }
4898
76727919 4899 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 4900 }
8cf64490 4901
488f131b 4902 gdb_flush (gdb_stdout);
bc1e6c81 4903 target_mourn_inferior (inferior_ptid);
488f131b 4904 stop_print_frame = 0;
22bcd14b 4905 stop_waiting (ecs);
488f131b 4906 return;
c5aa993b 4907
488f131b 4908 /* The following are the only cases in which we keep going;
1777feb0 4909 the above cases end in a continue or goto. */
488f131b 4910 case TARGET_WAITKIND_FORKED:
deb3b17b 4911 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
4912 /* Check whether the inferior is displaced stepping. */
4913 {
00431a78 4914 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 4915 struct gdbarch *gdbarch = regcache->arch ();
e2d96639
YQ
4916
4917 /* If checking displaced stepping is supported, and thread
4918 ecs->ptid is displaced stepping. */
b93d82bc 4919 if (displaced_step_in_progress (ecs->event_thread))
e2d96639
YQ
4920 {
4921 struct inferior *parent_inf
c9657e70 4922 = find_inferior_ptid (ecs->ptid);
e2d96639
YQ
4923 struct regcache *child_regcache;
4924 CORE_ADDR parent_pc;
4925
b8bfbca5
SM
4926 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
4927 {
b93d82bc
SM
4928 // struct displaced_step_inferior_state *displaced
4929 // = get_displaced_stepping_state (parent_inf);
b8bfbca5
SM
4930
4931 /* Restore scratch pad for child process. */
b93d82bc
SM
4932 //displaced_step_restore (displaced, ecs->ws.value.related_pid);
4933 // FIXME: we should restore all the buffers that were in use
b8bfbca5
SM
4934 }
4935
e2d96639
YQ
4936 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
4937 indicating that the displaced stepping of syscall instruction
4938 has been done. Perform cleanup for parent process here. Note
4939 that this operation also cleans up the child process for vfork,
4940 because their pages are shared. */
b93d82bc 4941 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
4942 /* Start a new step-over in another thread if there's one
4943 that needs it. */
4944 start_step_over ();
e2d96639 4945
e2d96639
YQ
4946 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
4947 the child's PC is also within the scratchpad. Set the child's PC
4948 to the parent's PC value, which has already been fixed up.
4949 FIXME: we use the parent's aspace here, although we're touching
4950 the child, because the child hasn't been added to the inferior
4951 list yet at this point. */
4952
4953 child_regcache
4954 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
4955 gdbarch,
4956 parent_inf->aspace);
4957 /* Read PC value of parent process. */
4958 parent_pc = regcache_read_pc (regcache);
4959
4960 if (debug_displaced)
4961 fprintf_unfiltered (gdb_stdlog,
4962 "displaced: write child pc from %s to %s\n",
4963 paddress (gdbarch,
4964 regcache_read_pc (child_regcache)),
4965 paddress (gdbarch, parent_pc));
4966
4967 regcache_write_pc (child_regcache, parent_pc);
4968 }
4969 }
4970
00431a78 4971 context_switch (ecs);
5a2901d9 4972
b242c3c2
PA
4973 /* Immediately detach breakpoints from the child before there's
4974 any chance of letting the user delete breakpoints from the
4975 breakpoint lists. If we don't do this early, it's easy to
4976 leave left over traps in the child, vis: "break foo; catch
4977 fork; c; <fork>; del; c; <child calls foo>". We only follow
4978 the fork on the last `continue', and by that time the
4979 breakpoint at "foo" is long gone from the breakpoint table.
4980 If we vforked, then we don't need to unpatch here, since both
4981 parent and child are sharing the same memory pages; we'll
4982 need to unpatch at follow/detach time instead to be certain
4983 that new breakpoints added between catchpoint hit time and
4984 vfork follow are detached. */
4985 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
4986 {
b242c3c2
PA
4987 /* This won't actually modify the breakpoint list, but will
4988 physically remove the breakpoints from the child. */
d80ee84f 4989 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
4990 }
4991
34b7e8a6 4992 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 4993
e58b0e63
PA
4994 /* In case the event is caught by a catchpoint, remember that
4995 the event is to be followed at the next resume of the thread,
4996 and not immediately. */
4997 ecs->event_thread->pending_follow = ecs->ws;
4998
f2ffa92b
PA
4999 ecs->event_thread->suspend.stop_pc
5000 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5001
16c381f0 5002 ecs->event_thread->control.stop_bpstat
a01bda52 5003 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5004 ecs->event_thread->suspend.stop_pc,
5005 ecs->event_thread, &ecs->ws);
675bf4cb 5006
c65d6b55
PA
5007 if (handle_stop_requested (ecs))
5008 return;
5009
ce12b012
PA
5010 /* If no catchpoint triggered for this, then keep going. Note
5011 that we're interested in knowing the bpstat actually causes a
5012 stop, not just if it may explain the signal. Software
5013 watchpoints, for example, always appear in the bpstat. */
5014 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5015 {
e58b0e63 5016 int should_resume;
3e43a32a
MS
5017 int follow_child
5018 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5019
a493e3e2 5020 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63
PA
5021
5022 should_resume = follow_fork ();
5023
00431a78
PA
5024 thread_info *parent = ecs->event_thread;
5025 thread_info *child = find_thread_ptid (ecs->ws.value.related_pid);
6c95b8df 5026
a2077e25
PA
5027 /* At this point, the parent is marked running, and the
5028 child is marked stopped. */
5029
5030 /* If not resuming the parent, mark it stopped. */
5031 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5032 parent->set_running (false);
a2077e25
PA
5033
5034 /* If resuming the child, mark it running. */
5035 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5036 child->set_running (true);
a2077e25 5037
6c95b8df 5038 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5039 if (!detach_fork && (non_stop
5040 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5041 {
5042 if (follow_child)
5043 switch_to_thread (parent);
5044 else
5045 switch_to_thread (child);
5046
5047 ecs->event_thread = inferior_thread ();
5048 ecs->ptid = inferior_ptid;
5049 keep_going (ecs);
5050 }
5051
5052 if (follow_child)
5053 switch_to_thread (child);
5054 else
5055 switch_to_thread (parent);
5056
e58b0e63
PA
5057 ecs->event_thread = inferior_thread ();
5058 ecs->ptid = inferior_ptid;
5059
5060 if (should_resume)
5061 keep_going (ecs);
5062 else
22bcd14b 5063 stop_waiting (ecs);
04e68871
DJ
5064 return;
5065 }
94c57d6a
PA
5066 process_event_stop_test (ecs);
5067 return;
488f131b 5068
6c95b8df
PA
5069 case TARGET_WAITKIND_VFORK_DONE:
5070 /* Done with the shared memory region. Re-insert breakpoints in
5071 the parent, and keep going. */
5072
00431a78 5073 context_switch (ecs);
6c95b8df
PA
5074
5075 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5076 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5077
5078 if (handle_stop_requested (ecs))
5079 return;
5080
6c95b8df
PA
5081 /* This also takes care of reinserting breakpoints in the
5082 previously locked inferior. */
5083 keep_going (ecs);
5084 return;
5085
488f131b 5086 case TARGET_WAITKIND_EXECD:
488f131b 5087
cbd2b4e3
PA
5088 /* Note we can't read registers yet (the stop_pc), because we
5089 don't yet know the inferior's post-exec architecture.
5090 'stop_pc' is explicitly read below instead. */
00431a78 5091 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5092
6c95b8df
PA
5093 /* Do whatever is necessary to the parent branch of the vfork. */
5094 handle_vfork_child_exec_or_exit (1);
5095
795e548f
PA
5096 /* This causes the eventpoints and symbol table to be reset.
5097 Must do this now, before trying to determine whether to
5098 stop. */
71b43ef8 5099 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5100
17d8546e
DB
5101 /* In follow_exec we may have deleted the original thread and
5102 created a new one. Make sure that the event thread is the
5103 execd thread for that case (this is a nop otherwise). */
5104 ecs->event_thread = inferior_thread ();
5105
f2ffa92b
PA
5106 ecs->event_thread->suspend.stop_pc
5107 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5108
16c381f0 5109 ecs->event_thread->control.stop_bpstat
a01bda52 5110 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5111 ecs->event_thread->suspend.stop_pc,
5112 ecs->event_thread, &ecs->ws);
795e548f 5113
71b43ef8
PA
5114 /* Note that this may be referenced from inside
5115 bpstat_stop_status above, through inferior_has_execd. */
5116 xfree (ecs->ws.value.execd_pathname);
5117 ecs->ws.value.execd_pathname = NULL;
5118
c65d6b55
PA
5119 if (handle_stop_requested (ecs))
5120 return;
5121
04e68871 5122 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5123 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5124 {
a493e3e2 5125 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5126 keep_going (ecs);
5127 return;
5128 }
94c57d6a
PA
5129 process_event_stop_test (ecs);
5130 return;
488f131b 5131
b4dc5ffa
MK
5132 /* Be careful not to try to gather much state about a thread
5133 that's in a syscall. It's frequently a losing proposition. */
488f131b 5134 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5135 /* Getting the current syscall number. */
94c57d6a
PA
5136 if (handle_syscall_event (ecs) == 0)
5137 process_event_stop_test (ecs);
5138 return;
c906108c 5139
488f131b
JB
5140 /* Before examining the threads further, step this thread to
5141 get it entirely out of the syscall. (We get notice of the
5142 event when the thread is just on the verge of exiting a
5143 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5144 into user code.) */
488f131b 5145 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5146 if (handle_syscall_event (ecs) == 0)
5147 process_event_stop_test (ecs);
5148 return;
c906108c 5149
488f131b 5150 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5151 handle_signal_stop (ecs);
5152 return;
c906108c 5153
b2175913
MS
5154 case TARGET_WAITKIND_NO_HISTORY:
5155 /* Reverse execution: target ran out of history info. */
eab402df 5156
d1988021 5157 /* Switch to the stopped thread. */
00431a78 5158 context_switch (ecs);
d1988021
MM
5159 if (debug_infrun)
5160 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5161
34b7e8a6 5162 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5163 ecs->event_thread->suspend.stop_pc
5164 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5165
5166 if (handle_stop_requested (ecs))
5167 return;
5168
76727919 5169 gdb::observers::no_history.notify ();
22bcd14b 5170 stop_waiting (ecs);
b2175913 5171 return;
488f131b 5172 }
4f5d7f63
PA
5173}
5174
372316f1
PA
5175/* Restart threads back to what they were trying to do when we
5176 paused them for an in-line step-over. The EVENT_THREAD thread is
5177 ignored. */
4d9d9d04
PA
5178
5179static void
372316f1
PA
5180restart_threads (struct thread_info *event_thread)
5181{
372316f1
PA
5182 /* In case the instruction just stepped spawned a new thread. */
5183 update_thread_list ();
5184
08036331 5185 for (thread_info *tp : all_non_exited_threads ())
372316f1
PA
5186 {
5187 if (tp == event_thread)
5188 {
5189 if (debug_infrun)
5190 fprintf_unfiltered (gdb_stdlog,
5191 "infrun: restart threads: "
5192 "[%s] is event thread\n",
a068643d 5193 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5194 continue;
5195 }
5196
5197 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5198 {
5199 if (debug_infrun)
5200 fprintf_unfiltered (gdb_stdlog,
5201 "infrun: restart threads: "
5202 "[%s] not meant to be running\n",
a068643d 5203 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5204 continue;
5205 }
5206
5207 if (tp->resumed)
5208 {
5209 if (debug_infrun)
5210 fprintf_unfiltered (gdb_stdlog,
5211 "infrun: restart threads: [%s] resumed\n",
a068643d 5212 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5213 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5214 continue;
5215 }
5216
5217 if (thread_is_in_step_over_chain (tp))
5218 {
5219 if (debug_infrun)
5220 fprintf_unfiltered (gdb_stdlog,
5221 "infrun: restart threads: "
5222 "[%s] needs step-over\n",
a068643d 5223 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5224 gdb_assert (!tp->resumed);
5225 continue;
5226 }
5227
5228
5229 if (tp->suspend.waitstatus_pending_p)
5230 {
5231 if (debug_infrun)
5232 fprintf_unfiltered (gdb_stdlog,
5233 "infrun: restart threads: "
5234 "[%s] has pending status\n",
a068643d 5235 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5236 tp->resumed = 1;
5237 continue;
5238 }
5239
c65d6b55
PA
5240 gdb_assert (!tp->stop_requested);
5241
372316f1
PA
5242 /* If some thread needs to start a step-over at this point, it
5243 should still be in the step-over queue, and thus skipped
5244 above. */
5245 if (thread_still_needs_step_over (tp))
5246 {
5247 internal_error (__FILE__, __LINE__,
5248 "thread [%s] needs a step-over, but not in "
5249 "step-over queue\n",
a068643d 5250 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5251 }
5252
5253 if (currently_stepping (tp))
5254 {
5255 if (debug_infrun)
5256 fprintf_unfiltered (gdb_stdlog,
5257 "infrun: restart threads: [%s] was stepping\n",
a068643d 5258 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5259 keep_going_stepped_thread (tp);
5260 }
5261 else
5262 {
5263 struct execution_control_state ecss;
5264 struct execution_control_state *ecs = &ecss;
5265
5266 if (debug_infrun)
5267 fprintf_unfiltered (gdb_stdlog,
5268 "infrun: restart threads: [%s] continuing\n",
a068643d 5269 target_pid_to_str (tp->ptid).c_str ());
372316f1 5270 reset_ecs (ecs, tp);
00431a78 5271 switch_to_thread (tp);
372316f1
PA
5272 keep_going_pass_signal (ecs);
5273 }
5274 }
5275}
5276
5277/* Callback for iterate_over_threads. Find a resumed thread that has
5278 a pending waitstatus. */
5279
5280static int
5281resumed_thread_with_pending_status (struct thread_info *tp,
5282 void *arg)
5283{
5284 return (tp->resumed
5285 && tp->suspend.waitstatus_pending_p);
5286}
5287
5288/* Called when we get an event that may finish an in-line or
5289 out-of-line (displaced stepping) step-over started previously.
5290 Return true if the event is processed and we should go back to the
5291 event loop; false if the caller should continue processing the
5292 event. */
5293
5294static int
4d9d9d04
PA
5295finish_step_over (struct execution_control_state *ecs)
5296{
372316f1
PA
5297 int had_step_over_info;
5298
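/* If the event thread was doing an out-of-line (displaced) step,
   finish it now, before looking at the in-line step-over state.  */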
b93d82bc
SM
5299 displaced_step_finish (ecs->event_thread,
5300 ecs->event_thread->suspend.stop_signal);
4d9d9d04 5301
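/* Remember whether an in-line step-over was in progress; this is
   checked again below, after a new step-over may have been
   started.  */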
372316f1
PA
5302 had_step_over_info = step_over_info_valid_p ();
5303
5304 if (had_step_over_info)
4d9d9d04
PA
5305 {
5306 /* If we're stepping over a breakpoint with all threads locked,
5307 then only the thread that was stepped should be reporting
5308 back an event. */
5309 gdb_assert (ecs->event_thread->control.trap_expected);
5310
c65d6b55 5311 clear_step_over_info ();
4d9d9d04
PA
5312 }
5313
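/* What follows only applies to non-stop: possibly starting another
   thread's step-over and restarting the threads we paused.  */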
fbea99ea 5314 if (!target_is_non_stop_p ())
372316f1 5315 return 0;
4d9d9d04
PA
5316
5317 /* Start a new step-over in another thread if there's one that
5318 needs it. */
5319 start_step_over ();
372316f1
PA
5320
5321 /* If we were stepping over a breakpoint before, and haven't started
5322 a new in-line step-over sequence, then restart all other threads
5323 (except the event thread). We can't do this in all-stop, as then
5324 e.g., we wouldn't be able to issue any other remote packet until
5325 these other threads stop. */
5326 if (had_step_over_info && !step_over_info_valid_p ())
5327 {
5328 struct thread_info *pending;
5329
5330 /* If we only have threads with pending statuses, the restart
5331 below won't restart any thread and so nothing re-inserts the
5332 breakpoint we just stepped over. But we need it inserted
5333 when we later process the pending events, otherwise if
5334 another thread has a pending event for this breakpoint too,
5335 we'd discard its event (because the breakpoint that
5336 originally caused the event was no longer inserted). */
00431a78 5337 context_switch (ecs);
372316f1
PA
5338 insert_breakpoints ();
5339
abeeff98
LM
5340 {
5341 scoped_restore save_defer_tc
5342 = make_scoped_defer_target_commit_resume ();
5343 restart_threads (ecs->event_thread);
5344 }
5345 target_commit_resume ();
372316f1
PA
5346
5347 /* If we have events pending, go through handle_inferior_event
5348 again, picking up a pending event at random. This avoids
5349 thread starvation. */
5350
5351 /* But not if we just stepped over a watchpoint in order to let
5352 the instruction execute so we can evaluate its expression.
5353 The set of watchpoints that triggered is recorded in the
5354 breakpoint objects themselves (see bp->watchpoint_triggered).
5355 If we processed another event first, that other event could
5356 clobber this info. */
5357 if (ecs->event_thread->stepping_over_watchpoint)
5358 return 0;
5359
5360 pending = iterate_over_threads (resumed_thread_with_pending_status,
5361 NULL);
5362 if (pending != NULL)
5363 {
5364 struct thread_info *tp = ecs->event_thread;
5365 struct regcache *regcache;
5366
5367 if (debug_infrun)
5368 {
5369 fprintf_unfiltered (gdb_stdlog,
5370 "infrun: found resumed threads with "
5371 "pending events, saving status\n");
5372 }
5373
5374 gdb_assert (pending != tp);
5375
5376 /* Record the event thread's event for later. */
5377 save_waitstatus (tp, &ecs->ws);
5378 /* This was cleared early, by handle_inferior_event. Set it
5379 so this pending event is considered by
5380 do_target_wait. */
5381 tp->resumed = 1;
5382
5383 gdb_assert (!tp->executing);
5384
00431a78 5385 regcache = get_thread_regcache (tp);
372316f1
PA
5386 tp->suspend.stop_pc = regcache_read_pc (regcache);
5387
5388 if (debug_infrun)
5389 {
5390 fprintf_unfiltered (gdb_stdlog,
5391 "infrun: saved stop_pc=%s for %s "
5392 "(currently_stepping=%d)\n",
5393 paddress (target_gdbarch (),
5394 tp->suspend.stop_pc),
a068643d 5395 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
5396 currently_stepping (tp));
5397 }
5398
5399 /* This in-line step-over finished; clear this so we won't
5400 start a new one. This is what handle_signal_stop would
5401 do, if we returned false. */
5402 tp->stepping_over_breakpoint = 0;
5403
5404 /* Wake up the event loop again. */
5405 mark_async_event_handler (infrun_async_inferior_event_token);
5406
5407 prepare_to_wait (ecs);
5408 return 1;
5409 }
5410 }
5411
5412 return 0;
4d9d9d04
PA
5413}
5414
4f5d7f63
PA
5415/* Come here when the program has stopped with a signal. */
5416
5417static void
5418handle_signal_stop (struct execution_control_state *ecs)
5419{
5420 struct frame_info *frame;
5421 struct gdbarch *gdbarch;
5422 int stopped_by_watchpoint;
5423 enum stop_kind stop_soon;
5424 int random_signal;
c906108c 5425
f0407826
DE
5426 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5427
c65d6b55
PA
5428 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5429
f0407826
DE
5430 /* Do we need to clean up the state of a thread that has
5431 completed a displaced single-step? (Doing so usually affects
5432 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5433 if (finish_step_over (ecs))
5434 return;
f0407826
DE
5435
5436 /* If we either finished a single-step or hit a breakpoint, but
5437 the user wanted this thread to be stopped, pretend we got a
5438 SIG0 (generic unsignaled stop). */
5439 if (ecs->event_thread->stop_requested
5440 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5441 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5442
f2ffa92b
PA
5443 ecs->event_thread->suspend.stop_pc
5444 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5445
527159b7 5446 if (debug_infrun)
237fc4c9 5447 {
00431a78 5448 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5449 struct gdbarch *reg_gdbarch = regcache->arch ();
2989a365 5450 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
7f82dfc7
JK
5451
5452 inferior_ptid = ecs->ptid;
5af949e3
UW
5453
5454 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
b926417a 5455 paddress (reg_gdbarch,
f2ffa92b 5456 ecs->event_thread->suspend.stop_pc));
d92524f1 5457 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5458 {
5459 CORE_ADDR addr;
abbb1732 5460
237fc4c9
PA
5461 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5462
8b88a78e 5463 if (target_stopped_data_address (current_top_target (), &addr))
237fc4c9 5464 fprintf_unfiltered (gdb_stdlog,
5af949e3 5465 "infrun: stopped data address = %s\n",
b926417a 5466 paddress (reg_gdbarch, addr));
237fc4c9
PA
5467 else
5468 fprintf_unfiltered (gdb_stdlog,
5469 "infrun: (no data address available)\n");
5470 }
5471 }
527159b7 5472
36fa8042
PA
5473 /* This originates from start_remote(), start_inferior() and
5474 shared library hook functions. */
00431a78 5475 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5476 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5477 {
00431a78 5478 context_switch (ecs);
36fa8042
PA
5479 if (debug_infrun)
5480 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5481 stop_print_frame = 1;
22bcd14b 5482 stop_waiting (ecs);
36fa8042
PA
5483 return;
5484 }
5485
36fa8042
PA
5486 /* This originates from attach_command(). We need to overwrite
5487 the stop_signal here, because some kernels don't ignore a
5488 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5489 See more comments in inferior.h. On the other hand, if we
5490 get a non-SIGSTOP, report it to the user - assume the backend
5491 will handle the SIGSTOP if it should show up later.
5492
5493 Also consider that the attach is complete when we see a
5494 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5495 target extended-remote report it instead of a SIGSTOP
5496 (e.g. gdbserver). We already rely on SIGTRAP being our
5497 signal, so this is no exception.
5498
5499 Also consider that the attach is complete when we see a
5500 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5501 the target to stop all threads of the inferior, in case the
5502 low level attach operation doesn't stop them implicitly. If
5503 they weren't stopped implicitly, then the stub will report a
5504 GDB_SIGNAL_0, meaning: stopped for no particular reason
5505 other than GDB's request. */
5506 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5507 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5508 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5509 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5510 {
5511 stop_print_frame = 1;
22bcd14b 5512 stop_waiting (ecs);
36fa8042
PA
5513 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5514 return;
5515 }
5516
488f131b 5517 /* See if something interesting happened to the non-current thread. If
b40c7d58 5518 so, then switch to that thread. */
d7e15655 5519 if (ecs->ptid != inferior_ptid)
488f131b 5520 {
527159b7 5521 if (debug_infrun)
8a9de0e4 5522 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
527159b7 5523
00431a78 5524 context_switch (ecs);
c5aa993b 5525
9a4105ab 5526 if (deprecated_context_hook)
00431a78 5527 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5528 }
c906108c 5529
568d6575
UW
5530 /* At this point, get hold of the now-current thread's frame. */
5531 frame = get_current_frame ();
5532 gdbarch = get_frame_arch (frame);
5533
2adfaa28 5534 /* Pull the single step breakpoints out of the target. */
af48d08f 5535 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5536 {
af48d08f 5537 struct regcache *regcache;
af48d08f 5538 CORE_ADDR pc;
2adfaa28 5539
00431a78 5540 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5541 const address_space *aspace = regcache->aspace ();
5542
af48d08f 5543 pc = regcache_read_pc (regcache);
34b7e8a6 5544
af48d08f
PA
5545 /* However, before doing so, if this single-step breakpoint was
5546 actually for another thread, set this thread up for moving
5547 past it. */
5548 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5549 aspace, pc))
5550 {
5551 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28
PA
5552 {
5553 if (debug_infrun)
5554 {
5555 fprintf_unfiltered (gdb_stdlog,
af48d08f 5556 "infrun: [%s] hit another thread's "
34b7e8a6 5557 "single-step breakpoint\n",
a068643d 5558 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5559 }
af48d08f
PA
5560 ecs->hit_singlestep_breakpoint = 1;
5561 }
5562 }
5563 else
5564 {
5565 if (debug_infrun)
5566 {
5567 fprintf_unfiltered (gdb_stdlog,
5568 "infrun: [%s] hit its "
5569 "single-step breakpoint\n",
a068643d 5570 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28
PA
5571 }
5572 }
488f131b 5573 }
af48d08f 5574 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5575
963f9c80
PA
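/* If this trap comes from single-stepping past a watchpoint we are
   already handling (trap_expected and stepping_over_watchpoint are
   set), don't treat it as a new watchpoint hit; otherwise ask which
   watchpoints triggered.  */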
5576 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5577 && ecs->event_thread->control.trap_expected
5578 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5579 stopped_by_watchpoint = 0;
5580 else
5581 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5582
5583 /* If necessary, step over this watchpoint. We'll be back to display
5584 it in a moment. */
5585 if (stopped_by_watchpoint
d92524f1 5586 && (target_have_steppable_watchpoint
568d6575 5587 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5588 {
488f131b
JB
5589 /* At this point, we are stopped at an instruction which has
5590 attempted to write to a piece of memory under control of
5591 a watchpoint. The instruction hasn't actually executed
5592 yet. If we were to evaluate the watchpoint expression
5593 now, we would get the old value, and therefore no change
5594 would seem to have occurred.
5595
5596 In order to make watchpoints work `right', we really need
5597 to complete the memory write, and then evaluate the
d983da9c
DJ
5598 watchpoint expression. We do this by single-stepping the
5599 target.
5600
7f89fd65 5601 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5602 it. For example, the PA can (with some kernel cooperation)
5603 single step over a watchpoint without disabling the watchpoint.
5604
5605 It is far more common to need to disable a watchpoint to step
5606 the inferior over it. If we have non-steppable watchpoints,
5607 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5608 disable all watchpoints.
5609
5610 Any breakpoint at PC must also be stepped over -- if there's
5611 one, it will have already triggered before the watchpoint
5612 triggered, and we either already reported it to the user, or
5613 it didn't cause a stop and we called keep_going. In either
5614 case, if there was a breakpoint at PC, we must be trying to
5615 step past it. */
5616 ecs->event_thread->stepping_over_watchpoint = 1;
5617 keep_going (ecs);
488f131b
JB
5618 return;
5619 }
5620
4e1c45ea 5621 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5622 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5623 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5624 ecs->event_thread->control.stop_step = 0;
488f131b 5625 stop_print_frame = 1;
488f131b 5626 stopped_by_random_signal = 0;
ddfe970e 5627 bpstat stop_chain = NULL;
488f131b 5628
edb3359d
DJ
5629 /* Hide inlined functions starting here, unless we just performed stepi or
5630 nexti. After stepi and nexti, always show the innermost frame (not any
5631 inline function call sites). */
16c381f0 5632 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 5633 {
00431a78
PA
5634 const address_space *aspace
5635 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
5636
5637 /* skip_inline_frames is expensive, so we avoid it if we can
5638 determine that the address is one where functions cannot have
5639 been inlined. This improves performance with inferiors that
5640 load a lot of shared libraries, because the solib event
5641 breakpoint is defined as the address of a function (i.e. not
5642 inline). Note that we have to check the previous PC as well
5643 as the current one to catch cases when we have just
5644 single-stepped off a breakpoint prior to reinstating it.
5645 Note that we're assuming that the code we single-step to is
5646 not inline, but that's not definitive: there's nothing
5647 preventing the event breakpoint function from containing
5648 inlined code, and the single-step ending up there. If the
5649 user had set a breakpoint on that inlined code, the missing
5650 skip_inline_frames call would break things. Fortunately
5651 that's an extremely unlikely scenario. */
f2ffa92b
PA
5652 if (!pc_at_non_inline_function (aspace,
5653 ecs->event_thread->suspend.stop_pc,
5654 &ecs->ws)
a210c238
MR
5655 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5656 && ecs->event_thread->control.trap_expected
5657 && pc_at_non_inline_function (aspace,
5658 ecs->event_thread->prev_pc,
09ac7c10 5659 &ecs->ws)))
1c5a993e 5660 {
f2ffa92b
PA
5661 stop_chain = build_bpstat_chain (aspace,
5662 ecs->event_thread->suspend.stop_pc,
5663 &ecs->ws);
00431a78 5664 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
5665
5666 /* Re-fetch current thread's frame in case that invalidated
5667 the frame cache. */
5668 frame = get_current_frame ();
5669 gdbarch = get_frame_arch (frame);
5670 }
0574c78f 5671 }
edb3359d 5672
a493e3e2 5673 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 5674 && ecs->event_thread->control.trap_expected
568d6575 5675 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 5676 && currently_stepping (ecs->event_thread))
3352ef37 5677 {
b50d7442 5678 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 5679 also on an instruction that needs to be stepped multiple
1777feb0 5680 times before it has fully executed. E.g., architectures
3352ef37
AC
5681 with a delay slot. It needs to be stepped twice, once for
5682 the instruction and once for the delay slot. */
5683 int step_through_delay
568d6575 5684 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 5685
527159b7 5686 if (debug_infrun && step_through_delay)
8a9de0e4 5687 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
16c381f0
JK
5688 if (ecs->event_thread->control.step_range_end == 0
5689 && step_through_delay)
3352ef37
AC
5690 {
5691 /* The user issued a continue when stopped at a breakpoint.
5692 Set up for another trap and get out of here. */
4e1c45ea 5693 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5694 keep_going (ecs);
5695 return;
5696 }
5697 else if (step_through_delay)
5698 {
5699 /* The user issued a step when stopped at a breakpoint.
5700 Maybe we should stop, maybe we should not - the delay
5701 slot *might* correspond to a line of source. In any
ca67fcb8
VP
5702 case, don't decide that here, just set
5703 ecs->stepping_over_breakpoint, making sure we
5704 single-step again before breakpoints are re-inserted. */
4e1c45ea 5705 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5706 }
5707 }
5708
ab04a2af
TT
5709 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
5710 handles this event. */
5711 ecs->event_thread->control.stop_bpstat
a01bda52 5712 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5713 ecs->event_thread->suspend.stop_pc,
5714 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 5715
ab04a2af
TT
5716 /* The following is in case a break condition called a
5717 function. */
5718 stop_print_frame = 1;
73dd234f 5719
ab04a2af
TT
5720 /* This is where we handle "moribund" watchpoints. Unlike
5721 software breakpoint traps, hardware watchpoint traps are
5722 always distinguishable from random traps. If no high-level
5723 watchpoint is associated with the reported stop data address
5724 anymore, then the bpstat does not explain the signal ---
5725 simply make sure to ignore it if `stopped_by_watchpoint' is
5726 set. */
5727
5728 if (debug_infrun
5729 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 5730 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 5731 GDB_SIGNAL_TRAP)
ab04a2af
TT
5732 && stopped_by_watchpoint)
5733 fprintf_unfiltered (gdb_stdlog,
5734 "infrun: no user watchpoint explains "
5735 "watchpoint SIGTRAP, ignoring\n");
73dd234f 5736
bac7d97b 5737 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
5738 at one stage in the past included checks for an inferior
5739 function call's call dummy's return breakpoint. The original
5740 comment, that went with the test, read:
03cebad2 5741
ab04a2af
TT
5742 ``End of a stack dummy. Some systems (e.g. Sony news) give
5743 another signal besides SIGTRAP, so check here as well as
5744 above.''
73dd234f 5745
ab04a2af
TT
5746 If someone ever tries to get call dummies on a
5747 non-executable stack to work (where the target would stop
5748 with something like a SIGSEGV), then those tests might need
5749 to be re-instated. Given, however, that the tests were only
5750 enabled when momentary breakpoints were not being used, I
5751 suspect that it won't be the case.
488f131b 5752
ab04a2af
TT
5753 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
5754 be necessary for call dummies on a non-executable stack on
5755 SPARC. */
488f131b 5756
bac7d97b 5757 /* See if the breakpoints module can explain the signal. */
47591c29
PA
5758 random_signal
5759 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
5760 ecs->event_thread->suspend.stop_signal);
bac7d97b 5761
1cf4d951
PA
5762 /* Maybe this was a trap for a software breakpoint that has since
5763 been removed. */
5764 if (random_signal && target_stopped_by_sw_breakpoint ())
5765 {
f2ffa92b
PA
5766 if (program_breakpoint_here_p (gdbarch,
5767 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
5768 {
5769 struct regcache *regcache;
5770 int decr_pc;
5771
5772 /* Re-adjust PC to what the program would see if GDB was not
5773 debugging it. */
00431a78 5774 regcache = get_thread_regcache (ecs->event_thread);
527a273a 5775 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
5776 if (decr_pc != 0)
5777 {
07036511
TT
5778 gdb::optional<scoped_restore_tmpl<int>>
5779 restore_operation_disable;
1cf4d951
PA
5780
5781 if (record_full_is_used ())
07036511
TT
5782 restore_operation_disable.emplace
5783 (record_full_gdb_operation_disable_set ());
1cf4d951 5784
f2ffa92b
PA
5785 regcache_write_pc (regcache,
5786 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
5787 }
5788 }
5789 else
5790 {
5791 /* A delayed software breakpoint event. Ignore the trap. */
5792 if (debug_infrun)
5793 fprintf_unfiltered (gdb_stdlog,
5794 "infrun: delayed software breakpoint "
5795 "trap, ignoring\n");
5796 random_signal = 0;
5797 }
5798 }
5799
5800 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
5801 has since been removed. */
5802 if (random_signal && target_stopped_by_hw_breakpoint ())
5803 {
5804 /* A delayed hardware breakpoint event. Ignore the trap. */
5805 if (debug_infrun)
5806 fprintf_unfiltered (gdb_stdlog,
5807 "infrun: delayed hardware breakpoint/watchpoint "
5808 "trap, ignoring\n");
5809 random_signal = 0;
5810 }
5811
bac7d97b
PA
5812 /* If not, perhaps stepping/nexting can. */
5813 if (random_signal)
5814 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5815 && currently_stepping (ecs->event_thread));
ab04a2af 5816
2adfaa28
PA
5817 /* Perhaps the thread hit a single-step breakpoint of _another_
5818 thread. Single-step breakpoints are transparent to the
5819 breakpoints module. */
5820 if (random_signal)
5821 random_signal = !ecs->hit_singlestep_breakpoint;
5822
bac7d97b
PA
5823 /* No? Perhaps we got a moribund watchpoint. */
5824 if (random_signal)
5825 random_signal = !stopped_by_watchpoint;
ab04a2af 5826
c65d6b55
PA
5827 /* Always stop if the user explicitly requested this thread to
5828 remain stopped. */
5829 if (ecs->event_thread->stop_requested)
5830 {
5831 random_signal = 1;
5832 if (debug_infrun)
5833 fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
5834 }
5835
488f131b
JB
5836 /* For the program's own signals, act according to
5837 the signal handling tables. */
5838
ce12b012 5839 if (random_signal)
488f131b
JB
5840 {
5841 /* Signal not for debugging purposes. */
c9657e70 5842 struct inferior *inf = find_inferior_ptid (ecs->ptid);
c9737c08 5843 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 5844
527159b7 5845 if (debug_infrun)
c9737c08
PA
5846 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
5847 gdb_signal_to_symbol_string (stop_signal));
527159b7 5848
488f131b
JB
5849 stopped_by_random_signal = 1;
5850
252fbfc8
PA
5851 /* Always stop on signals if we're either just gaining control
5852 of the program, or the user explicitly requested this thread
5853 to remain stopped. */
d6b48e9c 5854 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 5855 || ecs->event_thread->stop_requested
24291992 5856 || (!inf->detaching
16c381f0 5857 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 5858 {
22bcd14b 5859 stop_waiting (ecs);
488f131b
JB
5860 return;
5861 }
b57bacec
PA
5862
5863 /* Notify observers the signal has "handle print" set. Note we
5864 returned early above if stopping; normal_stop handles the
5865 printing in that case. */
5866 if (signal_print[ecs->event_thread->suspend.stop_signal])
5867 {
5868 /* The signal table tells us to print about this signal. */
223ffa71 5869 target_terminal::ours_for_output ();
76727919 5870 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 5871 target_terminal::inferior ();
b57bacec 5872 }
488f131b
JB
5873
5874 /* Clear the signal if it should not be passed. */
16c381f0 5875 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 5876 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 5877
f2ffa92b 5878 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 5879 && ecs->event_thread->control.trap_expected
8358c15c 5880 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
5881 {
5882 /* We were just starting a new sequence, attempting to
5883 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 5884 Instead this signal arrives. This signal will take us out
68f53502
AC
5885 of the stepping range so GDB needs to remember to, when
5886 the signal handler returns, resume stepping off that
5887 breakpoint. */
5888 /* To simplify things, "continue" is forced to use the same
5889 code paths as single-step - set a breakpoint at the
5890 signal return address and then, once hit, step off that
5891 breakpoint. */
237fc4c9
PA
5892 if (debug_infrun)
5893 fprintf_unfiltered (gdb_stdlog,
5894 "infrun: signal arrived while stepping over "
5895 "breakpoint\n");
d3169d93 5896
2c03e5be 5897 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 5898 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5899 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5900 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
5901
5902 /* If we were nexting/stepping some other thread, switch to
5903 it, so that we don't continue it, losing control. */
5904 if (!switch_back_to_stepped_thread (ecs))
5905 keep_going (ecs);
9d799f85 5906 return;
68f53502 5907 }
9d799f85 5908
e5f8a7cc 5909 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
5910 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
5911 ecs->event_thread)
e5f8a7cc 5912 || ecs->event_thread->control.step_range_end == 1)
edb3359d 5913 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 5914 ecs->event_thread->control.step_stack_frame_id)
8358c15c 5915 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
5916 {
5917 /* The inferior is about to take a signal that will take it
5918 out of the single step range. Set a breakpoint at the
5919 current PC (which is presumably where the signal handler
5920 will eventually return) and then allow the inferior to
5921 run free.
5922
5923 Note that this is only needed for a signal delivered
5924 while in the single-step range. Nested signals aren't a
5925 problem as they eventually all return. */
237fc4c9
PA
5926 if (debug_infrun)
5927 fprintf_unfiltered (gdb_stdlog,
5928 "infrun: signal may take us out of "
5929 "single-step range\n");
5930
372316f1 5931 clear_step_over_info ();
2c03e5be 5932 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 5933 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5934 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5935 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
5936 keep_going (ecs);
5937 return;
d303a6c7 5938 }
9d799f85 5939
85102364 5940 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
5941 when either there's a nested signal, or when there's a
5942 pending signal enabled just as the signal handler returns
5943 (leaving the inferior at the step-resume-breakpoint without
5944 actually executing it). Either way continue until the
5945 breakpoint is really hit. */
c447ac0b
PA
5946
5947 if (!switch_back_to_stepped_thread (ecs))
5948 {
5949 if (debug_infrun)
5950 fprintf_unfiltered (gdb_stdlog,
5951 "infrun: random signal, keep going\n");
5952
5953 keep_going (ecs);
5954 }
5955 return;
488f131b 5956 }
94c57d6a
PA
5957
5958 process_event_stop_test (ecs);
5959}
5960
5961/* Come here when we've got some debug event / signal we can explain
5962 (IOW, not a random signal), and test whether it should cause a
5963 stop, or whether we should resume the inferior (transparently).
5964 E.g., could be a breakpoint whose condition evaluates false; we
5965 could be still stepping within the line; etc. */
5966
5967static void
5968process_event_stop_test (struct execution_control_state *ecs)
5969{
5970 struct symtab_and_line stop_pc_sal;
5971 struct frame_info *frame;
5972 struct gdbarch *gdbarch;
cdaa5b73
PA
5973 CORE_ADDR jmp_buf_pc;
5974 struct bpstat_what what;
94c57d6a 5975
cdaa5b73 5976 /* Handle cases caused by hitting a breakpoint. */
611c83ae 5977
cdaa5b73
PA
5978 frame = get_current_frame ();
5979 gdbarch = get_frame_arch (frame);
fcf3daef 5980
cdaa5b73 5981 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 5982
cdaa5b73
PA
5983 if (what.call_dummy)
5984 {
5985 stop_stack_dummy = what.call_dummy;
5986 }
186c406b 5987
243a9253
PA
5988 /* A few breakpoint types have callbacks associated (e.g.,
5989 bp_jit_event). Run them now. */
5990 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
5991
cdaa5b73
PA
5992 /* If we hit an internal event that triggers symbol changes, the
5993 current frame will be invalidated within bpstat_what (e.g., if we
5994 hit an internal solib event). Re-fetch it. */
5995 frame = get_current_frame ();
5996 gdbarch = get_frame_arch (frame);
e2e4d78b 5997
cdaa5b73
PA
5998 switch (what.main_action)
5999 {
6000 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6001 /* If we hit the breakpoint at longjmp while stepping, we
6002 install a momentary breakpoint at the target of the
6003 jmp_buf. */
186c406b 6004
cdaa5b73
PA
6005 if (debug_infrun)
6006 fprintf_unfiltered (gdb_stdlog,
6007 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
186c406b 6008
cdaa5b73 6009 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6010
cdaa5b73
PA
6011 if (what.is_longjmp)
6012 {
6013 struct value *arg_value;
6014
6015 /* If we set the longjmp breakpoint via a SystemTap probe,
6016 then use it to extract the arguments. The destination PC
6017 is the third argument to the probe. */
6018 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6019 if (arg_value)
8fa0c4f8
AA
6020 {
6021 jmp_buf_pc = value_as_address (arg_value);
6022 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6023 }
cdaa5b73
PA
6024 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6025 || !gdbarch_get_longjmp_target (gdbarch,
6026 frame, &jmp_buf_pc))
e2e4d78b 6027 {
cdaa5b73
PA
6028 if (debug_infrun)
6029 fprintf_unfiltered (gdb_stdlog,
6030 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6031 "(!gdbarch_get_longjmp_target)\n");
6032 keep_going (ecs);
6033 return;
e2e4d78b 6034 }
e2e4d78b 6035
cdaa5b73
PA
6036 /* Insert a breakpoint at resume address. */
6037 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6038 }
6039 else
6040 check_exception_resume (ecs, frame);
6041 keep_going (ecs);
6042 return;
e81a37f7 6043
cdaa5b73
PA
6044 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6045 {
6046 struct frame_info *init_frame;
e81a37f7 6047
cdaa5b73 6048 /* There are several cases to consider.
c906108c 6049
cdaa5b73
PA
6050 1. The initiating frame no longer exists. In this case we
6051 must stop, because the exception or longjmp has gone too
6052 far.
2c03e5be 6053
cdaa5b73
PA
6054 2. The initiating frame exists, and is the same as the
6055 current frame. We stop, because the exception or longjmp
6056 has been caught.
2c03e5be 6057
cdaa5b73
PA
6058 3. The initiating frame exists and is different from the
6059 current frame. This means the exception or longjmp has
6060 been caught beneath the initiating frame, so keep going.
c906108c 6061
cdaa5b73
PA
6062 4. longjmp breakpoint has been placed just to protect
6063 against stale dummy frames and user is not interested in
6064 stopping around longjmps. */
c5aa993b 6065
cdaa5b73
PA
6066 if (debug_infrun)
6067 fprintf_unfiltered (gdb_stdlog,
6068 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
c5aa993b 6069
cdaa5b73
PA
6070 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6071 != NULL);
6072 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6073
cdaa5b73
PA
6074 if (what.is_longjmp)
6075 {
b67a2c6f 6076 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6077
cdaa5b73 6078 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6079 {
cdaa5b73
PA
6080 /* Case 4. */
6081 keep_going (ecs);
6082 return;
e5ef252a 6083 }
cdaa5b73 6084 }
c5aa993b 6085
cdaa5b73 6086 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6087
cdaa5b73
PA
6088 if (init_frame)
6089 {
6090 struct frame_id current_id
6091 = get_frame_id (get_current_frame ());
6092 if (frame_id_eq (current_id,
6093 ecs->event_thread->initiating_frame))
6094 {
6095 /* Case 2. Fall through. */
6096 }
6097 else
6098 {
6099 /* Case 3. */
6100 keep_going (ecs);
6101 return;
6102 }
68f53502 6103 }
488f131b 6104
cdaa5b73
PA
6105 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6106 exists. */
6107 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6108
bdc36728 6109 end_stepping_range (ecs);
cdaa5b73
PA
6110 }
6111 return;
e5ef252a 6112
cdaa5b73
PA
6113 case BPSTAT_WHAT_SINGLE:
6114 if (debug_infrun)
6115 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6116 ecs->event_thread->stepping_over_breakpoint = 1;
6117 /* Still need to check other stuff, at least the case where we
6118 are stepping and step out of the right range. */
6119 break;
e5ef252a 6120
cdaa5b73
PA
6121 case BPSTAT_WHAT_STEP_RESUME:
6122 if (debug_infrun)
6123 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
e5ef252a 6124
cdaa5b73
PA
6125 delete_step_resume_breakpoint (ecs->event_thread);
6126 if (ecs->event_thread->control.proceed_to_finish
6127 && execution_direction == EXEC_REVERSE)
6128 {
6129 struct thread_info *tp = ecs->event_thread;
6130
6131 /* We are finishing a function in reverse, and just hit the
6132 step-resume breakpoint at the start address of the
6133 function, and we're almost there -- just need to back up
6134 by one more single-step, which should take us back to the
6135 function call. */
6136 tp->control.step_range_start = tp->control.step_range_end = 1;
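	  /* Setting the range to [1, 1] is the same convention used
	     for stepi/nexti (see the step_range_end == 1 check further
	     down in this function): the thread still counts as
	     stepping, but any real PC falls outside the range, so the
	     next stop ends the step.  */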
6137 keep_going (ecs);
e5ef252a 6138 return;
cdaa5b73
PA
6139 }
6140 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6141 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6142 && execution_direction == EXEC_REVERSE)
6143 {
6144 /* We are stepping over a function call in reverse, and just
6145 hit the step-resume breakpoint at the start address of
6146 the function. Go back to single-stepping, which should
6147 take us back to the function call. */
6148 ecs->event_thread->stepping_over_breakpoint = 1;
6149 keep_going (ecs);
6150 return;
6151 }
6152 break;
e5ef252a 6153
cdaa5b73
PA
6154 case BPSTAT_WHAT_STOP_NOISY:
6155 if (debug_infrun)
6156 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6157 stop_print_frame = 1;
e5ef252a 6158
99619bea
PA
 6159 /* Assume the thread stopped for a breakpoint. We'll still check
6160 whether a/the breakpoint is there when the thread is next
6161 resumed. */
6162 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6163
22bcd14b 6164 stop_waiting (ecs);
cdaa5b73 6165 return;
e5ef252a 6166
cdaa5b73
PA
6167 case BPSTAT_WHAT_STOP_SILENT:
6168 if (debug_infrun)
6169 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6170 stop_print_frame = 0;
e5ef252a 6171
99619bea
PA
 6172 /* Assume the thread stopped for a breakpoint. We'll still check
6173 whether a/the breakpoint is there when the thread is next
6174 resumed. */
6175 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6176 stop_waiting (ecs);
cdaa5b73
PA
6177 return;
6178
6179 case BPSTAT_WHAT_HP_STEP_RESUME:
6180 if (debug_infrun)
6181 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6182
6183 delete_step_resume_breakpoint (ecs->event_thread);
6184 if (ecs->event_thread->step_after_step_resume_breakpoint)
6185 {
6186 /* Back when the step-resume breakpoint was inserted, we
6187 were trying to single-step off a breakpoint. Go back to
6188 doing that. */
6189 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6190 ecs->event_thread->stepping_over_breakpoint = 1;
6191 keep_going (ecs);
6192 return;
e5ef252a 6193 }
cdaa5b73
PA
6194 break;
6195
6196 case BPSTAT_WHAT_KEEP_CHECKING:
6197 break;
e5ef252a 6198 }
c906108c 6199
af48d08f
PA
6200 /* If we stepped a permanent breakpoint and we had a high priority
6201 step-resume breakpoint for the address we stepped, but we didn't
6202 hit it, then we must have stepped into the signal handler. The
6203 step-resume was only necessary to catch the case of _not_
6204 stepping into the handler, so delete it, and fall through to
6205 checking whether the step finished. */
6206 if (ecs->event_thread->stepped_breakpoint)
6207 {
6208 struct breakpoint *sr_bp
6209 = ecs->event_thread->control.step_resume_breakpoint;
6210
8d707a12
PA
6211 if (sr_bp != NULL
6212 && sr_bp->loc->permanent
af48d08f
PA
6213 && sr_bp->type == bp_hp_step_resume
6214 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6215 {
6216 if (debug_infrun)
6217 fprintf_unfiltered (gdb_stdlog,
6218 "infrun: stepped permanent breakpoint, stopped in "
6219 "handler\n");
6220 delete_step_resume_breakpoint (ecs->event_thread);
6221 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6222 }
6223 }
6224
cdaa5b73
PA
6225 /* We come here if we hit a breakpoint but should not stop for it.
6226 Possibly we also were stepping and should stop for that. So fall
6227 through and test for stepping. But, if not stepping, do not
6228 stop. */
c906108c 6229
a7212384
UW
6230 /* In all-stop mode, if we're currently stepping but have stopped in
6231 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6232 if (switch_back_to_stepped_thread (ecs))
6233 return;
776f04fa 6234
8358c15c 6235 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6236 {
527159b7 6237 if (debug_infrun)
d3169d93
DJ
6238 fprintf_unfiltered (gdb_stdlog,
6239 "infrun: step-resume breakpoint is inserted\n");
527159b7 6240
488f131b
JB
6241 /* Having a step-resume breakpoint overrides anything
6242 else having to do with stepping commands until
6243 that breakpoint is reached. */
488f131b
JB
6244 keep_going (ecs);
6245 return;
6246 }
c5aa993b 6247
16c381f0 6248 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6249 {
527159b7 6250 if (debug_infrun)
8a9de0e4 6251 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
488f131b 6252 /* Likewise if we aren't even stepping. */
488f131b
JB
6253 keep_going (ecs);
6254 return;
6255 }
c5aa993b 6256
4b7703ad
JB
6257 /* Re-fetch current thread's frame in case the code above caused
6258 the frame cache to be re-initialized, making our FRAME variable
6259 a dangling pointer. */
6260 frame = get_current_frame ();
628fe4e4 6261 gdbarch = get_frame_arch (frame);
7e324e48 6262 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6263
488f131b 6264 /* If stepping through a line, keep going if still within it.
c906108c 6265
488f131b
JB
6266 Note that step_range_end is the address of the first instruction
6267 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6268 within it!
6269
6270 Note also that during reverse execution, we may be stepping
6271 through a function epilogue and therefore must detect when
6272 the current-frame changes in the middle of a line. */
6273
f2ffa92b
PA
6274 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6275 ecs->event_thread)
31410e84 6276 && (execution_direction != EXEC_REVERSE
388a8562 6277 || frame_id_eq (get_frame_id (frame),
16c381f0 6278 ecs->event_thread->control.step_frame_id)))
488f131b 6279 {
527159b7 6280 if (debug_infrun)
5af949e3
UW
6281 fprintf_unfiltered
6282 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
16c381f0
JK
6283 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6284 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6285
c1e36e3e
PA
6286 /* Tentatively re-enable range stepping; `resume' disables it if
6287 necessary (e.g., if we're stepping over a breakpoint or we
6288 have software watchpoints). */
6289 ecs->event_thread->control.may_range_step = 1;
6290
b2175913
MS
6291 /* When stepping backward, stop at beginning of line range
6292 (unless it's the function entry point, in which case
6293 keep going back to the call point). */
f2ffa92b 6294 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6295 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6296 && stop_pc != ecs->stop_func_start
6297 && execution_direction == EXEC_REVERSE)
bdc36728 6298 end_stepping_range (ecs);
b2175913
MS
6299 else
6300 keep_going (ecs);
6301
488f131b
JB
6302 return;
6303 }
c5aa993b 6304
488f131b 6305 /* We stepped out of the stepping range. */
c906108c 6306
488f131b 6307 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6308 loader dynamic symbol resolution code...
6309
6310 EXEC_FORWARD: we keep on single stepping until we exit the run
6311 time loader code and reach the callee's address.
6312
6313 EXEC_REVERSE: we've already executed the callee (backward), and
6314 the runtime loader code is handled just like any other
6315 undebuggable function call. Now we need only keep stepping
6316 backward through the trampoline code, and that's handled further
6317 down, so there is nothing for us to do here. */
6318
6319 if (execution_direction != EXEC_REVERSE
16c381f0 6320 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6321 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6322 {
4c8c40e6 6323 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6324 gdbarch_skip_solib_resolver (gdbarch,
6325 ecs->event_thread->suspend.stop_pc);
c906108c 6326
527159b7 6327 if (debug_infrun)
3e43a32a
MS
6328 fprintf_unfiltered (gdb_stdlog,
6329 "infrun: stepped into dynsym resolve code\n");
527159b7 6330
488f131b
JB
6331 if (pc_after_resolver)
6332 {
6333 /* Set up a step-resume breakpoint at the address
6334 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6335 symtab_and_line sr_sal;
488f131b 6336 sr_sal.pc = pc_after_resolver;
6c95b8df 6337 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6338
a6d9a66e
UW
6339 insert_step_resume_breakpoint_at_sal (gdbarch,
6340 sr_sal, null_frame_id);
c5aa993b 6341 }
c906108c 6342
488f131b
JB
6343 keep_going (ecs);
6344 return;
6345 }
c906108c 6346
1d509aa6
MM
6347 /* Step through an indirect branch thunk. */
6348 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6349 && gdbarch_in_indirect_branch_thunk (gdbarch,
6350 ecs->event_thread->suspend.stop_pc))
1d509aa6
MM
6351 {
6352 if (debug_infrun)
6353 fprintf_unfiltered (gdb_stdlog,
6354 "infrun: stepped into indirect branch thunk\n");
6355 keep_going (ecs);
6356 return;
6357 }
6358
16c381f0
JK
6359 if (ecs->event_thread->control.step_range_end != 1
6360 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6361 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6362 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6363 {
527159b7 6364 if (debug_infrun)
3e43a32a
MS
6365 fprintf_unfiltered (gdb_stdlog,
6366 "infrun: stepped into signal trampoline\n");
42edda50 6367 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6368 a signal trampoline (either by a signal being delivered or by
6369 the signal handler returning). Just single-step until the
6370 inferior leaves the trampoline (either by calling the handler
6371 or returning). */
488f131b
JB
6372 keep_going (ecs);
6373 return;
6374 }
c906108c 6375
14132e89
MR
6376 /* If we're in the return path from a shared library trampoline,
6377 we want to proceed through the trampoline when stepping. */
6378 /* macro/2012-04-25: This needs to come before the subroutine
6379 call check below as on some targets return trampolines look
6380 like subroutine calls (MIPS16 return thunks). */
6381 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6382 ecs->event_thread->suspend.stop_pc,
6383 ecs->stop_func_name)
14132e89
MR
6384 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6385 {
6386 /* Determine where this trampoline returns. */
f2ffa92b
PA
6387 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6388 CORE_ADDR real_stop_pc
6389 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89
MR
6390
6391 if (debug_infrun)
6392 fprintf_unfiltered (gdb_stdlog,
6393 "infrun: stepped into solib return tramp\n");
6394
6395 /* Only proceed through if we know where it's going. */
6396 if (real_stop_pc)
6397 {
6398 /* And put the step-breakpoint there and go until there. */
51abb421 6399 symtab_and_line sr_sal;
14132e89
MR
6400 sr_sal.pc = real_stop_pc;
6401 sr_sal.section = find_pc_overlay (sr_sal.pc);
6402 sr_sal.pspace = get_frame_program_space (frame);
6403
6404 /* Do not specify what the fp should be when we stop since
6405 on some machines the prologue is where the new fp value
6406 is established. */
6407 insert_step_resume_breakpoint_at_sal (gdbarch,
6408 sr_sal, null_frame_id);
6409
6410 /* Restart without fiddling with the step ranges or
6411 other state. */
6412 keep_going (ecs);
6413 return;
6414 }
6415 }
6416
c17eaafe
DJ
6417 /* Check for subroutine calls. The check for the current frame
6418 equalling the step ID is not necessary - the check of the
6419 previous frame's ID is sufficient - but it is a common case and
6420 cheaper than checking the previous frame's ID.
14e60db5
DJ
6421
6422 NOTE: frame_id_eq will never report two invalid frame IDs as
6423 being equal, so to get into this block, both the current and
6424 previous frame must have valid frame IDs. */
005ca36a
JB
6425 /* The outer_frame_id check is a heuristic to detect stepping
6426 through startup code. If we step over an instruction which
6427 sets the stack pointer from an invalid value to a valid value,
6428 we may detect that as a subroutine call from the mythical
6429 "outermost" function. This could be fixed by marking
6430 outermost frames as !stack_p,code_p,special_p. Then the
6431 initial outermost frame, before sp was valid, would
ce6cca6d 6432 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6433 for more. */
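  /* In other words: the frame we are in now is not the frame we were
     stepping in, but its caller is -- i.e., we appear to have stepped
     into a subroutine call -- subject to the outer_frame_id /
     step_start_function guard described above.  */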
edb3359d 6434 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6435 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6436 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6437 ecs->event_thread->control.step_stack_frame_id)
6438 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6439 outer_frame_id)
885eeb5b 6440 || (ecs->event_thread->control.step_start_function
f2ffa92b 6441 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6442 {
f2ffa92b 6443 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6444 CORE_ADDR real_stop_pc;
8fb3e588 6445
527159b7 6446 if (debug_infrun)
8a9de0e4 6447 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
527159b7 6448
b7a084be 6449 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6450 {
6451 /* I presume that step_over_calls is only 0 when we're
6452 supposed to be stepping at the assembly language level
6453 ("stepi"). Just stop. */
388a8562 6454 /* And this works the same backward as frontward. MVS */
bdc36728 6455 end_stepping_range (ecs);
95918acb
AC
6456 return;
6457 }
8fb3e588 6458
388a8562
MS
6459 /* Reverse stepping through solib trampolines. */
6460
6461 if (execution_direction == EXEC_REVERSE
16c381f0 6462 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6463 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6464 || (ecs->stop_func_start == 0
6465 && in_solib_dynsym_resolve_code (stop_pc))))
6466 {
6467 /* Any solib trampoline code can be handled in reverse
6468 by simply continuing to single-step. We have already
6469 executed the solib function (backwards), and a few
6470 steps will take us back through the trampoline to the
6471 caller. */
6472 keep_going (ecs);
6473 return;
6474 }
6475
16c381f0 6476 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6477 {
b2175913
MS
6478 /* We're doing a "next".
6479
6480 Normal (forward) execution: set a breakpoint at the
6481 callee's return address (the address at which the caller
6482 will resume).
6483
 6484 Reverse (backward) execution: set the step-resume
6485 breakpoint at the start of the function that we just
6486 stepped into (backwards), and continue to there. When we
6130d0b7 6487 get there, we'll need to single-step back to the caller. */
b2175913
MS
6488
6489 if (execution_direction == EXEC_REVERSE)
6490 {
acf9414f
JK
6491 /* If we're already at the start of the function, we've either
6492 just stepped backward into a single instruction function,
6493 or stepped back out of a signal handler to the first instruction
6494 of the function. Just keep going, which will single-step back
6495 to the caller. */
58c48e72 6496 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6497 {
acf9414f 6498 /* Normal function call return (static or dynamic). */
51abb421 6499 symtab_and_line sr_sal;
acf9414f
JK
6500 sr_sal.pc = ecs->stop_func_start;
6501 sr_sal.pspace = get_frame_program_space (frame);
6502 insert_step_resume_breakpoint_at_sal (gdbarch,
6503 sr_sal, null_frame_id);
6504 }
b2175913
MS
6505 }
6506 else
568d6575 6507 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6508
8567c30f
AC
6509 keep_going (ecs);
6510 return;
6511 }
a53c66de 6512
95918acb 6513 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6514 calling routine and the real function), locate the real
6515 function. That's what tells us (a) whether we want to step
6516 into it at all, and (b) what prologue we want to run to the
6517 end of, if we do step into it. */
568d6575 6518 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6519 if (real_stop_pc == 0)
568d6575 6520 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6521 if (real_stop_pc != 0)
6522 ecs->stop_func_start = real_stop_pc;
8fb3e588 6523
db5f024e 6524 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6525 {
51abb421 6526 symtab_and_line sr_sal;
1b2bfbb9 6527 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6528 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6529
a6d9a66e
UW
6530 insert_step_resume_breakpoint_at_sal (gdbarch,
6531 sr_sal, null_frame_id);
8fb3e588
AC
6532 keep_going (ecs);
6533 return;
1b2bfbb9
RC
6534 }
6535
95918acb 6536 /* If we have line number information for the function we are
1bfeeb0f
JL
6537 thinking of stepping into and the function isn't on the skip
6538 list, step into it.
95918acb 6539
8fb3e588
AC
6540 If there are several symtabs at that PC (e.g. with include
6541 files), just want to know whether *any* of them have line
6542 numbers. find_pc_line handles this. */
95918acb
AC
6543 {
6544 struct symtab_and_line tmp_sal;
8fb3e588 6545
95918acb 6546 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6547 if (tmp_sal.line != 0
85817405 6548 && !function_name_is_marked_for_skip (ecs->stop_func_name,
de7985c3 6549 tmp_sal))
95918acb 6550 {
b2175913 6551 if (execution_direction == EXEC_REVERSE)
568d6575 6552 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6553 else
568d6575 6554 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6555 return;
6556 }
6557 }
6558
6559 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6560 set, we stop the step so that the user has a chance to switch
6561 in assembly mode. */
16c381f0 6562 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6563 && step_stop_if_no_debug)
95918acb 6564 {
bdc36728 6565 end_stepping_range (ecs);
95918acb
AC
6566 return;
6567 }
6568
b2175913
MS
6569 if (execution_direction == EXEC_REVERSE)
6570 {
acf9414f
JK
6571 /* If we're already at the start of the function, we've either just
6572 stepped backward into a single instruction function without line
6573 number info, or stepped back out of a signal handler to the first
6574 instruction of the function without line number info. Just keep
6575 going, which will single-step back to the caller. */
6576 if (ecs->stop_func_start != stop_pc)
6577 {
6578 /* Set a breakpoint at callee's start address.
6579 From there we can step once and be back in the caller. */
51abb421 6580 symtab_and_line sr_sal;
acf9414f
JK
6581 sr_sal.pc = ecs->stop_func_start;
6582 sr_sal.pspace = get_frame_program_space (frame);
6583 insert_step_resume_breakpoint_at_sal (gdbarch,
6584 sr_sal, null_frame_id);
6585 }
b2175913
MS
6586 }
6587 else
6588 /* Set a breakpoint at callee's return address (the address
6589 at which the caller will resume). */
568d6575 6590 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6591
95918acb 6592 keep_going (ecs);
488f131b 6593 return;
488f131b 6594 }
c906108c 6595
fdd654f3
MS
6596 /* Reverse stepping through solib trampolines. */
6597
6598 if (execution_direction == EXEC_REVERSE
16c381f0 6599 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 6600 {
f2ffa92b
PA
6601 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6602
fdd654f3
MS
6603 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6604 || (ecs->stop_func_start == 0
6605 && in_solib_dynsym_resolve_code (stop_pc)))
6606 {
6607 /* Any solib trampoline code can be handled in reverse
6608 by simply continuing to single-step. We have already
6609 executed the solib function (backwards), and a few
6610 steps will take us back through the trampoline to the
6611 caller. */
6612 keep_going (ecs);
6613 return;
6614 }
6615 else if (in_solib_dynsym_resolve_code (stop_pc))
6616 {
6617 /* Stepped backward into the solib dynsym resolver.
6618 Set a breakpoint at its start and continue, then
6619 one more step will take us out. */
51abb421 6620 symtab_and_line sr_sal;
fdd654f3 6621 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6622 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6623 insert_step_resume_breakpoint_at_sal (gdbarch,
6624 sr_sal, null_frame_id);
6625 keep_going (ecs);
6626 return;
6627 }
6628 }
6629
f2ffa92b 6630 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 6631
1b2bfbb9
RC
6632 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6633 the trampoline processing logic, however, there are some trampolines
6634 that have no names, so we should do trampoline handling first. */
16c381f0 6635 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6636 && ecs->stop_func_name == NULL
2afb61aa 6637 && stop_pc_sal.line == 0)
1b2bfbb9 6638 {
527159b7 6639 if (debug_infrun)
3e43a32a
MS
6640 fprintf_unfiltered (gdb_stdlog,
6641 "infrun: stepped into undebuggable function\n");
527159b7 6642
1b2bfbb9 6643 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
6644 undebuggable function (where there is no debugging information
6645 and no line number corresponding to the address where the
1b2bfbb9
RC
6646 inferior stopped). Since we want to skip this kind of code,
6647 we keep going until the inferior returns from this
14e60db5
DJ
6648 function - unless the user has asked us not to (via
6649 set step-mode) or we no longer know how to get back
6650 to the call site. */
6651 if (step_stop_if_no_debug
c7ce8faa 6652 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6653 {
6654 /* If we have no line number and the step-stop-if-no-debug
6655 is set, we stop the step so that the user has a chance to
6656 switch in assembly mode. */
bdc36728 6657 end_stepping_range (ecs);
1b2bfbb9
RC
6658 return;
6659 }
6660 else
6661 {
6662 /* Set a breakpoint at callee's return address (the address
6663 at which the caller will resume). */
568d6575 6664 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6665 keep_going (ecs);
6666 return;
6667 }
6668 }
6669
16c381f0 6670 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6671 {
6672 /* It is stepi or nexti. We always want to stop stepping after
6673 one instruction. */
527159b7 6674 if (debug_infrun)
8a9de0e4 6675 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
bdc36728 6676 end_stepping_range (ecs);
1b2bfbb9
RC
6677 return;
6678 }
6679
2afb61aa 6680 if (stop_pc_sal.line == 0)
488f131b
JB
6681 {
6682 /* We have no line number information. That means to stop
6683 stepping (does this always happen right after one instruction,
6684 when we do "s" in a function with no line numbers,
6685 or can this happen as a result of a return or longjmp?). */
527159b7 6686 if (debug_infrun)
8a9de0e4 6687 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
bdc36728 6688 end_stepping_range (ecs);
488f131b
JB
6689 return;
6690 }
c906108c 6691
edb3359d
DJ
6692 /* Look for "calls" to inlined functions, part one. If the inline
6693 frame machinery detected some skipped call sites, we have entered
6694 a new inline function. */
6695
6696 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6697 ecs->event_thread->control.step_frame_id)
00431a78 6698 && inline_skipped_frames (ecs->event_thread))
edb3359d 6699 {
edb3359d
DJ
6700 if (debug_infrun)
6701 fprintf_unfiltered (gdb_stdlog,
6702 "infrun: stepped into inlined function\n");
6703
51abb421 6704 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 6705
16c381f0 6706 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
6707 {
6708 /* For "step", we're going to stop. But if the call site
6709 for this inlined function is on the same source line as
6710 we were previously stepping, go down into the function
6711 first. Otherwise stop at the call site. */
6712
6713 if (call_sal.line == ecs->event_thread->current_line
6714 && call_sal.symtab == ecs->event_thread->current_symtab)
00431a78 6715 step_into_inline_frame (ecs->event_thread);
edb3359d 6716
bdc36728 6717 end_stepping_range (ecs);
edb3359d
DJ
6718 return;
6719 }
6720 else
6721 {
6722 /* For "next", we should stop at the call site if it is on a
6723 different source line. Otherwise continue through the
6724 inlined function. */
6725 if (call_sal.line == ecs->event_thread->current_line
6726 && call_sal.symtab == ecs->event_thread->current_symtab)
6727 keep_going (ecs);
6728 else
bdc36728 6729 end_stepping_range (ecs);
edb3359d
DJ
6730 return;
6731 }
6732 }
6733
6734 /* Look for "calls" to inlined functions, part two. If we are still
6735 in the same real function we were stepping through, but we have
6736 to go further up to find the exact frame ID, we are stepping
6737 through a more inlined call beyond its call site. */
6738
6739 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
6740 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6741 ecs->event_thread->control.step_frame_id)
edb3359d 6742 && stepped_in_from (get_current_frame (),
16c381f0 6743 ecs->event_thread->control.step_frame_id))
edb3359d
DJ
6744 {
6745 if (debug_infrun)
6746 fprintf_unfiltered (gdb_stdlog,
6747 "infrun: stepping through inlined function\n");
6748
16c381f0 6749 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
edb3359d
DJ
6750 keep_going (ecs);
6751 else
bdc36728 6752 end_stepping_range (ecs);
edb3359d
DJ
6753 return;
6754 }
6755
f2ffa92b 6756 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
6757 && (ecs->event_thread->current_line != stop_pc_sal.line
6758 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b
JB
6759 {
6760 /* We are at the start of a different line. So stop. Note that
6761 we don't stop if we step into the middle of a different line.
6762 That is said to make things like for (;;) statements work
6763 better. */
527159b7 6764 if (debug_infrun)
3e43a32a
MS
6765 fprintf_unfiltered (gdb_stdlog,
6766 "infrun: stepped to a different line\n");
bdc36728 6767 end_stepping_range (ecs);
488f131b
JB
6768 return;
6769 }
c906108c 6770
488f131b 6771 /* We aren't done stepping.
c906108c 6772
488f131b
JB
6773 Optimize by setting the stepping range to the line.
6774 (We might not be in the original line, but if we entered a
6775 new line in mid-statement, we continue stepping. This makes
6776 things like for(;;) statements work better.) */
c906108c 6777
16c381f0
JK
6778 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
6779 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 6780 ecs->event_thread->control.may_range_step = 1;
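  /* set_step_info presumably records FRAME and the new line as the
     thread's current_line / current_symtab / step_frame_id, which the
     checks above compare against at the next stop.  */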
edb3359d 6781 set_step_info (frame, stop_pc_sal);
488f131b 6782
527159b7 6783 if (debug_infrun)
8a9de0e4 6784 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
488f131b 6785 keep_going (ecs);
104c1213
JM
6786}
6787
c447ac0b
PA
6788/* In all-stop mode, if we're currently stepping but have stopped in
6789 some other thread, we may need to switch back to the stepped
 6790 thread. Returns true if we set the inferior running, false if we left
6791 it stopped (and the event needs further processing). */
6792
6793static int
6794switch_back_to_stepped_thread (struct execution_control_state *ecs)
6795{
fbea99ea 6796 if (!target_is_non_stop_p ())
c447ac0b 6797 {
99619bea
PA
6798 struct thread_info *stepping_thread;
6799
6800 /* If any thread is blocked on some internal breakpoint, and we
6801 simply need to step over that breakpoint to get it going
6802 again, do that first. */
6803
6804 /* However, if we see an event for the stepping thread, then we
6805 know all other threads have been moved past their breakpoints
6806 already. Let the caller check whether the step is finished,
6807 etc., before deciding to move it past a breakpoint. */
6808 if (ecs->event_thread->control.step_range_end != 0)
6809 return 0;
6810
6811 /* Check if the current thread is blocked on an incomplete
6812 step-over, interrupted by a random signal. */
6813 if (ecs->event_thread->control.trap_expected
6814 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 6815 {
99619bea
PA
6816 if (debug_infrun)
6817 {
6818 fprintf_unfiltered (gdb_stdlog,
6819 "infrun: need to finish step-over of [%s]\n",
a068643d 6820 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea
PA
6821 }
6822 keep_going (ecs);
6823 return 1;
6824 }
2adfaa28 6825
99619bea
PA
6826 /* Check if the current thread is blocked by a single-step
6827 breakpoint of another thread. */
6828 if (ecs->hit_singlestep_breakpoint)
6829 {
6830 if (debug_infrun)
6831 {
6832 fprintf_unfiltered (gdb_stdlog,
6833 "infrun: need to step [%s] over single-step "
6834 "breakpoint\n",
a068643d 6835 target_pid_to_str (ecs->ptid).c_str ());
99619bea
PA
6836 }
6837 keep_going (ecs);
6838 return 1;
6839 }
6840
4d9d9d04
PA
6841 /* If this thread needs yet another step-over (e.g., stepping
6842 through a delay slot), do it first before moving on to
6843 another thread. */
6844 if (thread_still_needs_step_over (ecs->event_thread))
6845 {
6846 if (debug_infrun)
6847 {
6848 fprintf_unfiltered (gdb_stdlog,
6849 "infrun: thread [%s] still needs step-over\n",
a068643d 6850 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04
PA
6851 }
6852 keep_going (ecs);
6853 return 1;
6854 }
70509625 6855
483805cf
PA
6856 /* If scheduler locking applies even if not stepping, there's no
6857 need to walk over threads. Above we've checked whether the
6858 current thread is stepping. If some other thread not the
6859 event thread is stepping, then it must be that scheduler
6860 locking is not in effect. */
856e7dd6 6861 if (schedlock_applies (ecs->event_thread))
483805cf
PA
6862 return 0;
6863
4d9d9d04
PA
6864 /* Otherwise, we no longer expect a trap in the current thread.
6865 Clear the trap_expected flag before switching back -- this is
6866 what keep_going does as well, if we call it. */
6867 ecs->event_thread->control.trap_expected = 0;
6868
6869 /* Likewise, clear the signal if it should not be passed. */
6870 if (!signal_program[ecs->event_thread->suspend.stop_signal])
6871 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6872
6873 /* Do all pending step-overs before actually proceeding with
483805cf 6874 step/next/etc. */
4d9d9d04
PA
6875 if (start_step_over ())
6876 {
6877 prepare_to_wait (ecs);
6878 return 1;
6879 }
6880
6881 /* Look for the stepping/nexting thread. */
483805cf 6882 stepping_thread = NULL;
4d9d9d04 6883
08036331 6884 for (thread_info *tp : all_non_exited_threads ())
483805cf 6885 {
fbea99ea
PA
6886 /* Ignore threads of processes the caller is not
6887 resuming. */
483805cf 6888 if (!sched_multi
e99b03dc 6889 && tp->ptid.pid () != ecs->ptid.pid ())
483805cf
PA
6890 continue;
6891
6892 /* When stepping over a breakpoint, we lock all threads
6893 except the one that needs to move past the breakpoint.
6894 If a non-event thread has this set, the "incomplete
6895 step-over" check above should have caught it earlier. */
372316f1
PA
6896 if (tp->control.trap_expected)
6897 {
6898 internal_error (__FILE__, __LINE__,
6899 "[%s] has inconsistent state: "
6900 "trap_expected=%d\n",
a068643d 6901 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
6902 tp->control.trap_expected);
6903 }
483805cf
PA
6904
6905 /* Did we find the stepping thread? */
6906 if (tp->control.step_range_end)
6907 {
 6909 /* Yep. There should be only one, though. */
6909 gdb_assert (stepping_thread == NULL);
6910
6911 /* The event thread is handled at the top, before we
6912 enter this loop. */
6913 gdb_assert (tp != ecs->event_thread);
6914
6915 /* If some thread other than the event thread is
6916 stepping, then scheduler locking can't be in effect,
6917 otherwise we wouldn't have resumed the current event
6918 thread in the first place. */
856e7dd6 6919 gdb_assert (!schedlock_applies (tp));
483805cf
PA
6920
6921 stepping_thread = tp;
6922 }
99619bea
PA
6923 }
6924
483805cf 6925 if (stepping_thread != NULL)
99619bea 6926 {
c447ac0b
PA
6927 if (debug_infrun)
6928 fprintf_unfiltered (gdb_stdlog,
6929 "infrun: switching back to stepped thread\n");
6930
2ac7589c
PA
6931 if (keep_going_stepped_thread (stepping_thread))
6932 {
6933 prepare_to_wait (ecs);
6934 return 1;
6935 }
6936 }
6937 }
2adfaa28 6938
2ac7589c
PA
6939 return 0;
6940}
2adfaa28 6941
2ac7589c
PA
6942/* Set a previously stepped thread back to stepping. Returns true on
6943 success, false if the resume is not possible (e.g., the thread
6944 vanished). */
6945
6946static int
6947keep_going_stepped_thread (struct thread_info *tp)
6948{
6949 struct frame_info *frame;
2ac7589c
PA
6950 struct execution_control_state ecss;
6951 struct execution_control_state *ecs = &ecss;
2adfaa28 6952
2ac7589c
PA
6953 /* If the stepping thread exited, then don't try to switch back and
6954 resume it, which could fail in several different ways depending
6955 on the target. Instead, just keep going.
2adfaa28 6956
2ac7589c
PA
6957 We can find a stepping dead thread in the thread list in two
6958 cases:
2adfaa28 6959
2ac7589c
PA
6960 - The target supports thread exit events, and when the target
6961 tries to delete the thread from the thread list, inferior_ptid
 6963 pointed at the exiting thread. In such a case, calling
6963 delete_thread does not really remove the thread from the list;
6964 instead, the thread is left listed, with 'exited' state.
64ce06e4 6965
2ac7589c
PA
6966 - The target's debug interface does not support thread exit
6967 events, and so we have no idea whatsoever if the previously
6968 stepping thread is still alive. For that reason, we need to
6969 synchronously query the target now. */
2adfaa28 6970
00431a78 6971 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c
PA
6972 {
6973 if (debug_infrun)
6974 fprintf_unfiltered (gdb_stdlog,
6975 "infrun: not resuming previously "
6976 "stepped thread, it has vanished\n");
6977
00431a78 6978 delete_thread (tp);
2ac7589c 6979 return 0;
c447ac0b 6980 }
2ac7589c
PA
6981
6982 if (debug_infrun)
6983 fprintf_unfiltered (gdb_stdlog,
6984 "infrun: resuming previously stepped thread\n");
6985
6986 reset_ecs (ecs, tp);
00431a78 6987 switch_to_thread (tp);
2ac7589c 6988
f2ffa92b 6989 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 6990 frame = get_current_frame ();
2ac7589c
PA
6991
6992 /* If the PC of the thread we were trying to single-step has
6993 changed, then that thread has trapped or been signaled, but the
6994 event has not been reported to GDB yet. Re-poll the target
6995 looking for this particular thread's event (i.e. temporarily
6996 enable schedlock) by:
6997
6998 - setting a break at the current PC
6999 - resuming that particular thread, only (by setting trap
7000 expected)
7001
7002 This prevents us continuously moving the single-step breakpoint
7003 forward, one instruction at a time, overstepping. */
7004
f2ffa92b 7005 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7006 {
7007 ptid_t resume_ptid;
7008
7009 if (debug_infrun)
7010 fprintf_unfiltered (gdb_stdlog,
7011 "infrun: expected thread advanced also (%s -> %s)\n",
7012 paddress (target_gdbarch (), tp->prev_pc),
f2ffa92b 7013 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7014
7015 /* Clear the info of the previous step-over, as it's no longer
7016 valid (if the thread was trying to step over a breakpoint, it
7017 has already succeeded). It's what keep_going would do too,
7018 if we called it. Do this before trying to insert the sss
7019 breakpoint, otherwise if we were previously trying to step
7020 over this exact address in another thread, the breakpoint is
7021 skipped. */
7022 clear_step_over_info ();
7023 tp->control.trap_expected = 0;
7024
7025 insert_single_step_breakpoint (get_frame_arch (frame),
7026 get_frame_address_space (frame),
f2ffa92b 7027 tp->suspend.stop_pc);
2ac7589c 7028
372316f1 7029 tp->resumed = 1;
fbea99ea 7030 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7031 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7032 }
7033 else
7034 {
7035 if (debug_infrun)
7036 fprintf_unfiltered (gdb_stdlog,
7037 "infrun: expected thread still hasn't advanced\n");
7038
7039 keep_going_pass_signal (ecs);
7040 }
7041 return 1;
c447ac0b
PA
7042}
7043
8b061563
PA
7044/* Is thread TP in the middle of (software or hardware)
7045 single-stepping? (Note the result of this function must never be
7046 passed directly as target_resume's STEP parameter.) */
104c1213 7047
a289b8f6 7048static int
b3444185 7049currently_stepping (struct thread_info *tp)
a7212384 7050{
8358c15c
JK
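  /* The thread counts as stepping if it has an active step range with
     no step-resume breakpoint standing in for it, if it is in the
     middle of a step-over (trap_expected), if it just single-stepped
     off a breakpoint, or if bpstat forces constant single-stepping
     (software watchpoints, presumably).  */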
7051 return ((tp->control.step_range_end
7052 && tp->control.step_resume_breakpoint == NULL)
7053 || tp->control.trap_expected
af48d08f 7054 || tp->stepped_breakpoint
8358c15c 7055 || bpstat_should_step ());
a7212384
UW
7056}
7057
b2175913
MS
7058/* Inferior has stepped into a subroutine call with source code that
 7059 we should not step over. Step to the first line of code in
7060 it. */
c2c6d25f
JM
7061
7062static void
568d6575
UW
7063handle_step_into_function (struct gdbarch *gdbarch,
7064 struct execution_control_state *ecs)
c2c6d25f 7065{
7e324e48
GB
7066 fill_in_stop_func (gdbarch, ecs);
7067
f2ffa92b
PA
7068 compunit_symtab *cust
7069 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7070 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7071 ecs->stop_func_start
7072 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7073
51abb421 7074 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7075 /* Use the step_resume_break to step until the end of the prologue,
7076 even if that involves jumps (as it seems to on the vax under
7077 4.2). */
7078 /* If the prologue ends in the middle of a source line, continue to
7079 the end of that source line (if it is still within the function).
7080 Otherwise, just go to end of prologue. */
2afb61aa
PA
7081 if (stop_func_sal.end
7082 && stop_func_sal.pc != ecs->stop_func_start
7083 && stop_func_sal.end < ecs->stop_func_end)
7084 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7085
2dbd5e30
KB
7086 /* Architectures which require breakpoint adjustment might not be able
7087 to place a breakpoint at the computed address. If so, the test
7088 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7089 ecs->stop_func_start to an address at which a breakpoint may be
7090 legitimately placed.
8fb3e588 7091
2dbd5e30
KB
7092 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7093 made, GDB will enter an infinite loop when stepping through
7094 optimized code consisting of VLIW instructions which contain
7095 subinstructions corresponding to different source lines. On
7096 FR-V, it's not permitted to place a breakpoint on any but the
7097 first subinstruction of a VLIW instruction. When a breakpoint is
7098 set, GDB will adjust the breakpoint address to the beginning of
7099 the VLIW instruction. Thus, we need to make the corresponding
7100 adjustment here when computing the stop address. */
8fb3e588 7101
568d6575 7102 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7103 {
7104 ecs->stop_func_start
568d6575 7105 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7106 ecs->stop_func_start);
2dbd5e30
KB
7107 }
7108
f2ffa92b 7109 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7110 {
7111 /* We are already there: stop now. */
bdc36728 7112 end_stepping_range (ecs);
c2c6d25f
JM
7113 return;
7114 }
7115 else
7116 {
7117 /* Put the step-breakpoint there and go until there. */
51abb421 7118 symtab_and_line sr_sal;
c2c6d25f
JM
7119 sr_sal.pc = ecs->stop_func_start;
7120 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7121 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7122
c2c6d25f 7123 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7124 some machines the prologue is where the new fp value is
7125 established. */
a6d9a66e 7126 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7127
7128 /* And make sure stepping stops right away then. */
16c381f0
JK
7129 ecs->event_thread->control.step_range_end
7130 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7131 }
7132 keep_going (ecs);
7133}
d4f3574e 7134
b2175913
MS
7135/* Inferior has stepped backward into a subroutine call with source
 7136 code that we should not step over. Step to the beginning of the
7137 last line of code in it. */
7138
7139static void
568d6575
UW
7140handle_step_into_function_backward (struct gdbarch *gdbarch,
7141 struct execution_control_state *ecs)
b2175913 7142{
43f3e411 7143 struct compunit_symtab *cust;
167e4384 7144 struct symtab_and_line stop_func_sal;
b2175913 7145
7e324e48
GB
7146 fill_in_stop_func (gdbarch, ecs);
7147
f2ffa92b 7148 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7149 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7150 ecs->stop_func_start
7151 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7152
f2ffa92b 7153 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7154
7155 /* OK, we're just going to keep stepping here. */
f2ffa92b 7156 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7157 {
7158 /* We're there already. Just stop stepping now. */
bdc36728 7159 end_stepping_range (ecs);
b2175913
MS
7160 }
7161 else
7162 {
7163 /* Else just reset the step range and keep going.
7164 No step-resume breakpoint, they don't work for
7165 epilogues, which can have multiple entry paths. */
16c381f0
JK
7166 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7167 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7168 keep_going (ecs);
7169 }
7170 return;
7171}
7172
d3169d93 7173/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
 7174 This is used both to skip functions and to skip over code. */
7175
7176static void
2c03e5be
PA
7177insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7178 struct symtab_and_line sr_sal,
7179 struct frame_id sr_id,
7180 enum bptype sr_type)
44cbf7b5 7181{
611c83ae
PA
7182 /* There should never be more than one step-resume or longjmp-resume
7183 breakpoint per thread, so we should never be setting a new
44cbf7b5 7184 step_resume_breakpoint when one is already active. */
8358c15c 7185 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7186 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93
DJ
7187
7188 if (debug_infrun)
7189 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7190 "infrun: inserting step-resume breakpoint at %s\n",
7191 paddress (gdbarch, sr_sal.pc));
d3169d93 7192
8358c15c 7193 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7194 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7195}
7196
9da8c2a0 7197void
2c03e5be
PA
7198insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7199 struct symtab_and_line sr_sal,
7200 struct frame_id sr_id)
7201{
7202 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7203 sr_sal, sr_id,
7204 bp_step_resume);
44cbf7b5 7205}
7ce450bd 7206
2c03e5be
PA
7207/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7208 This is used to skip a potential signal handler.
7ce450bd 7209
14e60db5
DJ
7210 This is called with the interrupted function's frame. The signal
7211 handler, when it returns, will resume the interrupted function at
7212 RETURN_FRAME.pc. */
d303a6c7
AC
7213
7214static void
2c03e5be 7215insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7216{
f4c1edd8 7217 gdb_assert (return_frame != NULL);
d303a6c7 7218
51abb421
PA
7219 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7220
7221 symtab_and_line sr_sal;
568d6575 7222 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7223 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7224 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7225
2c03e5be
PA
7226 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7227 get_stack_frame_id (return_frame),
7228 bp_hp_step_resume);
d303a6c7
AC
7229}
7230
2c03e5be
PA
7231/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7232 is used to skip a function after stepping into it (for "next" or if
7233 the called function has no debugging information).
14e60db5
DJ
7234
7235 The current function has almost always been reached by single
7236 stepping a call or return instruction. NEXT_FRAME belongs to the
7237 current function, and the breakpoint will be set at the caller's
7238 resume address.
7239
7240 This is a separate function rather than reusing
2c03e5be 7241 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7242 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7243 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7244
7245static void
7246insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7247{
14e60db5
DJ
7248 /* We shouldn't have gotten here if we don't know where the call site
7249 is. */
c7ce8faa 7250 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7251
51abb421 7252 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7253
51abb421 7254 symtab_and_line sr_sal;
c7ce8faa
DJ
7255 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7256 frame_unwind_caller_pc (next_frame));
14e60db5 7257 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7258 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7259
a6d9a66e 7260 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7261 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7262}
7263
611c83ae
PA
7264/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7265 new breakpoint at the target of a jmp_buf. The handling of
7266 longjmp-resume uses the same mechanisms used for handling
7267 "step-resume" breakpoints. */
7268
7269static void
a6d9a66e 7270insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7271{
e81a37f7
TT
7272 /* There should never be more than one longjmp-resume breakpoint per
7273 thread, so we should never be setting a new
611c83ae 7274 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7275 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae
PA
7276
7277 if (debug_infrun)
7278 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7279 "infrun: inserting longjmp-resume breakpoint at %s\n",
7280 paddress (gdbarch, pc));
611c83ae 7281
e81a37f7 7282 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7283 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7284}
7285
186c406b
TT
7286/* Insert an exception resume breakpoint. TP is the thread throwing
7287 the exception. The block B is the block of the unwinder debug hook
7288 function. FRAME is the frame corresponding to the call to this
7289 function. SYM is the symbol of the function argument holding the
7290 target PC of the exception. */
7291
7292static void
7293insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7294 const struct block *b,
186c406b
TT
7295 struct frame_info *frame,
7296 struct symbol *sym)
7297{
a70b8144 7298 try
186c406b 7299 {
63e43d3a 7300 struct block_symbol vsym;
186c406b
TT
7301 struct value *value;
7302 CORE_ADDR handler;
7303 struct breakpoint *bp;
7304
987012b8 7305 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7306 b, VAR_DOMAIN);
63e43d3a 7307 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7308 /* If the value was optimized out, revert to the old behavior. */
7309 if (! value_optimized_out (value))
7310 {
7311 handler = value_as_address (value);
7312
7313 if (debug_infrun)
7314 fprintf_unfiltered (gdb_stdlog,
7315 "infrun: exception resume at %lx\n",
7316 (unsigned long) handler);
7317
7318 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7319 handler,
7320 bp_exception_resume).release ();
c70a6932
JK
7321
7322 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7323 frame = NULL;
7324
5d5658a1 7325 bp->thread = tp->global_num;
186c406b
TT
7326 inferior_thread ()->control.exception_resume_breakpoint = bp;
7327 }
7328 }
230d2906 7329 catch (const gdb_exception_error &e)
492d29ea
PA
7330 {
7331 /* We want to ignore errors here. */
7332 }
186c406b
TT
7333}
7334
28106bc2
SDJ
7335/* A helper for check_exception_resume that sets an
7336 exception-breakpoint based on a SystemTap probe. */
7337
7338static void
7339insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7340 const struct bound_probe *probe,
28106bc2
SDJ
7341 struct frame_info *frame)
7342{
7343 struct value *arg_value;
7344 CORE_ADDR handler;
7345 struct breakpoint *bp;
7346
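  /* Per the comment in check_exception_resume, the probe's arguments
     are the CFA and the HANDLER; argument 1 here is the handler.  */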
7347 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7348 if (!arg_value)
7349 return;
7350
7351 handler = value_as_address (arg_value);
7352
7353 if (debug_infrun)
7354 fprintf_unfiltered (gdb_stdlog,
7355 "infrun: exception resume at %s\n",
6bac7473 7356 paddress (get_objfile_arch (probe->objfile),
28106bc2
SDJ
7357 handler));
7358
7359 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7360 handler, bp_exception_resume).release ();
5d5658a1 7361 bp->thread = tp->global_num;
28106bc2
SDJ
7362 inferior_thread ()->control.exception_resume_breakpoint = bp;
7363}
7364
186c406b
TT
7365/* This is called when an exception has been intercepted. Check to
7366 see whether the exception's destination is of interest, and if so,
7367 set an exception resume breakpoint there. */
7368
7369static void
7370check_exception_resume (struct execution_control_state *ecs,
28106bc2 7371 struct frame_info *frame)
186c406b 7372{
729662a5 7373 struct bound_probe probe;
28106bc2
SDJ
7374 struct symbol *func;
7375
7376 /* First see if this exception unwinding breakpoint was set via a
7377 SystemTap probe point. If so, the probe has two arguments: the
7378 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7379 set a breakpoint there. */
6bac7473 7380 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7381 if (probe.prob)
28106bc2 7382 {
729662a5 7383 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7384 return;
7385 }
7386
7387 func = get_frame_function (frame);
7388 if (!func)
7389 return;
186c406b 7390
a70b8144 7391 try
186c406b 7392 {
3977b71f 7393 const struct block *b;
8157b174 7394 struct block_iterator iter;
186c406b
TT
7395 struct symbol *sym;
7396 int argno = 0;
7397
7398 /* The exception breakpoint is a thread-specific breakpoint on
7399 the unwinder's debug hook, declared as:
7400
7401 void _Unwind_DebugHook (void *cfa, void *handler);
7402
7403 The CFA argument indicates the frame to which control is
7404 about to be transferred. HANDLER is the destination PC.
7405
7406 We ignore the CFA and set a temporary breakpoint at HANDLER.
7407 This is not extremely efficient but it avoids issues in gdb
7408 with computing the DWARF CFA, and it also works even in weird
7409 cases such as throwing an exception from inside a signal
7410 handler. */
7411
7412 b = SYMBOL_BLOCK_VALUE (func);
7413 ALL_BLOCK_SYMBOLS (b, iter, sym)
7414 {
7415 if (!SYMBOL_IS_ARGUMENT (sym))
7416 continue;
7417
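	  /* The first argument (argno 0) is the CFA, which we skip;
	     the second is the HANDLER we want.  */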
7418 if (argno == 0)
7419 ++argno;
7420 else
7421 {
7422 insert_exception_resume_breakpoint (ecs->event_thread,
7423 b, frame, sym);
7424 break;
7425 }
7426 }
7427 }
230d2906 7428 catch (const gdb_exception_error &e)
492d29ea
PA
7429 {
7430 }
186c406b
TT
7431}
7432
104c1213 7433static void
22bcd14b 7434stop_waiting (struct execution_control_state *ecs)
104c1213 7435{
527159b7 7436 if (debug_infrun)
22bcd14b 7437 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
527159b7 7438
cd0fc7c3
SS
7439 /* Let callers know we don't want to wait for the inferior anymore. */
7440 ecs->wait_some_more = 0;
fbea99ea
PA
7441
7442 /* If all-stop, but the target is always in non-stop mode, stop all
7443 threads now that we're presenting the stop to the user. */
7444 if (!non_stop && target_is_non_stop_p ())
7445 stop_all_threads ();
cd0fc7c3
SS
7446}
7447
4d9d9d04
PA
7448/* Like keep_going, but passes the signal to the inferior, even if the
7449 signal is set to nopass. */
d4f3574e
SS
7450
7451static void
4d9d9d04 7452keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7453{
d7e15655 7454 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7455 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7456
d4f3574e 7457 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7458 ecs->event_thread->prev_pc
00431a78 7459 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
d4f3574e 7460
4d9d9d04 7461 if (ecs->event_thread->control.trap_expected)
d4f3574e 7462 {
4d9d9d04
PA
7463 struct thread_info *tp = ecs->event_thread;
7464
7465 if (debug_infrun)
7466 fprintf_unfiltered (gdb_stdlog,
7467 "infrun: %s has trap_expected set, "
7468 "resuming to collect trap\n",
a068643d 7469 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7470
a9ba6bae
PA
7471 /* We haven't yet gotten our trap, and either: intercepted a
7472 non-signal event (e.g., a fork); or took a signal which we
7473 are supposed to pass through to the inferior. Simply
7474 continue. */
64ce06e4 7475 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7476 }
372316f1
PA
7477 else if (step_over_info_valid_p ())
7478 {
7479 /* Another thread is stepping over a breakpoint in-line. If
7480 this thread needs a step-over too, queue the request. In
7481 either case, this resume must be deferred for later. */
7482 struct thread_info *tp = ecs->event_thread;
7483
7484 if (ecs->hit_singlestep_breakpoint
7485 || thread_still_needs_step_over (tp))
7486 {
7487 if (debug_infrun)
7488 fprintf_unfiltered (gdb_stdlog,
7489 "infrun: step-over already in progress: "
7490 "step-over for %s deferred\n",
a068643d 7491 target_pid_to_str (tp->ptid).c_str ());
66716e78 7492 global_thread_step_over_chain_enqueue (tp);
372316f1
PA
7493 }
7494 else
7495 {
7496 if (debug_infrun)
7497 fprintf_unfiltered (gdb_stdlog,
7498 "infrun: step-over in progress: "
7499 "resume of %s deferred\n",
a068643d 7500 target_pid_to_str (tp->ptid).c_str ());
372316f1 7501 }
372316f1 7502 }
d4f3574e
SS
7503 else
7504 {
31e77af2 7505 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7506 int remove_bp;
7507 int remove_wps;
8d297bbf 7508 step_over_what step_what;
31e77af2 7509
d4f3574e 7510 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7511 anyway (if we got a signal, the user asked it be passed to
7512 the child)
7513 -- or --
7514 We got our expected trap, but decided we should resume from
7515 it.
d4f3574e 7516
a9ba6bae 7517 We're going to run this baby now!
d4f3574e 7518
c36b740a
VP
7519 Note that insert_breakpoints won't try to re-insert
7520 already inserted breakpoints. Therefore, we don't
7521 care if breakpoints were already inserted, or not. */
a9ba6bae 7522
31e77af2
PA
7523 /* If we need to step over a breakpoint, and we're not using
7524 displaced stepping to do so, insert all breakpoints
7525 (watchpoints, etc.) but the one we're stepping over, step one
7526 instruction, and then re-insert the breakpoint when that step
7527 is finished. */
963f9c80 7528
6c4cfb24
PA
7529 step_what = thread_still_needs_step_over (ecs->event_thread);
7530
963f9c80 7531 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7532 || (step_what & STEP_OVER_BREAKPOINT));
7533 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7534
cb71640d
PA
7535 /* We can't use displaced stepping if we need to step past a
7536 watchpoint. The instruction copied to the scratch pad would
7537 still trigger the watchpoint. */
7538 if (remove_bp
3fc8eb30 7539 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7540 {
a01bda52 7541 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7542 regcache_read_pc (regcache), remove_wps,
7543 ecs->event_thread->global_num);
45e8c884 7544 }
963f9c80 7545 else if (remove_wps)
21edc42f 7546 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7547
7548 /* If we now need to do an in-line step-over, we need to stop
7549 all other threads. Note this must be done before
7550 insert_breakpoints below, because that removes the breakpoint
7551 we're about to step over, otherwise other threads could miss
7552 it. */
fbea99ea 7553 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7554 stop_all_threads ();
abbb1732 7555
31e77af2 7556 /* Stop stepping if inserting breakpoints fails. */
a70b8144 7557 try
31e77af2
PA
7558 {
7559 insert_breakpoints ();
7560 }
230d2906 7561 catch (const gdb_exception_error &e)
31e77af2
PA
7562 {
7563 exception_print (gdb_stderr, e);
22bcd14b 7564 stop_waiting (ecs);
bdf2a94a 7565 clear_step_over_info ();
31e77af2 7566 return;
d4f3574e
SS
7567 }
7568
963f9c80 7569 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7570
64ce06e4 7571 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7572 }
7573
488f131b 7574 prepare_to_wait (ecs);
d4f3574e
SS
7575}
7576
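/* In short, the possible paths through keep_going_pass_signal above:
   with trap_expected already set, just resume and collect the pending
   trap; while another thread runs an in-line step-over, defer this
   resume (queueing this thread if it needs a step-over of its own);
   otherwise decide whether a breakpoint or watchpoint must be stepped
   over, record the step-over info (in-line when displaced stepping
   cannot be used, e.g. for watchpoints), insert breakpoints and
   resume.  */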
4d9d9d04
PA
7577/* Called when we should continue running the inferior, because the
7578 current event doesn't cause a user visible stop. This does the
7579 resuming part; waiting for the next event is done elsewhere. */
7580
7581static void
7582keep_going (struct execution_control_state *ecs)
7583{
7584 if (ecs->event_thread->control.trap_expected
7585 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7586 ecs->event_thread->control.trap_expected = 0;
7587
7588 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7589 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7590 keep_going_pass_signal (ecs);
7591}
7592
104c1213
JM
7593/* This function normally comes after a resume, before
7594 handle_inferior_event exits. It takes care of any last bits of
7595 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7596
104c1213
JM
7597static void
7598prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7599{
527159b7 7600 if (debug_infrun)
8a9de0e4 7601 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
104c1213 7602
104c1213 7603 ecs->wait_some_more = 1;
0b333c5e
PA
7604
7605 if (!target_is_async_p ())
7606 mark_infrun_async_event_handler ();
c906108c 7607}
11cf8741 7608
fd664c91 7609/* We are done with the step range of a step/next/si/ni command.
b57bacec 7610 Called once for each n of a "step n" operation. */
fd664c91
PA
7611
7612static void
bdc36728 7613end_stepping_range (struct execution_control_state *ecs)
fd664c91 7614{
bdc36728 7615 ecs->event_thread->control.stop_step = 1;
bdc36728 7616 stop_waiting (ecs);
fd664c91
PA
7617}
7618
33d62d64
JK
7619/* Several print_*_reason functions to print why the inferior has stopped.
7620 We always print something when the inferior exits, or receives a signal.
7621 The rest of the cases are dealt with later on in normal_stop and
7622 print_it_typical. Ideally there should be a call to one of these
7623 print_*_reason functions from handle_inferior_event each time
22bcd14b 7624 stop_waiting is called.
33d62d64 7625
fd664c91
PA
7626 Note that we don't call these directly, instead we delegate that to
7627 the interpreters, through observers. Interpreters then call these
7628 with whatever uiout is right. */
33d62d64 7629
fd664c91
PA
7630void
7631print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7632{
fd664c91 7633 /* For CLI-like interpreters, print nothing. */
33d62d64 7634
112e8700 7635 if (uiout->is_mi_like_p ())
fd664c91 7636 {
112e8700 7637 uiout->field_string ("reason",
fd664c91
PA
7638 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7639 }
7640}
33d62d64 7641
fd664c91
PA
7642void
7643print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7644{
33d62d64 7645 annotate_signalled ();
112e8700
SM
7646 if (uiout->is_mi_like_p ())
7647 uiout->field_string
7648 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7649 uiout->text ("\nProgram terminated with signal ");
33d62d64 7650 annotate_signal_name ();
112e8700 7651 uiout->field_string ("signal-name",
2ea28649 7652 gdb_signal_to_name (siggnal));
33d62d64 7653 annotate_signal_name_end ();
112e8700 7654 uiout->text (", ");
33d62d64 7655 annotate_signal_string ();
112e8700 7656 uiout->field_string ("signal-meaning",
2ea28649 7657 gdb_signal_to_string (siggnal));
33d62d64 7658 annotate_signal_string_end ();
112e8700
SM
7659 uiout->text (".\n");
7660 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7661}
7662
fd664c91
PA
7663void
7664print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7665{
fda326dd 7666 struct inferior *inf = current_inferior ();
a068643d 7667 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 7668
33d62d64
JK
7669 annotate_exited (exitstatus);
7670 if (exitstatus)
7671 {
112e8700
SM
7672 if (uiout->is_mi_like_p ())
7673 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
7674 std::string exit_code_str
7675 = string_printf ("0%o", (unsigned int) exitstatus);
7676 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
7677 plongest (inf->num), pidstr.c_str (),
7678 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
7679 }
7680 else
11cf8741 7681 {
112e8700
SM
7682 if (uiout->is_mi_like_p ())
7683 uiout->field_string
7684 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
7685 uiout->message ("[Inferior %s (%s) exited normally]\n",
7686 plongest (inf->num), pidstr.c_str ());
33d62d64 7687 }
33d62d64
JK
7688}
7689
012b3a21
WT
7690/* Some targets/architectures can do extra processing/display of
7691 segmentation faults. E.g., Intel MPX boundary faults.
7692 Call the architecture dependent function to handle the fault. */
7693
7694static void
7695handle_segmentation_fault (struct ui_out *uiout)
7696{
7697 struct regcache *regcache = get_current_regcache ();
ac7936df 7698 struct gdbarch *gdbarch = regcache->arch ();
012b3a21
WT
7699
7700 if (gdbarch_handle_segmentation_fault_p (gdbarch))
7701 gdbarch_handle_segmentation_fault (gdbarch, uiout);
7702}
7703
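/* A sketch of the architecture side of the hook used above.  A tdep
   file can register a printer that runs when the inferior stops with
   SIGSEGV; the i386 MPX support does essentially this.  The function
   name and message below are made up for illustration:

     static void
     example_handle_segmentation_fault (struct gdbarch *gdbarch,
                                        struct ui_out *uiout)
     {
       // Inspect registers/memory here and describe the fault.
       uiout->text (_("\nExample: bound violation details go here.\n"));
     }

     ...
     set_gdbarch_handle_segmentation_fault (gdbarch,
                                            example_handle_segmentation_fault);
*/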
fd664c91
PA
7704void
7705print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 7706{
f303dbd6
PA
7707 struct thread_info *thr = inferior_thread ();
7708
33d62d64
JK
7709 annotate_signal ();
7710
112e8700 7711 if (uiout->is_mi_like_p ())
f303dbd6
PA
7712 ;
7713 else if (show_thread_that_caused_stop ())
33d62d64 7714 {
f303dbd6 7715 const char *name;
33d62d64 7716
112e8700 7717 uiout->text ("\nThread ");
33eca680 7718 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
7719
7720 name = thr->name != NULL ? thr->name : target_thread_name (thr);
7721 if (name != NULL)
7722 {
112e8700 7723 uiout->text (" \"");
33eca680 7724 uiout->field_string ("name", name);
112e8700 7725 uiout->text ("\"");
f303dbd6 7726 }
33d62d64 7727 }
f303dbd6 7728 else
112e8700 7729 uiout->text ("\nProgram");
f303dbd6 7730
112e8700
SM
7731 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
7732 uiout->text (" stopped");
33d62d64
JK
7733 else
7734 {
112e8700 7735 uiout->text (" received signal ");
8b93c638 7736 annotate_signal_name ();
112e8700
SM
7737 if (uiout->is_mi_like_p ())
7738 uiout->field_string
7739 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
7740 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 7741 annotate_signal_name_end ();
112e8700 7742 uiout->text (", ");
8b93c638 7743 annotate_signal_string ();
112e8700 7744 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
7745
7746 if (siggnal == GDB_SIGNAL_SEGV)
7747 handle_segmentation_fault (uiout);
7748
8b93c638 7749 annotate_signal_string_end ();
33d62d64 7750 }
112e8700 7751 uiout->text (".\n");
33d62d64 7752}
252fbfc8 7753
fd664c91
PA
7754void
7755print_no_history_reason (struct ui_out *uiout)
33d62d64 7756{
112e8700 7757 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 7758}
43ff13b4 7759
0c7e1a46
PA
7760/* Print current location without a level number, if we have changed
7761 functions or hit a breakpoint. Print source line if we have one.
7762 bpstat_print contains the logic deciding in detail what to print,
7763 based on the event(s) that just occurred. */
7764
243a9253
PA
7765static void
7766print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
7767{
7768 int bpstat_ret;
f486487f 7769 enum print_what source_flag;
0c7e1a46
PA
7770 int do_frame_printing = 1;
7771 struct thread_info *tp = inferior_thread ();
7772
7773 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
7774 switch (bpstat_ret)
7775 {
7776 case PRINT_UNKNOWN:
7777 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
7778 should) carry around the function and does (or should) use
7779 that when doing a frame comparison. */
7780 if (tp->control.stop_step
7781 && frame_id_eq (tp->control.step_frame_id,
7782 get_frame_id (get_current_frame ()))
f2ffa92b
PA
7783 && (tp->control.step_start_function
7784 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
7785 {
7786 /* Finished step, just print source line. */
7787 source_flag = SRC_LINE;
7788 }
7789 else
7790 {
7791 /* Print location and source line. */
7792 source_flag = SRC_AND_LOC;
7793 }
7794 break;
7795 case PRINT_SRC_AND_LOC:
7796 /* Print location and source line. */
7797 source_flag = SRC_AND_LOC;
7798 break;
7799 case PRINT_SRC_ONLY:
7800 source_flag = SRC_LINE;
7801 break;
7802 case PRINT_NOTHING:
7803 /* Something bogus. */
7804 source_flag = SRC_LINE;
7805 do_frame_printing = 0;
7806 break;
7807 default:
7808 internal_error (__FILE__, __LINE__, _("Unknown value."));
7809 }
7810
7811 /* The behavior of this routine with respect to the source
7812 flag is:
7813 SRC_LINE: Print only source line
7814 LOCATION: Print only location
7815 SRC_AND_LOC: Print location and source line. */
7816 if (do_frame_printing)
7817 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
7818}
7819
243a9253
PA
7820/* See infrun.h. */
7821
7822void
4c7d57e7 7823print_stop_event (struct ui_out *uiout, bool displays)
243a9253 7824{
243a9253
PA
7825 struct target_waitstatus last;
7826 ptid_t last_ptid;
7827 struct thread_info *tp;
7828
7829 get_last_target_status (&last_ptid, &last);
7830
67ad9399
TT
7831 {
7832 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 7833
67ad9399 7834 print_stop_location (&last);
243a9253 7835
67ad9399 7836 /* Display the auto-display expressions. */
4c7d57e7
TT
7837 if (displays)
7838 do_displays ();
67ad9399 7839 }
243a9253
PA
7840
7841 tp = inferior_thread ();
7842 if (tp->thread_fsm != NULL
46e3ed7f 7843 && tp->thread_fsm->finished_p ())
243a9253
PA
7844 {
7845 struct return_value_info *rv;
7846
46e3ed7f 7847 rv = tp->thread_fsm->return_value ();
243a9253
PA
7848 if (rv != NULL)
7849 print_return_value (uiout, rv);
7850 }
0c7e1a46
PA
7851}
7852
388a7084
PA
7853/* See infrun.h. */
7854
7855void
7856maybe_remove_breakpoints (void)
7857{
7858 if (!breakpoints_should_be_inserted_now () && target_has_execution)
7859 {
7860 if (remove_breakpoints ())
7861 {
223ffa71 7862 target_terminal::ours_for_output ();
388a7084
PA
7863 printf_filtered (_("Cannot remove breakpoints because "
7864 "program is no longer writable.\nFurther "
7865 "execution is probably impossible.\n"));
7866 }
7867 }
7868}
7869
4c2f2a79
PA
7870/* The execution context that just caused a normal stop. */
7871
7872struct stop_context
7873{
2d844eaf
TT
7874 stop_context ();
7875 ~stop_context ();
7876
7877 DISABLE_COPY_AND_ASSIGN (stop_context);
7878
7879 bool changed () const;
7880
4c2f2a79
PA
7881 /* The stop ID. */
7882 ULONGEST stop_id;
c906108c 7883
4c2f2a79 7884 /* The event PTID. */
c906108c 7885
4c2f2a79
PA
7886 ptid_t ptid;
7887
7888 /* If stopped for a thread event, this is the thread that caused the
7889 stop. */
7890 struct thread_info *thread;
7891
7892 /* The inferior that caused the stop. */
7893 int inf_num;
7894};
7895
2d844eaf 7896/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
7897 takes a strong reference to the thread. */
7898
2d844eaf 7899stop_context::stop_context ()
4c2f2a79 7900{
2d844eaf
TT
7901 stop_id = get_stop_id ();
7902 ptid = inferior_ptid;
7903 inf_num = current_inferior ()->num;
4c2f2a79 7904
d7e15655 7905 if (inferior_ptid != null_ptid)
4c2f2a79
PA
7906 {
7907 /* Take a strong reference so that the thread can't be deleted
7908 yet. */
2d844eaf
TT
7909 thread = inferior_thread ();
7910 thread->incref ();
4c2f2a79
PA
7911 }
7912 else
2d844eaf 7913 thread = NULL;
4c2f2a79
PA
7914}
7915
7916/* Destroy a stop context. Releases the strong reference to the
7917 thread, if one was taken. */
7918
2d844eaf 7919stop_context::~stop_context ()
4c2f2a79 7920{
2d844eaf
TT
7921 if (thread != NULL)
7922 thread->decref ();
4c2f2a79
PA
7923}
7924
7925/* Return true if the current context no longer matches the saved stop
7926 context. */
7927
2d844eaf
TT
7928bool
7929stop_context::changed () const
7930{
7931 if (ptid != inferior_ptid)
7932 return true;
7933 if (inf_num != current_inferior ()->num)
7934 return true;
7935 if (thread != NULL && thread->state != THREAD_STOPPED)
7936 return true;
7937 if (get_stop_id () != stop_id)
7938 return true;
7939 return false;
4c2f2a79
PA
7940}
7941
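/* Typical use, as in normal_stop below: build a stop_context before
   running the user's hook-stop, and skip notifying the observers if
   changed () reports that the hook resumed the target or switched to
   another thread or inferior in the meantime.  */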
7942/* See infrun.h. */
7943
7944int
96baa820 7945normal_stop (void)
c906108c 7946{
73b65bb0
DJ
7947 struct target_waitstatus last;
7948 ptid_t last_ptid;
7949
7950 get_last_target_status (&last_ptid, &last);
7951
4c2f2a79
PA
7952 new_stop_id ();
7953
29f49a6a
PA
7954 /* If an exception is thrown from this point on, make sure to
7955 propagate GDB's knowledge of the executing state to the
7956 frontend/user running state. A QUIT is an easy exception to see
7957 here, so do this before any filtered output. */
731f534f
PA
7958
7959 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
7960
c35b1492 7961 if (!non_stop)
731f534f 7962 maybe_finish_thread_state.emplace (minus_one_ptid);
e1316e60
PA
7963 else if (last.kind == TARGET_WAITKIND_SIGNALLED
7964 || last.kind == TARGET_WAITKIND_EXITED)
7965 {
7966 /* On some targets, we may still have live threads in the
7967 inferior when we get a process exit event. E.g., for
7968 "checkpoint", when the current checkpoint/fork exits,
7969 linux-fork.c automatically switches to another fork from
7970 within target_mourn_inferior. */
731f534f
PA
7971 if (inferior_ptid != null_ptid)
7972 maybe_finish_thread_state.emplace (ptid_t (inferior_ptid.pid ()));
e1316e60
PA
7973 }
7974 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
731f534f 7975 maybe_finish_thread_state.emplace (inferior_ptid);
29f49a6a 7976
b57bacec
PA
7977 /* As we're presenting a stop, and potentially removing breakpoints,
7978 update the thread list so we can tell whether there are threads
7979 running on the target. With target remote, for example, we can
7980 only learn about new threads when we explicitly update the thread
7981 list. Do this before notifying the interpreters about signal
7982 stops, end of stepping ranges, etc., so that the "new thread"
7983 output is emitted before e.g., "Program received signal FOO",
7984 instead of after. */
7985 update_thread_list ();
7986
7987 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 7988 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 7989
c906108c
SS
7990 /* As with the notification of thread events, we want to delay
7991 notifying the user that we've switched thread context until
7992 the inferior actually stops.
7993
73b65bb0
DJ
7994 There's no point in saying anything if the inferior has exited.
7995 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
7996 "received a signal".
7997
7998 Also skip saying anything in non-stop mode. In that mode, as we
7999 don't want GDB to switch threads behind the user's back, to avoid
8000 races where the user is typing a command to apply to thread x,
8001 but GDB switches to thread y before the user finishes entering
8002 the command, fetch_inferior_event installs a cleanup to restore
8003 the current thread back to the thread the user had selected right
8004 after this event is handled, so we're not really switching, only
8005 informing of a stop. */
4f8d22e3 8006 if (!non_stop
731f534f 8007 && previous_inferior_ptid != inferior_ptid
73b65bb0
DJ
8008 && target_has_execution
8009 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8010 && last.kind != TARGET_WAITKIND_EXITED
8011 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8012 {
0e454242 8013 SWITCH_THRU_ALL_UIS ()
3b12939d 8014 {
223ffa71 8015 target_terminal::ours_for_output ();
3b12939d 8016 printf_filtered (_("[Switching to %s]\n"),
a068643d 8017 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8018 annotate_thread_changed ();
8019 }
39f77062 8020 previous_inferior_ptid = inferior_ptid;
c906108c 8021 }
c906108c 8022
0e5bf2a8
PA
8023 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8024 {
0e454242 8025 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8026 if (current_ui->prompt_state == PROMPT_BLOCKED)
8027 {
223ffa71 8028 target_terminal::ours_for_output ();
3b12939d
PA
8029 printf_filtered (_("No unwaited-for children left.\n"));
8030 }
0e5bf2a8
PA
8031 }
8032
b57bacec 8033 /* Note: this depends on the update_thread_list call above. */
388a7084 8034 maybe_remove_breakpoints ();
c906108c 8035
c906108c
SS
8036 /* If an auto-display called a function and that got a signal,
8037 delete that auto-display to avoid an infinite recursion. */
8038
8039 if (stopped_by_random_signal)
8040 disable_current_display ();
8041
0e454242 8042 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8043 {
8044 async_enable_stdin ();
8045 }
c906108c 8046
388a7084 8047 /* Let the user/frontend see the threads as stopped. */
731f534f 8048 maybe_finish_thread_state.reset ();
388a7084
PA
8049
8050 /* Select innermost stack frame - i.e., current frame is frame 0,
8051 and current location is based on that. Handle the case where the
8052 dummy call is returning after being stopped. E.g. the dummy call
8053 previously hit a breakpoint. (If the dummy call returns
8054 normally, we won't reach here.) Do this before the stop hook is
8055 run, so that it doesn't get to see the temporary dummy frame,
8056 which is not where we'll present the stop. */
8057 if (has_stack_frames ())
8058 {
8059 if (stop_stack_dummy == STOP_STACK_DUMMY)
8060 {
8061 /* Pop the empty frame that contains the stack dummy. This
8062 also restores inferior state prior to the call (struct
8063 infcall_suspend_state). */
8064 struct frame_info *frame = get_current_frame ();
8065
8066 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8067 frame_pop (frame);
8068 /* frame_pop calls reinit_frame_cache as the last thing it
8069 does which means there's now no selected frame. */
8070 }
8071
8072 select_frame (get_current_frame ());
8073
8074 /* Set the current source location. */
8075 set_current_sal_from_frame (get_current_frame ());
8076 }
dd7e2d2b
PA
8077
8078 /* Look up the hook_stop and run it (CLI internally handles problem
8079 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8080 if (stop_command != NULL)
8081 {
2d844eaf 8082 stop_context saved_context;
4c2f2a79 8083
a70b8144 8084 try
bf469271
PA
8085 {
8086 execute_cmd_pre_hook (stop_command);
8087 }
230d2906 8088 catch (const gdb_exception &ex)
bf469271
PA
8089 {
8090 exception_fprintf (gdb_stderr, ex,
8091 "Error while running hook_stop:\n");
8092 }
4c2f2a79
PA
8093
8094 /* If the stop hook resumes the target, then there's no point in
8095 trying to notify about the previous stop; its context is
8096 gone. Likewise if the command switches thread or inferior --
8097 the observers would print a stop for the wrong
8098 thread/inferior. */
2d844eaf
TT
8099 if (saved_context.changed ())
8100 return 1;
4c2f2a79 8101 }
dd7e2d2b 8102
388a7084
PA
8103 /* Notify observers about the stop. This is where the interpreters
8104 print the stop event. */
d7e15655 8105 if (inferior_ptid != null_ptid)
76727919 8106 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8107 stop_print_frame);
8108 else
76727919 8109 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8110
243a9253
PA
8111 annotate_stopped ();
8112
48844aa6
PA
8113 if (target_has_execution)
8114 {
8115 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8116 && last.kind != TARGET_WAITKIND_EXITED
8117 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8118 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8119 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8120 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8121 }
6c95b8df
PA
8122
8123 /* Try to get rid of automatically added inferiors that are no
8124 longer needed. Keeping those around slows down things linearly.
8125 Note that this never removes the current inferior. */
8126 prune_inferiors ();
4c2f2a79
PA
8127
8128 return 0;
c906108c 8129}
c906108c 8130\f
c5aa993b 8131int
96baa820 8132signal_stop_state (int signo)
c906108c 8133{
d6b48e9c 8134 return signal_stop[signo];
c906108c
SS
8135}
8136
c5aa993b 8137int
96baa820 8138signal_print_state (int signo)
c906108c
SS
8139{
8140 return signal_print[signo];
8141}
8142
c5aa993b 8143int
96baa820 8144signal_pass_state (int signo)
c906108c
SS
8145{
8146 return signal_program[signo];
8147}
8148
2455069d
UW
8149static void
8150signal_cache_update (int signo)
8151{
8152 if (signo == -1)
8153 {
a493e3e2 8154 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8155 signal_cache_update (signo);
8156
8157 return;
8158 }
8159
8160 signal_pass[signo] = (signal_stop[signo] == 0
8161 && signal_print[signo] == 0
ab04a2af
TT
8162 && signal_program[signo] == 1
8163 && signal_catch[signo] == 0);
2455069d
UW
8164}
8165
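/* Worked example of the formula above: after "handle SIGCHLD nostop
   noprint pass", the entries are signal_stop = 0, signal_print = 0,
   signal_program = 1 and signal_catch = 0, so signal_pass ends up 1
   and the target may deliver SIGCHLD to the inferior without
   reporting a stop to GDB first.  */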
488f131b 8166int
7bda5e4a 8167signal_stop_update (int signo, int state)
d4f3574e
SS
8168{
8169 int ret = signal_stop[signo];
abbb1732 8170
d4f3574e 8171 signal_stop[signo] = state;
2455069d 8172 signal_cache_update (signo);
d4f3574e
SS
8173 return ret;
8174}
8175
488f131b 8176int
7bda5e4a 8177signal_print_update (int signo, int state)
d4f3574e
SS
8178{
8179 int ret = signal_print[signo];
abbb1732 8180
d4f3574e 8181 signal_print[signo] = state;
2455069d 8182 signal_cache_update (signo);
d4f3574e
SS
8183 return ret;
8184}
8185
488f131b 8186int
7bda5e4a 8187signal_pass_update (int signo, int state)
d4f3574e
SS
8188{
8189 int ret = signal_program[signo];
abbb1732 8190
d4f3574e 8191 signal_program[signo] = state;
2455069d 8192 signal_cache_update (signo);
d4f3574e
SS
8193 return ret;
8194}
8195
ab04a2af
TT
8196/* Update the global 'signal_catch' from INFO and notify the
8197 target. */
8198
8199void
8200signal_catch_update (const unsigned int *info)
8201{
8202 int i;
8203
8204 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8205 signal_catch[i] = info[i] > 0;
8206 signal_cache_update (-1);
adc6a863 8207 target_pass_signals (signal_pass);
ab04a2af
TT
8208}
8209
c906108c 8210static void
96baa820 8211sig_print_header (void)
c906108c 8212{
3e43a32a
MS
8213 printf_filtered (_("Signal Stop\tPrint\tPass "
8214 "to program\tDescription\n"));
c906108c
SS
8215}
8216
8217static void
2ea28649 8218sig_print_info (enum gdb_signal oursig)
c906108c 8219{
2ea28649 8220 const char *name = gdb_signal_to_name (oursig);
c906108c 8221 int name_padding = 13 - strlen (name);
96baa820 8222
c906108c
SS
8223 if (name_padding <= 0)
8224 name_padding = 0;
8225
8226 printf_filtered ("%s", name);
488f131b 8227 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8228 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8229 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8230 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8231 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8232}
8233
8234/* Specify how various signals in the inferior should be handled. */
8235
8236static void
0b39b52e 8237handle_command (const char *args, int from_tty)
c906108c 8238{
c906108c 8239 int digits, wordlen;
b926417a 8240 int sigfirst, siglast;
2ea28649 8241 enum gdb_signal oursig;
c906108c 8242 int allsigs;
c906108c
SS
8243
8244 if (args == NULL)
8245 {
e2e0b3e5 8246 error_no_arg (_("signal to handle"));
c906108c
SS
8247 }
8248
1777feb0 8249 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8250
adc6a863
PA
8251 const size_t nsigs = GDB_SIGNAL_LAST;
8252 unsigned char sigs[nsigs] {};
c906108c 8253
1777feb0 8254 /* Break the command line up into args. */
c906108c 8255
773a1edc 8256 gdb_argv built_argv (args);
c906108c
SS
8257
8258 /* Walk through the args, looking for signal oursigs, signal names, and
8259 actions. Signal numbers and signal names may be interspersed with
8260 actions, with the actions being performed for all signals cumulatively
1777feb0 8261 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8262
773a1edc 8263 for (char *arg : built_argv)
c906108c 8264 {
773a1edc
TT
8265 wordlen = strlen (arg);
8266 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8267 {;
8268 }
8269 allsigs = 0;
8270 sigfirst = siglast = -1;
8271
773a1edc 8272 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8273 {
8274 /* Apply action to all signals except those used by the
1777feb0 8275 debugger. Silently skip those. */
c906108c
SS
8276 allsigs = 1;
8277 sigfirst = 0;
8278 siglast = nsigs - 1;
8279 }
773a1edc 8280 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8281 {
8282 SET_SIGS (nsigs, sigs, signal_stop);
8283 SET_SIGS (nsigs, sigs, signal_print);
8284 }
773a1edc 8285 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8286 {
8287 UNSET_SIGS (nsigs, sigs, signal_program);
8288 }
773a1edc 8289 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8290 {
8291 SET_SIGS (nsigs, sigs, signal_print);
8292 }
773a1edc 8293 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8294 {
8295 SET_SIGS (nsigs, sigs, signal_program);
8296 }
773a1edc 8297 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8298 {
8299 UNSET_SIGS (nsigs, sigs, signal_stop);
8300 }
773a1edc 8301 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8302 {
8303 SET_SIGS (nsigs, sigs, signal_program);
8304 }
773a1edc 8305 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8306 {
8307 UNSET_SIGS (nsigs, sigs, signal_print);
8308 UNSET_SIGS (nsigs, sigs, signal_stop);
8309 }
773a1edc 8310 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8311 {
8312 UNSET_SIGS (nsigs, sigs, signal_program);
8313 }
8314 else if (digits > 0)
8315 {
8316 /* It is numeric. The numeric signal refers to our own
8317 internal signal numbering from target.h, not to host/target
8318 signal number. This is a feature; users really should be
8319 using symbolic names anyway, and the common ones like
8320 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8321
8322 sigfirst = siglast = (int)
773a1edc
TT
8323 gdb_signal_from_command (atoi (arg));
8324 if (arg[digits] == '-')
c906108c
SS
8325 {
8326 siglast = (int)
773a1edc 8327 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8328 }
8329 if (sigfirst > siglast)
8330 {
1777feb0 8331 /* Bet he didn't figure we'd think of this case... */
b926417a 8332 std::swap (sigfirst, siglast);
c906108c
SS
8333 }
8334 }
8335 else
8336 {
773a1edc 8337 oursig = gdb_signal_from_name (arg);
a493e3e2 8338 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8339 {
8340 sigfirst = siglast = (int) oursig;
8341 }
8342 else
8343 {
8344 /* Not a number and not a recognized flag word => complain. */
773a1edc 8345 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8346 }
8347 }
8348
8349 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8350 which signals to apply actions to. */
c906108c 8351
b926417a 8352 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8353 {
2ea28649 8354 switch ((enum gdb_signal) signum)
c906108c 8355 {
a493e3e2
PA
8356 case GDB_SIGNAL_TRAP:
8357 case GDB_SIGNAL_INT:
c906108c
SS
8358 if (!allsigs && !sigs[signum])
8359 {
9e2f0ad4 8360 if (query (_("%s is used by the debugger.\n\
3e43a32a 8361Are you sure you want to change it? "),
2ea28649 8362 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8363 {
8364 sigs[signum] = 1;
8365 }
8366 else
c119e040 8367 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8368 }
8369 break;
a493e3e2
PA
8370 case GDB_SIGNAL_0:
8371 case GDB_SIGNAL_DEFAULT:
8372 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8373 /* Make sure that "all" doesn't print these. */
8374 break;
8375 default:
8376 sigs[signum] = 1;
8377 break;
8378 }
8379 }
c906108c
SS
8380 }
8381
b926417a 8382 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8383 if (sigs[signum])
8384 {
2455069d 8385 signal_cache_update (-1);
adc6a863
PA
8386 target_pass_signals (signal_pass);
8387 target_program_signals (signal_program);
c906108c 8388
3a031f65
PA
8389 if (from_tty)
8390 {
8391 /* Show the results. */
8392 sig_print_header ();
8393 for (; signum < nsigs; signum++)
8394 if (sigs[signum])
aead7601 8395 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8396 }
8397
8398 break;
8399 }
c906108c
SS
8400}
8401
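/* Typical invocations, for illustration:

     (gdb) handle SIGUSR1 nostop noprint pass   - let the program see it
     (gdb) handle SIGSEGV stop print nopass     - intercept it in GDB
     (gdb) handle 5-10 print                    - numeric range, GDB numbering
     (gdb) handle all noprint                   - all but SIGTRAP/SIGINT

   Each word is either a signal (name, number, or LOW-HIGH range) or an
   action; actions accumulate and apply to every signal mentioned on
   the command line.  */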
de0bea00
MF
8402/* Complete the "handle" command. */
8403
eb3ff9a5 8404static void
de0bea00 8405handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8406 completion_tracker &tracker,
6f937416 8407 const char *text, const char *word)
de0bea00 8408{
de0bea00
MF
8409 static const char * const keywords[] =
8410 {
8411 "all",
8412 "stop",
8413 "ignore",
8414 "print",
8415 "pass",
8416 "nostop",
8417 "noignore",
8418 "noprint",
8419 "nopass",
8420 NULL,
8421 };
8422
eb3ff9a5
PA
8423 signal_completer (ignore, tracker, text, word);
8424 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8425}
8426
2ea28649
PA
8427enum gdb_signal
8428gdb_signal_from_command (int num)
ed01b82c
PA
8429{
8430 if (num >= 1 && num <= 15)
2ea28649 8431 return (enum gdb_signal) num;
ed01b82c
PA
8432 error (_("Only signals 1-15 are valid as numeric signals.\n\
8433Use \"info signals\" for a list of symbolic signals."));
8434}
8435
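/* E.g., "handle 11 nostop" refers to 11 in GDB's own numbering, which
   maps to GDB_SIGNAL_SEGV; for these low-numbered POSIX signals GDB's
   numbering happens to match the common host numbering, which is why
   the numeric form keeps working for old scripts.  */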
c906108c
SS
8436/* Print current contents of the tables set by the handle command.
8437 It is possible we should just be printing signals actually used
8438 by the current target (but for things to work right when switching
8439 targets, all signals should be in the signal tables). */
8440
8441static void
1d12d88f 8442info_signals_command (const char *signum_exp, int from_tty)
c906108c 8443{
2ea28649 8444 enum gdb_signal oursig;
abbb1732 8445
c906108c
SS
8446 sig_print_header ();
8447
8448 if (signum_exp)
8449 {
8450 /* First see if this is a symbol name. */
2ea28649 8451 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8452 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8453 {
8454 /* No, try numeric. */
8455 oursig =
2ea28649 8456 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8457 }
8458 sig_print_info (oursig);
8459 return;
8460 }
8461
8462 printf_filtered ("\n");
8463 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8464 for (oursig = GDB_SIGNAL_FIRST;
8465 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8466 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8467 {
8468 QUIT;
8469
a493e3e2
PA
8470 if (oursig != GDB_SIGNAL_UNKNOWN
8471 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8472 sig_print_info (oursig);
8473 }
8474
3e43a32a
MS
8475 printf_filtered (_("\nUse the \"handle\" command "
8476 "to change these tables.\n"));
c906108c 8477}
4aa995e1
PA
8478
8479/* The $_siginfo convenience variable is a bit special. We don't know
8480 for sure the type of the value until we actually have a chance to
7a9dd1b2 8481 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8482 also dependent on which thread you have selected. We handle
8483 this by combining two things:
8484 1. making $_siginfo be an internalvar that creates a new value on
8485 access.
8486
8487 2. making the value of $_siginfo be an lval_computed value. */
8488
8489/* This function implements the lval_computed support for reading a
8490 $_siginfo value. */
8491
8492static void
8493siginfo_value_read (struct value *v)
8494{
8495 LONGEST transferred;
8496
a911d87a
PA
8497 /* If we can access registers, we can also access $_siginfo; and
8498 vice versa. */
8499 validate_registers_access ();
c709acd1 8500
4aa995e1 8501 transferred =
8b88a78e 8502 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8503 NULL,
8504 value_contents_all_raw (v),
8505 value_offset (v),
8506 TYPE_LENGTH (value_type (v)));
8507
8508 if (transferred != TYPE_LENGTH (value_type (v)))
8509 error (_("Unable to read siginfo"));
8510}
8511
8512/* This function implements the lval_computed support for writing a
8513 $_siginfo value. */
8514
8515static void
8516siginfo_value_write (struct value *v, struct value *fromval)
8517{
8518 LONGEST transferred;
8519
a911d87a
PA
8520 /* If we can access registers, we can also access $_siginfo; and
8521 vice versa. */
8522 validate_registers_access ();
c709acd1 8523
8b88a78e 8524 transferred = target_write (current_top_target (),
4aa995e1
PA
8525 TARGET_OBJECT_SIGNAL_INFO,
8526 NULL,
8527 value_contents_all_raw (fromval),
8528 value_offset (v),
8529 TYPE_LENGTH (value_type (fromval)));
8530
8531 if (transferred != TYPE_LENGTH (value_type (fromval)))
8532 error (_("Unable to write siginfo"));
8533}
8534
c8f2448a 8535static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8536 {
8537 siginfo_value_read,
8538 siginfo_value_write
8539 };
8540
8541/* Return a new value with the correct type for the siginfo object of
78267919
UW
8542 the current thread using architecture GDBARCH. Return a void value
8543 if there's no object available. */
4aa995e1 8544
2c0b251b 8545static struct value *
22d2b532
SDJ
8546siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8547 void *ignore)
4aa995e1 8548{
4aa995e1 8549 if (target_has_stack
d7e15655 8550 && inferior_ptid != null_ptid
78267919 8551 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8552 {
78267919 8553 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8554
78267919 8555 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8556 }
8557
78267919 8558 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8559}
8560
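/* Putting the pieces together, usage at the CLI looks like this
   (sketch; the exact field names depend on the target's siginfo
   type):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   Reads go through siginfo_value_read and writes through
   siginfo_value_write, via the lval_computed functions above.  */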
c906108c 8561\f
16c381f0
JK
8562/* infcall_suspend_state contains state about the program itself like its
8563 registers and any signal it received when it last stopped.
8564 This state must be restored regardless of how the inferior function call
8565 ends (either successfully, or after it hits a breakpoint or signal)
8566 if the program is to properly continue where it left off. */
8567
6bf78e29 8568class infcall_suspend_state
7a292a7a 8569{
6bf78e29
AB
8570public:
8571 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
8572 once the inferior function call has finished. */
8573 infcall_suspend_state (struct gdbarch *gdbarch,
8574 const struct thread_info *tp,
8575 struct regcache *regcache)
8576 : m_thread_suspend (tp->suspend),
8577 m_registers (new readonly_detached_regcache (*regcache))
8578 {
8579 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
8580
8581 if (gdbarch_get_siginfo_type_p (gdbarch))
8582 {
8583 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8584 size_t len = TYPE_LENGTH (type);
8585
8586 siginfo_data.reset ((gdb_byte *) xmalloc (len));
8587
8588 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8589 siginfo_data.get (), 0, len) != len)
8590 {
8591 /* Errors ignored. */
8592 siginfo_data.reset (nullptr);
8593 }
8594 }
8595
8596 if (siginfo_data)
8597 {
8598 m_siginfo_gdbarch = gdbarch;
8599 m_siginfo_data = std::move (siginfo_data);
8600 }
8601 }
8602
8603 /* Return a pointer to the stored register state. */
16c381f0 8604
6bf78e29
AB
8605 readonly_detached_regcache *registers () const
8606 {
8607 return m_registers.get ();
8608 }
8609
8610 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
8611
8612 void restore (struct gdbarch *gdbarch,
8613 struct thread_info *tp,
8614 struct regcache *regcache) const
8615 {
8616 tp->suspend = m_thread_suspend;
8617
8618 if (m_siginfo_gdbarch == gdbarch)
8619 {
8620 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8621
8622 /* Errors ignored. */
8623 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8624 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
8625 }
8626
8627 /* The inferior can be gone if the user types "print exit(0)"
8628 (and perhaps other times). */
8629 if (target_has_execution)
8630 /* NB: The register write goes through to the target. */
8631 regcache->restore (registers ());
8632 }
8633
8634private:
8635 /* How the current thread stopped before the inferior function call was
8636 executed. */
8637 struct thread_suspend_state m_thread_suspend;
8638
8639 /* The registers before the inferior function call was executed. */
8640 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 8641
35515841 8642 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 8643 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
8644
8645 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
8646 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
8647 content would be invalid. */
6bf78e29 8648 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
8649};
8650
cb524840
TT
8651infcall_suspend_state_up
8652save_infcall_suspend_state ()
b89667eb 8653{
b89667eb 8654 struct thread_info *tp = inferior_thread ();
1736ad11 8655 struct regcache *regcache = get_current_regcache ();
ac7936df 8656 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 8657
6bf78e29
AB
8658 infcall_suspend_state_up inf_state
8659 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 8660
6bf78e29
AB
8661 /* Having saved the current state, adjust the thread state, discarding
8662 any stop signal information. The stop signal is not useful when
8663 starting an inferior function call, and run_inferior_call will not use
8664 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 8665 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8666
b89667eb
DE
8667 return inf_state;
8668}
8669
8670/* Restore inferior session state to INF_STATE. */
8671
8672void
16c381f0 8673restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8674{
8675 struct thread_info *tp = inferior_thread ();
1736ad11 8676 struct regcache *regcache = get_current_regcache ();
ac7936df 8677 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 8678
6bf78e29 8679 inf_state->restore (gdbarch, tp, regcache);
16c381f0 8680 discard_infcall_suspend_state (inf_state);
b89667eb
DE
8681}
8682
b89667eb 8683void
16c381f0 8684discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 8685{
dd848631 8686 delete inf_state;
b89667eb
DE
8687}
8688
daf6667d 8689readonly_detached_regcache *
16c381f0 8690get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 8691{
6bf78e29 8692 return inf_state->registers ();
b89667eb
DE
8693}
8694
16c381f0
JK
8695/* infcall_control_state contains state regarding gdb's control of the
8696 inferior itself like stepping control. It also contains session state like
8697 the user's currently selected frame. */
b89667eb 8698
16c381f0 8699struct infcall_control_state
b89667eb 8700{
16c381f0
JK
8701 struct thread_control_state thread_control;
8702 struct inferior_control_state inferior_control;
d82142e2
JK
8703
8704 /* Other fields: */
ee841dd8
TT
8705 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
8706 int stopped_by_random_signal = 0;
7a292a7a 8707
b89667eb 8708 /* ID if the selected frame when the inferior function call was made. */
ee841dd8 8709 struct frame_id selected_frame_id {};
7a292a7a
SS
8710};
8711
c906108c 8712/* Save all of the information associated with the inferior<==>gdb
b89667eb 8713 connection. */
c906108c 8714
cb524840
TT
8715infcall_control_state_up
8716save_infcall_control_state ()
c906108c 8717{
cb524840 8718 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 8719 struct thread_info *tp = inferior_thread ();
d6b48e9c 8720 struct inferior *inf = current_inferior ();
7a292a7a 8721
16c381f0
JK
8722 inf_status->thread_control = tp->control;
8723 inf_status->inferior_control = inf->control;
d82142e2 8724
8358c15c 8725 tp->control.step_resume_breakpoint = NULL;
5b79abe7 8726 tp->control.exception_resume_breakpoint = NULL;
8358c15c 8727
16c381f0
JK
8728 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
8729 chain. If caller's caller is walking the chain, they'll be happier if we
8730 hand them back the original chain when restore_infcall_control_state is
8731 called. */
8732 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
8733
8734 /* Other fields: */
8735 inf_status->stop_stack_dummy = stop_stack_dummy;
8736 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 8737
206415a3 8738 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 8739
7a292a7a 8740 return inf_status;
c906108c
SS
8741}
8742
bf469271
PA
8743static void
8744restore_selected_frame (const frame_id &fid)
c906108c 8745{
bf469271 8746 frame_info *frame = frame_find_by_id (fid);
c906108c 8747
aa0cd9c1
AC
8748 /* If the frame identified by FID cannot be found anymore, the
8749 previously selected frame cannot be restored. */
101dcfbe 8750 if (frame == NULL)
c906108c 8751 {
8a3fe4f8 8752 warning (_("Unable to restore previously selected frame."));
bf469271 8753 return;
c906108c
SS
8754 }
8755
0f7d239c 8756 select_frame (frame);
c906108c
SS
8757}
8758
b89667eb
DE
8759/* Restore inferior session state to INF_STATUS. */
8760
c906108c 8761void
16c381f0 8762restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 8763{
4e1c45ea 8764 struct thread_info *tp = inferior_thread ();
d6b48e9c 8765 struct inferior *inf = current_inferior ();
4e1c45ea 8766
8358c15c
JK
8767 if (tp->control.step_resume_breakpoint)
8768 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
8769
5b79abe7
TT
8770 if (tp->control.exception_resume_breakpoint)
8771 tp->control.exception_resume_breakpoint->disposition
8772 = disp_del_at_next_stop;
8773
d82142e2 8774 /* Handle the bpstat_copy of the chain. */
16c381f0 8775 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 8776
16c381f0
JK
8777 tp->control = inf_status->thread_control;
8778 inf->control = inf_status->inferior_control;
d82142e2
JK
8779
8780 /* Other fields: */
8781 stop_stack_dummy = inf_status->stop_stack_dummy;
8782 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 8783
b89667eb 8784 if (target_has_stack)
c906108c 8785 {
bf469271 8786 /* The point of the try/catch is that if the stack is clobbered,
101dcfbe
AC
8787 walking the stack might encounter a garbage pointer and
8788 error() trying to dereference it. */
a70b8144 8789 try
bf469271
PA
8790 {
8791 restore_selected_frame (inf_status->selected_frame_id);
8792 }
230d2906 8793 catch (const gdb_exception_error &ex)
bf469271
PA
8794 {
8795 exception_fprintf (gdb_stderr, ex,
8796 "Unable to restore previously selected frame:\n");
8797 /* Error in restoring the selected frame. Select the
8798 innermost frame. */
8799 select_frame (get_current_frame ());
8800 }
c906108c 8801 }
c906108c 8802
ee841dd8 8803 delete inf_status;
7a292a7a 8804}
c906108c
SS
8805
8806void
16c381f0 8807discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 8808{
8358c15c
JK
8809 if (inf_status->thread_control.step_resume_breakpoint)
8810 inf_status->thread_control.step_resume_breakpoint->disposition
8811 = disp_del_at_next_stop;
8812
5b79abe7
TT
8813 if (inf_status->thread_control.exception_resume_breakpoint)
8814 inf_status->thread_control.exception_resume_breakpoint->disposition
8815 = disp_del_at_next_stop;
8816
1777feb0 8817 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 8818 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 8819
ee841dd8 8820 delete inf_status;
7a292a7a 8821}
b89667eb 8822\f
7f89fd65 8823/* See infrun.h. */
0c557179
SDJ
8824
8825void
8826clear_exit_convenience_vars (void)
8827{
8828 clear_internalvar (lookup_internalvar ("_exitsignal"));
8829 clear_internalvar (lookup_internalvar ("_exitcode"));
8830}
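/* For illustration: after the inferior finishes, only one of the two
   is meaningful:

     (gdb) print $_exitcode     - set if the inferior exited normally
     (gdb) print $_exitsignal   - set if it was killed by a signal

   Clearing both here keeps a stale value from a previous run from
   being mistaken for the outcome of the current one.  */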
c5aa993b 8831\f
488f131b 8832
b2175913
MS
8833/* User interface for reverse debugging:
8834 Set exec-direction / show exec-direction commands
8835 (returns error unless target implements to_set_exec_direction method). */
8836
170742de 8837enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
8838static const char exec_forward[] = "forward";
8839static const char exec_reverse[] = "reverse";
8840static const char *exec_direction = exec_forward;
40478521 8841static const char *const exec_direction_names[] = {
b2175913
MS
8842 exec_forward,
8843 exec_reverse,
8844 NULL
8845};
8846
8847static void
eb4c3f4a 8848set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
8849 struct cmd_list_element *cmd)
8850{
8851 if (target_can_execute_reverse)
8852 {
8853 if (!strcmp (exec_direction, exec_forward))
8854 execution_direction = EXEC_FORWARD;
8855 else if (!strcmp (exec_direction, exec_reverse))
8856 execution_direction = EXEC_REVERSE;
8857 }
8bbed405
MS
8858 else
8859 {
8860 exec_direction = exec_forward;
8861 error (_("Target does not support this operation."));
8862 }
b2175913
MS
8863}
8864
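/* Example usage (a sketch; only meaningful on targets that can
   execute in reverse, e.g. with process record enabled):

     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) continue
     (gdb) set exec-direction forward
*/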
8865static void
8866show_exec_direction_func (struct ui_file *out, int from_tty,
8867 struct cmd_list_element *cmd, const char *value)
8868{
8869 switch (execution_direction) {
8870 case EXEC_FORWARD:
8871 fprintf_filtered (out, _("Forward.\n"));
8872 break;
8873 case EXEC_REVERSE:
8874 fprintf_filtered (out, _("Reverse.\n"));
8875 break;
b2175913 8876 default:
d8b34453
PA
8877 internal_error (__FILE__, __LINE__,
8878 _("bogus execution_direction value: %d"),
8879 (int) execution_direction);
b2175913
MS
8880 }
8881}
8882
d4db2f36
PA
8883static void
8884show_schedule_multiple (struct ui_file *file, int from_tty,
8885 struct cmd_list_element *c, const char *value)
8886{
3e43a32a
MS
8887 fprintf_filtered (file, _("Resuming the execution of threads "
8888 "of all processes is %s.\n"), value);
d4db2f36 8889}
ad52ddc6 8890
22d2b532
SDJ
8891/* Implementation of `siginfo' variable. */
8892
8893static const struct internalvar_funcs siginfo_funcs =
8894{
8895 siginfo_make_value,
8896 NULL,
8897 NULL
8898};
8899
372316f1
PA
8900/* Callback for infrun's target events source. This is marked when a
8901 thread has a pending status to process. */
8902
8903static void
8904infrun_async_inferior_event_handler (gdb_client_data data)
8905{
372316f1
PA
8906 inferior_event_handler (INF_REG_EVENT, NULL);
8907}
8908
c906108c 8909void
96baa820 8910_initialize_infrun (void)
c906108c 8911{
de0bea00 8912 struct cmd_list_element *c;
c906108c 8913
372316f1
PA
8914 /* Register extra event sources in the event loop. */
8915 infrun_async_inferior_event_token
8916 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
8917
11db9430 8918 add_info ("signals", info_signals_command, _("\
1bedd215
AC
8919What debugger does when program gets various signals.\n\
8920Specify a signal as argument to print info on that signal only."));
c906108c
SS
8921 add_info_alias ("handle", "signals", 0);
8922
de0bea00 8923 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 8924Specify how to handle signals.\n\
486c7739 8925Usage: handle SIGNAL [ACTIONS]\n\
c906108c 8926Args are signals and actions to apply to those signals.\n\
dfbd5e7b 8927If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
8928will be displayed instead.\n\
8929\n\
c906108c
SS
8930Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
8931from 1-15 are allowed for compatibility with old versions of GDB.\n\
8932Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
8933The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 8934used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 8935\n\
1bedd215 8936Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
8937\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
8938Stop means reenter debugger if this signal happens (implies print).\n\
8939Print means print a message if this signal happens.\n\
8940Pass means let program see this signal; otherwise program doesn't know.\n\
8941Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
8942Pass and Stop may be combined.\n\
8943\n\
8944Multiple signals may be specified. Signal numbers and signal names\n\
8945may be interspersed with actions, with the actions being performed for\n\
8946all signals cumulatively specified."));
de0bea00 8947 set_cmd_completer (c, handle_completer);
486c7739 8948
c906108c 8949 if (!dbx_commands)
1a966eab
AC
8950 stop_command = add_cmd ("stop", class_obscure,
8951 not_just_help_class_command, _("\
8952There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 8953This allows you to set a list of commands to be run each time execution\n\
1a966eab 8954of the program stops."), &cmdlist);
c906108c 8955
ccce17b0 8956 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
8957Set inferior debugging."), _("\
8958Show inferior debugging."), _("\
8959When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
8960 NULL,
8961 show_debug_infrun,
8962 &setdebuglist, &showdebuglist);
527159b7 8963
3e43a32a
MS
8964 add_setshow_boolean_cmd ("displaced", class_maintenance,
8965 &debug_displaced, _("\
237fc4c9
PA
8966Set displaced stepping debugging."), _("\
8967Show displaced stepping debugging."), _("\
8968When non-zero, displaced stepping specific debugging is enabled."),
8969 NULL,
8970 show_debug_displaced,
8971 &setdebuglist, &showdebuglist);
8972
ad52ddc6
PA
8973 add_setshow_boolean_cmd ("non-stop", no_class,
8974 &non_stop_1, _("\
8975Set whether gdb controls the inferior in non-stop mode."), _("\
8976Show whether gdb controls the inferior in non-stop mode."), _("\
8977When debugging a multi-threaded program and this setting is\n\
8978off (the default, also called all-stop mode), when one thread stops\n\
8979(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
8980all other threads in the program while you interact with the thread of\n\
8981interest. When you continue or step a thread, you can allow the other\n\
8982threads to run, or have them remain stopped, but while you inspect any\n\
8983thread's state, all threads stop.\n\
8984\n\
8985In non-stop mode, when one thread stops, other threads can continue\n\
8986to run freely. You'll be able to step each thread independently,\n\
8987leave it stopped or free to run as needed."),
8988 set_non_stop,
8989 show_non_stop,
8990 &setlist,
8991 &showlist);
8992
adc6a863 8993 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
8994 {
8995 signal_stop[i] = 1;
8996 signal_print[i] = 1;
8997 signal_program[i] = 1;
ab04a2af 8998 signal_catch[i] = 0;
c906108c
SS
8999 }
9000
  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

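  /* Any of these per-signal defaults can be overridden at runtime with
     the "handle" command, e.g. "handle SIGALRM stop print".  */
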
  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

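  /* For example, "set stop-on-solib-events 1" makes GDB stop each time
     the dynamic linker reports a shared library load or unload.  */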
  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

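  /* For example, "set follow-fork-mode child" makes GDB stay with the
     child process after a fork instead of the parent.  */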
  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

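  /* For example, "set follow-exec-mode new" makes GDB create a fresh
     inferior for the post-exec program image.  */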
  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

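  /* For example, "set scheduler-locking step" keeps other threads from
     running while the user single-steps the current thread.  */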
  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

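  /* For example, "set displaced-stepping on" asks GDB to step over
     breakpoints out of line whenever the architecture supports it.  */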
  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

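  /* Reverse execution needs target support, typically the record/replay
     target; e.g. "record" followed by "set exec-direction reverse".  */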
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

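  /* Let infrun react to thread and inferior lifetime events.  */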
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
  gdb::observers::inferior_exit.attach (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

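  /* For example, "set observer on" restricts GDB to read-only
     inspection of the inferior.  */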
  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);
}