/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdb_select.h"
#include <unordered_map>

/* Prototypes for local functions.  */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static int currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: infrun_async(%d)\n",
                            enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such a function.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (the default, for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on the 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_displaced = false;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}

unsigned int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}


/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)
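
/* Illustrative sketch (not part of the original source): the two macros
   above operate on the signal flag tables declared earlier.  A caller
   that has parsed a signal list into SIGS would use them roughly like
   this (the locals here are hypothetical; GDB's real caller is the
   "handle" command implementation later in this file):

     int nsigs = (int) GDB_SIGNAL_LAST;
     unsigned char sigs[GDB_SIGNAL_LAST] = { 0 };

     sigs[GDB_SIGNAL_INT] = 1;
     SET_SIGS (nsigs, sigs, signal_stop);
     UNSET_SIGS (nsigs, sigs, signal_program);  */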

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}
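
/* Illustrative note (not part of the original source): the variables above
   back the user-level fork-following knobs.  A typical session drives them
   with the documented commands, e.g.:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off
     (gdb) show follow-fork-mode
     Debugger response to a program call of fork or vfork is "child".

   Both settings are registered with the command machinery in
   _initialize_infrun at the end of this file.  */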


/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          push_target (parent_inf->process_target ());
          add_thread_silent (child_inf->process_target (), child_ptid);
          inferior_ptid = child_ptid;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
            parent_inf->waiting_for_vfork_done = 0;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        push_target (target);
      }

      add_thread_silent (target, child_ptid);
      inferior_ptid = child_ptid;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }
    }

  return target_follow_fork (follow_child, detach_fork);
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previous selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent_targ, parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: resuming vfork parent thread %s\n",
                            target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Save/restore inferior_ptid, current program space and current
   inferior.  Only use this if the current context points at an exited
   inferior (and therefore there's no current thread to save).  */
class scoped_restore_exited_inferior
{
public:
  scoped_restore_exited_inferior ()
    : m_saved_ptid (&inferior_ptid)
  {}

private:
  scoped_restore_tmpl<ptid_t> m_saved_ptid;
  scoped_restore_current_program_space m_pspace;
  scoped_restore_current_inferior m_inferior;
};

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct thread_info *tp;
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          gdb::optional<scoped_restore_exited_inferior>
            maybe_restore_inferior;
          gdb::optional<scoped_restore_current_pspace_and_thread>
            maybe_restore_thread;

          /* If we're handling a child exit, then inferior_ptid points
             at the inferior's pid, not to a thread.  */
          if (!exec)
            maybe_restore_inferior.emplace ();
          else
            maybe_restore_thread.emplace ();

          /* We're letting loose of the parent.  */
          tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to null_ptid while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore restore_ptid
            = make_scoped_restore (&inferior_ptid, null_ptid);

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: resuming vfork parent process %d\n",
                                resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      inferior *org_inferior = current_inferior ();
      switch_to_inferior_no_thread (inf);
      push_target (org_inferior->process_target ());
      thread_info *thr = add_thread (inf->process_target (), ptid);
      switch_to_thread (thr);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.)  */
}

/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple of threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *step_over_queue_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The thread's global number.  */
  int thread;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given that threads that can't run code in the same address space as
   the breakpoint's can't really miss the breakpoint, GDB could be
   taught to step-over at most one breakpoint per address space (so
   this info could move to the address space object if/when GDB is
   extended).  The set of breakpoints being stepped over will normally
   be much smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
                        "infrun: clear_step_over_info\n");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}
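
/* Illustrative sketch (not part of the original source): a caller stepping
   thread TP past a breakpoint would bracket the operation roughly like
   this, mirroring what the step-over machinery later in this file does.
   The local names are hypothetical; the third argument says the step is
   not for a nonsteppable watchpoint:

     struct regcache *regcache = get_thread_regcache (tp);

     set_step_over_info (regcache->aspace (),
                         regcache_read_pc (regcache),
                         0, tp->global_num);
     ... remove the breakpoint, single-step TP, reinsert it ...
     clear_step_over_info ();

   In GDB proper this pairing is driven by the keep_going /
   finish_step_over paths later in this file.  */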
1354
7f89fd65 1355/* See infrun.h. */
31e77af2
PA
1356
1357int
1358stepping_past_instruction_at (struct address_space *aspace,
1359 CORE_ADDR address)
1360{
1361 return (step_over_info.aspace != NULL
1362 && breakpoint_address_match (aspace, address,
1363 step_over_info.aspace,
1364 step_over_info.address));
1365}
1366
963f9c80
PA
1367/* See infrun.h. */
1368
21edc42f
YQ
1369int
1370thread_is_stepping_over_breakpoint (int thread)
1371{
1372 return (step_over_info.thread != -1
1373 && thread == step_over_info.thread);
1374}
1375
1376/* See infrun.h. */
1377
963f9c80
PA
1378int
1379stepping_past_nonsteppable_watchpoint (void)
1380{
1381 return step_over_info.nonsteppable_watchpoint_p;
1382}
1383
6cc83d2a
PA
1384/* Returns true if step-over info is valid. */
1385
1386static int
1387step_over_info_valid_p (void)
1388{
963f9c80
PA
1389 return (step_over_info.aspace != NULL
1390 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1391}
1392
c906108c 1393\f
237fc4c9
PA
1394/* Displaced stepping. */
1395
1396/* In non-stop debugging mode, we must take special care to manage
1397 breakpoints properly; in particular, the traditional strategy for
1398 stepping a thread past a breakpoint it has hit is unsuitable.
1399 'Displaced stepping' is a tactic for stepping one thread past a
1400 breakpoint it has hit while ensuring that other threads running
1401 concurrently will hit the breakpoint as they should.
1402
1403 The traditional way to step a thread T off a breakpoint in a
1404 multi-threaded program in all-stop mode is as follows:
1405
1406 a0) Initially, all threads are stopped, and breakpoints are not
1407 inserted.
1408 a1) We single-step T, leaving breakpoints uninserted.
1409 a2) We insert breakpoints, and resume all threads.
1410
1411 In non-stop debugging, however, this strategy is unsuitable: we
1412 don't want to have to stop all threads in the system in order to
1413 continue or step T past a breakpoint. Instead, we use displaced
1414 stepping:
1415
1416 n0) Initially, T is stopped, other threads are running, and
1417 breakpoints are inserted.
1418 n1) We copy the instruction "under" the breakpoint to a separate
1419 location, outside the main code stream, making any adjustments
1420 to the instruction, register, and memory state as directed by
1421 T's architecture.
1422 n2) We single-step T over the instruction at its new location.
1423 n3) We adjust the resulting register and memory state as directed
1424 by T's architecture. This includes resetting T's PC to point
1425 back into the main instruction stream.
1426 n4) We resume T.
1427
1428 This approach depends on the following gdbarch methods:
1429
1430 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1431 indicate where to copy the instruction, and how much space must
1432 be reserved there. We use these in step n1.
1433
1434 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1435 address, and makes any necessary adjustments to the instruction,
1436 register contents, and memory. We use this in step n1.
1437
1438 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1439 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1440 same effect the instruction would have had if we had executed it
1441 at its original address. We use this in step n3.
1442
237fc4c9
PA
1443 The gdbarch_displaced_step_copy_insn and
1444 gdbarch_displaced_step_fixup functions must be written so that
1445 copying an instruction with gdbarch_displaced_step_copy_insn,
1446 single-stepping across the copied instruction, and then applying
1447 gdbarch_displaced_insn_fixup should have the same effects on the
1448 thread's memory and registers as stepping the instruction in place
1449 would have. Exactly which responsibilities fall to the copy and
1450 which fall to the fixup is up to the author of those functions.
1451
1452 See the comments in gdbarch.sh for details.
1453
1454 Note that displaced stepping and software single-step cannot
1455 currently be used in combination, although with some care I think
1456 they could be made to. Software single-step works by placing
1457 breakpoints on all possible subsequent instructions; if the
1458 displaced instruction is a PC-relative jump, those breakpoints
1459 could fall in very strange places --- on pages that aren't
1460 executable, or at addresses that are not proper instruction
1461 boundaries. (We do generally let other threads run while we wait
1462 to hit the software single-step breakpoint, and they might
1463 encounter such a corrupted instruction.) One way to work around
1464 this would be to have gdbarch_displaced_step_copy_insn fully
1465 simulate the effect of PC-relative instructions (and return NULL)
1466 on architectures that use software single-stepping.
1467
1468 In non-stop mode, we can have independent and simultaneous step
1469 requests, so more than one thread may need to simultaneously step
1470 over a breakpoint. The current implementation assumes there is
1471 only one scratch space per process. In this case, we have to
1472 serialize access to the scratch space. If thread A wants to step
1473 over a breakpoint, but we are currently waiting for some other
1474 thread to complete a displaced step, we leave thread A stopped and
1475 place it in the displaced_step_request_queue. Whenever a displaced
1476 step finishes, we pick the next thread in the queue and start a new
1477 displaced step operation on it. See displaced_step_prepare and
1478 displaced_step_fixup for details. */
1479
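/* As a minimal sketch (not verbatim GDB code), steps n1-n4 map onto the
   gdbarch hooks named above roughly like this, with error handling,
   scratch-pad save/restore and queueing omitted (the real logic is in
   displaced_step_prepare_throw and displaced_step_fixup below):

       CORE_ADDR original = regcache_read_pc (regcache);
       CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
       ULONGEST len = gdbarch_max_insn_length (gdbarch);  // space reserved

       // n1: copy and adjust the instruction "under" the breakpoint.
       closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                   copy, regcache);

       // n2: single-step the copy.
       regcache_write_pc (regcache, copy);
       target_resume (resume_ptid, 1, GDB_SIGNAL_0);
       // ... wait for the single-step to report back ...

       // n3: fix up registers/memory and reset the PC to the mainline.
       gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                     regcache);

       // n4: resume T normally.  */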
cfba9872
SM
1480/* Default destructor for displaced_step_closure. */
1481
1482displaced_step_closure::~displaced_step_closure () = default;
1483
fc1cf338
PA
1484/* Get the displaced stepping state of process PID. */
1485
39a36629 1486static displaced_step_inferior_state *
00431a78 1487get_displaced_stepping_state (inferior *inf)
fc1cf338 1488{
d20172fc 1489 return &inf->displaced_step_state;
fc1cf338
PA
1490}
1491
372316f1
PA
1492/* Returns true if any inferior has a thread doing a displaced
1493 step. */
1494
39a36629
SM
1495static bool
1496displaced_step_in_progress_any_inferior ()
372316f1 1497{
d20172fc 1498 for (inferior *i : all_inferiors ())
39a36629 1499 {
d20172fc 1500 if (i->displaced_step_state.step_thread != nullptr)
39a36629
SM
1501 return true;
1502 }
372316f1 1503
39a36629 1504 return false;
372316f1
PA
1505}
1506
c0987663
YQ
1507/* Return true if thread THREAD is doing a displaced
1508 step. */
1509
1510static int
00431a78 1511displaced_step_in_progress_thread (thread_info *thread)
c0987663 1512{
00431a78 1513 gdb_assert (thread != NULL);
c0987663 1514
d20172fc 1515 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
c0987663
YQ
1516}
1517
8f572e5c
PA
1518/* Return true if inferior INF has a thread doing a displaced step. */
1519
1520static int
00431a78 1521displaced_step_in_progress (inferior *inf)
8f572e5c 1522{
d20172fc 1523 return get_displaced_stepping_state (inf)->step_thread != nullptr;
fc1cf338
PA
1524}
1525
a42244db
YQ
1526/* If the inferior is displaced stepping, and ADDR equals the starting
1527 address of the copy area, return the corresponding displaced_step_closure.
1528 Otherwise, return NULL. */
1529
1530struct displaced_step_closure*
1531get_displaced_step_closure_by_addr (CORE_ADDR addr)
1532{
d20172fc 1533 displaced_step_inferior_state *displaced
00431a78 1534 = get_displaced_stepping_state (current_inferior ());
a42244db
YQ
1535
1536 /* If a displaced step is in progress and ADDR is the copy's address. */
d20172fc 1537 if (displaced->step_thread != nullptr
00431a78 1538 && displaced->step_copy == addr)
d8d83535 1539 return displaced->step_closure.get ();
a42244db
YQ
1540
1541 return NULL;
1542}
1543
fc1cf338
PA
1544static void
1545infrun_inferior_exit (struct inferior *inf)
1546{
d20172fc 1547 inf->displaced_step_state.reset ();
fc1cf338 1548}
237fc4c9 1549
fff08868
HZ
1550/* If ON, and the architecture supports it, GDB will use displaced
1551 stepping to step over breakpoints. If OFF, or if the architecture
1552 doesn't support it, GDB will instead use the traditional
1553 hold-and-step approach. If AUTO (which is the default), GDB will
1554 decide which technique to use to step over breakpoints depending on
9822cb57 1555 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1556
72d0e2c5 1557static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1558
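/* This corresponds to the user-level "set displaced-stepping on|off|auto"
   setting; for example:

     (gdb) set displaced-stepping off
     (gdb) show displaced-stepping
     Debugger's willingness to use displaced stepping to step over
     breakpoints is off.  */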
237fc4c9
PA
1559static void
1560show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1561 struct cmd_list_element *c,
1562 const char *value)
1563{
72d0e2c5 1564 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1565 fprintf_filtered (file,
1566 _("Debugger's willingness to use displaced stepping "
1567 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1568 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1569 else
3e43a32a
MS
1570 fprintf_filtered (file,
1571 _("Debugger's willingness to use displaced stepping "
1572 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1573}
1574
9822cb57
SM
1575/* Return true if the gdbarch implements the required methods to use
1576 displaced stepping. */
1577
1578static bool
1579gdbarch_supports_displaced_stepping (gdbarch *arch)
1580{
1581 /* Only check for the presence of step_copy_insn. Other required methods
1582 are checked by the gdbarch validation. */
1583 return gdbarch_displaced_step_copy_insn_p (arch);
1584}
1585
fff08868 1586/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1587 over breakpoints of thread TP. */
fff08868 1588
9822cb57
SM
1589static bool
1590use_displaced_stepping (thread_info *tp)
237fc4c9 1591{
9822cb57
SM
1592 /* If the user disabled it explicitly, don't use displaced stepping. */
1593 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1594 return false;
1595
1596 /* If "auto", only use displaced stepping if the target operates in a non-stop
1597 way. */
1598 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1599 && !target_is_non_stop_p ())
1600 return false;
1601
1602 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1603
1604 /* If the architecture doesn't implement displaced stepping, don't use
1605 it. */
1606 if (!gdbarch_supports_displaced_stepping (gdbarch))
1607 return false;
1608
1609 /* If recording, don't use displaced stepping. */
1610 if (find_record_target () != nullptr)
1611 return false;
1612
d20172fc
SM
1613 displaced_step_inferior_state *displaced_state
1614 = get_displaced_stepping_state (tp->inf);
3fc8eb30 1615
9822cb57
SM
1616 /* If displaced stepping failed before for this inferior, don't bother trying
1617 again. */
1618 if (displaced_state->failed_before)
1619 return false;
1620
1621 return true;
237fc4c9
PA
1622}
1623
d8d83535
SM
1624/* Simple function wrapper around displaced_step_inferior_state::reset. */
1625
237fc4c9 1626static void
d8d83535 1627displaced_step_reset (displaced_step_inferior_state *displaced)
237fc4c9 1628{
d8d83535 1629 displaced->reset ();
237fc4c9
PA
1630}
1631
d8d83535
SM
1632/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1633 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1634
1635using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9
PA
1636
1637/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1638void
1639displaced_step_dump_bytes (struct ui_file *file,
1640 const gdb_byte *buf,
1641 size_t len)
1642{
1643 int i;
1644
1645 for (i = 0; i < len; i++)
1646 fprintf_unfiltered (file, "%02x ", buf[i]);
1647 fputs_unfiltered ("\n", file);
1648}
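/* For example, dumping the four bytes { 0x55, 0x48, 0x89, 0xe5 } with
   displaced_step_dump_bytes prints "55 48 89 e5 " followed by a
   newline.  */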
1649
1650/* Prepare to single-step, using displaced stepping.
1651
1652 Note that we cannot use displaced stepping when we have a signal to
1653 deliver. If we have a signal to deliver and an instruction to step
1654 over, then after the step, there will be no indication from the
1655 target whether the thread entered a signal handler or ignored the
1656 signal and stepped over the instruction successfully --- both cases
1657 result in a simple SIGTRAP. In the first case we mustn't do a
1658 fixup, and in the second case we must --- but we can't tell which.
1659 Comments in the code for 'random signals' in handle_inferior_event
1660 explain how we handle this case instead.
1661
1662 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1663 stepped now; 0 if displaced stepping this thread got queued; or -1
1664 if this instruction can't be displaced stepped. */
1665
237fc4c9 1666static int
00431a78 1667displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1668{
00431a78 1669 regcache *regcache = get_thread_regcache (tp);
ac7936df 1670 struct gdbarch *gdbarch = regcache->arch ();
8b86c959 1671 const address_space *aspace = regcache->aspace ();
237fc4c9
PA
1672 CORE_ADDR original, copy;
1673 ULONGEST len;
9e529e1d 1674 int status;
237fc4c9
PA
1675
1676 /* We should never reach this function if the architecture does not
1677 support displaced stepping. */
9822cb57 1678 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1679
c2829269
PA
1680 /* Nor if the thread isn't meant to step over a breakpoint. */
1681 gdb_assert (tp->control.trap_expected);
1682
c1e36e3e
PA
1683 /* Disable range stepping while executing in the scratch pad. We
1684 want a single-step even if executing the displaced instruction in
1685 the scratch buffer lands within the stepping range (e.g., a
1686 jump/branch). */
1687 tp->control.may_range_step = 0;
1688
fc1cf338
PA
1689 /* We have to displaced step one thread at a time, as we only have
1690 access to a single scratch space per inferior. */
237fc4c9 1691
d20172fc
SM
1692 displaced_step_inferior_state *displaced
1693 = get_displaced_stepping_state (tp->inf);
fc1cf338 1694
00431a78 1695 if (displaced->step_thread != nullptr)
237fc4c9
PA
1696 {
1697 /* Already waiting for a displaced step to finish. Defer this
1698 request and place it in the queue. */
237fc4c9
PA
1699
1700 if (debug_displaced)
1701 fprintf_unfiltered (gdb_stdlog,
c2829269 1702 "displaced: deferring step of %s\n",
a068643d 1703 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1704
c2829269 1705 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1706 return 0;
1707 }
1708 else
1709 {
1710 if (debug_displaced)
1711 fprintf_unfiltered (gdb_stdlog,
1712 "displaced: stepping %s now\n",
a068643d 1713 target_pid_to_str (tp->ptid).c_str ());
237fc4c9
PA
1714 }
1715
d8d83535 1716 displaced_step_reset (displaced);
237fc4c9 1717
00431a78
PA
1718 scoped_restore_current_thread restore_thread;
1719
1720 switch_to_thread (tp);
ad53cd71 1721
515630c5 1722 original = regcache_read_pc (regcache);
237fc4c9
PA
1723
1724 copy = gdbarch_displaced_step_location (gdbarch);
1725 len = gdbarch_max_insn_length (gdbarch);
1726
d35ae833
PA
1727 if (breakpoint_in_range_p (aspace, copy, len))
1728 {
1729 /* There's a breakpoint set in the scratch pad location range
1730 (which is usually around the entry point). We'd either
1731 install it before resuming, which would overwrite/corrupt the
1732 scratch pad, or if it was already inserted, this displaced
1733 step would overwrite it. The latter is OK in the sense that
1734 we already assume that no thread is going to execute the code
1735 in the scratch pad range (after initial startup) anyway, but
1736 the former is unacceptable. Simply punt and fall back to
1737 stepping over this breakpoint in-line. */
1738 if (debug_displaced)
1739 {
1740 fprintf_unfiltered (gdb_stdlog,
1741 "displaced: breakpoint set in scratch pad. "
1742 "Stepping over breakpoint in-line instead.\n");
1743 }
1744
d35ae833
PA
1745 return -1;
1746 }
1747
237fc4c9 1748 /* Save the original contents of the copy area. */
d20172fc
SM
1749 displaced->step_saved_copy.resize (len);
1750 status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
9e529e1d
JK
1751 if (status != 0)
1752 throw_error (MEMORY_ERROR,
1753 _("Error accessing memory address %s (%s) for "
1754 "displaced-stepping scratch space."),
1755 paddress (gdbarch, copy), safe_strerror (status));
237fc4c9
PA
1756 if (debug_displaced)
1757 {
5af949e3
UW
1758 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1759 paddress (gdbarch, copy));
fc1cf338 1760 displaced_step_dump_bytes (gdb_stdlog,
d20172fc 1761 displaced->step_saved_copy.data (),
fc1cf338 1762 len);
237fc4c9
PA
1763 };
1764
e8217e61
SM
1765 displaced->step_closure
1766 = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);
1767 if (displaced->step_closure == NULL)
7f03bd92
PA
1768 {
1769 /* The architecture doesn't know how to, or doesn't want to, displaced
1770 step this instruction or instruction sequence. Fall back to
1771 stepping over the breakpoint in-line. */
7f03bd92
PA
1772 return -1;
1773 }
237fc4c9 1774
9f5a595d
UW
1775 /* Save the information we need to fix things up if the step
1776 succeeds. */
00431a78 1777 displaced->step_thread = tp;
fc1cf338 1778 displaced->step_gdbarch = gdbarch;
fc1cf338
PA
1779 displaced->step_original = original;
1780 displaced->step_copy = copy;
9f5a595d 1781
9799571e 1782 {
d8d83535 1783 displaced_step_reset_cleanup cleanup (displaced);
237fc4c9 1784
9799571e
TT
1785 /* Resume execution at the copy. */
1786 regcache_write_pc (regcache, copy);
237fc4c9 1787
9799571e
TT
1788 cleanup.release ();
1789 }
ad53cd71 1790
237fc4c9 1791 if (debug_displaced)
5af949e3
UW
1792 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1793 paddress (gdbarch, copy));
237fc4c9 1794
237fc4c9
PA
1795 return 1;
1796}
1797
3fc8eb30
PA
1798/* Wrapper for displaced_step_prepare_throw that disables further
1799 attempts at displaced stepping if we get a memory error. */
1800
1801static int
00431a78 1802displaced_step_prepare (thread_info *thread)
3fc8eb30
PA
1803{
1804 int prepared = -1;
1805
a70b8144 1806 try
3fc8eb30 1807 {
00431a78 1808 prepared = displaced_step_prepare_throw (thread);
3fc8eb30 1809 }
230d2906 1810 catch (const gdb_exception_error &ex)
3fc8eb30
PA
1811 {
1812 struct displaced_step_inferior_state *displaced_state;
1813
16b41842
PA
1814 if (ex.error != MEMORY_ERROR
1815 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1816 throw;
3fc8eb30
PA
1817
1818 if (debug_infrun)
1819 {
1820 fprintf_unfiltered (gdb_stdlog,
1821 "infrun: disabling displaced stepping: %s\n",
3d6e9d23 1822 ex.what ());
3fc8eb30
PA
1823 }
1824
1825 /* Be verbose if "set displaced-stepping" is "on", silent if
1826 "auto". */
1827 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1828 {
fd7dcb94 1829 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1830 ex.what ());
3fc8eb30
PA
1831 }
1832
1833 /* Disable further displaced stepping attempts. */
1834 displaced_state
00431a78 1835 = get_displaced_stepping_state (thread->inf);
3fc8eb30
PA
1836 displaced_state->failed_before = 1;
1837 }
3fc8eb30
PA
1838
1839 return prepared;
1840}
1841
237fc4c9 1842static void
3e43a32a
MS
1843write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1844 const gdb_byte *myaddr, int len)
237fc4c9 1845{
2989a365 1846 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
abbb1732 1847
237fc4c9
PA
1848 inferior_ptid = ptid;
1849 write_memory (memaddr, myaddr, len);
237fc4c9
PA
1850}
1851
e2d96639
YQ
1852/* Restore the contents of the copy area for thread PTID. */
1853
1854static void
1855displaced_step_restore (struct displaced_step_inferior_state *displaced,
1856 ptid_t ptid)
1857{
1858 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1859
1860 write_memory_ptid (ptid, displaced->step_copy,
d20172fc 1861 displaced->step_saved_copy.data (), len);
e2d96639
YQ
1862 if (debug_displaced)
1863 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
a068643d 1864 target_pid_to_str (ptid).c_str (),
e2d96639
YQ
1865 paddress (displaced->step_gdbarch,
1866 displaced->step_copy));
1867}
1868
372316f1
PA
1869/* If we displaced stepped an instruction successfully, adjust
1870 registers and memory to yield the same effect the instruction would
1871 have had if we had executed it at its original address, and return
1872 1. If the instruction didn't complete, relocate the PC and return
1873 -1. If the thread wasn't displaced stepping, return 0. */
1874
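/* As a small worked example with made-up addresses: if the original
   instruction was at step_original == 0x400080, the scratch copy at
   step_copy == 0x1000, and the thread stopped with PC == 0x1004 partway
   through the copy, the "relocate the PC" case below rewrites the PC to
   0x400080 + (0x1004 - 0x1000) == 0x400084.  */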
1875static int
00431a78 1876displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1877{
fc1cf338 1878 struct displaced_step_inferior_state *displaced
00431a78 1879 = get_displaced_stepping_state (event_thread->inf);
372316f1 1880 int ret;
fc1cf338 1881
00431a78
PA
1882 /* Was this event for the thread we displaced? */
1883 if (displaced->step_thread != event_thread)
372316f1 1884 return 0;
237fc4c9 1885
d8d83535 1886 displaced_step_reset_cleanup cleanup (displaced);
237fc4c9 1887
00431a78 1888 displaced_step_restore (displaced, displaced->step_thread->ptid);
237fc4c9 1889
cb71640d
PA
1890 /* Fixup may need to read memory/registers. Switch to the thread
1891 that we're fixing up. Also, target_stopped_by_watchpoint checks
1892 the current thread. */
00431a78 1893 switch_to_thread (event_thread);
cb71640d 1894
237fc4c9 1895 /* Did the instruction complete successfully? */
cb71640d
PA
1896 if (signal == GDB_SIGNAL_TRAP
1897 && !(target_stopped_by_watchpoint ()
1898 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1899 || target_have_steppable_watchpoint)))
237fc4c9
PA
1900 {
1901 /* Fix up the resulting state. */
fc1cf338 1902 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
d8d83535 1903 displaced->step_closure.get (),
fc1cf338
PA
1904 displaced->step_original,
1905 displaced->step_copy,
00431a78 1906 get_thread_regcache (displaced->step_thread));
372316f1 1907 ret = 1;
237fc4c9
PA
1908 }
1909 else
1910 {
1911 /* Since the instruction didn't complete, all we can do is
1912 relocate the PC. */
00431a78 1913 struct regcache *regcache = get_thread_regcache (event_thread);
515630c5 1914 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 1915
fc1cf338 1916 pc = displaced->step_original + (pc - displaced->step_copy);
515630c5 1917 regcache_write_pc (regcache, pc);
372316f1 1918 ret = -1;
237fc4c9
PA
1919 }
1920
372316f1 1921 return ret;
c2829269 1922}
1c5cfe86 1923
4d9d9d04
PA
1924/* Data to be passed around while handling an event. This data is
1925 discarded between events. */
1926struct execution_control_state
1927{
5b6d1e4f 1928 process_stratum_target *target;
4d9d9d04
PA
1929 ptid_t ptid;
1930 /* The thread that got the event, if this was a thread event; NULL
1931 otherwise. */
1932 struct thread_info *event_thread;
1933
1934 struct target_waitstatus ws;
1935 int stop_func_filled_in;
1936 CORE_ADDR stop_func_start;
1937 CORE_ADDR stop_func_end;
1938 const char *stop_func_name;
1939 int wait_some_more;
1940
1941 /* True if the event thread hit the single-step breakpoint of
1942 another thread. Thus the event doesn't cause a stop, the thread
1943 needs to be single-stepped past the single-step breakpoint before
1944 we can switch back to the original stepping thread. */
1945 int hit_singlestep_breakpoint;
1946};
1947
1948/* Clear ECS and set it to point at TP. */
c2829269
PA
1949
1950static void
4d9d9d04
PA
1951reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1952{
1953 memset (ecs, 0, sizeof (*ecs));
1954 ecs->event_thread = tp;
1955 ecs->ptid = tp->ptid;
1956}
1957
1958static void keep_going_pass_signal (struct execution_control_state *ecs);
1959static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 1960static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1961static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1962
1963/* Are there any pending step-over requests? If so, run all we can
1964 now and return true. Otherwise, return false. */
1965
1966static int
c2829269
PA
1967start_step_over (void)
1968{
1969 struct thread_info *tp, *next;
1970
372316f1
PA
1971 /* Don't start a new step-over if we already have an in-line
1972 step-over operation ongoing. */
1973 if (step_over_info_valid_p ())
1974 return 0;
1975
c2829269 1976 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 1977 {
4d9d9d04
PA
1978 struct execution_control_state ecss;
1979 struct execution_control_state *ecs = &ecss;
8d297bbf 1980 step_over_what step_what;
372316f1 1981 int must_be_in_line;
c2829269 1982
c65d6b55
PA
1983 gdb_assert (!tp->stop_requested);
1984
c2829269 1985 next = thread_step_over_chain_next (tp);
237fc4c9 1986
c2829269
PA
1987 /* If this inferior already has a displaced step in process,
1988 don't start a new one. */
00431a78 1989 if (displaced_step_in_progress (tp->inf))
c2829269
PA
1990 continue;
1991
372316f1
PA
1992 step_what = thread_still_needs_step_over (tp);
1993 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1994 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1995 && !use_displaced_stepping (tp)));
372316f1
PA
1996
1997 /* We currently stop all threads of all processes to step-over
1998 in-line. If we need to start a new in-line step-over, let
1999 any pending displaced steps finish first. */
2000 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
2001 return 0;
2002
c2829269
PA
2003 thread_step_over_chain_remove (tp);
2004
2005 if (step_over_queue_head == NULL)
2006 {
2007 if (debug_infrun)
2008 fprintf_unfiltered (gdb_stdlog,
2009 "infrun: step-over queue now empty\n");
2010 }
2011
372316f1
PA
2012 if (tp->control.trap_expected
2013 || tp->resumed
2014 || tp->executing)
ad53cd71 2015 {
4d9d9d04
PA
2016 internal_error (__FILE__, __LINE__,
2017 "[%s] has inconsistent state: "
372316f1 2018 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 2019 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 2020 tp->control.trap_expected,
372316f1 2021 tp->resumed,
4d9d9d04 2022 tp->executing);
ad53cd71 2023 }
1c5cfe86 2024
4d9d9d04
PA
2025 if (debug_infrun)
2026 fprintf_unfiltered (gdb_stdlog,
2027 "infrun: resuming [%s] for step-over\n",
a068643d 2028 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
2029
2030 /* keep_going_pass_signal skips the step-over if the breakpoint
2031 is no longer inserted. In all-stop, we want to keep looking
2032 for a thread that needs a step-over instead of resuming TP,
2033 because we wouldn't be able to resume anything else until the
2034 target stops again. In non-stop, the resume always resumes
2035 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2036 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2037 continue;
8550d3b3 2038
00431a78 2039 switch_to_thread (tp);
4d9d9d04
PA
2040 reset_ecs (ecs, tp);
2041 keep_going_pass_signal (ecs);
1c5cfe86 2042
4d9d9d04
PA
2043 if (!ecs->wait_some_more)
2044 error (_("Command aborted."));
1c5cfe86 2045
372316f1
PA
2046 gdb_assert (tp->resumed);
2047
2048 /* If we started a new in-line step-over, we're done. */
2049 if (step_over_info_valid_p ())
2050 {
2051 gdb_assert (tp->control.trap_expected);
2052 return 1;
2053 }
2054
fbea99ea 2055 if (!target_is_non_stop_p ())
4d9d9d04
PA
2056 {
2057 /* On all-stop, shouldn't have resumed unless we needed a
2058 step over. */
2059 gdb_assert (tp->control.trap_expected
2060 || tp->step_after_step_resume_breakpoint);
2061
2062 /* With remote targets (at least), in all-stop, we can't
2063 issue any further remote commands until the program stops
2064 again. */
2065 return 1;
1c5cfe86 2066 }
c2829269 2067
4d9d9d04
PA
2068 /* Either the thread no longer needed a step-over, or a new
2069 displaced stepping sequence started. Even in the latter
2070 case, continue looking. Maybe we can also start another
2071 displaced step on a thread of other process. */
237fc4c9 2072 }
4d9d9d04
PA
2073
2074 return 0;
237fc4c9
PA
2075}
2076
5231c1fd
PA
2077/* Update global variables holding ptids to hold NEW_PTID if they were
2078 holding OLD_PTID. */
2079static void
2080infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2081{
d7e15655 2082 if (inferior_ptid == old_ptid)
5231c1fd 2083 inferior_ptid = new_ptid;
5231c1fd
PA
2084}
2085
237fc4c9 2086\f
c906108c 2087
53904c9e
AC
2088static const char schedlock_off[] = "off";
2089static const char schedlock_on[] = "on";
2090static const char schedlock_step[] = "step";
f2665db5 2091static const char schedlock_replay[] = "replay";
40478521 2092static const char *const scheduler_enums[] = {
ef346e04
AC
2093 schedlock_off,
2094 schedlock_on,
2095 schedlock_step,
f2665db5 2096 schedlock_replay,
ef346e04
AC
2097 NULL
2098};
f2665db5 2099static const char *scheduler_mode = schedlock_replay;
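/* These are the values of the user-level "set scheduler-locking
   off|on|step|replay" setting; for example:

     (gdb) set scheduler-locking step
     (gdb) show scheduler-locking
     Mode for locking scheduler during execution is "step".  */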
920d2a44
AC
2100static void
2101show_scheduler_mode (struct ui_file *file, int from_tty,
2102 struct cmd_list_element *c, const char *value)
2103{
3e43a32a
MS
2104 fprintf_filtered (file,
2105 _("Mode for locking scheduler "
2106 "during execution is \"%s\".\n"),
920d2a44
AC
2107 value);
2108}
c906108c
SS
2109
2110static void
eb4c3f4a 2111set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2112{
eefe576e
AC
2113 if (!target_can_lock_scheduler)
2114 {
2115 scheduler_mode = schedlock_off;
2116 error (_("Target '%s' cannot support this command."), target_shortname);
2117 }
c906108c
SS
2118}
2119
d4db2f36
PA
2120/* True if execution commands resume all threads of all processes by
2121 default; otherwise, resume only threads of the current inferior
2122 process. */
491144b5 2123bool sched_multi = false;
d4db2f36 2124
2facfe5c
DD
2125/* Try to set up for software single-stepping over the specified location.
2126 Return 1 if target_resume() should use hardware single step.
2127
2128 GDBARCH the current gdbarch.
2129 PC the location to step over. */
2130
2131static int
2132maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2133{
2134 int hw_step = 1;
2135
f02253f1 2136 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2137 && gdbarch_software_single_step_p (gdbarch))
2138 hw_step = !insert_single_step_breakpoints (gdbarch);
2139
2facfe5c
DD
2140 return hw_step;
2141}
c906108c 2142
f3263aa4
PA
2143/* See infrun.h. */
2144
09cee04b
PA
2145ptid_t
2146user_visible_resume_ptid (int step)
2147{
f3263aa4 2148 ptid_t resume_ptid;
09cee04b 2149
09cee04b
PA
2150 if (non_stop)
2151 {
2152 /* With non-stop mode on, threads are always handled
2153 individually. */
2154 resume_ptid = inferior_ptid;
2155 }
2156 else if ((scheduler_mode == schedlock_on)
03d46957 2157 || (scheduler_mode == schedlock_step && step))
09cee04b 2158 {
f3263aa4
PA
2159 /* User-settable 'scheduler' mode requires solo thread
2160 resume. */
09cee04b
PA
2161 resume_ptid = inferior_ptid;
2162 }
f2665db5
MM
2163 else if ((scheduler_mode == schedlock_replay)
2164 && target_record_will_replay (minus_one_ptid, execution_direction))
2165 {
2166 /* User-settable 'scheduler' mode requires solo thread resume in replay
2167 mode. */
2168 resume_ptid = inferior_ptid;
2169 }
f3263aa4
PA
2170 else if (!sched_multi && target_supports_multi_process ())
2171 {
2172 /* Resume all threads of the current process (and none of other
2173 processes). */
e99b03dc 2174 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2175 }
2176 else
2177 {
2178 /* Resume all threads of all processes. */
2179 resume_ptid = RESUME_ALL;
2180 }
09cee04b
PA
2181
2182 return resume_ptid;
2183}
2184
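/* For example, with "set non-stop off", "set scheduler-locking off", no
   replay in progress and a target that supports multi-process, the
   function above returns ptid_t (inferior_ptid.pid ()), i.e. "resume
   every thread of the current process"; with "set scheduler-locking on"
   it returns inferior_ptid, i.e. resume only the current thread.  */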
5b6d1e4f
PA
2185/* See infrun.h. */
2186
2187process_stratum_target *
2188user_visible_resume_target (ptid_t resume_ptid)
2189{
2190 return (resume_ptid == minus_one_ptid && sched_multi
2191 ? NULL
2192 : current_inferior ()->process_target ());
2193}
2194
fbea99ea
PA
2195/* Return a ptid representing the set of threads that we will resume,
2196 from the perspective of the target, assuming run control handling
2197 does not require leaving some threads stopped (e.g., stepping past
2198 a breakpoint). USER_STEP indicates whether we're about to start the
2199 target for a stepping command. */
2200
2201static ptid_t
2202internal_resume_ptid (int user_step)
2203{
2204 /* In non-stop, we always control threads individually. Note that
2205 the target may always work in non-stop mode even with "set
2206 non-stop off", in which case user_visible_resume_ptid could
2207 return a wildcard ptid. */
2208 if (target_is_non_stop_p ())
2209 return inferior_ptid;
2210 else
2211 return user_visible_resume_ptid (user_step);
2212}
2213
64ce06e4
PA
2214/* Wrapper for target_resume, that handles infrun-specific
2215 bookkeeping. */
2216
2217static void
2218do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2219{
2220 struct thread_info *tp = inferior_thread ();
2221
c65d6b55
PA
2222 gdb_assert (!tp->stop_requested);
2223
64ce06e4 2224 /* Install inferior's terminal modes. */
223ffa71 2225 target_terminal::inferior ();
64ce06e4
PA
2226
2227 /* Avoid confusing the next resume, if the next stop/resume
2228 happens to apply to another thread. */
2229 tp->suspend.stop_signal = GDB_SIGNAL_0;
2230
8f572e5c
PA
2231 /* Advise target which signals may be handled silently.
2232
2233 If we have removed breakpoints because we are stepping over one
2234 in-line (in any thread), we need to receive all signals to avoid
2235 accidentally skipping a breakpoint during execution of a signal
2236 handler.
2237
2238 Likewise if we're displaced stepping, otherwise a trap for a
2239 breakpoint in a signal handler might be confused with the
2240 displaced step finishing. We don't make the displaced_step_fixup
2241 step distinguish the cases instead, because:
2242
2243 - a backtrace while stopped in the signal handler would show the
2244 scratch pad as frame older than the signal handler, instead of
2245 the real mainline code.
2246
2247 - when the thread is later resumed, the signal handler would
2248 return to the scratch pad area, which would no longer be
2249 valid. */
2250 if (step_over_info_valid_p ()
00431a78 2251 || displaced_step_in_progress (tp->inf))
adc6a863 2252 target_pass_signals ({});
64ce06e4 2253 else
adc6a863 2254 target_pass_signals (signal_pass);
64ce06e4
PA
2255
2256 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2257
2258 target_commit_resume ();
5b6d1e4f
PA
2259
2260 if (target_can_async_p ())
2261 target_async (1);
64ce06e4
PA
2262}
2263
d930703d 2264/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2265 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2266 call 'resume', which handles exceptions. */
c906108c 2267
71d378ae
PA
2268static void
2269resume_1 (enum gdb_signal sig)
c906108c 2270{
515630c5 2271 struct regcache *regcache = get_current_regcache ();
ac7936df 2272 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2273 struct thread_info *tp = inferior_thread ();
515630c5 2274 CORE_ADDR pc = regcache_read_pc (regcache);
8b86c959 2275 const address_space *aspace = regcache->aspace ();
b0f16a3e 2276 ptid_t resume_ptid;
856e7dd6
PA
2277 /* This represents the user's step vs continue request. When
2278 deciding whether "set scheduler-locking step" applies, it's the
2279 user's intention that counts. */
2280 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2281 /* This represents what we'll actually request the target to do.
2282 This can decay from a step to a continue, if e.g., we need to
2283 implement single-stepping with breakpoints (software
2284 single-step). */
6b403daa 2285 int step;
c7e8a53c 2286
c65d6b55 2287 gdb_assert (!tp->stop_requested);
c2829269
PA
2288 gdb_assert (!thread_is_in_step_over_chain (tp));
2289
372316f1
PA
2290 if (tp->suspend.waitstatus_pending_p)
2291 {
2292 if (debug_infrun)
2293 {
23fdd69e
SM
2294 std::string statstr
2295 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2296
372316f1 2297 fprintf_unfiltered (gdb_stdlog,
23fdd69e
SM
2298 "infrun: resume: thread %s has pending wait "
2299 "status %s (currently_stepping=%d).\n",
a068643d
TT
2300 target_pid_to_str (tp->ptid).c_str (),
2301 statstr.c_str (),
372316f1 2302 currently_stepping (tp));
372316f1
PA
2303 }
2304
5b6d1e4f 2305 tp->inf->process_target ()->threads_executing = true;
719546c4 2306 tp->resumed = true;
372316f1
PA
2307
2308 /* FIXME: What should we do if we are supposed to resume this
2309 thread with a signal? Maybe we should maintain a queue of
2310 pending signals to deliver. */
2311 if (sig != GDB_SIGNAL_0)
2312 {
fd7dcb94 2313 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2314 gdb_signal_to_name (sig),
2315 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2316 }
2317
2318 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2319
2320 if (target_can_async_p ())
9516f85a
AB
2321 {
2322 target_async (1);
2323 /* Tell the event loop we have an event to process. */
2324 mark_async_event_handler (infrun_async_inferior_event_token);
2325 }
372316f1
PA
2326 return;
2327 }
2328
2329 tp->stepped_breakpoint = 0;
2330
6b403daa
PA
2331 /* Depends on stepped_breakpoint. */
2332 step = currently_stepping (tp);
2333
74609e71
YQ
2334 if (current_inferior ()->waiting_for_vfork_done)
2335 {
48f9886d
PA
2336 /* Don't try to single-step a vfork parent that is waiting for
2337 the child to get out of the shared memory region (by exec'ing
2338 or exiting). This is particularly important on software
2339 single-step archs, as the child process would trip on the
2340 software single step breakpoint inserted for the parent
2341 process. Since the parent will not actually execute any
2342 instruction until the child is out of the shared region (such
2343 are vfork's semantics), it is safe to simply continue it.
2344 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2345 the parent, and tell it to `keep_going', which automatically
2346 re-sets it stepping. */
74609e71
YQ
2347 if (debug_infrun)
2348 fprintf_unfiltered (gdb_stdlog,
2349 "infrun: resume : clear step\n");
a09dd441 2350 step = 0;
74609e71
YQ
2351 }
2352
527159b7 2353 if (debug_infrun)
237fc4c9 2354 fprintf_unfiltered (gdb_stdlog,
c9737c08 2355 "infrun: resume (step=%d, signal=%s), "
0d9a9a5f 2356 "trap_expected=%d, current thread [%s] at %s\n",
c9737c08
PA
2357 step, gdb_signal_to_symbol_string (sig),
2358 tp->control.trap_expected,
a068643d 2359 target_pid_to_str (inferior_ptid).c_str (),
0d9a9a5f 2360 paddress (gdbarch, pc));
c906108c 2361
c2c6d25f
JM
2362 /* Normally, by the time we reach `resume', the breakpoints are either
2363 removed or inserted, as appropriate. The exception is if we're sitting
2364 at a permanent breakpoint; we need to step over it, but permanent
2365 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2366 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2367 {
af48d08f
PA
2368 if (sig != GDB_SIGNAL_0)
2369 {
2370 /* We have a signal to pass to the inferior. The resume
2371 may, or may not take us to the signal handler. If this
2372 is a step, we'll need to stop in the signal handler, if
2373 there's one, (if the target supports stepping into
2374 handlers), or in the next mainline instruction, if
2375 there's no handler. If this is a continue, we need to be
2376 sure to run the handler with all breakpoints inserted.
2377 In all cases, set a breakpoint at the current address
2378 (where the handler returns to), and once that breakpoint
2379 is hit, resume skipping the permanent breakpoint. If
2380 that breakpoint isn't hit, then we've stepped into the
2381 signal handler (or hit some other event). We'll delete
2382 the step-resume breakpoint then. */
2383
2384 if (debug_infrun)
2385 fprintf_unfiltered (gdb_stdlog,
2386 "infrun: resume: skipping permanent breakpoint, "
2387 "deliver signal first\n");
2388
2389 clear_step_over_info ();
2390 tp->control.trap_expected = 0;
2391
2392 if (tp->control.step_resume_breakpoint == NULL)
2393 {
2394 /* Set a "high-priority" step-resume, as we don't want
2395 user breakpoints at PC to trigger (again) when this
2396 hits. */
2397 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2398 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2399
2400 tp->step_after_step_resume_breakpoint = step;
2401 }
2402
2403 insert_breakpoints ();
2404 }
2405 else
2406 {
2407 /* There's no signal to pass, we can go ahead and skip the
2408 permanent breakpoint manually. */
2409 if (debug_infrun)
2410 fprintf_unfiltered (gdb_stdlog,
2411 "infrun: resume: skipping permanent breakpoint\n");
2412 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2413 /* Update pc to reflect the new address from which we will
2414 execute instructions. */
2415 pc = regcache_read_pc (regcache);
2416
2417 if (step)
2418 {
2419 /* We've already advanced the PC, so the stepping part
2420 is done. Now we need to arrange for a trap to be
2421 reported to handle_inferior_event. Set a breakpoint
2422 at the current PC, and run to it. Don't update
2423 prev_pc, because if we end up in
44a1ee51
PA
2424 switch_back_to_stepped_thread, we want the "expected
2425 thread advanced also" branch to be taken. IOW, we
2426 don't want this thread to step further from PC
af48d08f 2427 (overstep). */
1ac806b8 2428 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2429 insert_single_step_breakpoint (gdbarch, aspace, pc);
2430 insert_breakpoints ();
2431
fbea99ea 2432 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2433 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
719546c4 2434 tp->resumed = true;
af48d08f
PA
2435 return;
2436 }
2437 }
6d350bb5 2438 }
c2c6d25f 2439
c1e36e3e
PA
2440 /* If we have a breakpoint to step over, make sure to do a single
2441 step only. Same if we have software watchpoints. */
2442 if (tp->control.trap_expected || bpstat_should_step ())
2443 tp->control.may_range_step = 0;
2444
7da6a5b9
LM
2445 /* If displaced stepping is enabled, step over breakpoints by executing a
2446 copy of the instruction at a different address.
237fc4c9
PA
2447
2448 We can't use displaced stepping when we have a signal to deliver;
2449 the comments for displaced_step_prepare explain why. The
2450 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2451 signals' explain what we do instead.
2452
2453 We can't use displaced stepping while we are waiting for the vfork_done
2454 event; displaced stepping would break the vfork child in the same way a
2455 software single-step breakpoint would. */
3fc8eb30
PA
2456 if (tp->control.trap_expected
2457 && use_displaced_stepping (tp)
cb71640d 2458 && !step_over_info_valid_p ()
a493e3e2 2459 && sig == GDB_SIGNAL_0
74609e71 2460 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2461 {
00431a78 2462 int prepared = displaced_step_prepare (tp);
fc1cf338 2463
3fc8eb30 2464 if (prepared == 0)
d56b7306 2465 {
4d9d9d04
PA
2466 if (debug_infrun)
2467 fprintf_unfiltered (gdb_stdlog,
2468 "Got placed in step-over queue\n");
2469
2470 tp->control.trap_expected = 0;
d56b7306
VP
2471 return;
2472 }
3fc8eb30
PA
2473 else if (prepared < 0)
2474 {
2475 /* Fall back to stepping over the breakpoint in-line. */
2476
2477 if (target_is_non_stop_p ())
2478 stop_all_threads ();
2479
a01bda52 2480 set_step_over_info (regcache->aspace (),
21edc42f 2481 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2482
2483 step = maybe_software_singlestep (gdbarch, pc);
2484
2485 insert_breakpoints ();
2486 }
2487 else if (prepared > 0)
2488 {
2489 struct displaced_step_inferior_state *displaced;
99e40580 2490
3fc8eb30
PA
2491 /* Update pc to reflect the new address from which we will
2492 execute instructions due to displaced stepping. */
00431a78 2493 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2494
00431a78 2495 displaced = get_displaced_stepping_state (tp->inf);
d8d83535
SM
2496 step = gdbarch_displaced_step_hw_singlestep
2497 (gdbarch, displaced->step_closure.get ());
3fc8eb30 2498 }
237fc4c9
PA
2499 }
2500
2facfe5c 2501 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2502 else if (step)
2facfe5c 2503 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2504
30852783
UW
2505 /* Currently, our software single-step implementation leads to different
2506 results than hardware single-stepping in one situation: when stepping
2507 into delivering a signal which has an associated signal handler,
2508 hardware single-step will stop at the first instruction of the handler,
2509 while software single-step will simply skip execution of the handler.
2510
2511 For now, this difference in behavior is accepted since there is no
2512 easy way to actually implement single-stepping into a signal handler
2513 without kernel support.
2514
2515 However, there is one scenario where this difference leads to follow-on
2516 problems: if we're stepping off a breakpoint by removing all breakpoints
2517 and then single-stepping. In this case, the software single-step
2518 behavior means that even if there is a *breakpoint* in the signal
2519 handler, GDB still would not stop.
2520
2521 Fortunately, we can at least fix this particular issue. We detect
2522 here the case where we are about to deliver a signal while software
2523 single-stepping with breakpoints removed. In this situation, we
2524 revert the decisions to remove all breakpoints and insert single-
2525 step breakpoints, and instead we install a step-resume breakpoint
2526 at the current address, deliver the signal without stepping, and
2527 once we arrive back at the step-resume breakpoint, actually step
2528 over the breakpoint we originally wanted to step over. */
34b7e8a6 2529 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2530 && sig != GDB_SIGNAL_0
2531 && step_over_info_valid_p ())
30852783
UW
2532 {
2533 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2534 immediately after a handler returns, we might already have
30852783
UW
2535 a step-resume breakpoint set on the earlier handler. We cannot
2536 set another step-resume breakpoint; just continue on until the
2537 original breakpoint is hit. */
2538 if (tp->control.step_resume_breakpoint == NULL)
2539 {
2c03e5be 2540 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2541 tp->step_after_step_resume_breakpoint = 1;
2542 }
2543
34b7e8a6 2544 delete_single_step_breakpoints (tp);
30852783 2545
31e77af2 2546 clear_step_over_info ();
30852783 2547 tp->control.trap_expected = 0;
31e77af2
PA
2548
2549 insert_breakpoints ();
30852783
UW
2550 }
2551
b0f16a3e
SM
2552 /* If STEP is set, it's a request to use hardware stepping
2553 facilities. But in that case, we should never
2554 use a single-step breakpoint. */
34b7e8a6 2555 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2556
fbea99ea 2557 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2558 if (tp->control.trap_expected)
b0f16a3e
SM
2559 {
2560 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2561 hit, either by single-stepping the thread with the breakpoint
2562 removed, or by displaced stepping, with the breakpoint inserted.
2563 In the former case, we need to single-step only this thread,
2564 and keep others stopped, as they can miss this breakpoint if
2565 allowed to run. That's not really a problem for displaced
2566 stepping, but, we still keep other threads stopped, in case
2567 another thread is also stopped for a breakpoint waiting for
2568 its turn in the displaced stepping queue. */
b0f16a3e
SM
2569 resume_ptid = inferior_ptid;
2570 }
fbea99ea
PA
2571 else
2572 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2573
7f5ef605
PA
2574 if (execution_direction != EXEC_REVERSE
2575 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2576 {
372316f1
PA
2577 /* There are two cases where we currently need to step a
2578 breakpoint instruction when we have a signal to deliver:
2579
2580 - See handle_signal_stop where we handle random signals that
2581 could take us out of the stepping range. Normally, in
2582 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2583 signal handler with a breakpoint at PC, but there are cases
2584 where we should _always_ single-step, even if we have a
2585 step-resume breakpoint, like when a software watchpoint is
2586 set. Assuming single-stepping and delivering a signal at the
2587 same time would take us to the signal handler, then we could
2588 have removed the breakpoint at PC to step over it. However,
2589 some hardware step targets (like e.g., Mac OS) can't step
2590 into signal handlers, and for those, we need to leave the
2591 breakpoint at PC inserted, as otherwise if the handler
2592 recurses and executes PC again, it'll miss the breakpoint.
2593 So we leave the breakpoint inserted anyway, but we need to
2594 record that we tried to step a breakpoint instruction, so
372316f1
PA
2595 that adjust_pc_after_break doesn't end up confused.
2596
2597 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2598 in one thread after another thread that was stepping had been
2599 momentarily paused for a step-over. When we re-resume the
2600 stepping thread, it may be resumed from that address with a
2601 breakpoint that hasn't trapped yet. Seen with
2602 gdb.threads/non-stop-fair-events.exp, on targets that don't
2603 do displaced stepping. */
2604
2605 if (debug_infrun)
2606 fprintf_unfiltered (gdb_stdlog,
2607 "infrun: resume: [%s] stepped breakpoint\n",
a068643d 2608 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2609
2610 tp->stepped_breakpoint = 1;
2611
b0f16a3e
SM
2612 /* Most targets can step a breakpoint instruction, thus
2613 executing it normally. But if this one cannot, just
2614 continue and we will hit it anyway. */
7f5ef605 2615 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2616 step = 0;
2617 }
ef5cf84e 2618
b0f16a3e 2619 if (debug_displaced
cb71640d 2620 && tp->control.trap_expected
3fc8eb30 2621 && use_displaced_stepping (tp)
cb71640d 2622 && !step_over_info_valid_p ())
b0f16a3e 2623 {
00431a78 2624 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2625 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2626 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2627 gdb_byte buf[4];
2628
2629 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2630 paddress (resume_gdbarch, actual_pc));
2631 read_memory (actual_pc, buf, sizeof (buf));
2632 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2633 }
237fc4c9 2634
b0f16a3e
SM
2635 if (tp->control.may_range_step)
2636 {
2637 /* If we're resuming a thread with the PC out of the step
2638 range, then we're doing some nested/finer run control
2639 operation, like stepping the thread out of the dynamic
2640 linker or the displaced stepping scratch pad. We
2641 shouldn't have allowed a range step then. */
2642 gdb_assert (pc_in_thread_step_range (pc, tp));
2643 }
c1e36e3e 2644
64ce06e4 2645 do_target_resume (resume_ptid, step, sig);
719546c4 2646 tp->resumed = true;
c906108c 2647}
71d378ae
PA
2648
2649/* Resume the inferior. SIG is the signal to give the inferior
2650 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2651 rolls back state on error. */
2652
aff4e175 2653static void
71d378ae
PA
2654resume (gdb_signal sig)
2655{
a70b8144 2656 try
71d378ae
PA
2657 {
2658 resume_1 (sig);
2659 }
230d2906 2660 catch (const gdb_exception &ex)
71d378ae
PA
2661 {
2662 /* If resuming is being aborted for any reason, delete any
2663 single-step breakpoint resume_1 may have created, to avoid
2664 confusing the following resumption, and to avoid leaving
2665 single-step breakpoints perturbing other threads, in case
2666 we're running in non-stop mode. */
2667 if (inferior_ptid != null_ptid)
2668 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2669 throw;
71d378ae 2670 }
71d378ae
PA
2671}
2672
c906108c 2673\f
237fc4c9 2674/* Proceeding. */
c906108c 2675
4c2f2a79
PA
2676/* See infrun.h. */
2677
2678/* Counter that tracks number of user visible stops. This can be used
2679 to tell whether a command has proceeded the inferior past the
2680 current location. This allows e.g., inferior function calls in
2681 breakpoint commands to not interrupt the command list. When the
2682 call finishes successfully, the inferior is standing at the same
2683 breakpoint as if nothing happened (and so we don't call
2684 normal_stop). */
2685static ULONGEST current_stop_id;
2686
2687/* See infrun.h. */
2688
2689ULONGEST
2690get_stop_id (void)
2691{
2692 return current_stop_id;
2693}
2694
2695/* Called when we report a user visible stop. */
2696
2697static void
2698new_stop_id (void)
2699{
2700 current_stop_id++;
2701}
2702
c906108c
SS
2703/* Clear out all variables saying what to do when inferior is continued.
2704 First do this, then set the ones you want, then call `proceed'. */
2705
a7212384
UW
2706static void
2707clear_proceed_status_thread (struct thread_info *tp)
c906108c 2708{
a7212384
UW
2709 if (debug_infrun)
2710 fprintf_unfiltered (gdb_stdlog,
2711 "infrun: clear_proceed_status_thread (%s)\n",
a068643d 2712 target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2713
372316f1
PA
2714 /* If we're starting a new sequence, then the previous finished
2715 single-step is no longer relevant. */
2716 if (tp->suspend.waitstatus_pending_p)
2717 {
2718 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2719 {
2720 if (debug_infrun)
2721 fprintf_unfiltered (gdb_stdlog,
2722 "infrun: clear_proceed_status: pending "
2723 "event of %s was a finished step. "
2724 "Discarding.\n",
a068643d 2725 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2726
2727 tp->suspend.waitstatus_pending_p = 0;
2728 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2729 }
2730 else if (debug_infrun)
2731 {
23fdd69e
SM
2732 std::string statstr
2733 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2734
372316f1
PA
2735 fprintf_unfiltered (gdb_stdlog,
2736 "infrun: clear_proceed_status_thread: thread %s "
2737 "has pending wait status %s "
2738 "(currently_stepping=%d).\n",
a068643d
TT
2739 target_pid_to_str (tp->ptid).c_str (),
2740 statstr.c_str (),
372316f1 2741 currently_stepping (tp));
372316f1
PA
2742 }
2743 }
2744
70509625
PA
2745 /* If this signal should not be seen by the program, give it zero.
2746 Used for debugging signals. */
2747 if (!signal_pass_state (tp->suspend.stop_signal))
2748 tp->suspend.stop_signal = GDB_SIGNAL_0;
2749
46e3ed7f 2750 delete tp->thread_fsm;
243a9253
PA
2751 tp->thread_fsm = NULL;
2752
16c381f0
JK
2753 tp->control.trap_expected = 0;
2754 tp->control.step_range_start = 0;
2755 tp->control.step_range_end = 0;
c1e36e3e 2756 tp->control.may_range_step = 0;
16c381f0
JK
2757 tp->control.step_frame_id = null_frame_id;
2758 tp->control.step_stack_frame_id = null_frame_id;
2759 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2760 tp->control.step_start_function = NULL;
a7212384 2761 tp->stop_requested = 0;
4e1c45ea 2762
16c381f0 2763 tp->control.stop_step = 0;
32400beb 2764
16c381f0 2765 tp->control.proceed_to_finish = 0;
414c69f7 2766
856e7dd6 2767 tp->control.stepping_command = 0;
17b2616c 2768
a7212384 2769 /* Discard any remaining commands or status from previous stop. */
16c381f0 2770 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2771}
32400beb 2772
a7212384 2773void
70509625 2774clear_proceed_status (int step)
a7212384 2775{
f2665db5
MM
2776 /* With scheduler-locking replay, stop replaying other threads if we're
2777 not replaying the user-visible resume ptid.
2778
2779 This is a convenience feature to not require the user to explicitly
2780 stop replaying the other threads. We're assuming that the user's
2781 intent is to resume tracing the recorded process. */
2782 if (!non_stop && scheduler_mode == schedlock_replay
2783 && target_record_is_replaying (minus_one_ptid)
2784 && !target_record_will_replay (user_visible_resume_ptid (step),
2785 execution_direction))
2786 target_record_stop_replaying ();
2787
08036331 2788 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2789 {
08036331 2790 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2791 process_stratum_target *resume_target
2792 = user_visible_resume_target (resume_ptid);
70509625
PA
2793
2794 /* In all-stop mode, delete the per-thread status of all threads
2795 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2796 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2797 clear_proceed_status_thread (tp);
6c95b8df
PA
2798 }
2799
d7e15655 2800 if (inferior_ptid != null_ptid)
a7212384
UW
2801 {
2802 struct inferior *inferior;
2803
2804 if (non_stop)
2805 {
6c95b8df
PA
2806 /* If in non-stop mode, only delete the per-thread status of
2807 the current thread. */
a7212384
UW
2808 clear_proceed_status_thread (inferior_thread ());
2809 }
6c95b8df 2810
d6b48e9c 2811 inferior = current_inferior ();
16c381f0 2812 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2813 }
2814
76727919 2815 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2816}
2817
99619bea
PA
2818/* Returns true if TP is still stopped at a breakpoint that needs
2819 stepping-over in order to make progress. If the breakpoint is gone
2820 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2821
2822static int
6c4cfb24 2823thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2824{
2825 if (tp->stepping_over_breakpoint)
2826 {
00431a78 2827 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2828
a01bda52 2829 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2830 regcache_read_pc (regcache))
2831 == ordinary_breakpoint_here)
99619bea
PA
2832 return 1;
2833
2834 tp->stepping_over_breakpoint = 0;
2835 }
2836
2837 return 0;
2838}
2839
6c4cfb24
PA
2840/* Check whether thread TP still needs to start a step-over in order
2841 to make progress when resumed. Returns a bitwise or of enum
2842 step_over_what bits, indicating what needs to be stepped over. */
2843
8d297bbf 2844static step_over_what
6c4cfb24
PA
2845thread_still_needs_step_over (struct thread_info *tp)
2846{
8d297bbf 2847 step_over_what what = 0;
6c4cfb24
PA
2848
2849 if (thread_still_needs_step_over_bp (tp))
2850 what |= STEP_OVER_BREAKPOINT;
2851
2852 if (tp->stepping_over_watchpoint
2853 && !target_have_steppable_watchpoint)
2854 what |= STEP_OVER_WATCHPOINT;
2855
2856 return what;
2857}
2858
483805cf
PA
2859/* Returns true if scheduler locking applies to thread TP, i.e.,
2860 whether resuming TP should leave all other threads stopped. */
2861
2862static int
856e7dd6 2863schedlock_applies (struct thread_info *tp)
483805cf
PA
2864{
2865 return (scheduler_mode == schedlock_on
2866 || (scheduler_mode == schedlock_step
f2665db5
MM
2867 && tp->control.stepping_command)
2868 || (scheduler_mode == schedlock_replay
2869 && target_record_will_replay (minus_one_ptid,
2870 execution_direction)));
483805cf
PA
2871}
2872
5b6d1e4f
PA
2873/* Calls target_commit_resume on all targets. */
2874
2875static void
2876commit_resume_all_targets ()
2877{
2878 scoped_restore_current_thread restore_thread;
2879
2880 /* Map between process_target and a representative inferior. This
2881 is to avoid committing a resume in the same target more than
2882 once. Resumptions must be idempotent, so this is an
2883 optimization. */
2884 std::unordered_map<process_stratum_target *, inferior *> conn_inf;
2885
2886 for (inferior *inf : all_non_exited_inferiors ())
2887 if (inf->has_execution ())
2888 conn_inf[inf->process_target ()] = inf;
2889
2890 for (const auto &ci : conn_inf)
2891 {
2892 inferior *inf = ci.second;
2893 switch_to_inferior_no_thread (inf);
2894 target_commit_resume ();
2895 }
2896}
2897
2f4fcf00
PA
2898/* Check that all the targets we're about to resume are in non-stop
2899 mode. Ideally, we'd only care whether all targets support
2900 target-async, but we're not there yet. E.g., stop_all_threads
2901 doesn't know how to handle all-stop targets. Also, the remote
2902 protocol in all-stop mode is synchronous, irrespective of
2903 target-async, which means that things like a breakpoint re-set
2904 triggered by one target would try to read memory from all targets
2905 and fail. */
2906
2907static void
2908check_multi_target_resumption (process_stratum_target *resume_target)
2909{
2910 if (!non_stop && resume_target == nullptr)
2911 {
2912 scoped_restore_current_thread restore_thread;
2913
2914 /* This is used to track whether we're resuming more than one
2915 target. */
2916 process_stratum_target *first_connection = nullptr;
2917
2918 /* The first inferior we see with a target that does not work in
2919 always-non-stop mode. */
2920 inferior *first_not_non_stop = nullptr;
2921
2922 for (inferior *inf : all_non_exited_inferiors (resume_target))
2923 {
2924 switch_to_inferior_no_thread (inf);
2925
2926 if (!target_has_execution)
2927 continue;
2928
2929 process_stratum_target *proc_target
2930 = current_inferior ()->process_target();
2931
2932 if (!target_is_non_stop_p ())
2933 first_not_non_stop = inf;
2934
2935 if (first_connection == nullptr)
2936 first_connection = proc_target;
2937 else if (first_connection != proc_target
2938 && first_not_non_stop != nullptr)
2939 {
2940 switch_to_inferior_no_thread (first_not_non_stop);
2941
2942 proc_target = current_inferior ()->process_target();
2943
2944 error (_("Connection %d (%s) does not support "
2945 "multi-target resumption."),
2946 proc_target->connection_number,
2947 make_target_connection_string (proc_target).c_str ());
2948 }
2949 }
2950 }
2951}
2952
c906108c
SS
2953/* Basic routine for continuing the program in various fashions.
2954
2955 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2956 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2957 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2958
2959 You should call clear_proceed_status before calling proceed. */
2960
2961void
64ce06e4 2962proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2963{
e58b0e63
PA
2964 struct regcache *regcache;
2965 struct gdbarch *gdbarch;
e58b0e63 2966 CORE_ADDR pc;
4d9d9d04
PA
2967 struct execution_control_state ecss;
2968 struct execution_control_state *ecs = &ecss;
4d9d9d04 2969 int started;
c906108c 2970
e58b0e63
PA
2971 /* If we're stopped at a fork/vfork, follow the branch set by the
2972 "set follow-fork-mode" command; otherwise, we'll just proceed
2973 resuming the current thread. */
2974 if (!follow_fork ())
2975 {
2976 /* The target for some reason decided not to resume. */
2977 normal_stop ();
f148b27e
PA
2978 if (target_can_async_p ())
2979 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2980 return;
2981 }
2982
2983 /* We'll update this if & when we switch to a new thread. */
2984 previous_inferior_ptid = inferior_ptid;
2985
e58b0e63 2986 regcache = get_current_regcache ();
ac7936df 2987 gdbarch = regcache->arch ();
2988 const address_space *aspace = regcache->aspace ();
2989
e58b0e63 2990 pc = regcache_read_pc (regcache);
08036331 2991 thread_info *cur_thr = inferior_thread ();
e58b0e63 2992
99619bea 2993 /* Fill in with reasonable starting values. */
08036331 2994 init_thread_stepping_state (cur_thr);
99619bea 2995
08036331 2996 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2997
2998 ptid_t resume_ptid
2999 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3000 process_stratum_target *resume_target
3001 = user_visible_resume_target (resume_ptid);
3002
3003 check_multi_target_resumption (resume_target);
3004
2acceee2 3005 if (addr == (CORE_ADDR) -1)
c906108c 3006 {
08036331 3007 if (pc == cur_thr->suspend.stop_pc
af48d08f 3008 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 3009 && execution_direction != EXEC_REVERSE)
3010 /* There is a breakpoint at the address we will resume at,
3011 step one instruction before inserting breakpoints so that
3012 we do not stop right away (and report a second hit at this
3013 breakpoint).
3014
3015 Note, we don't do this in reverse, because we won't
3016 actually be executing the breakpoint insn anyway.
3017 We'll be (un-)executing the previous instruction. */
08036331 3018 cur_thr->stepping_over_breakpoint = 1;
3019 else if (gdbarch_single_step_through_delay_p (gdbarch)
3020 && gdbarch_single_step_through_delay (gdbarch,
3021 get_current_frame ()))
3022 /* We stepped onto an instruction that needs to be stepped
3023 again before re-inserting the breakpoint, do so. */
08036331 3024 cur_thr->stepping_over_breakpoint = 1;
3025 }
3026 else
3027 {
515630c5 3028 regcache_write_pc (regcache, addr);
3029 }
3030
70509625 3031 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 3032 cur_thr->suspend.stop_signal = siggnal;
70509625 3033
3034 /* If an exception is thrown from this point on, make sure to
3035 propagate GDB's knowledge of the executing state to the
3036 frontend/user running state. */
5b6d1e4f 3037 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
3038
3039 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3040 threads (e.g., we might need to set threads stepping over
3041 breakpoints first), from the user/frontend's point of view, all
3042 threads in RESUME_PTID are now running. Unless we're calling an
3043 inferior function, as in that case we pretend the inferior
3044 doesn't run at all. */
08036331 3045 if (!cur_thr->control.in_infcall)
719546c4 3046 set_running (resume_target, resume_ptid, true);
17b2616c 3047
527159b7 3048 if (debug_infrun)
8a9de0e4 3049 fprintf_unfiltered (gdb_stdlog,
64ce06e4 3050 "infrun: proceed (addr=%s, signal=%s)\n",
c9737c08 3051 paddress (gdbarch, addr),
64ce06e4 3052 gdb_signal_to_symbol_string (siggnal));
527159b7 3053
3054 annotate_starting ();
3055
3056 /* Make sure that output from GDB appears before output from the
3057 inferior. */
3058 gdb_flush (gdb_stdout);
3059
3060 /* Since we've marked the inferior running, give it the terminal. A
3061 QUIT/Ctrl-C from here on is forwarded to the target (which can
3062 still detect attempts to unblock a stuck connection with repeated
3063 Ctrl-C from within target_pass_ctrlc). */
3064 target_terminal::inferior ();
3065
3066 /* In a multi-threaded task we may select another thread and
3067 then continue or step.
3068
3069 But if a thread that we're resuming had stopped at a breakpoint,
3070 it will immediately cause another breakpoint stop without any
3071 execution (i.e. it will report a breakpoint hit incorrectly). So
3072 we must step over it first.
3073
3074 Look for threads other than the current (TP) that reported a
3075 breakpoint hit and haven't been resumed yet since. */
3076
3077 /* If scheduler locking applies, we can avoid iterating over all
3078 threads. */
08036331 3079 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3080 {
3081 for (thread_info *tp : all_non_exited_threads (resume_target,
3082 resume_ptid))
08036331 3083 {
3084 switch_to_thread_no_regs (tp);
3085
3086 /* Ignore the current thread here. It's handled
3087 afterwards. */
08036331 3088 if (tp == cur_thr)
4d9d9d04 3089 continue;
c906108c 3090
3091 if (!thread_still_needs_step_over (tp))
3092 continue;
3093
3094 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3095
3096 if (debug_infrun)
3097 fprintf_unfiltered (gdb_stdlog,
3098 "infrun: need to step-over [%s] first\n",
a068643d 3099 target_pid_to_str (tp->ptid).c_str ());
99619bea 3100
4d9d9d04 3101 thread_step_over_chain_enqueue (tp);
2adfaa28 3102 }
3103
3104 switch_to_thread (cur_thr);
3105 }
3106
3107 /* Enqueue the current thread last, so that we move all other
3108 threads over their breakpoints first. */
3109 if (cur_thr->stepping_over_breakpoint)
3110 thread_step_over_chain_enqueue (cur_thr);
30852783 3111
3112 /* If the thread isn't started, we'll still need to set its prev_pc,
3113 so that switch_back_to_stepped_thread knows the thread hasn't
3114 advanced. Must do this before resuming any thread, as in
3115 all-stop/remote, once we resume we can't send any other packet
3116 until the target stops again. */
08036331 3117 cur_thr->prev_pc = regcache_read_pc (regcache);
99619bea 3118
3119 {
3120 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 3121
a9bc57b9 3122 started = start_step_over ();
c906108c 3123
3124 if (step_over_info_valid_p ())
3125 {
3126 /* Either this thread started a new in-line step over, or some
3127 other thread was already doing one. In either case, don't
3128 resume anything else until the step-over is finished. */
3129 }
3130 else if (started && !target_is_non_stop_p ())
3131 {
3132 /* A new displaced stepping sequence was started. In all-stop,
3133 we can't talk to the target anymore until it next stops. */
3134 }
3135 else if (!non_stop && target_is_non_stop_p ())
3136 {
3137 /* In all-stop, but the target is always in non-stop mode.
3138 Start all other threads that are implicitly resumed too. */
3139 for (thread_info *tp : all_non_exited_threads (resume_target,
3140 resume_ptid))
3141 {
3142 switch_to_thread_no_regs (tp);
3143
3144 if (!tp->inf->has_execution ())
3145 {
3146 if (debug_infrun)
3147 fprintf_unfiltered (gdb_stdlog,
3148 "infrun: proceed: [%s] target has "
3149 "no execution\n",
3150 target_pid_to_str (tp->ptid).c_str ());
3151 continue;
3152 }
f3f8ece4 3153
3154 if (tp->resumed)
3155 {
3156 if (debug_infrun)
3157 fprintf_unfiltered (gdb_stdlog,
3158 "infrun: proceed: [%s] resumed\n",
3159 target_pid_to_str (tp->ptid).c_str ());
3160 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3161 continue;
3162 }
fbea99ea 3163
3164 if (thread_is_in_step_over_chain (tp))
3165 {
3166 if (debug_infrun)
3167 fprintf_unfiltered (gdb_stdlog,
3168 "infrun: proceed: [%s] needs step-over\n",
3169 target_pid_to_str (tp->ptid).c_str ());
3170 continue;
3171 }
fbea99ea 3172
3173 if (debug_infrun)
3174 fprintf_unfiltered (gdb_stdlog,
3175 "infrun: proceed: resuming %s\n",
3176 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3177
3178 reset_ecs (ecs, tp);
3179 switch_to_thread (tp);
3180 keep_going_pass_signal (ecs);
3181 if (!ecs->wait_some_more)
3182 error (_("Command aborted."));
3183 }
a9bc57b9 3184 }
08036331 3185 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
3186 {
3187 /* The thread wasn't started, and isn't queued, run it now. */
3188 reset_ecs (ecs, cur_thr);
3189 switch_to_thread (cur_thr);
3190 keep_going_pass_signal (ecs);
3191 if (!ecs->wait_some_more)
3192 error (_("Command aborted."));
3193 }
3194 }
c906108c 3195
5b6d1e4f 3196 commit_resume_all_targets ();
85ad3aaf 3197
731f534f 3198 finish_state.release ();
c906108c 3199
3200 /* If we've switched threads above, switch back to the previously
3201 current thread. We don't want the user to see a different
3202 selected thread. */
3203 switch_to_thread (cur_thr);
3204
3205 /* Tell the event loop to wait for it to stop. If the target
3206 supports asynchronous execution, it'll do this from within
3207 target_resume. */
362646f5 3208 if (!target_can_async_p ())
0b333c5e 3209 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3210}
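/* A minimal usage sketch (hedged; not part of infrun.c): execution
   commands typically clear the proceed status first and then resume
   at the stopped PC with the default signal, which is essentially
   what the "continue" command does.  */
#if 0
static void
example_resume_current_thread ()
{
  clear_proceed_status (0);
  proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
}
#endif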
3211\f
3212
3213/* Start remote-debugging of a machine over a serial link. */
96baa820 3214
c906108c 3215void
8621d6a9 3216start_remote (int from_tty)
c906108c 3217{
3218 inferior *inf = current_inferior ();
3219 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3220
1777feb0 3221 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3222 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3223 indicate to wait_for_inferior that a target should timeout if
3224 nothing is returned (instead of just blocking). Because of this,
3225 targets expecting an immediate response need to, internally, set
3226 things up so that the target_wait() is forced to eventually
1777feb0 3227 timeout. */
3228 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3229 differentiate to its caller what the state of the target is after
3230 the initial open has been performed. Here we're assuming that
3231 the target has stopped. It should be possible to eventually have
3232 target_open() return to the caller an indication that the target
3233 is currently running and GDB state should be set to the same as
1777feb0 3234 for an async run. */
5b6d1e4f 3235 wait_for_inferior (inf);
3236
3237 /* Now that the inferior has stopped, do any bookkeeping like
3238 loading shared libraries. We want to do this before normal_stop,
3239 so that the displayed frame is up to date. */
8b88a78e 3240 post_create_inferior (current_top_target (), from_tty);
8621d6a9 3241
6426a772 3242 normal_stop ();
3243}
3244
3245/* Initialize static vars when a new inferior begins. */
3246
3247void
96baa820 3248init_wait_for_inferior (void)
3249{
3250 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3251
3252 breakpoint_init_inferior (inf_starting);
3253
70509625 3254 clear_proceed_status (0);
9f976b41 3255
ab1ddbcf 3256 nullify_last_target_wait_ptid ();
237fc4c9 3257
842951eb 3258 previous_inferior_ptid = inferior_ptid;
c906108c 3259}
237fc4c9 3260
c906108c 3261\f
488f131b 3262
ec9499be 3263static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3264
3265static void handle_step_into_function (struct gdbarch *gdbarch,
3266 struct execution_control_state *ecs);
3267static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3268 struct execution_control_state *ecs);
4f5d7f63 3269static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3270static void check_exception_resume (struct execution_control_state *,
28106bc2 3271 struct frame_info *);
611c83ae 3272
bdc36728 3273static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3274static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3275static void keep_going (struct execution_control_state *ecs);
94c57d6a 3276static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3277static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3278
3279/* This function is attached as a "thread_stop_requested" observer.
3280 Cleanup local state that assumed the PTID was to be resumed, and
3281 report the stop to the frontend. */
3282
2c0b251b 3283static void
3284infrun_thread_stop_requested (ptid_t ptid)
3285{
3286 process_stratum_target *curr_target = current_inferior ()->process_target ();
3287
3288 /* PTID was requested to stop. If the thread was already stopped,
3289 but the user/frontend doesn't know about that yet (e.g., the
3290 thread had been temporarily paused for some step-over), set up
3291 for reporting the stop now. */
5b6d1e4f 3292 for (thread_info *tp : all_threads (curr_target, ptid))
3293 {
3294 if (tp->state != THREAD_RUNNING)
3295 continue;
3296 if (tp->executing)
3297 continue;
c65d6b55 3298
3299 /* Remove matching threads from the step-over queue, so
3300 start_step_over doesn't try to resume them
3301 automatically. */
3302 if (thread_is_in_step_over_chain (tp))
3303 thread_step_over_chain_remove (tp);
c65d6b55 3304
3305 /* If the thread is stopped, but the user/frontend doesn't
3306 know about that yet, queue a pending event, as if the
3307 thread had just stopped now. Unless the thread already had
3308 a pending event. */
3309 if (!tp->suspend.waitstatus_pending_p)
3310 {
3311 tp->suspend.waitstatus_pending_p = 1;
3312 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3313 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3314 }
c65d6b55 3315
3316 /* Clear the inline-frame state, since we're re-processing the
3317 stop. */
5b6d1e4f 3318 clear_inline_frame_state (tp);
c65d6b55 3319
3320 /* If this thread was paused because some other thread was
3321 doing an inline-step over, let that finish first. Once
3322 that happens, we'll restart all threads and consume pending
3323 stop events then. */
3324 if (step_over_info_valid_p ())
3325 continue;
3326
3327 /* Otherwise we can process the (new) pending event now. Set
3328 it so this pending event is considered by
3329 do_target_wait. */
719546c4 3330 tp->resumed = true;
08036331 3331 }
3332}
3333
3334static void
3335infrun_thread_thread_exit (struct thread_info *tp, int silent)
3336{
3337 if (target_last_proc_target == tp->inf->process_target ()
3338 && target_last_wait_ptid == tp->ptid)
3339 nullify_last_target_wait_ptid ();
3340}
3341
3342/* Delete the step resume, single-step and longjmp/exception resume
3343 breakpoints of TP. */
4e1c45ea 3344
3345static void
3346delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3347{
3348 delete_step_resume_breakpoint (tp);
3349 delete_exception_resume_breakpoint (tp);
34b7e8a6 3350 delete_single_step_breakpoints (tp);
3351}
3352
3353/* If the target still has execution, call FUNC for each thread that
3354 just stopped. In all-stop, that's all the non-exited threads; in
3355 non-stop, that's the current thread, only. */
3356
3357typedef void (*for_each_just_stopped_thread_callback_func)
3358 (struct thread_info *tp);
3359
3360static void
0cbcdb96 3361for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3362{
d7e15655 3363 if (!target_has_execution || inferior_ptid == null_ptid)
3364 return;
3365
fbea99ea 3366 if (target_is_non_stop_p ())
4e1c45ea 3367 {
3368 /* If in non-stop mode, only the current thread stopped. */
3369 func (inferior_thread ());
3370 }
3371 else
0cbcdb96 3372 {
0cbcdb96 3373 /* In all-stop mode, all threads have stopped. */
3374 for (thread_info *tp : all_non_exited_threads ())
3375 func (tp);
3376 }
3377}
3378
3379/* Delete the step resume and longjmp/exception resume breakpoints of
3380 the threads that just stopped. */
3381
3382static void
3383delete_just_stopped_threads_infrun_breakpoints (void)
3384{
3385 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3386}
3387
3388/* Delete the single-step breakpoints of the threads that just
3389 stopped. */
7c16b83e 3390
3391static void
3392delete_just_stopped_threads_single_step_breakpoints (void)
3393{
3394 for_each_just_stopped_thread (delete_single_step_breakpoints);
3395}
3396
221e1a37 3397/* See infrun.h. */
223698f8 3398
221e1a37 3399void
3400print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3401 const struct target_waitstatus *ws)
3402{
23fdd69e 3403 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3404 string_file stb;
3405
3406 /* The text is split over several lines because it was getting too long.
3407 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3408 output as a unit; we want only one timestamp printed if debug_timestamp
3409 is set. */
3410
d7e74731 3411 stb.printf ("infrun: target_wait (%d.%ld.%ld",
e99b03dc 3412 waiton_ptid.pid (),
e38504b3 3413 waiton_ptid.lwp (),
cc6bcb54 3414 waiton_ptid.tid ());
e99b03dc 3415 if (waiton_ptid.pid () != -1)
a068643d 3416 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
3417 stb.printf (", status) =\n");
3418 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
e99b03dc 3419 result_ptid.pid (),
e38504b3 3420 result_ptid.lwp (),
cc6bcb54 3421 result_ptid.tid (),
a068643d 3422 target_pid_to_str (result_ptid).c_str ());
23fdd69e 3423 stb.printf ("infrun: %s\n", status_string.c_str ());
3424
3425 /* This uses %s in part to handle %'s in the text, but also to avoid
3426 a gcc error: the format attribute requires a string literal. */
d7e74731 3427 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
3428}
3429
3430/* Select a thread at random, out of those which are resumed and have
3431 had events. */
3432
3433static struct thread_info *
5b6d1e4f 3434random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3435{
372316f1 3436 int num_events = 0;
08036331 3437
5b6d1e4f 3438 auto has_event = [&] (thread_info *tp)
08036331 3439 {
3440 return (tp->ptid.matches (waiton_ptid)
3441 && tp->resumed
3442 && tp->suspend.waitstatus_pending_p);
3443 };
3444
3445 /* First see how many events we have. Count only resumed threads
3446 that have an event pending. */
5b6d1e4f 3447 for (thread_info *tp : inf->non_exited_threads ())
08036331 3448 if (has_event (tp))
3449 num_events++;
3450
3451 if (num_events == 0)
3452 return NULL;
3453
3454 /* Now randomly pick a thread out of those that have had events. */
3455 int random_selector = (int) ((num_events * (double) rand ())
3456 / (RAND_MAX + 1.0));
3457
3458 if (debug_infrun && num_events > 1)
3459 fprintf_unfiltered (gdb_stdlog,
3460 "infrun: Found %d events, selecting #%d\n",
3461 num_events, random_selector);
3462
3463 /* Select the Nth thread that has had an event. */
5b6d1e4f 3464 for (thread_info *tp : inf->non_exited_threads ())
08036331 3465 if (has_event (tp))
372316f1 3466 if (random_selector-- == 0)
08036331 3467 return tp;
372316f1 3468
08036331 3469 gdb_assert_not_reached ("event thread not found");
3470}
3471
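/* Illustrative sketch (not part of infrun.c) of the selection scheme
   above: count the candidates, draw a uniform index with
   count * rand () / (RAND_MAX + 1.0), then walk the list again and
   return the N'th matching element.  toy_pick_random and its
   int-based candidate list are hypothetical.  */
#if 0
#include <cstdlib>
#include <vector>

static const int *
toy_pick_random (const std::vector<int> &items, bool (*pred) (int))
{
  int num = 0;

  for (int item : items)
    if (pred (item))
      num++;

  if (num == 0)
    return nullptr;

  /* Uniform index in [0, num).  */
  int selector = (int) ((num * (double) rand ()) / (RAND_MAX + 1.0));

  for (const int &item : items)
    if (pred (item))
      if (selector-- == 0)
	return &item;

  return nullptr;		/* Not reached.  */
}
#endif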
3472/* Wrapper for target_wait that first checks whether threads have
3473 pending statuses to report before actually asking the target for
3474 more events. INF is the inferior we're using to call target_wait
3475 on. */
3476
3477static ptid_t
3478do_target_wait_1 (inferior *inf, ptid_t ptid,
3479 target_waitstatus *status, int options)
3480{
3481 ptid_t event_ptid;
3482 struct thread_info *tp;
3483
3484 /* We know that we are looking for an event in the target of inferior
3485 INF, but we don't know which thread the event might come from. As
3486 such we want to make sure that INFERIOR_PTID is reset so that none of
3487 the wait code relies on it - doing so is always a mistake. */
3488 switch_to_inferior_no_thread (inf);
3489
3490 /* First check if there is a resumed thread with a wait status
3491 pending. */
d7e15655 3492 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3493 {
5b6d1e4f 3494 tp = random_pending_event_thread (inf, ptid);
3495 }
3496 else
3497 {
3498 if (debug_infrun)
3499 fprintf_unfiltered (gdb_stdlog,
3500 "infrun: Waiting for specific thread %s.\n",
a068643d 3501 target_pid_to_str (ptid).c_str ());
3502
3503 /* We have a specific thread to check. */
5b6d1e4f 3504 tp = find_thread_ptid (inf, ptid);
3505 gdb_assert (tp != NULL);
3506 if (!tp->suspend.waitstatus_pending_p)
3507 tp = NULL;
3508 }
3509
3510 if (tp != NULL
3511 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3512 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3513 {
00431a78 3514 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3515 struct gdbarch *gdbarch = regcache->arch ();
3516 CORE_ADDR pc;
3517 int discard = 0;
3518
3519 pc = regcache_read_pc (regcache);
3520
3521 if (pc != tp->suspend.stop_pc)
3522 {
3523 if (debug_infrun)
3524 fprintf_unfiltered (gdb_stdlog,
3525 "infrun: PC of %s changed. was=%s, now=%s\n",
a068643d 3526 target_pid_to_str (tp->ptid).c_str (),
defd2172 3527 paddress (gdbarch, tp->suspend.stop_pc),
3528 paddress (gdbarch, pc));
3529 discard = 1;
3530 }
a01bda52 3531 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
3532 {
3533 if (debug_infrun)
3534 fprintf_unfiltered (gdb_stdlog,
3535 "infrun: previous breakpoint of %s, at %s gone\n",
a068643d 3536 target_pid_to_str (tp->ptid).c_str (),
3537 paddress (gdbarch, pc));
3538
3539 discard = 1;
3540 }
3541
3542 if (discard)
3543 {
3544 if (debug_infrun)
3545 fprintf_unfiltered (gdb_stdlog,
3546 "infrun: pending event of %s cancelled.\n",
a068643d 3547 target_pid_to_str (tp->ptid).c_str ());
3548
3549 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3550 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3551 }
3552 }
3553
3554 if (tp != NULL)
3555 {
3556 if (debug_infrun)
3557 {
3558 std::string statstr
3559 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 3560
3561 fprintf_unfiltered (gdb_stdlog,
3562 "infrun: Using pending wait status %s for %s.\n",
23fdd69e 3563 statstr.c_str (),
a068643d 3564 target_pid_to_str (tp->ptid).c_str ());
3565 }
3566
3567 /* Now that we've selected our final event LWP, un-adjust its PC
3568 if it was a software breakpoint (and the target doesn't
3569 always adjust the PC itself). */
3570 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3571 && !target_supports_stopped_by_sw_breakpoint ())
3572 {
3573 struct regcache *regcache;
3574 struct gdbarch *gdbarch;
3575 int decr_pc;
3576
00431a78 3577 regcache = get_thread_regcache (tp);
ac7936df 3578 gdbarch = regcache->arch ();
3579
3580 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3581 if (decr_pc != 0)
3582 {
3583 CORE_ADDR pc;
3584
3585 pc = regcache_read_pc (regcache);
3586 regcache_write_pc (regcache, pc + decr_pc);
3587 }
3588 }
3589
3590 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3591 *status = tp->suspend.waitstatus;
3592 tp->suspend.waitstatus_pending_p = 0;
3593
3594 /* Wake up the event loop again, until all pending events are
3595 processed. */
3596 if (target_is_async_p ())
3597 mark_async_event_handler (infrun_async_inferior_event_token);
3598 return tp->ptid;
3599 }
3600
3601 /* But if we don't find one, we'll have to wait. */
3602
3603 if (deprecated_target_wait_hook)
3604 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3605 else
3606 event_ptid = target_wait (ptid, status, options);
3607
3608 return event_ptid;
3609}
3610
3611/* Returns true if INF has any resumed thread with a status
3612 pending. */
3613
3614static bool
3615threads_are_resumed_pending_p (inferior *inf)
3616{
3617 for (thread_info *tp : inf->non_exited_threads ())
3618 if (tp->resumed
3619 && tp->suspend.waitstatus_pending_p)
3620 return true;
3621
3622 return false;
3623}
3624
3625/* Wrapper for target_wait that first checks whether threads have
3626 pending statuses to report before actually asking the target for
3627 more events. Polls for events from all inferiors/targets. */
3628
3629static bool
3630do_target_wait (ptid_t wait_ptid, execution_control_state *ecs, int options)
3631{
3632 int num_inferiors = 0;
3633 int random_selector;
3634
3635 /* For fairness, we pick the first inferior/target to poll at
3636 random, and then continue polling the rest of the inferior list
3637 starting from that one in a circular fashion until the whole list
3638 is polled once. */
3639
3640 auto inferior_matches = [&wait_ptid] (inferior *inf)
3641 {
3642 return (inf->process_target () != NULL
3643 && (threads_are_executing (inf->process_target ())
3644 || threads_are_resumed_pending_p (inf))
3645 && ptid_t (inf->pid).matches (wait_ptid));
3646 };
3647
3648 /* First see how many resumed inferiors we have. */
3649 for (inferior *inf : all_inferiors ())
3650 if (inferior_matches (inf))
3651 num_inferiors++;
3652
3653 if (num_inferiors == 0)
3654 {
3655 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3656 return false;
3657 }
3658
3659 /* Now randomly pick an inferior out of those that were resumed. */
3660 random_selector = (int)
3661 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3662
3663 if (debug_infrun && num_inferiors > 1)
3664 fprintf_unfiltered (gdb_stdlog,
3665 "infrun: Found %d inferiors, starting at #%d\n",
3666 num_inferiors, random_selector);
3667
3668 /* Select the Nth inferior that was resumed. */
3669
3670 inferior *selected = nullptr;
3671
3672 for (inferior *inf : all_inferiors ())
3673 if (inferior_matches (inf))
3674 if (random_selector-- == 0)
3675 {
3676 selected = inf;
3677 break;
3678 }
3679
3680 /* Now poll for events out of each of the resumed inferior's
3681 targets, starting from the selected one. */
3682
3683 auto do_wait = [&] (inferior *inf)
3684 {
3685 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
3686 ecs->target = inf->process_target ();
3687 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3688 };
3689
3690 /* Needed in all-stop+target-non-stop mode, because we end up here
3691 spuriously after the target is all stopped and we've already
3692 reported the stop to the user, polling for events. */
3693 scoped_restore_current_thread restore_thread;
3694
3695 int inf_num = selected->num;
3696 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3697 if (inferior_matches (inf))
3698 if (do_wait (inf))
3699 return true;
3700
3701 for (inferior *inf = inferior_list;
3702 inf != NULL && inf->num < inf_num;
3703 inf = inf->next)
3704 if (inferior_matches (inf))
3705 if (do_wait (inf))
3706 return true;
3707
3708 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3709 return false;
3710}
3711
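/* Illustrative sketch (not part of infrun.c) of the polling order
   used above: start at a randomly chosen element, poll from there to
   the end of the list, then wrap around and poll the elements that
   were skipped.  toy_poll_round_robin and its parameters are
   hypothetical.  */
#if 0
#include <cstdlib>
#include <vector>

static int
toy_poll_round_robin (const std::vector<int> &ids, bool (*poll) (int))
{
  if (ids.empty ())
    return -1;

  /* Uniform starting index in [0, size).  */
  size_t start = (size_t) ((ids.size () * (double) rand ())
			   / (RAND_MAX + 1.0));

  /* From the random starting point to the end of the list...  */
  for (size_t i = start; i < ids.size (); i++)
    if (poll (ids[i]))
      return ids[i];

  /* ...then wrap around to the elements before the starting point.  */
  for (size_t i = 0; i < start; i++)
    if (poll (ids[i]))
      return ids[i];

  return -1;
}
#endif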
3712/* Prepare and stabilize the inferior for detaching it. E.g.,
3713 detaching while a thread is displaced stepping is a recipe for
3714 crashing it, as nothing would readjust the PC out of the scratch
3715 pad. */
3716
3717void
3718prepare_for_detach (void)
3719{
3720 struct inferior *inf = current_inferior ();
f2907e49 3721 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3722
00431a78 3723 displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);
3724
3725 /* Is any thread of this process displaced stepping? If not,
3726 there's nothing else to do. */
d20172fc 3727 if (displaced->step_thread == nullptr)
3728 return;
3729
3730 if (debug_infrun)
3731 fprintf_unfiltered (gdb_stdlog,
3732 "displaced-stepping in-process while detaching");
3733
9bcb1f16 3734 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3735
00431a78 3736 while (displaced->step_thread != nullptr)
24291992 3737 {
3738 struct execution_control_state ecss;
3739 struct execution_control_state *ecs;
3740
3741 ecs = &ecss;
3742 memset (ecs, 0, sizeof (*ecs));
3743
3744 overlay_cache_invalid = 1;
3745 /* Flush target cache before starting to handle each event.
3746 Target was running and cache could be stale. This is just a
3747 heuristic. Running threads may modify target memory, but we
3748 don't get any event. */
3749 target_dcache_invalidate ();
24291992 3750
5b6d1e4f 3751 do_target_wait (pid_ptid, ecs, 0);
3752
3753 if (debug_infrun)
3754 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3755
3756 /* If an error happens while handling the event, propagate GDB's
3757 knowledge of the executing state to the frontend/user running
3758 state. */
3759 scoped_finish_thread_state finish_state (inf->process_target (),
3760 minus_one_ptid);
3761
 3762 /* Now figure out what to do with the result. */
3763 handle_inferior_event (ecs);
3764
3765 /* No error, don't finish the state yet. */
731f534f 3766 finish_state.release ();
3767
3768 /* Breakpoints and watchpoints are not installed on the target
3769 at this point, and signals are passed directly to the
3770 inferior, so this must mean the process is gone. */
3771 if (!ecs->wait_some_more)
3772 {
9bcb1f16 3773 restore_detaching.release ();
3774 error (_("Program exited while detaching"));
3775 }
3776 }
3777
9bcb1f16 3778 restore_detaching.release ();
3779}
3780
cd0fc7c3 3781/* Wait for control to return from inferior to debugger.
ae123ec6 3782
3783 If inferior gets a signal, we may decide to start it up again
3784 instead of returning. That is why there is a loop in this function.
3785 When this function actually returns it means the inferior
3786 should be left stopped and GDB should read more commands. */
3787
3788static void
3789wait_for_inferior (inferior *inf)
cd0fc7c3 3790{
527159b7 3791 if (debug_infrun)
ae123ec6 3792 fprintf_unfiltered
e4c8541f 3793 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
527159b7 3794
4c41382a 3795 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3796
3797 /* If an error happens while handling the event, propagate GDB's
3798 knowledge of the executing state to the frontend/user running
3799 state. */
3800 scoped_finish_thread_state finish_state
3801 (inf->process_target (), minus_one_ptid);
e6f5c25b 3802
3803 while (1)
3804 {
3805 struct execution_control_state ecss;
3806 struct execution_control_state *ecs = &ecss;
29f49a6a 3807
3808 memset (ecs, 0, sizeof (*ecs));
3809
ec9499be 3810 overlay_cache_invalid = 1;
ec9499be 3811
3812 /* Flush target cache before starting to handle each event.
3813 Target was running and cache could be stale. This is just a
3814 heuristic. Running threads may modify target memory, but we
3815 don't get any event. */
3816 target_dcache_invalidate ();
3817
3818 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3819 ecs->target = inf->process_target ();
c906108c 3820
f00150c9 3821 if (debug_infrun)
5b6d1e4f 3822 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 3823
 3824 /* Now figure out what to do with the result. */
3825 handle_inferior_event (ecs);
c906108c 3826
3827 if (!ecs->wait_some_more)
3828 break;
3829 }
4e1c45ea 3830
e6f5c25b 3831 /* No error, don't finish the state yet. */
731f534f 3832 finish_state.release ();
cd0fc7c3 3833}
c906108c 3834
3835/* Cleanup that reinstalls the readline callback handler, if the
3836 target is running in the background. If while handling the target
3837 event something triggered a secondary prompt, like e.g., a
3838 pagination prompt, we'll have removed the callback handler (see
3839 gdb_readline_wrapper_line). Need to do this as we go back to the
3840 event loop, ready to process further input. Note this has no
3841 effect if the handler hasn't actually been removed, because calling
3842 rl_callback_handler_install resets the line buffer, thus losing
3843 input. */
3844
3845static void
d238133d 3846reinstall_readline_callback_handler_cleanup ()
d3d4baed 3847{
3848 struct ui *ui = current_ui;
3849
3850 if (!ui->async)
3851 {
3852 /* We're not going back to the top level event loop yet. Don't
3853 install the readline callback, as it'd prep the terminal,
3854 readline-style (raw, noecho) (e.g., --batch). We'll install
3855 it the next time the prompt is displayed, when we're ready
3856 for input. */
3857 return;
3858 }
3859
3b12939d 3860 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
3861 gdb_rl_callback_handler_reinstall ();
3862}
3863
3864/* Clean up the FSMs of threads that are now stopped. In non-stop,
3865 that's just the event thread. In all-stop, that's all threads. */
3866
3867static void
3868clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3869{
3870 if (ecs->event_thread != NULL
3871 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3872 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
3873
3874 if (!non_stop)
3875 {
08036331 3876 for (thread_info *thr : all_non_exited_threads ())
3877 {
3878 if (thr->thread_fsm == NULL)
3879 continue;
3880 if (thr == ecs->event_thread)
3881 continue;
3882
00431a78 3883 switch_to_thread (thr);
46e3ed7f 3884 thr->thread_fsm->clean_up (thr);
3885 }
3886
3887 if (ecs->event_thread != NULL)
00431a78 3888 switch_to_thread (ecs->event_thread);
3889 }
3890}
3891
3892/* Helper for all_uis_check_sync_execution_done that works on the
3893 current UI. */
3894
3895static void
3896check_curr_ui_sync_execution_done (void)
3897{
3898 struct ui *ui = current_ui;
3899
3900 if (ui->prompt_state == PROMPT_NEEDED
3901 && ui->async
3902 && !gdb_in_secondary_prompt_p (ui))
3903 {
223ffa71 3904 target_terminal::ours ();
76727919 3905 gdb::observers::sync_execution_done.notify ();
3eb7562a 3906 ui_register_input_event_handler (ui);
3907 }
3908}
3909
3910/* See infrun.h. */
3911
3912void
3913all_uis_check_sync_execution_done (void)
3914{
0e454242 3915 SWITCH_THRU_ALL_UIS ()
3916 {
3917 check_curr_ui_sync_execution_done ();
3918 }
3919}
3920
3921/* See infrun.h. */
3922
3923void
3924all_uis_on_sync_execution_starting (void)
3925{
0e454242 3926 SWITCH_THRU_ALL_UIS ()
3927 {
3928 if (current_ui->prompt_state == PROMPT_NEEDED)
3929 async_disable_stdin ();
3930 }
3931}
3932
1777feb0 3933/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3934 event loop whenever a change of state is detected on the file
3935 descriptor corresponding to the target. It can be called more than
3936 once to complete a single execution command. In such cases we need
3937 to keep the state in a global variable ECSS. If it is the last time
3938 that this function is called for a single execution command, then
3939 report to the user that the inferior has stopped, and do the
1777feb0 3940 necessary cleanups. */
3941
3942void
fba45db2 3943fetch_inferior_event (void *client_data)
43ff13b4 3944{
0d1e5fa7 3945 struct execution_control_state ecss;
a474d7c2 3946 struct execution_control_state *ecs = &ecss;
0f641c01 3947 int cmd_done = 0;
43ff13b4 3948
3949 memset (ecs, 0, sizeof (*ecs));
3950
3951 /* Events are always processed with the main UI as current UI. This
3952 way, warnings, debug output, etc. are always consistently sent to
3953 the main console. */
4b6749b9 3954 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3955
d3d4baed 3956 /* End up with readline processing input, if necessary. */
3957 {
3958 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3959
3960 /* We're handling a live event, so make sure we're doing live
3961 debugging. If we're looking at traceframes while the target is
3962 running, we're going to need to get back to that mode after
3963 handling the event. */
3964 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3965 if (non_stop)
3966 {
3967 maybe_restore_traceframe.emplace ();
3968 set_current_traceframe (-1);
3969 }
43ff13b4 3970
3971 /* The user/frontend should not notice a thread switch due to
3972 internal events. Make sure we revert to the user selected
3973 thread and frame after handling the event and running any
3974 breakpoint commands. */
3975 scoped_restore_current_thread restore_thread;
3976
3977 overlay_cache_invalid = 1;
3978 /* Flush target cache before starting to handle each event. Target
3979 was running and cache could be stale. This is just a heuristic.
3980 Running threads may modify target memory, but we don't get any
3981 event. */
3982 target_dcache_invalidate ();
3983
3984 scoped_restore save_exec_dir
3985 = make_scoped_restore (&execution_direction,
3986 target_execution_direction ());
3987
3988 if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
3989 return;
3990
3991 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3992
3993 /* Switch to the target that generated the event, so we can do
3994 target calls. Any inferior bound to the target will do, so we
3995 just switch to the first we find. */
3996 for (inferior *inf : all_inferiors (ecs->target))
3997 {
3998 switch_to_inferior_no_thread (inf);
3999 break;
4000 }
4001
4002 if (debug_infrun)
5b6d1e4f 4003 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
4004
4005 /* If an error happens while handling the event, propagate GDB's
4006 knowledge of the executing state to the frontend/user running
4007 state. */
4008 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 4009 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 4010
979a0d13 4011 /* These get executed before the scoped_restore_current_thread above,
 4012 so they still apply to the thread which has thrown the exception. */
4013 auto defer_bpstat_clear
4014 = make_scope_exit (bpstat_clear_actions);
4015 auto defer_delete_threads
4016 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4017
 4018 /* Now figure out what to do with the result. */
4019 handle_inferior_event (ecs);
4020
4021 if (!ecs->wait_some_more)
4022 {
5b6d1e4f 4023 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4024 int should_stop = 1;
4025 struct thread_info *thr = ecs->event_thread;
d6b48e9c 4026
d238133d 4027 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 4028
4029 if (thr != NULL)
4030 {
4031 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 4032
d238133d 4033 if (thread_fsm != NULL)
46e3ed7f 4034 should_stop = thread_fsm->should_stop (thr);
d238133d 4035 }
243a9253 4036
4037 if (!should_stop)
4038 {
4039 keep_going (ecs);
4040 }
4041 else
4042 {
46e3ed7f 4043 bool should_notify_stop = true;
d238133d 4044 int proceeded = 0;
1840d81a 4045
d238133d 4046 clean_up_just_stopped_threads_fsms (ecs);
243a9253 4047
d238133d 4048 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 4049 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 4050
4051 if (should_notify_stop)
4052 {
4053 /* We may not find an inferior if this was a process exit. */
4054 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
4055 proceeded = normal_stop ();
4056 }
243a9253 4057
4058 if (!proceeded)
4059 {
4060 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
4061 cmd_done = 1;
4062 }
4063
4064 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4065 previously selected thread is gone. We have two
4066 choices - switch to no thread selected, or restore the
4067 previously selected thread (now exited). We chose the
 4068 latter, just because that's what GDB used to do. After
4069 this, "info threads" says "The current thread <Thread
4070 ID 2> has terminated." instead of "No thread
4071 selected.". */
4072 if (!non_stop
4073 && cmd_done
4074 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
4075 restore_thread.dont_restore ();
4076 }
4077 }
4f8d22e3 4078
4079 defer_delete_threads.release ();
4080 defer_bpstat_clear.release ();
29f49a6a 4081
4082 /* No error, don't finish the thread states yet. */
4083 finish_state.release ();
731f534f 4084
4085 /* This scope is used to ensure that readline callbacks are
4086 reinstalled here. */
4087 }
4f8d22e3 4088
4089 /* If a UI was in sync execution mode, and now isn't, restore its
4090 prompt (a synchronous execution command has finished, and we're
4091 ready for input). */
4092 all_uis_check_sync_execution_done ();
4093
4094 if (cmd_done
0f641c01 4095 && exec_done_display_p
4096 && (inferior_ptid == null_ptid
4097 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 4098 printf_unfiltered (_("completed.\n"));
4099}
4100
4101/* See infrun.h. */
4102
edb3359d 4103void
4104set_step_info (thread_info *tp, struct frame_info *frame,
4105 struct symtab_and_line sal)
edb3359d 4106{
4107 /* This can be removed once this function no longer implicitly relies on the
4108 inferior_ptid value. */
4109 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4110
4111 tp->control.step_frame_id = get_frame_id (frame);
4112 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4113
4114 tp->current_symtab = sal.symtab;
4115 tp->current_line = sal.line;
4116}
4117
4118/* Clear context switchable stepping state. */
4119
4120void
4e1c45ea 4121init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4122{
7f5ef605 4123 tss->stepped_breakpoint = 0;
0d1e5fa7 4124 tss->stepping_over_breakpoint = 0;
963f9c80 4125 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4126 tss->step_after_step_resume_breakpoint = 0;
4127}
4128
ab1ddbcf 4129/* See infrun.h. */
c32c64b7 4130
6efcd9a8 4131void
4132set_last_target_status (process_stratum_target *target, ptid_t ptid,
4133 target_waitstatus status)
c32c64b7 4134{
5b6d1e4f 4135 target_last_proc_target = target;
4136 target_last_wait_ptid = ptid;
4137 target_last_waitstatus = status;
4138}
4139
ab1ddbcf 4140/* See infrun.h. */
4141
4142void
4143get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4144 target_waitstatus *status)
e02bc4cc 4145{
4146 if (target != nullptr)
4147 *target = target_last_proc_target;
4148 if (ptid != nullptr)
4149 *ptid = target_last_wait_ptid;
4150 if (status != nullptr)
4151 *status = target_last_waitstatus;
4152}
4153
4154/* See infrun.h. */
4155
4156void
4157nullify_last_target_wait_ptid (void)
4158{
5b6d1e4f 4159 target_last_proc_target = nullptr;
ac264b3b 4160 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4161 target_last_waitstatus = {};
4162}
4163
dcf4fbde 4164/* Switch thread contexts. */
4165
4166static void
00431a78 4167context_switch (execution_control_state *ecs)
dd80620e 4168{
4169 if (debug_infrun
4170 && ecs->ptid != inferior_ptid
4171 && (inferior_ptid == null_ptid
4172 || ecs->event_thread != inferior_thread ()))
4173 {
4174 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
a068643d 4175 target_pid_to_str (inferior_ptid).c_str ());
fd48f117 4176 fprintf_unfiltered (gdb_stdlog, "to %s\n",
a068643d 4177 target_pid_to_str (ecs->ptid).c_str ());
4178 }
4179
00431a78 4180 switch_to_thread (ecs->event_thread);
4181}
4182
4183/* If the target can't tell whether we've hit breakpoints
4184 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4185 check whether that could have been caused by a breakpoint. If so,
4186 adjust the PC, per gdbarch_decr_pc_after_break. */
4187
4fa8626c 4188static void
4189adjust_pc_after_break (struct thread_info *thread,
4190 struct target_waitstatus *ws)
4fa8626c 4191{
4192 struct regcache *regcache;
4193 struct gdbarch *gdbarch;
118e6252 4194 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4195
4196 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4197 we aren't, just return.
4198
4199 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4200 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4201 implemented by software breakpoints should be handled through the normal
4202 breakpoint layer.
8fb3e588 4203
4204 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4205 different signals (SIGILL or SIGEMT for instance), but it is less
4206 clear where the PC is pointing afterwards. It may not match
4207 gdbarch_decr_pc_after_break. I don't know any specific target that
4208 generates these signals at breakpoints (the code has been in GDB since at
 4209 least 1992) so I cannot guess how to handle them here.
8fb3e588 4210
4211 In earlier versions of GDB, a target with
4212 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4213 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4214 target with both of these set in GDB history, and it seems unlikely to be
4215 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4216
d8dd4d5f 4217 if (ws->kind != TARGET_WAITKIND_STOPPED)
4218 return;
4219
d8dd4d5f 4220 if (ws->value.sig != GDB_SIGNAL_TRAP)
4221 return;
4222
4223 /* In reverse execution, when a breakpoint is hit, the instruction
4224 under it has already been de-executed. The reported PC always
4225 points at the breakpoint address, so adjusting it further would
4226 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4227 architecture:
4228
4229 B1 0x08000000 : INSN1
4230 B2 0x08000001 : INSN2
4231 0x08000002 : INSN3
4232 PC -> 0x08000003 : INSN4
4233
4234 Say you're stopped at 0x08000003 as above. Reverse continuing
4235 from that point should hit B2 as below. Reading the PC when the
4236 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4237 been de-executed already.
4238
4239 B1 0x08000000 : INSN1
4240 B2 PC -> 0x08000001 : INSN2
4241 0x08000002 : INSN3
4242 0x08000003 : INSN4
4243
4244 We can't apply the same logic as for forward execution, because
4245 we would wrongly adjust the PC to 0x08000000, since there's a
4246 breakpoint at PC - 1. We'd then report a hit on B1, although
4247 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4248 behaviour. */
4249 if (execution_direction == EXEC_REVERSE)
4250 return;
4251
4252 /* If the target can tell whether the thread hit a SW breakpoint,
4253 trust it. Targets that can tell also adjust the PC
4254 themselves. */
4255 if (target_supports_stopped_by_sw_breakpoint ())
4256 return;
4257
4258 /* Note that relying on whether a breakpoint is planted in memory to
4259 determine this can fail. E.g,. the breakpoint could have been
4260 removed since. Or the thread could have been told to step an
4261 instruction the size of a breakpoint instruction, and only
4262 _after_ was a breakpoint inserted at its address. */
4263
4264 /* If this target does not decrement the PC after breakpoints, then
4265 we have nothing to do. */
00431a78 4266 regcache = get_thread_regcache (thread);
ac7936df 4267 gdbarch = regcache->arch ();
118e6252 4268
527a273a 4269 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4270 if (decr_pc == 0)
4271 return;
4272
8b86c959 4273 const address_space *aspace = regcache->aspace ();
6c95b8df 4274
4275 /* Find the location where (if we've hit a breakpoint) the
4276 breakpoint would be. */
118e6252 4277 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4278
4279 /* If the target can't tell whether a software breakpoint triggered,
4280 fallback to figuring it out based on breakpoints we think were
4281 inserted in the target, and on whether the thread was stepped or
4282 continued. */
4283
4284 /* Check whether there actually is a software breakpoint inserted at
4285 that location.
4286
4287 If in non-stop mode, a race condition is possible where we've
4288 removed a breakpoint, but stop events for that breakpoint were
4289 already queued and arrive later. To suppress those spurious
4290 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4291 and retire them after a number of stop events are reported. Note
 4292 this is a heuristic and can thus get confused. The real fix is
4293 to get the "stopped by SW BP and needs adjustment" info out of
4294 the target/kernel (and thus never reach here; see above). */
6c95b8df 4295 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
4296 || (target_is_non_stop_p ()
4297 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4298 {
07036511 4299 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4300
8213266a 4301 if (record_full_is_used ())
4302 restore_operation_disable.emplace
4303 (record_full_gdb_operation_disable_set ());
96429cc8 4304
4305 /* When using hardware single-step, a SIGTRAP is reported for both
4306 a completed single-step and a software breakpoint. Need to
4307 differentiate between the two, as the latter needs adjusting
4308 but the former does not.
4309
4310 The SIGTRAP can be due to a completed hardware single-step only if
4311 - we didn't insert software single-step breakpoints
4312 - this thread is currently being stepped
4313
4314 If any of these events did not occur, we must have stopped due
4315 to hitting a software breakpoint, and have to back up to the
4316 breakpoint address.
4317
4318 As a special case, we could have hardware single-stepped a
4319 software breakpoint. In this case (prev_pc == breakpoint_pc),
4320 we also need to back up to the breakpoint address. */
4321
4322 if (thread_has_single_step_breakpoints_set (thread)
4323 || !currently_stepping (thread)
4324 || (thread->stepped_breakpoint
4325 && thread->prev_pc == breakpoint_pc))
515630c5 4326 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4327 }
4328}
4329
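/* A worked example of the adjustment above (numbers are illustrative
   only): on a target where the breakpoint instruction is one byte and
   gdbarch_decr_pc_after_break is 1 (e.g. x86 "int3"), a breakpoint
   planted at 0x401004 reports SIGTRAP with PC == 0x401005.
   breakpoint_pc is computed as 0x401005 - 1 == 0x401004; if a software
   breakpoint is (or recently was) inserted there and the thread was
   not simply completing a hardware single-step, the PC is rewound to
   0x401004 so the stop is attributed to that breakpoint.  */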
4330static int
4331stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4332{
4333 for (frame = get_prev_frame (frame);
4334 frame != NULL;
4335 frame = get_prev_frame (frame))
4336 {
4337 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4338 return 1;
4339 if (get_frame_type (frame) != INLINE_FRAME)
4340 break;
4341 }
4342
4343 return 0;
4344}
4345
4346/* Look for an inline frame that is marked for skip.
4347 If PREV_FRAME is TRUE start at the previous frame,
4348 otherwise start at the current frame. Stop at the
4349 first non-inline frame, or at the frame where the
4350 step started. */
4351
4352static bool
4353inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4354{
4355 struct frame_info *frame = get_current_frame ();
4356
4357 if (prev_frame)
4358 frame = get_prev_frame (frame);
4359
4360 for (; frame != NULL; frame = get_prev_frame (frame))
4361 {
4362 const char *fn = NULL;
4363 symtab_and_line sal;
4364 struct symbol *sym;
4365
4366 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4367 break;
4368 if (get_frame_type (frame) != INLINE_FRAME)
4369 break;
4370
4371 sal = find_frame_sal (frame);
4372 sym = get_frame_function (frame);
4373
4374 if (sym != NULL)
4375 fn = sym->print_name ();
4376
4377 if (sal.line != 0
4378 && function_name_is_marked_for_skip (fn, sal))
4379 return true;
4380 }
4381
4382 return false;
4383}
4384
4385/* If the event thread has the stop requested flag set, pretend it
4386 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4387 target_stop). */
4388
4389static bool
4390handle_stop_requested (struct execution_control_state *ecs)
4391{
4392 if (ecs->event_thread->stop_requested)
4393 {
4394 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4395 ecs->ws.value.sig = GDB_SIGNAL_0;
4396 handle_signal_stop (ecs);
4397 return true;
4398 }
4399 return false;
4400}
4401
4402/* Auxiliary function that handles syscall entry/return events.
4403 It returns 1 if the inferior should keep going (and GDB
4404 should ignore the event), or 0 if the event deserves to be
4405 processed. */
ca2163eb 4406
a96d9b2e 4407static int
ca2163eb 4408handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4409{
ca2163eb 4410 struct regcache *regcache;
ca2163eb
PA
4411 int syscall_number;
4412
00431a78 4413 context_switch (ecs);
ca2163eb 4414
00431a78 4415 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4416 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4417 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4418
a96d9b2e
SDJ
4419 if (catch_syscall_enabled () > 0
4420 && catching_syscall_number (syscall_number) > 0)
4421 {
4422 if (debug_infrun)
4423 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4424 syscall_number);
a96d9b2e 4425
16c381f0 4426 ecs->event_thread->control.stop_bpstat
a01bda52 4427 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4428 ecs->event_thread->suspend.stop_pc,
4429 ecs->event_thread, &ecs->ws);
ab04a2af 4430
c65d6b55
PA
4431 if (handle_stop_requested (ecs))
4432 return 0;
4433
ce12b012 4434 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4435 {
4436 /* Catchpoint hit. */
ca2163eb
PA
4437 return 0;
4438 }
a96d9b2e 4439 }
ca2163eb 4440
c65d6b55
PA
4441 if (handle_stop_requested (ecs))
4442 return 0;
4443
ca2163eb 4444 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4445 keep_going (ecs);
4446 return 1;
a96d9b2e
SDJ
4447}
4448
4449/* Lazily fill in the execution_control_state's stop_func_* fields. */
4450
4451static void
4452fill_in_stop_func (struct gdbarch *gdbarch,
4453 struct execution_control_state *ecs)
4454{
4455 if (!ecs->stop_func_filled_in)
4456 {
98a617f8
KB
4457 const block *block;
4458
7e324e48
GB
4459 /* Don't care about return value; stop_func_start and stop_func_name
4460 will both be 0 if it doesn't work. */
98a617f8
KB
4461 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4462 &ecs->stop_func_name,
4463 &ecs->stop_func_start,
4464 &ecs->stop_func_end,
4465 &block);
4466
4467 /* The call to find_pc_partial_function, above, will set
4468 stop_func_start and stop_func_end to the start and end
4469 of the range containing the stop pc. If this range
4470 contains the entry pc for the block (which is always the
4471 case for contiguous blocks), advance stop_func_start past
4472 the function's start offset and entrypoint. Note that
4473 stop_func_start is NOT advanced when in a range of a
4474 non-contiguous block that does not contain the entry pc. */
4475 if (block != nullptr
4476 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4477 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4478 {
4479 ecs->stop_func_start
4480 += gdbarch_deprecated_function_start_offset (gdbarch);
4481
4482 if (gdbarch_skip_entrypoint_p (gdbarch))
4483 ecs->stop_func_start
4484 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4485 }
591a12a1 4486
4487 ecs->stop_func_filled_in = 1;
4488 }
4489}
4490
4f5d7f63 4491
00431a78 4492/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4493
4494static enum stop_kind
00431a78 4495get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4496{
5b6d1e4f 4497 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4498
4499 gdb_assert (inf != NULL);
4500 return inf->control.stop_soon;
4501}
4502
5b6d1e4f
PA
4503/* Poll for one event out of the current target. Store the resulting
4504 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4505
4506static ptid_t
5b6d1e4f 4507poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4508{
4509 ptid_t event_ptid;
372316f1
PA
4510
4511 overlay_cache_invalid = 1;
4512
4513 /* Flush target cache before starting to handle each event.
4514 Target was running and cache could be stale. This is just a
4515 heuristic. Running threads may modify target memory, but we
4516 don't get any event. */
4517 target_dcache_invalidate ();
4518
4519 if (deprecated_target_wait_hook)
5b6d1e4f 4520 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4521 else
5b6d1e4f 4522 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4523
4524 if (debug_infrun)
5b6d1e4f 4525 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4526
4527 return event_ptid;
4528}
4529
5b6d1e4f
PA
4530/* An event reported by wait_one. */
4531
4532struct wait_one_event
4533{
4534 /* The target the event came out of. */
4535 process_stratum_target *target;
4536
4537 /* The PTID the event was for. */
4538 ptid_t ptid;
4539
4540 /* The waitstatus. */
4541 target_waitstatus ws;
4542};
4543
4544/* Wait for one event out of any target. */
4545
4546static wait_one_event
4547wait_one ()
4548{
4549 while (1)
4550 {
4551 for (inferior *inf : all_inferiors ())
4552 {
4553 process_stratum_target *target = inf->process_target ();
4554 if (target == NULL
4555 || !target->is_async_p ()
4556 || !target->threads_executing)
4557 continue;
4558
4559 switch_to_inferior_no_thread (inf);
4560
4561 wait_one_event event;
4562 event.target = target;
4563 event.ptid = poll_one_curr_target (&event.ws);
4564
4565 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4566 {
4567 /* If nothing is resumed, remove the target from the
4568 event loop. */
4569 target_async (0);
4570 }
4571 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4572 return event;
4573 }
4574
4575 /* Block waiting for some event. */
4576
4577 fd_set readfds;
4578 int nfds = 0;
4579
4580 FD_ZERO (&readfds);
4581
4582 for (inferior *inf : all_inferiors ())
4583 {
4584 process_stratum_target *target = inf->process_target ();
4585 if (target == NULL
4586 || !target->is_async_p ()
4587 || !target->threads_executing)
4588 continue;
4589
4590 int fd = target->async_wait_fd ();
4591 FD_SET (fd, &readfds);
4592 if (nfds <= fd)
4593 nfds = fd + 1;
4594 }
4595
4596 if (nfds == 0)
4597 {
4598 /* No waitable targets left. All must be stopped. */
4599 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4600 }
4601
4602 QUIT;
4603
4604 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4605 if (numfds < 0)
4606 {
4607 if (errno == EINTR)
4608 continue;
4609 else
4610 perror_with_name ("interruptible_select");
4611 }
4612 }
4613}
4614
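/* For illustration, a typical consumer of wait_one, along the lines
   of stop_all_threads below; the target pointer in the returned
   event disambiguates the ptid when several process targets are
   connected.

     wait_one_event event = wait_one ();
     if (event.ws.kind != TARGET_WAITKIND_NO_RESUMED)
       {
	 thread_info *t = find_thread_ptid (event.target, event.ptid);
	 if (t == NULL)
	   t = add_thread (event.target, event.ptid);
	 save_waitstatus (t, &event.ws);
       }
*/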
372316f1
PA
4615/* Generate a wrapper for target_stopped_by_REASON that works on PTID
4616 instead of the current thread. */
4617#define THREAD_STOPPED_BY(REASON) \
4618static int \
4619thread_stopped_by_ ## REASON (ptid_t ptid) \
4620{ \
2989a365 4621 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
372316f1
PA
4622 inferior_ptid = ptid; \
4623 \
2989a365 4624 return target_stopped_by_ ## REASON (); \
372316f1
PA
4625}
4626
4627/* Generate thread_stopped_by_watchpoint. */
4628THREAD_STOPPED_BY (watchpoint)
4629/* Generate thread_stopped_by_sw_breakpoint. */
4630THREAD_STOPPED_BY (sw_breakpoint)
4631/* Generate thread_stopped_by_hw_breakpoint. */
4632THREAD_STOPPED_BY (hw_breakpoint)
4633
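/* For reference, THREAD_STOPPED_BY (watchpoint) above expands to:

     static int
     thread_stopped_by_watchpoint (ptid_t ptid)
     {
       scoped_restore save_inferior_ptid
	 = make_scoped_restore (&inferior_ptid);
       inferior_ptid = ptid;

       return target_stopped_by_watchpoint ();
     }

   i.e. the "current thread" query is temporarily redirected at PTID
   and restored on scope exit.  */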
372316f1
PA
4634/* Save the thread's event and stop reason to process it later. */
4635
4636static void
5b6d1e4f 4637save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4638{
372316f1
PA
4639 if (debug_infrun)
4640 {
23fdd69e 4641 std::string statstr = target_waitstatus_to_string (ws);
372316f1 4642
372316f1
PA
4643 fprintf_unfiltered (gdb_stdlog,
4644 "infrun: saving status %s for %d.%ld.%ld\n",
23fdd69e 4645 statstr.c_str (),
e99b03dc 4646 tp->ptid.pid (),
e38504b3 4647 tp->ptid.lwp (),
cc6bcb54 4648 tp->ptid.tid ());
372316f1
PA
4649 }
4650
4651 /* Record for later. */
4652 tp->suspend.waitstatus = *ws;
4653 tp->suspend.waitstatus_pending_p = 1;
4654
00431a78 4655 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4656 const address_space *aspace = regcache->aspace ();
372316f1
PA
4657
4658 if (ws->kind == TARGET_WAITKIND_STOPPED
4659 && ws->value.sig == GDB_SIGNAL_TRAP)
4660 {
4661 CORE_ADDR pc = regcache_read_pc (regcache);
4662
4663 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4664
4665 if (thread_stopped_by_watchpoint (tp->ptid))
4666 {
4667 tp->suspend.stop_reason
4668 = TARGET_STOPPED_BY_WATCHPOINT;
4669 }
4670 else if (target_supports_stopped_by_sw_breakpoint ()
4671 && thread_stopped_by_sw_breakpoint (tp->ptid))
4672 {
4673 tp->suspend.stop_reason
4674 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4675 }
4676 else if (target_supports_stopped_by_hw_breakpoint ()
4677 && thread_stopped_by_hw_breakpoint (tp->ptid))
4678 {
4679 tp->suspend.stop_reason
4680 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4681 }
4682 else if (!target_supports_stopped_by_hw_breakpoint ()
4683 && hardware_breakpoint_inserted_here_p (aspace,
4684 pc))
4685 {
4686 tp->suspend.stop_reason
4687 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4688 }
4689 else if (!target_supports_stopped_by_sw_breakpoint ()
4690 && software_breakpoint_inserted_here_p (aspace,
4691 pc))
4692 {
4693 tp->suspend.stop_reason
4694 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4695 }
4696 else if (!thread_has_single_step_breakpoints_set (tp)
4697 && currently_stepping (tp))
4698 {
4699 tp->suspend.stop_reason
4700 = TARGET_STOPPED_BY_SINGLE_STEP;
4701 }
4702 }
4703}
4704
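/* For illustration, a status saved here stays pending on the thread
   until infrun is ready for it; e.g. restart_threads below leaves
   such a thread alone and merely marks it resumed:

     if (tp->suspend.waitstatus_pending_p)
       {
	 tp->resumed = true;
	 continue;
       }
*/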
6efcd9a8 4705/* See infrun.h. */
372316f1 4706
6efcd9a8 4707void
372316f1
PA
4708stop_all_threads (void)
4709{
4710 /* We may need multiple passes to discover all threads. */
4711 int pass;
4712 int iterations = 0;
372316f1 4713
fbea99ea 4714 gdb_assert (target_is_non_stop_p ());
372316f1
PA
4715
4716 if (debug_infrun)
4717 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
4718
00431a78 4719 scoped_restore_current_thread restore_thread;
372316f1 4720
65706a29 4721 target_thread_events (1);
9885e6bb 4722 SCOPE_EXIT { target_thread_events (0); };
65706a29 4723
372316f1
PA
4724 /* Request threads to stop, and then wait for the stops. Because
4725 threads we already know about can spawn more threads while we're
4726 trying to stop them, and we only learn about new threads when we
4727 update the thread list, do this in a loop, and keep iterating
4728 until two passes find no threads that need to be stopped. */
4729 for (pass = 0; pass < 2; pass++, iterations++)
4730 {
4731 if (debug_infrun)
4732 fprintf_unfiltered (gdb_stdlog,
4733 "infrun: stop_all_threads, pass=%d, "
4734 "iterations=%d\n", pass, iterations);
4735 while (1)
4736 {
372316f1 4737 int need_wait = 0;
372316f1
PA
4738
4739 update_thread_list ();
4740
4741 /* Go through all threads looking for threads that we need
4742 to tell the target to stop. */
08036331 4743 for (thread_info *t : all_non_exited_threads ())
372316f1
PA
4744 {
4745 if (t->executing)
4746 {
4747 /* If already stopping, don't request a stop again.
4748 We just haven't seen the notification yet. */
4749 if (!t->stop_requested)
4750 {
4751 if (debug_infrun)
4752 fprintf_unfiltered (gdb_stdlog,
4753 "infrun: %s executing, "
4754 "need stop\n",
a068643d 4755 target_pid_to_str (t->ptid).c_str ());
f3f8ece4 4756 switch_to_thread_no_regs (t);
372316f1
PA
4757 target_stop (t->ptid);
4758 t->stop_requested = 1;
4759 }
4760 else
4761 {
4762 if (debug_infrun)
4763 fprintf_unfiltered (gdb_stdlog,
4764 "infrun: %s executing, "
4765 "already stopping\n",
a068643d 4766 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4767 }
4768
4769 if (t->stop_requested)
4770 need_wait = 1;
4771 }
4772 else
4773 {
4774 if (debug_infrun)
4775 fprintf_unfiltered (gdb_stdlog,
4776 "infrun: %s not executing\n",
a068643d 4777 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4778
4779 /* The thread may be not executing, but still be
4780 resumed with a pending status to process. */
719546c4 4781 t->resumed = false;
372316f1
PA
4782 }
4783 }
4784
4785 if (!need_wait)
4786 break;
4787
 4789 /* If we find new threads on the second iteration, start
4789 over. We want to see two iterations in a row with all
4790 threads stopped. */
4791 if (pass > 0)
4792 pass = -1;
4793
5b6d1e4f
PA
4794 wait_one_event event = wait_one ();
4795
c29705b7 4796 if (debug_infrun)
372316f1 4797 {
c29705b7
PW
4798 fprintf_unfiltered (gdb_stdlog,
4799 "infrun: stop_all_threads %s %s\n",
5b6d1e4f
PA
4800 target_waitstatus_to_string (&event.ws).c_str (),
4801 target_pid_to_str (event.ptid).c_str ());
372316f1 4802 }
372316f1 4803
5b6d1e4f
PA
4804 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED
4805 || event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4806 || event.ws.kind == TARGET_WAITKIND_EXITED
4807 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
c29705b7
PW
4808 {
4809 /* All resumed threads exited
4810 or one thread/process exited/signalled. */
372316f1
PA
4811 }
4812 else
4813 {
5b6d1e4f 4814 thread_info *t = find_thread_ptid (event.target, event.ptid);
372316f1 4815 if (t == NULL)
5b6d1e4f 4816 t = add_thread (event.target, event.ptid);
372316f1
PA
4817
4818 t->stop_requested = 0;
4819 t->executing = 0;
719546c4 4820 t->resumed = false;
372316f1
PA
4821 t->control.may_range_step = 0;
4822
6efcd9a8
PA
4823 /* This may be the first time we see the inferior report
4824 a stop. */
5b6d1e4f 4825 inferior *inf = find_inferior_ptid (event.target, event.ptid);
6efcd9a8
PA
4826 if (inf->needs_setup)
4827 {
4828 switch_to_thread_no_regs (t);
4829 setup_inferior (0);
4830 }
4831
5b6d1e4f
PA
4832 if (event.ws.kind == TARGET_WAITKIND_STOPPED
4833 && event.ws.value.sig == GDB_SIGNAL_0)
372316f1
PA
4834 {
4835 /* We caught the event that we intended to catch, so
4836 there's no event pending. */
4837 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4838 t->suspend.waitstatus_pending_p = 0;
4839
00431a78 4840 if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
372316f1
PA
4841 {
4842 /* Add it back to the step-over queue. */
4843 if (debug_infrun)
4844 {
4845 fprintf_unfiltered (gdb_stdlog,
4846 "infrun: displaced-step of %s "
4847 "canceled: adding back to the "
4848 "step-over queue\n",
a068643d 4849 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4850 }
4851 t->control.trap_expected = 0;
4852 thread_step_over_chain_enqueue (t);
4853 }
4854 }
4855 else
4856 {
4857 enum gdb_signal sig;
4858 struct regcache *regcache;
372316f1
PA
4859
4860 if (debug_infrun)
4861 {
5b6d1e4f 4862 std::string statstr = target_waitstatus_to_string (&event.ws);
372316f1 4863
372316f1
PA
4864 fprintf_unfiltered (gdb_stdlog,
4865 "infrun: target_wait %s, saving "
4866 "status for %d.%ld.%ld\n",
23fdd69e 4867 statstr.c_str (),
e99b03dc 4868 t->ptid.pid (),
e38504b3 4869 t->ptid.lwp (),
cc6bcb54 4870 t->ptid.tid ());
372316f1
PA
4871 }
4872
4873 /* Record for later. */
5b6d1e4f 4874 save_waitstatus (t, &event.ws);
372316f1 4875
5b6d1e4f
PA
4876 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
4877 ? event.ws.value.sig : GDB_SIGNAL_0);
372316f1 4878
00431a78 4879 if (displaced_step_fixup (t, sig) < 0)
372316f1
PA
4880 {
4881 /* Add it back to the step-over queue. */
4882 t->control.trap_expected = 0;
4883 thread_step_over_chain_enqueue (t);
4884 }
4885
00431a78 4886 regcache = get_thread_regcache (t);
372316f1
PA
4887 t->suspend.stop_pc = regcache_read_pc (regcache);
4888
4889 if (debug_infrun)
4890 {
4891 fprintf_unfiltered (gdb_stdlog,
4892 "infrun: saved stop_pc=%s for %s "
4893 "(currently_stepping=%d)\n",
4894 paddress (target_gdbarch (),
4895 t->suspend.stop_pc),
a068643d 4896 target_pid_to_str (t->ptid).c_str (),
372316f1
PA
4897 currently_stepping (t));
4898 }
4899 }
4900 }
4901 }
4902 }
4903
372316f1
PA
4904 if (debug_infrun)
4905 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
4906}
4907
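/* A usage sketch (hypothetical caller, for illustration only):
   stop_all_threads asserts a non-stop target above, so a caller
   would guard it along the lines of

     if (target_is_non_stop_p ())
       stop_all_threads ();

   on an all-stop target, reporting any event already stops every
   thread, so there is nothing extra to do.  */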
f4836ba9
PA
4908/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4909
4910static int
4911handle_no_resumed (struct execution_control_state *ecs)
4912{
3b12939d 4913 if (target_can_async_p ())
f4836ba9 4914 {
3b12939d
PA
4915 struct ui *ui;
4916 int any_sync = 0;
f4836ba9 4917
3b12939d
PA
4918 ALL_UIS (ui)
4919 {
4920 if (ui->prompt_state == PROMPT_BLOCKED)
4921 {
4922 any_sync = 1;
4923 break;
4924 }
4925 }
4926 if (!any_sync)
4927 {
 4928 /* There were no unwaited-for children left in the target, but
4929 we're not synchronously waiting for events either. Just
4930 ignore. */
4931
4932 if (debug_infrun)
4933 fprintf_unfiltered (gdb_stdlog,
4934 "infrun: TARGET_WAITKIND_NO_RESUMED "
4935 "(ignoring: bg)\n");
4936 prepare_to_wait (ecs);
4937 return 1;
4938 }
f4836ba9
PA
4939 }
4940
4941 /* Otherwise, if we were running a synchronous execution command, we
4942 may need to cancel it and give the user back the terminal.
4943
4944 In non-stop mode, the target can't tell whether we've already
4945 consumed previous stop events, so it can end up sending us a
4946 no-resumed event like so:
4947
4948 #0 - thread 1 is left stopped
4949
4950 #1 - thread 2 is resumed and hits breakpoint
4951 -> TARGET_WAITKIND_STOPPED
4952
4953 #2 - thread 3 is resumed and exits
4954 this is the last resumed thread, so
4955 -> TARGET_WAITKIND_NO_RESUMED
4956
4957 #3 - gdb processes stop for thread 2 and decides to re-resume
4958 it.
4959
4960 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4961 thread 2 is now resumed, so the event should be ignored.
4962
4963 IOW, if the stop for thread 2 doesn't end a foreground command,
4964 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4965 event. But it could be that the event meant that thread 2 itself
4966 (or whatever other thread was the last resumed thread) exited.
4967
4968 To address this we refresh the thread list and check whether we
4969 have resumed threads _now_. In the example above, this removes
4970 thread 3 from the thread list. If thread 2 was re-resumed, we
4971 ignore this event. If we find no thread resumed, then we cancel
 4972 the synchronous command and show "no unwaited-for " to the user. */
4973 update_thread_list ();
4974
5b6d1e4f 4975 for (thread_info *thread : all_non_exited_threads (ecs->target))
f4836ba9
PA
4976 {
4977 if (thread->executing
4978 || thread->suspend.waitstatus_pending_p)
4979 {
4980 /* There were no unwaited-for children left in the target at
4981 some point, but there are now. Just ignore. */
4982 if (debug_infrun)
4983 fprintf_unfiltered (gdb_stdlog,
4984 "infrun: TARGET_WAITKIND_NO_RESUMED "
4985 "(ignoring: found resumed)\n");
4986 prepare_to_wait (ecs);
4987 return 1;
4988 }
4989 }
4990
4991 /* Note however that we may find no resumed thread because the whole
4992 process exited meanwhile (thus updating the thread list results
4993 in an empty thread list). In this case we know we'll be getting
4994 a process exit event shortly. */
5b6d1e4f 4995 for (inferior *inf : all_non_exited_inferiors (ecs->target))
f4836ba9 4996 {
08036331 4997 thread_info *thread = any_live_thread_of_inferior (inf);
f4836ba9
PA
4998 if (thread == NULL)
4999 {
5000 if (debug_infrun)
5001 fprintf_unfiltered (gdb_stdlog,
5002 "infrun: TARGET_WAITKIND_NO_RESUMED "
5003 "(expect process exit)\n");
5004 prepare_to_wait (ecs);
5005 return 1;
5006 }
5007 }
5008
5009 /* Go ahead and report the event. */
5010 return 0;
5011}
5012
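/* For illustration, handle_inferior_event below consumes the return
   value as

     if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
	 && handle_no_resumed (ecs))
       return;

   i.e. a non-zero return means the event was ignored and
   prepare_to_wait was already called; a zero return means the
   no-resumed stop really is reported.  */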
05ba8510
PA
5013/* Given an execution control state that has been freshly filled in by
5014 an event from the inferior, figure out what it means and take
5015 appropriate action.
5016
5017 The alternatives are:
5018
22bcd14b 5019 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5020 debugger.
5021
5022 2) keep_going and return; to wait for the next event (set
5023 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5024 once). */
c906108c 5025
ec9499be 5026static void
595915c1 5027handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5028{
595915c1
TT
5029 /* Make sure that all temporary struct value objects that were
5030 created during the handling of the event get deleted at the
5031 end. */
5032 scoped_value_mark free_values;
5033
d6b48e9c
PA
5034 enum stop_kind stop_soon;
5035
c29705b7
PW
5036 if (debug_infrun)
5037 fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
5038 target_waitstatus_to_string (&ecs->ws).c_str ());
5039
28736962
PA
5040 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5041 {
5042 /* We had an event in the inferior, but we are not interested in
5043 handling it at this level. The lower layers have already
5044 done what needs to be done, if anything.
5045
5046 One of the possible circumstances for this is when the
5047 inferior produces output for the console. The inferior has
5048 not stopped, and we are ignoring the event. Another possible
5049 circumstance is any event which the lower level knows will be
5050 reported multiple times without an intervening resume. */
28736962
PA
5051 prepare_to_wait (ecs);
5052 return;
5053 }
5054
65706a29
PA
5055 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5056 {
65706a29
PA
5057 prepare_to_wait (ecs);
5058 return;
5059 }
5060
0e5bf2a8 5061 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5062 && handle_no_resumed (ecs))
5063 return;
0e5bf2a8 5064
5b6d1e4f
PA
5065 /* Cache the last target/ptid/waitstatus. */
5066 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5067
ca005067 5068 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5069 stop_stack_dummy = STOP_NONE;
ca005067 5070
0e5bf2a8
PA
5071 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5072 {
5073 /* No unwaited-for children left. IOW, all resumed children
5074 have exited. */
0e5bf2a8 5075 stop_print_frame = 0;
22bcd14b 5076 stop_waiting (ecs);
0e5bf2a8
PA
5077 return;
5078 }
5079
8c90c137 5080 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5081 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5082 {
5b6d1e4f 5083 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5084 /* If it's a new thread, add it to the thread database. */
5085 if (ecs->event_thread == NULL)
5b6d1e4f 5086 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5087
5088 /* Disable range stepping. If the next step request could use a
5089 range, this will be end up re-enabled then. */
5090 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5091 }
88ed393a
JK
5092
5093 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5094 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5095
5096 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5097 reinit_frame_cache ();
5098
28736962
PA
5099 breakpoint_retire_moribund ();
5100
2b009048
DJ
5101 /* First, distinguish signals caused by the debugger from signals
5102 that have to do with the program's own actions. Note that
5103 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5104 on the operating system version. Here we detect when a SIGILL or
5105 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5106 something similar for SIGSEGV, since a SIGSEGV will be generated
5107 when we're trying to execute a breakpoint instruction on a
5108 non-executable stack. This happens for call dummy breakpoints
5109 for architectures like SPARC that place call dummies on the
5110 stack. */
2b009048 5111 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
5112 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5113 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5114 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5115 {
00431a78 5116 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5117
a01bda52 5118 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5119 regcache_read_pc (regcache)))
5120 {
5121 if (debug_infrun)
5122 fprintf_unfiltered (gdb_stdlog,
5123 "infrun: Treating signal as SIGTRAP\n");
a493e3e2 5124 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5125 }
2b009048
DJ
5126 }
5127
28736962
PA
5128 /* Mark the non-executing threads accordingly. In all-stop, all
5129 threads of all processes are stopped when we get any event
e1316e60 5130 reported. In non-stop mode, only the event thread stops. */
372316f1
PA
5131 {
5132 ptid_t mark_ptid;
5133
fbea99ea 5134 if (!target_is_non_stop_p ())
372316f1
PA
5135 mark_ptid = minus_one_ptid;
5136 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
5137 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
5138 {
5139 /* If we're handling a process exit in non-stop mode, even
5140 though threads haven't been deleted yet, one would think
5141 that there is nothing to do, as threads of the dead process
5142 will be soon deleted, and threads of any other process were
5143 left running. However, on some targets, threads survive a
5144 process exit event. E.g., for the "checkpoint" command,
5145 when the current checkpoint/fork exits, linux-fork.c
5146 automatically switches to another fork from within
5147 target_mourn_inferior, by associating the same
5148 inferior/thread to another fork. We haven't mourned yet at
5149 this point, but we must mark any threads left in the
5150 process as not-executing so that finish_thread_state marks
5151 them stopped (in the user's perspective) if/when we present
5152 the stop to the user. */
e99b03dc 5153 mark_ptid = ptid_t (ecs->ptid.pid ());
372316f1
PA
5154 }
5155 else
5156 mark_ptid = ecs->ptid;
5157
719546c4 5158 set_executing (ecs->target, mark_ptid, false);
372316f1
PA
5159
5160 /* Likewise the resumed flag. */
719546c4 5161 set_resumed (ecs->target, mark_ptid, false);
372316f1 5162 }
8c90c137 5163
488f131b
JB
5164 switch (ecs->ws.kind)
5165 {
5166 case TARGET_WAITKIND_LOADED:
00431a78 5167 context_switch (ecs);
b0f4b84b
DJ
5168 /* Ignore gracefully during startup of the inferior, as it might
 5169 be the shell which has just loaded some objects; otherwise,
5170 add the symbols for the newly loaded objects. Also ignore at
5171 the beginning of an attach or remote session; we will query
5172 the full list of libraries once the connection is
5173 established. */
4f5d7f63 5174
00431a78 5175 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 5176 if (stop_soon == NO_STOP_QUIETLY)
488f131b 5177 {
edcc5120
TT
5178 struct regcache *regcache;
5179
00431a78 5180 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
5181
5182 handle_solib_event ();
5183
5184 ecs->event_thread->control.stop_bpstat
a01bda52 5185 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
5186 ecs->event_thread->suspend.stop_pc,
5187 ecs->event_thread, &ecs->ws);
ab04a2af 5188
c65d6b55
PA
5189 if (handle_stop_requested (ecs))
5190 return;
5191
ce12b012 5192 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
5193 {
5194 /* A catchpoint triggered. */
94c57d6a
PA
5195 process_event_stop_test (ecs);
5196 return;
edcc5120 5197 }
488f131b 5198
b0f4b84b
DJ
5199 /* If requested, stop when the dynamic linker notifies
5200 gdb of events. This allows the user to get control
5201 and place breakpoints in initializer routines for
5202 dynamically loaded objects (among other things). */
a493e3e2 5203 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
5204 if (stop_on_solib_events)
5205 {
55409f9d
DJ
5206 /* Make sure we print "Stopped due to solib-event" in
5207 normal_stop. */
5208 stop_print_frame = 1;
5209
22bcd14b 5210 stop_waiting (ecs);
b0f4b84b
DJ
5211 return;
5212 }
488f131b 5213 }
b0f4b84b
DJ
5214
5215 /* If we are skipping through a shell, or through shared library
5216 loading that we aren't interested in, resume the program. If
5c09a2c5 5217 we're running the program normally, also resume. */
b0f4b84b
DJ
5218 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5219 {
74960c60
VP
5220 /* Loading of shared libraries might have changed breakpoint
5221 addresses. Make sure new breakpoints are inserted. */
a25a5a45 5222 if (stop_soon == NO_STOP_QUIETLY)
74960c60 5223 insert_breakpoints ();
64ce06e4 5224 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
5225 prepare_to_wait (ecs);
5226 return;
5227 }
5228
5c09a2c5
PA
5229 /* But stop if we're attaching or setting up a remote
5230 connection. */
5231 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5232 || stop_soon == STOP_QUIETLY_REMOTE)
5233 {
5234 if (debug_infrun)
5235 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
22bcd14b 5236 stop_waiting (ecs);
5c09a2c5
PA
5237 return;
5238 }
5239
5240 internal_error (__FILE__, __LINE__,
5241 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 5242
488f131b 5243 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5244 if (handle_stop_requested (ecs))
5245 return;
00431a78 5246 context_switch (ecs);
64ce06e4 5247 resume (GDB_SIGNAL_0);
488f131b
JB
5248 prepare_to_wait (ecs);
5249 return;
c5aa993b 5250
65706a29 5251 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5252 if (handle_stop_requested (ecs))
5253 return;
00431a78 5254 context_switch (ecs);
65706a29
PA
5255 if (!switch_back_to_stepped_thread (ecs))
5256 keep_going (ecs);
5257 return;
5258
488f131b 5259 case TARGET_WAITKIND_EXITED:
940c3c06 5260 case TARGET_WAITKIND_SIGNALLED:
fb66883a 5261 inferior_ptid = ecs->ptid;
5b6d1e4f 5262 set_current_inferior (find_inferior_ptid (ecs->target, ecs->ptid));
6c95b8df
PA
5263 set_current_program_space (current_inferior ()->pspace);
5264 handle_vfork_child_exec_or_exit (0);
223ffa71 5265 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5266
0c557179
SDJ
 5267 /* Clear any previous state of convenience variables. */
5268 clear_exit_convenience_vars ();
5269
940c3c06
PA
5270 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5271 {
5272 /* Record the exit code in the convenience variable $_exitcode, so
5273 that the user can inspect this again later. */
5274 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5275 (LONGEST) ecs->ws.value.integer);
5276
5277 /* Also record this in the inferior itself. */
5278 current_inferior ()->has_exit_code = 1;
5279 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5280
98eb56a4
PA
5281 /* Support the --return-child-result option. */
5282 return_child_result_value = ecs->ws.value.integer;
5283
76727919 5284 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5285 }
5286 else
0c557179 5287 {
00431a78 5288 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5289
5290 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5291 {
5292 /* Set the value of the internal variable $_exitsignal,
5293 which holds the signal uncaught by the inferior. */
5294 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5295 gdbarch_gdb_signal_to_target (gdbarch,
5296 ecs->ws.value.sig));
5297 }
5298 else
5299 {
5300 /* We don't have access to the target's method used for
5301 converting between signal numbers (GDB's internal
5302 representation <-> target's representation).
5303 Therefore, we cannot do a good job at displaying this
5304 information to the user. It's better to just warn
5305 her about it (if infrun debugging is enabled), and
5306 give up. */
5307 if (debug_infrun)
5308 fprintf_filtered (gdb_stdlog, _("\
5309Cannot fill $_exitsignal with the correct signal number.\n"));
5310 }
5311
76727919 5312 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5313 }
8cf64490 5314
488f131b 5315 gdb_flush (gdb_stdout);
bc1e6c81 5316 target_mourn_inferior (inferior_ptid);
488f131b 5317 stop_print_frame = 0;
22bcd14b 5318 stop_waiting (ecs);
488f131b 5319 return;
c5aa993b 5320
488f131b 5321 case TARGET_WAITKIND_FORKED:
deb3b17b 5322 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5323 /* Check whether the inferior is displaced stepping. */
5324 {
00431a78 5325 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5326 struct gdbarch *gdbarch = regcache->arch ();
e2d96639
YQ
5327
 5328 /* If displaced stepping is supported, and thread ecs->ptid is in
 5329 the middle of a displaced step, fix things up below. */
00431a78 5330 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639
YQ
5331 {
5332 struct inferior *parent_inf
5b6d1e4f 5333 = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639
YQ
5334 struct regcache *child_regcache;
5335 CORE_ADDR parent_pc;
5336
d8d83535
SM
5337 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
5338 {
5339 struct displaced_step_inferior_state *displaced
5340 = get_displaced_stepping_state (parent_inf);
5341
5342 /* Restore scratch pad for child process. */
5343 displaced_step_restore (displaced, ecs->ws.value.related_pid);
5344 }
5345
e2d96639
YQ
5346 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
 5347 indicating that the displaced stepping of the syscall instruction
 5348 has been done. Perform cleanup for the parent process here. Note
5349 that this operation also cleans up the child process for vfork,
5350 because their pages are shared. */
00431a78 5351 displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5352 /* Start a new step-over in another thread if there's one
5353 that needs it. */
5354 start_step_over ();
e2d96639 5355
e2d96639
YQ
5356 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5357 the child's PC is also within the scratchpad. Set the child's PC
5358 to the parent's PC value, which has already been fixed up.
5359 FIXME: we use the parent's aspace here, although we're touching
5360 the child, because the child hasn't been added to the inferior
5361 list yet at this point. */
5362
5363 child_regcache
5b6d1e4f
PA
5364 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5365 ecs->ws.value.related_pid,
e2d96639
YQ
5366 gdbarch,
5367 parent_inf->aspace);
5368 /* Read PC value of parent process. */
5369 parent_pc = regcache_read_pc (regcache);
5370
5371 if (debug_displaced)
5372 fprintf_unfiltered (gdb_stdlog,
5373 "displaced: write child pc from %s to %s\n",
5374 paddress (gdbarch,
5375 regcache_read_pc (child_regcache)),
5376 paddress (gdbarch, parent_pc));
5377
5378 regcache_write_pc (child_regcache, parent_pc);
5379 }
5380 }
5381
00431a78 5382 context_switch (ecs);
5a2901d9 5383
b242c3c2
PA
5384 /* Immediately detach breakpoints from the child before there's
5385 any chance of letting the user delete breakpoints from the
5386 breakpoint lists. If we don't do this early, it's easy to
 5387 leave left-over traps in the child, viz.: "break foo; catch
5388 fork; c; <fork>; del; c; <child calls foo>". We only follow
5389 the fork on the last `continue', and by that time the
5390 breakpoint at "foo" is long gone from the breakpoint table.
5391 If we vforked, then we don't need to unpatch here, since both
5392 parent and child are sharing the same memory pages; we'll
5393 need to unpatch at follow/detach time instead to be certain
5394 that new breakpoints added between catchpoint hit time and
5395 vfork follow are detached. */
5396 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5397 {
b242c3c2
PA
5398 /* This won't actually modify the breakpoint list, but will
5399 physically remove the breakpoints from the child. */
d80ee84f 5400 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5401 }
5402
34b7e8a6 5403 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5404
e58b0e63
PA
5405 /* In case the event is caught by a catchpoint, remember that
5406 the event is to be followed at the next resume of the thread,
5407 and not immediately. */
5408 ecs->event_thread->pending_follow = ecs->ws;
5409
f2ffa92b
PA
5410 ecs->event_thread->suspend.stop_pc
5411 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5412
16c381f0 5413 ecs->event_thread->control.stop_bpstat
a01bda52 5414 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5415 ecs->event_thread->suspend.stop_pc,
5416 ecs->event_thread, &ecs->ws);
675bf4cb 5417
c65d6b55
PA
5418 if (handle_stop_requested (ecs))
5419 return;
5420
ce12b012
PA
5421 /* If no catchpoint triggered for this, then keep going. Note
 5422 that we're interested in knowing whether the bpstat actually causes a
5423 stop, not just if it may explain the signal. Software
5424 watchpoints, for example, always appear in the bpstat. */
5425 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5426 {
5ab2fbf1 5427 bool follow_child
3e43a32a 5428 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5429
a493e3e2 5430 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5431
5b6d1e4f
PA
5432 process_stratum_target *targ
5433 = ecs->event_thread->inf->process_target ();
5434
5ab2fbf1 5435 bool should_resume = follow_fork ();
e58b0e63 5436
5b6d1e4f
PA
5437 /* Note that one of these may be an invalid pointer,
5438 depending on detach_fork. */
00431a78 5439 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5440 thread_info *child
5441 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5442
a2077e25
PA
5443 /* At this point, the parent is marked running, and the
5444 child is marked stopped. */
5445
5446 /* If not resuming the parent, mark it stopped. */
5447 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5448 parent->set_running (false);
a2077e25
PA
5449
5450 /* If resuming the child, mark it running. */
5451 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5452 child->set_running (true);
a2077e25 5453
6c95b8df 5454 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5455 if (!detach_fork && (non_stop
5456 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5457 {
5458 if (follow_child)
5459 switch_to_thread (parent);
5460 else
5461 switch_to_thread (child);
5462
5463 ecs->event_thread = inferior_thread ();
5464 ecs->ptid = inferior_ptid;
5465 keep_going (ecs);
5466 }
5467
5468 if (follow_child)
5469 switch_to_thread (child);
5470 else
5471 switch_to_thread (parent);
5472
e58b0e63
PA
5473 ecs->event_thread = inferior_thread ();
5474 ecs->ptid = inferior_ptid;
5475
5476 if (should_resume)
5477 keep_going (ecs);
5478 else
22bcd14b 5479 stop_waiting (ecs);
04e68871
DJ
5480 return;
5481 }
94c57d6a
PA
5482 process_event_stop_test (ecs);
5483 return;
488f131b 5484
6c95b8df
PA
5485 case TARGET_WAITKIND_VFORK_DONE:
5486 /* Done with the shared memory region. Re-insert breakpoints in
5487 the parent, and keep going. */
5488
00431a78 5489 context_switch (ecs);
6c95b8df
PA
5490
5491 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5492 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5493
5494 if (handle_stop_requested (ecs))
5495 return;
5496
6c95b8df
PA
5497 /* This also takes care of reinserting breakpoints in the
5498 previously locked inferior. */
5499 keep_going (ecs);
5500 return;
5501
488f131b 5502 case TARGET_WAITKIND_EXECD:
488f131b 5503
cbd2b4e3
PA
5504 /* Note we can't read registers yet (the stop_pc), because we
5505 don't yet know the inferior's post-exec architecture.
5506 'stop_pc' is explicitly read below instead. */
00431a78 5507 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5508
6c95b8df
PA
5509 /* Do whatever is necessary to the parent branch of the vfork. */
5510 handle_vfork_child_exec_or_exit (1);
5511
795e548f
PA
5512 /* This causes the eventpoints and symbol table to be reset.
5513 Must do this now, before trying to determine whether to
5514 stop. */
71b43ef8 5515 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5516
17d8546e
DB
5517 /* In follow_exec we may have deleted the original thread and
5518 created a new one. Make sure that the event thread is the
5519 execd thread for that case (this is a nop otherwise). */
5520 ecs->event_thread = inferior_thread ();
5521
f2ffa92b
PA
5522 ecs->event_thread->suspend.stop_pc
5523 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5524
16c381f0 5525 ecs->event_thread->control.stop_bpstat
a01bda52 5526 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5527 ecs->event_thread->suspend.stop_pc,
5528 ecs->event_thread, &ecs->ws);
795e548f 5529
71b43ef8
PA
5530 /* Note that this may be referenced from inside
5531 bpstat_stop_status above, through inferior_has_execd. */
5532 xfree (ecs->ws.value.execd_pathname);
5533 ecs->ws.value.execd_pathname = NULL;
5534
c65d6b55
PA
5535 if (handle_stop_requested (ecs))
5536 return;
5537
04e68871 5538 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5539 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5540 {
a493e3e2 5541 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5542 keep_going (ecs);
5543 return;
5544 }
94c57d6a
PA
5545 process_event_stop_test (ecs);
5546 return;
488f131b 5547
b4dc5ffa
MK
5548 /* Be careful not to try to gather much state about a thread
5549 that's in a syscall. It's frequently a losing proposition. */
488f131b 5550 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5551 /* Getting the current syscall number. */
94c57d6a
PA
5552 if (handle_syscall_event (ecs) == 0)
5553 process_event_stop_test (ecs);
5554 return;
c906108c 5555
488f131b
JB
5556 /* Before examining the threads further, step this thread to
5557 get it entirely out of the syscall. (We get notice of the
5558 event when the thread is just on the verge of exiting a
5559 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5560 into user code.) */
488f131b 5561 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5562 if (handle_syscall_event (ecs) == 0)
5563 process_event_stop_test (ecs);
5564 return;
c906108c 5565
488f131b 5566 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5567 handle_signal_stop (ecs);
5568 return;
c906108c 5569
b2175913
MS
5570 case TARGET_WAITKIND_NO_HISTORY:
5571 /* Reverse execution: target ran out of history info. */
eab402df 5572
d1988021 5573 /* Switch to the stopped thread. */
00431a78 5574 context_switch (ecs);
d1988021
MM
5575 if (debug_infrun)
5576 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5577
34b7e8a6 5578 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5579 ecs->event_thread->suspend.stop_pc
5580 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5581
5582 if (handle_stop_requested (ecs))
5583 return;
5584
76727919 5585 gdb::observers::no_history.notify ();
22bcd14b 5586 stop_waiting (ecs);
b2175913 5587 return;
488f131b 5588 }
4f5d7f63
PA
5589}
5590
372316f1
PA
5591/* Restart threads back to what they were trying to do back when we
5592 paused them for an in-line step-over. The EVENT_THREAD thread is
5593 ignored. */
4d9d9d04
PA
5594
5595static void
372316f1
PA
5596restart_threads (struct thread_info *event_thread)
5597{
372316f1
PA
5598 /* In case the instruction just stepped spawned a new thread. */
5599 update_thread_list ();
5600
08036331 5601 for (thread_info *tp : all_non_exited_threads ())
372316f1 5602 {
f3f8ece4
PA
5603 switch_to_thread_no_regs (tp);
5604
372316f1
PA
5605 if (tp == event_thread)
5606 {
5607 if (debug_infrun)
5608 fprintf_unfiltered (gdb_stdlog,
5609 "infrun: restart threads: "
5610 "[%s] is event thread\n",
a068643d 5611 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5612 continue;
5613 }
5614
5615 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5616 {
5617 if (debug_infrun)
5618 fprintf_unfiltered (gdb_stdlog,
5619 "infrun: restart threads: "
5620 "[%s] not meant to be running\n",
a068643d 5621 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5622 continue;
5623 }
5624
5625 if (tp->resumed)
5626 {
5627 if (debug_infrun)
5628 fprintf_unfiltered (gdb_stdlog,
5629 "infrun: restart threads: [%s] resumed\n",
a068643d 5630 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5631 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5632 continue;
5633 }
5634
5635 if (thread_is_in_step_over_chain (tp))
5636 {
5637 if (debug_infrun)
5638 fprintf_unfiltered (gdb_stdlog,
5639 "infrun: restart threads: "
5640 "[%s] needs step-over\n",
a068643d 5641 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5642 gdb_assert (!tp->resumed);
5643 continue;
5644 }
5645
5646
5647 if (tp->suspend.waitstatus_pending_p)
5648 {
5649 if (debug_infrun)
5650 fprintf_unfiltered (gdb_stdlog,
5651 "infrun: restart threads: "
5652 "[%s] has pending status\n",
a068643d 5653 target_pid_to_str (tp->ptid).c_str ());
719546c4 5654 tp->resumed = true;
372316f1
PA
5655 continue;
5656 }
5657
c65d6b55
PA
5658 gdb_assert (!tp->stop_requested);
5659
372316f1
PA
5660 /* If some thread needs to start a step-over at this point, it
5661 should still be in the step-over queue, and thus skipped
5662 above. */
5663 if (thread_still_needs_step_over (tp))
5664 {
5665 internal_error (__FILE__, __LINE__,
5666 "thread [%s] needs a step-over, but not in "
5667 "step-over queue\n",
a068643d 5668 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5669 }
5670
5671 if (currently_stepping (tp))
5672 {
5673 if (debug_infrun)
5674 fprintf_unfiltered (gdb_stdlog,
5675 "infrun: restart threads: [%s] was stepping\n",
a068643d 5676 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5677 keep_going_stepped_thread (tp);
5678 }
5679 else
5680 {
5681 struct execution_control_state ecss;
5682 struct execution_control_state *ecs = &ecss;
5683
5684 if (debug_infrun)
5685 fprintf_unfiltered (gdb_stdlog,
5686 "infrun: restart threads: [%s] continuing\n",
a068643d 5687 target_pid_to_str (tp->ptid).c_str ());
372316f1 5688 reset_ecs (ecs, tp);
00431a78 5689 switch_to_thread (tp);
372316f1
PA
5690 keep_going_pass_signal (ecs);
5691 }
5692 }
5693}
5694
5695/* Callback for iterate_over_threads. Find a resumed thread that has
5696 a pending waitstatus. */
5697
5698static int
5699resumed_thread_with_pending_status (struct thread_info *tp,
5700 void *arg)
5701{
5702 return (tp->resumed
5703 && tp->suspend.waitstatus_pending_p);
5704}
5705
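/* For illustration, this callback is used with iterate_over_threads,
   as in finish_step_over below:

     pending = iterate_over_threads (resumed_thread_with_pending_status,
				     NULL);
     if (pending != NULL)
       ...  // some resumed thread still has an unprocessed event
*/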
5706/* Called when we get an event that may finish an in-line or
5707 out-of-line (displaced stepping) step-over started previously.
5708 Return true if the event is processed and we should go back to the
5709 event loop; false if the caller should continue processing the
5710 event. */
5711
5712static int
4d9d9d04
PA
5713finish_step_over (struct execution_control_state *ecs)
5714{
372316f1
PA
5715 int had_step_over_info;
5716
00431a78 5717 displaced_step_fixup (ecs->event_thread,
4d9d9d04
PA
5718 ecs->event_thread->suspend.stop_signal);
5719
372316f1
PA
5720 had_step_over_info = step_over_info_valid_p ();
5721
5722 if (had_step_over_info)
4d9d9d04
PA
5723 {
5724 /* If we're stepping over a breakpoint with all threads locked,
5725 then only the thread that was stepped should be reporting
5726 back an event. */
5727 gdb_assert (ecs->event_thread->control.trap_expected);
5728
c65d6b55 5729 clear_step_over_info ();
4d9d9d04
PA
5730 }
5731
fbea99ea 5732 if (!target_is_non_stop_p ())
372316f1 5733 return 0;
4d9d9d04
PA
5734
5735 /* Start a new step-over in another thread if there's one that
5736 needs it. */
5737 start_step_over ();
372316f1
PA
5738
5739 /* If we were stepping over a breakpoint before, and haven't started
5740 a new in-line step-over sequence, then restart all other threads
5741 (except the event thread). We can't do this in all-stop, as then
5742 e.g., we wouldn't be able to issue any other remote packet until
5743 these other threads stop. */
5744 if (had_step_over_info && !step_over_info_valid_p ())
5745 {
5746 struct thread_info *pending;
5747
5748 /* If we only have threads with pending statuses, the restart
5749 below won't restart any thread and so nothing re-inserts the
5750 breakpoint we just stepped over. But we need it inserted
5751 when we later process the pending events, otherwise if
5752 another thread has a pending event for this breakpoint too,
5753 we'd discard its event (because the breakpoint that
5754 originally caused the event was no longer inserted). */
00431a78 5755 context_switch (ecs);
372316f1
PA
5756 insert_breakpoints ();
5757
5758 restart_threads (ecs->event_thread);
5759
5760 /* If we have events pending, go through handle_inferior_event
5761 again, picking up a pending event at random. This avoids
5762 thread starvation. */
5763
5764 /* But not if we just stepped over a watchpoint in order to let
5765 the instruction execute so we can evaluate its expression.
5766 The set of watchpoints that triggered is recorded in the
5767 breakpoint objects themselves (see bp->watchpoint_triggered).
5768 If we processed another event first, that other event could
5769 clobber this info. */
5770 if (ecs->event_thread->stepping_over_watchpoint)
5771 return 0;
5772
5773 pending = iterate_over_threads (resumed_thread_with_pending_status,
5774 NULL);
5775 if (pending != NULL)
5776 {
5777 struct thread_info *tp = ecs->event_thread;
5778 struct regcache *regcache;
5779
5780 if (debug_infrun)
5781 {
5782 fprintf_unfiltered (gdb_stdlog,
5783 "infrun: found resumed threads with "
5784 "pending events, saving status\n");
5785 }
5786
5787 gdb_assert (pending != tp);
5788
5789 /* Record the event thread's event for later. */
5790 save_waitstatus (tp, &ecs->ws);
5791 /* This was cleared early, by handle_inferior_event. Set it
5792 so this pending event is considered by
5793 do_target_wait. */
719546c4 5794 tp->resumed = true;
372316f1
PA
5795
5796 gdb_assert (!tp->executing);
5797
00431a78 5798 regcache = get_thread_regcache (tp);
372316f1
PA
5799 tp->suspend.stop_pc = regcache_read_pc (regcache);
5800
5801 if (debug_infrun)
5802 {
5803 fprintf_unfiltered (gdb_stdlog,
5804 "infrun: saved stop_pc=%s for %s "
5805 "(currently_stepping=%d)\n",
5806 paddress (target_gdbarch (),
5807 tp->suspend.stop_pc),
a068643d 5808 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
5809 currently_stepping (tp));
5810 }
5811
5812 /* This in-line step-over finished; clear this so we won't
5813 start a new one. This is what handle_signal_stop would
5814 do, if we returned false. */
5815 tp->stepping_over_breakpoint = 0;
5816
5817 /* Wake up the event loop again. */
5818 mark_async_event_handler (infrun_async_inferior_event_token);
5819
5820 prepare_to_wait (ecs);
5821 return 1;
5822 }
5823 }
5824
5825 return 0;
4d9d9d04
PA
5826}
5827
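/* For illustration, handle_signal_stop below consumes the return
   value as

     if (finish_step_over (ecs))
       return;

   a true return means the event was already taken care of and we go
   back to the event loop; a false return means the caller continues
   processing the event.  */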
4f5d7f63
PA
5828/* Come here when the program has stopped with a signal. */
5829
5830static void
5831handle_signal_stop (struct execution_control_state *ecs)
5832{
5833 struct frame_info *frame;
5834 struct gdbarch *gdbarch;
5835 int stopped_by_watchpoint;
5836 enum stop_kind stop_soon;
5837 int random_signal;
c906108c 5838
f0407826
DE
5839 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5840
c65d6b55
PA
5841 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5842
f0407826
DE
5843 /* Do we need to clean up the state of a thread that has
5844 completed a displaced single-step? (Doing so usually affects
5845 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5846 if (finish_step_over (ecs))
5847 return;
f0407826
DE
5848
5849 /* If we either finished a single-step or hit a breakpoint, but
5850 the user wanted this thread to be stopped, pretend we got a
5851 SIG0 (generic unsignaled stop). */
5852 if (ecs->event_thread->stop_requested
5853 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5854 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5855
f2ffa92b
PA
5856 ecs->event_thread->suspend.stop_pc
5857 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5858
527159b7 5859 if (debug_infrun)
237fc4c9 5860 {
00431a78 5861 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5862 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5863
f3f8ece4 5864 switch_to_thread (ecs->event_thread);
5af949e3
UW
5865
5866 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
b926417a 5867 paddress (reg_gdbarch,
f2ffa92b 5868 ecs->event_thread->suspend.stop_pc));
d92524f1 5869 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5870 {
5871 CORE_ADDR addr;
abbb1732 5872
237fc4c9
PA
5873 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5874
8b88a78e 5875 if (target_stopped_data_address (current_top_target (), &addr))
237fc4c9 5876 fprintf_unfiltered (gdb_stdlog,
5af949e3 5877 "infrun: stopped data address = %s\n",
b926417a 5878 paddress (reg_gdbarch, addr));
237fc4c9
PA
5879 else
5880 fprintf_unfiltered (gdb_stdlog,
5881 "infrun: (no data address available)\n");
5882 }
5883 }
527159b7 5884
36fa8042
PA
 5885 /* This originates from start_remote(), start_inferior() and
 5886 the shared library hook functions. */
00431a78 5887 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5888 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5889 {
00431a78 5890 context_switch (ecs);
36fa8042
PA
5891 if (debug_infrun)
5892 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5893 stop_print_frame = 1;
22bcd14b 5894 stop_waiting (ecs);
36fa8042
PA
5895 return;
5896 }
5897
36fa8042
PA
5898 /* This originates from attach_command(). We need to overwrite
5899 the stop_signal here, because some kernels don't ignore a
5900 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5901 See more comments in inferior.h. On the other hand, if we
5902 get a non-SIGSTOP, report it to the user - assume the backend
5903 will handle the SIGSTOP if it should show up later.
5904
5905 Also consider that the attach is complete when we see a
5906 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5907 target extended-remote report it instead of a SIGSTOP
5908 (e.g. gdbserver). We already rely on SIGTRAP being our
5909 signal, so this is no exception.
5910
5911 Also consider that the attach is complete when we see a
5912 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5913 the target to stop all threads of the inferior, in case the
5914 low level attach operation doesn't stop them implicitly. If
5915 they weren't stopped implicitly, then the stub will report a
5916 GDB_SIGNAL_0, meaning: stopped for no particular reason
5917 other than GDB's request. */
5918 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5919 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5920 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5921 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5922 {
5923 stop_print_frame = 1;
22bcd14b 5924 stop_waiting (ecs);
36fa8042
PA
5925 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5926 return;
5927 }
5928
488f131b 5929 /* See if something interesting happened to the non-current thread. If
b40c7d58 5930 so, then switch to that thread. */
d7e15655 5931 if (ecs->ptid != inferior_ptid)
488f131b 5932 {
527159b7 5933 if (debug_infrun)
8a9de0e4 5934 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
527159b7 5935
00431a78 5936 context_switch (ecs);
c5aa993b 5937
9a4105ab 5938 if (deprecated_context_hook)
00431a78 5939 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5940 }
c906108c 5941
568d6575
UW
5942 /* At this point, get hold of the now-current thread's frame. */
5943 frame = get_current_frame ();
5944 gdbarch = get_frame_arch (frame);
5945
2adfaa28 5946 /* Pull the single step breakpoints out of the target. */
af48d08f 5947 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5948 {
af48d08f 5949 struct regcache *regcache;
af48d08f 5950 CORE_ADDR pc;
2adfaa28 5951
00431a78 5952 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5953 const address_space *aspace = regcache->aspace ();
5954
af48d08f 5955 pc = regcache_read_pc (regcache);
34b7e8a6 5956
af48d08f
PA
5957 /* However, before doing so, if this single-step breakpoint was
5958 actually for another thread, set this thread up for moving
5959 past it. */
5960 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5961 aspace, pc))
5962 {
5963 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28
PA
5964 {
5965 if (debug_infrun)
5966 {
5967 fprintf_unfiltered (gdb_stdlog,
af48d08f 5968 "infrun: [%s] hit another thread's "
34b7e8a6 5969 "single-step breakpoint\n",
a068643d 5970 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5971 }
af48d08f
PA
5972 ecs->hit_singlestep_breakpoint = 1;
5973 }
5974 }
5975 else
5976 {
5977 if (debug_infrun)
5978 {
5979 fprintf_unfiltered (gdb_stdlog,
5980 "infrun: [%s] hit its "
5981 "single-step breakpoint\n",
a068643d 5982 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28
PA
5983 }
5984 }
488f131b 5985 }
af48d08f 5986 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5987
963f9c80
PA
5988 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5989 && ecs->event_thread->control.trap_expected
5990 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5991 stopped_by_watchpoint = 0;
5992 else
5993 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5994
5995 /* If necessary, step over this watchpoint. We'll be back to display
5996 it in a moment. */
5997 if (stopped_by_watchpoint
d92524f1 5998 && (target_have_steppable_watchpoint
568d6575 5999 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 6000 {
488f131b
JB
6001 /* At this point, we are stopped at an instruction which has
6002 attempted to write to a piece of memory under control of
6003 a watchpoint. The instruction hasn't actually executed
6004 yet. If we were to evaluate the watchpoint expression
6005 now, we would get the old value, and therefore no change
6006 would seem to have occurred.
6007
6008 In order to make watchpoints work `right', we really need
6009 to complete the memory write, and then evaluate the
d983da9c
DJ
6010 watchpoint expression. We do this by single-stepping the
6011 target.
6012
7f89fd65 6013 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
6014 it. For example, the PA can (with some kernel cooperation)
6015 single step over a watchpoint without disabling the watchpoint.
6016
6017 It is far more common to need to disable a watchpoint to step
6018 the inferior over it. If we have non-steppable watchpoints,
6019 we must disable the current watchpoint; it's simplest to
963f9c80
PA
6020 disable all watchpoints.
6021
6022 Any breakpoint at PC must also be stepped over -- if there's
6023 one, it will have already triggered before the watchpoint
6024 triggered, and we either already reported it to the user, or
6025 it didn't cause a stop and we called keep_going. In either
6026 case, if there was a breakpoint at PC, we must be trying to
6027 step past it. */
6028 ecs->event_thread->stepping_over_watchpoint = 1;
6029 keep_going (ecs);
488f131b
JB
6030 return;
6031 }
6032
4e1c45ea 6033 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 6034 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
6035 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6036 ecs->event_thread->control.stop_step = 0;
488f131b 6037 stop_print_frame = 1;
488f131b 6038 stopped_by_random_signal = 0;
ddfe970e 6039 bpstat stop_chain = NULL;
488f131b 6040
edb3359d
DJ
6041 /* Hide inlined functions starting here, unless we just performed stepi or
6042 nexti. After stepi and nexti, always show the innermost frame (not any
6043 inline function call sites). */
16c381f0 6044 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 6045 {
00431a78
PA
6046 const address_space *aspace
6047 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
6048
6049 /* skip_inline_frames is expensive, so we avoid it if we can
6050 determine that the address is one where functions cannot have
6051 been inlined. This improves performance with inferiors that
6052 load a lot of shared libraries, because the solib event
6053 breakpoint is defined as the address of a function (i.e. not
6054 inline). Note that we have to check the previous PC as well
6055 as the current one to catch cases when we have just
6056 single-stepped off a breakpoint prior to reinstating it.
6057 Note that we're assuming that the code we single-step to is
6058 not inline, but that's not definitive: there's nothing
6059 preventing the event breakpoint function from containing
6060 inlined code, and the single-step ending up there. If the
6061 user had set a breakpoint on that inlined code, the missing
6062 skip_inline_frames call would break things. Fortunately
6063 that's an extremely unlikely scenario. */
f2ffa92b
PA
6064 if (!pc_at_non_inline_function (aspace,
6065 ecs->event_thread->suspend.stop_pc,
6066 &ecs->ws)
a210c238
MR
6067 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6068 && ecs->event_thread->control.trap_expected
6069 && pc_at_non_inline_function (aspace,
6070 ecs->event_thread->prev_pc,
09ac7c10 6071 &ecs->ws)))
1c5a993e 6072 {
f2ffa92b
PA
6073 stop_chain = build_bpstat_chain (aspace,
6074 ecs->event_thread->suspend.stop_pc,
6075 &ecs->ws);
00431a78 6076 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
6077
6078 /* Re-fetch current thread's frame in case that invalidated
6079 the frame cache. */
6080 frame = get_current_frame ();
6081 gdbarch = get_frame_arch (frame);
6082 }
0574c78f 6083 }
edb3359d 6084
a493e3e2 6085 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 6086 && ecs->event_thread->control.trap_expected
568d6575 6087 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6088 && currently_stepping (ecs->event_thread))
3352ef37 6089 {
b50d7442 6090 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6091 also on an instruction that needs to be stepped multiple
1777feb0 6092 times before it has fully executed. E.g., architectures
3352ef37
AC
6093 with a delay slot. It needs to be stepped twice, once for
6094 the instruction and once for the delay slot. */
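  /* MIPS-style branch delay slots are the classic case: the branch and
     the instruction in its delay slot have to be stepped as a pair.  */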
6095 int step_through_delay
568d6575 6096 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6097
527159b7 6098 if (debug_infrun && step_through_delay)
8a9de0e4 6099 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
16c381f0
JK
6100 if (ecs->event_thread->control.step_range_end == 0
6101 && step_through_delay)
3352ef37
AC
6102 {
6103 /* The user issued a continue when stopped at a breakpoint.
6104 Set up for another trap and get out of here. */
4e1c45ea 6105 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6106 keep_going (ecs);
6107 return;
6108 }
6109 else if (step_through_delay)
6110 {
6111 /* The user issued a step when stopped at a breakpoint.
6112 Maybe we should stop, maybe we should not - the delay
6113 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6114 case, don't decide that here, just set
6115 ecs->stepping_over_breakpoint, making sure we
6116 single-step again before breakpoints are re-inserted. */
4e1c45ea 6117 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6118 }
6119 }
6120
ab04a2af
TT
6121 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6122 handles this event. */
6123 ecs->event_thread->control.stop_bpstat
a01bda52 6124 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
6125 ecs->event_thread->suspend.stop_pc,
6126 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6127
ab04a2af
TT
 6128 /* The following is needed in case a breakpoint condition called a
 6129 function. */
6130 stop_print_frame = 1;
73dd234f 6131
ab04a2af
TT
6132 /* This is where we handle "moribund" watchpoints. Unlike
 6133 software breakpoint traps, hardware watchpoint traps are
6134 always distinguishable from random traps. If no high-level
6135 watchpoint is associated with the reported stop data address
6136 anymore, then the bpstat does not explain the signal ---
6137 simply make sure to ignore it if `stopped_by_watchpoint' is
6138 set. */
6139
6140 if (debug_infrun
6141 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6142 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6143 GDB_SIGNAL_TRAP)
ab04a2af
TT
6144 && stopped_by_watchpoint)
6145 fprintf_unfiltered (gdb_stdlog,
6146 "infrun: no user watchpoint explains "
6147 "watchpoint SIGTRAP, ignoring\n");
73dd234f 6148
bac7d97b 6149 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6150 at one stage in the past included checks for an inferior
6151 function call's call dummy's return breakpoint. The original
6152 comment, that went with the test, read:
03cebad2 6153
ab04a2af
TT
6154 ``End of a stack dummy. Some systems (e.g. Sony news) give
6155 another signal besides SIGTRAP, so check here as well as
6156 above.''
73dd234f 6157
ab04a2af
TT
 6158 If someone ever tries to get call dummies on a
6159 non-executable stack to work (where the target would stop
6160 with something like a SIGSEGV), then those tests might need
6161 to be re-instated. Given, however, that the tests were only
6162 enabled when momentary breakpoints were not being used, I
6163 suspect that it won't be the case.
488f131b 6164
ab04a2af
TT
6165 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6166 be necessary for call dummies on a non-executable stack on
6167 SPARC. */
488f131b 6168
bac7d97b 6169 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6170 random_signal
6171 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6172 ecs->event_thread->suspend.stop_signal);
bac7d97b 6173
1cf4d951
PA
6174 /* Maybe this was a trap for a software breakpoint that has since
6175 been removed. */
6176 if (random_signal && target_stopped_by_sw_breakpoint ())
6177 {
5133a315
LM
6178 if (gdbarch_program_breakpoint_here_p (gdbarch,
6179 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6180 {
6181 struct regcache *regcache;
6182 int decr_pc;
6183
6184 /* Re-adjust PC to what the program would see if GDB was not
6185 debugging it. */
00431a78 6186 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6187 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6188 if (decr_pc != 0)
6189 {
07036511
TT
6190 gdb::optional<scoped_restore_tmpl<int>>
6191 restore_operation_disable;
1cf4d951
PA
6192
6193 if (record_full_is_used ())
07036511
TT
6194 restore_operation_disable.emplace
6195 (record_full_gdb_operation_disable_set ());
1cf4d951 6196
f2ffa92b
PA
6197 regcache_write_pc (regcache,
6198 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6199 }
6200 }
6201 else
6202 {
6203 /* A delayed software breakpoint event. Ignore the trap. */
6204 if (debug_infrun)
6205 fprintf_unfiltered (gdb_stdlog,
6206 "infrun: delayed software breakpoint "
6207 "trap, ignoring\n");
6208 random_signal = 0;
6209 }
6210 }
6211
6212 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6213 has since been removed. */
6214 if (random_signal && target_stopped_by_hw_breakpoint ())
6215 {
6216 /* A delayed hardware breakpoint event. Ignore the trap. */
6217 if (debug_infrun)
6218 fprintf_unfiltered (gdb_stdlog,
6219 "infrun: delayed hardware breakpoint/watchpoint "
6220 "trap, ignoring\n");
6221 random_signal = 0;
6222 }
6223
bac7d97b
PA
6224 /* If not, perhaps stepping/nexting can. */
6225 if (random_signal)
6226 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6227 && currently_stepping (ecs->event_thread));
ab04a2af 6228
2adfaa28
PA
6229 /* Perhaps the thread hit a single-step breakpoint of _another_
6230 thread. Single-step breakpoints are transparent to the
6231 breakpoints module. */
6232 if (random_signal)
6233 random_signal = !ecs->hit_singlestep_breakpoint;
6234
bac7d97b
PA
6235 /* No? Perhaps we got a moribund watchpoint. */
6236 if (random_signal)
6237 random_signal = !stopped_by_watchpoint;
ab04a2af 6238
c65d6b55
PA
6239 /* Always stop if the user explicitly requested this thread to
6240 remain stopped. */
6241 if (ecs->event_thread->stop_requested)
6242 {
6243 random_signal = 1;
6244 if (debug_infrun)
6245 fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
6246 }
6247
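  /* At this point RANDOM_SIGNAL is set only if none of the checks
     above explained the event: no breakpoint or watchpoint in the
     bpstat, no delayed software or hardware breakpoint trap, no step
     in progress, no single-step breakpoint of another thread, and no
     moribund watchpoint.  An explicit stop request from the user
     forces it on so that we always stop in that case.  */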
488f131b
JB
6248 /* For the program's own signals, act according to
6249 the signal handling tables. */
6250
ce12b012 6251 if (random_signal)
488f131b
JB
6252 {
6253 /* Signal not for debugging purposes. */
5b6d1e4f 6254 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
c9737c08 6255 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6256
527159b7 6257 if (debug_infrun)
c9737c08
PA
6258 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
6259 gdb_signal_to_symbol_string (stop_signal));
527159b7 6260
488f131b
JB
6261 stopped_by_random_signal = 1;
6262
252fbfc8
PA
6263 /* Always stop on signals if we're either just gaining control
6264 of the program, or the user explicitly requested this thread
6265 to remain stopped. */
d6b48e9c 6266 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6267 || ecs->event_thread->stop_requested
24291992 6268 || (!inf->detaching
16c381f0 6269 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 6270 {
22bcd14b 6271 stop_waiting (ecs);
488f131b
JB
6272 return;
6273 }
b57bacec
PA
6274
6275 /* Notify observers the signal has "handle print" set. Note we
6276 returned early above if stopping; normal_stop handles the
6277 printing in that case. */
6278 if (signal_print[ecs->event_thread->suspend.stop_signal])
6279 {
6280 /* The signal table tells us to print about this signal. */
223ffa71 6281 target_terminal::ours_for_output ();
76727919 6282 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6283 target_terminal::inferior ();
b57bacec 6284 }
488f131b
JB
6285
6286 /* Clear the signal if it should not be passed. */
16c381f0 6287 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6288 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6289
f2ffa92b 6290 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6291 && ecs->event_thread->control.trap_expected
8358c15c 6292 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6293 {
6294 /* We were just starting a new sequence, attempting to
6295 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6296 Instead this signal arrives. This signal will take us out
68f53502
AC
6297 of the stepping range so GDB needs to remember to, when
6298 the signal handler returns, resume stepping off that
6299 breakpoint. */
6300 /* To simplify things, "continue" is forced to use the same
6301 code paths as single-step - set a breakpoint at the
6302 signal return address and then, once hit, step off that
6303 breakpoint. */
237fc4c9
PA
6304 if (debug_infrun)
6305 fprintf_unfiltered (gdb_stdlog,
6306 "infrun: signal arrived while stepping over "
6307 "breakpoint\n");
d3169d93 6308
2c03e5be 6309 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6310 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6311 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6312 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6313
6314 /* If we were nexting/stepping some other thread, switch to
6315 it, so that we don't continue it, losing control. */
6316 if (!switch_back_to_stepped_thread (ecs))
6317 keep_going (ecs);
9d799f85 6318 return;
68f53502 6319 }
9d799f85 6320
e5f8a7cc 6321 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6322 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6323 ecs->event_thread)
e5f8a7cc 6324 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6325 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6326 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6327 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6328 {
6329 /* The inferior is about to take a signal that will take it
6330 out of the single step range. Set a breakpoint at the
6331 current PC (which is presumably where the signal handler
6332 will eventually return) and then allow the inferior to
6333 run free.
6334
6335 Note that this is only needed for a signal delivered
6336 while in the single-step range. Nested signals aren't a
6337 problem as they eventually all return. */
237fc4c9
PA
6338 if (debug_infrun)
6339 fprintf_unfiltered (gdb_stdlog,
6340 "infrun: signal may take us out of "
6341 "single-step range\n");
6342
372316f1 6343 clear_step_over_info ();
2c03e5be 6344 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6345 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6346 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6347 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6348 keep_going (ecs);
6349 return;
d303a6c7 6350 }
9d799f85 6351
85102364 6352 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6353 when either there's a nested signal, or when there's a
6354 pending signal enabled just as the signal handler returns
6355 (leaving the inferior at the step-resume-breakpoint without
6356 actually executing it). Either way continue until the
6357 breakpoint is really hit. */
c447ac0b
PA
6358
6359 if (!switch_back_to_stepped_thread (ecs))
6360 {
6361 if (debug_infrun)
6362 fprintf_unfiltered (gdb_stdlog,
6363 "infrun: random signal, keep going\n");
6364
6365 keep_going (ecs);
6366 }
6367 return;
488f131b 6368 }
94c57d6a
PA
6369
6370 process_event_stop_test (ecs);
6371}
6372
6373/* Come here when we've got some debug event / signal we can explain
6374 (IOW, not a random signal), and test whether it should cause a
6375 stop, or whether we should resume the inferior (transparently).
6376 E.g., could be a breakpoint whose condition evaluates false; we
6377 could be still stepping within the line; etc. */
6378
6379static void
6380process_event_stop_test (struct execution_control_state *ecs)
6381{
6382 struct symtab_and_line stop_pc_sal;
6383 struct frame_info *frame;
6384 struct gdbarch *gdbarch;
cdaa5b73
PA
6385 CORE_ADDR jmp_buf_pc;
6386 struct bpstat_what what;
94c57d6a 6387
cdaa5b73 6388 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6389
cdaa5b73
PA
6390 frame = get_current_frame ();
6391 gdbarch = get_frame_arch (frame);
fcf3daef 6392
cdaa5b73 6393 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6394
cdaa5b73
PA
6395 if (what.call_dummy)
6396 {
6397 stop_stack_dummy = what.call_dummy;
6398 }
186c406b 6399
243a9253
PA
6400 /* A few breakpoint types have callbacks associated (e.g.,
6401 bp_jit_event). Run them now. */
6402 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6403
cdaa5b73
PA
6404 /* If we hit an internal event that triggers symbol changes, the
6405 current frame will be invalidated within bpstat_what (e.g., if we
6406 hit an internal solib event). Re-fetch it. */
6407 frame = get_current_frame ();
6408 gdbarch = get_frame_arch (frame);
e2e4d78b 6409
cdaa5b73
PA
6410 switch (what.main_action)
6411 {
6412 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6413 /* If we hit the breakpoint at longjmp while stepping, we
6414 install a momentary breakpoint at the target of the
6415 jmp_buf. */
186c406b 6416
cdaa5b73
PA
6417 if (debug_infrun)
6418 fprintf_unfiltered (gdb_stdlog,
6419 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
186c406b 6420
cdaa5b73 6421 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6422
cdaa5b73
PA
6423 if (what.is_longjmp)
6424 {
6425 struct value *arg_value;
6426
6427 /* If we set the longjmp breakpoint via a SystemTap probe,
6428 then use it to extract the arguments. The destination PC
6429 is the third argument to the probe. */
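  /* Probe arguments are numbered from zero, hence index 2 below for
     the third argument.  */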
6430 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6431 if (arg_value)
8fa0c4f8
AA
6432 {
6433 jmp_buf_pc = value_as_address (arg_value);
6434 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6435 }
cdaa5b73
PA
6436 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6437 || !gdbarch_get_longjmp_target (gdbarch,
6438 frame, &jmp_buf_pc))
e2e4d78b 6439 {
cdaa5b73
PA
6440 if (debug_infrun)
6441 fprintf_unfiltered (gdb_stdlog,
6442 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6443 "(!gdbarch_get_longjmp_target)\n");
6444 keep_going (ecs);
6445 return;
e2e4d78b 6446 }
e2e4d78b 6447
cdaa5b73
PA
6448 /* Insert a breakpoint at resume address. */
6449 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6450 }
6451 else
6452 check_exception_resume (ecs, frame);
6453 keep_going (ecs);
6454 return;
e81a37f7 6455
cdaa5b73
PA
6456 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6457 {
6458 struct frame_info *init_frame;
e81a37f7 6459
cdaa5b73 6460 /* There are several cases to consider.
c906108c 6461
cdaa5b73
PA
6462 1. The initiating frame no longer exists. In this case we
6463 must stop, because the exception or longjmp has gone too
6464 far.
2c03e5be 6465
cdaa5b73
PA
6466 2. The initiating frame exists, and is the same as the
6467 current frame. We stop, because the exception or longjmp
6468 has been caught.
2c03e5be 6469
cdaa5b73
PA
6470 3. The initiating frame exists and is different from the
6471 current frame. This means the exception or longjmp has
6472 been caught beneath the initiating frame, so keep going.
c906108c 6473
cdaa5b73
PA
6474 4. longjmp breakpoint has been placed just to protect
6475 against stale dummy frames and user is not interested in
6476 stopping around longjmps. */
c5aa993b 6477
cdaa5b73
PA
6478 if (debug_infrun)
6479 fprintf_unfiltered (gdb_stdlog,
6480 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
c5aa993b 6481
cdaa5b73
PA
6482 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6483 != NULL);
6484 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6485
cdaa5b73
PA
6486 if (what.is_longjmp)
6487 {
b67a2c6f 6488 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6489
cdaa5b73 6490 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6491 {
cdaa5b73
PA
6492 /* Case 4. */
6493 keep_going (ecs);
6494 return;
e5ef252a 6495 }
cdaa5b73 6496 }
c5aa993b 6497
cdaa5b73 6498 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6499
cdaa5b73
PA
6500 if (init_frame)
6501 {
6502 struct frame_id current_id
6503 = get_frame_id (get_current_frame ());
6504 if (frame_id_eq (current_id,
6505 ecs->event_thread->initiating_frame))
6506 {
6507 /* Case 2. Fall through. */
6508 }
6509 else
6510 {
6511 /* Case 3. */
6512 keep_going (ecs);
6513 return;
6514 }
68f53502 6515 }
488f131b 6516
cdaa5b73
PA
6517 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6518 exists. */
6519 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6520
bdc36728 6521 end_stepping_range (ecs);
cdaa5b73
PA
6522 }
6523 return;
e5ef252a 6524
cdaa5b73
PA
6525 case BPSTAT_WHAT_SINGLE:
6526 if (debug_infrun)
6527 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6528 ecs->event_thread->stepping_over_breakpoint = 1;
6529 /* Still need to check other stuff, at least the case where we
6530 are stepping and step out of the right range. */
6531 break;
e5ef252a 6532
cdaa5b73
PA
6533 case BPSTAT_WHAT_STEP_RESUME:
6534 if (debug_infrun)
6535 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
e5ef252a 6536
cdaa5b73
PA
6537 delete_step_resume_breakpoint (ecs->event_thread);
6538 if (ecs->event_thread->control.proceed_to_finish
6539 && execution_direction == EXEC_REVERSE)
6540 {
6541 struct thread_info *tp = ecs->event_thread;
6542
6543 /* We are finishing a function in reverse, and just hit the
6544 step-resume breakpoint at the start address of the
6545 function, and we're almost there -- just need to back up
6546 by one more single-step, which should take us back to the
6547 function call. */
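  /* A step range whose start and end are both 1 is the convention
     used elsewhere in this file for a stepi/nexti-style
     single-instruction step.  */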
6548 tp->control.step_range_start = tp->control.step_range_end = 1;
6549 keep_going (ecs);
e5ef252a 6550 return;
cdaa5b73
PA
6551 }
6552 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6553 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6554 && execution_direction == EXEC_REVERSE)
6555 {
6556 /* We are stepping over a function call in reverse, and just
6557 hit the step-resume breakpoint at the start address of
6558 the function. Go back to single-stepping, which should
6559 take us back to the function call. */
6560 ecs->event_thread->stepping_over_breakpoint = 1;
6561 keep_going (ecs);
6562 return;
6563 }
6564 break;
e5ef252a 6565
cdaa5b73
PA
6566 case BPSTAT_WHAT_STOP_NOISY:
6567 if (debug_infrun)
6568 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6569 stop_print_frame = 1;
e5ef252a 6570
99619bea
PA
 6571 /* Assume the thread stopped for a breakpoint. We'll still check
6572 whether a/the breakpoint is there when the thread is next
6573 resumed. */
6574 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6575
22bcd14b 6576 stop_waiting (ecs);
cdaa5b73 6577 return;
e5ef252a 6578
cdaa5b73
PA
6579 case BPSTAT_WHAT_STOP_SILENT:
6580 if (debug_infrun)
6581 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6582 stop_print_frame = 0;
e5ef252a 6583
99619bea
PA
 6584 /* Assume the thread stopped for a breakpoint. We'll still check
6585 whether a/the breakpoint is there when the thread is next
6586 resumed. */
6587 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6588 stop_waiting (ecs);
cdaa5b73
PA
6589 return;
6590
6591 case BPSTAT_WHAT_HP_STEP_RESUME:
6592 if (debug_infrun)
6593 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6594
6595 delete_step_resume_breakpoint (ecs->event_thread);
6596 if (ecs->event_thread->step_after_step_resume_breakpoint)
6597 {
6598 /* Back when the step-resume breakpoint was inserted, we
6599 were trying to single-step off a breakpoint. Go back to
6600 doing that. */
6601 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6602 ecs->event_thread->stepping_over_breakpoint = 1;
6603 keep_going (ecs);
6604 return;
e5ef252a 6605 }
cdaa5b73
PA
6606 break;
6607
6608 case BPSTAT_WHAT_KEEP_CHECKING:
6609 break;
e5ef252a 6610 }
c906108c 6611
af48d08f
PA
6612 /* If we stepped a permanent breakpoint and we had a high priority
6613 step-resume breakpoint for the address we stepped, but we didn't
6614 hit it, then we must have stepped into the signal handler. The
6615 step-resume was only necessary to catch the case of _not_
6616 stepping into the handler, so delete it, and fall through to
6617 checking whether the step finished. */
6618 if (ecs->event_thread->stepped_breakpoint)
6619 {
6620 struct breakpoint *sr_bp
6621 = ecs->event_thread->control.step_resume_breakpoint;
6622
8d707a12
PA
6623 if (sr_bp != NULL
6624 && sr_bp->loc->permanent
af48d08f
PA
6625 && sr_bp->type == bp_hp_step_resume
6626 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6627 {
6628 if (debug_infrun)
6629 fprintf_unfiltered (gdb_stdlog,
6630 "infrun: stepped permanent breakpoint, stopped in "
6631 "handler\n");
6632 delete_step_resume_breakpoint (ecs->event_thread);
6633 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6634 }
6635 }
6636
cdaa5b73
PA
6637 /* We come here if we hit a breakpoint but should not stop for it.
6638 Possibly we also were stepping and should stop for that. So fall
6639 through and test for stepping. But, if not stepping, do not
6640 stop. */
c906108c 6641
a7212384
UW
6642 /* In all-stop mode, if we're currently stepping but have stopped in
6643 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6644 if (switch_back_to_stepped_thread (ecs))
6645 return;
776f04fa 6646
8358c15c 6647 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6648 {
527159b7 6649 if (debug_infrun)
d3169d93
DJ
6650 fprintf_unfiltered (gdb_stdlog,
6651 "infrun: step-resume breakpoint is inserted\n");
527159b7 6652
488f131b
JB
6653 /* Having a step-resume breakpoint overrides anything
6654 else having to do with stepping commands until
6655 that breakpoint is reached. */
488f131b
JB
6656 keep_going (ecs);
6657 return;
6658 }
c5aa993b 6659
16c381f0 6660 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6661 {
527159b7 6662 if (debug_infrun)
8a9de0e4 6663 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
488f131b 6664 /* Likewise if we aren't even stepping. */
488f131b
JB
6665 keep_going (ecs);
6666 return;
6667 }
c5aa993b 6668
4b7703ad
JB
6669 /* Re-fetch current thread's frame in case the code above caused
6670 the frame cache to be re-initialized, making our FRAME variable
6671 a dangling pointer. */
6672 frame = get_current_frame ();
628fe4e4 6673 gdbarch = get_frame_arch (frame);
7e324e48 6674 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6675
488f131b 6676 /* If stepping through a line, keep going if still within it.
c906108c 6677
488f131b
JB
6678 Note that step_range_end is the address of the first instruction
6679 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6680 within it!
6681
6682 Note also that during reverse execution, we may be stepping
6683 through a function epilogue and therefore must detect when
6684 the current-frame changes in the middle of a line. */
6685
f2ffa92b
PA
6686 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6687 ecs->event_thread)
31410e84 6688 && (execution_direction != EXEC_REVERSE
388a8562 6689 || frame_id_eq (get_frame_id (frame),
16c381f0 6690 ecs->event_thread->control.step_frame_id)))
488f131b 6691 {
527159b7 6692 if (debug_infrun)
5af949e3
UW
6693 fprintf_unfiltered
6694 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
16c381f0
JK
6695 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6696 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6697
c1e36e3e
PA
6698 /* Tentatively re-enable range stepping; `resume' disables it if
6699 necessary (e.g., if we're stepping over a breakpoint or we
6700 have software watchpoints). */
6701 ecs->event_thread->control.may_range_step = 1;
6702
b2175913
MS
6703 /* When stepping backward, stop at beginning of line range
6704 (unless it's the function entry point, in which case
6705 keep going back to the call point). */
f2ffa92b 6706 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6707 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6708 && stop_pc != ecs->stop_func_start
6709 && execution_direction == EXEC_REVERSE)
bdc36728 6710 end_stepping_range (ecs);
b2175913
MS
6711 else
6712 keep_going (ecs);
6713
488f131b
JB
6714 return;
6715 }
c5aa993b 6716
488f131b 6717 /* We stepped out of the stepping range. */
c906108c 6718
488f131b 6719 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6720 loader dynamic symbol resolution code...
6721
6722 EXEC_FORWARD: we keep on single stepping until we exit the run
6723 time loader code and reach the callee's address.
6724
6725 EXEC_REVERSE: we've already executed the callee (backward), and
6726 the runtime loader code is handled just like any other
6727 undebuggable function call. Now we need only keep stepping
6728 backward through the trampoline code, and that's handled further
6729 down, so there is nothing for us to do here. */
6730
6731 if (execution_direction != EXEC_REVERSE
16c381f0 6732 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6733 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6734 {
4c8c40e6 6735 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6736 gdbarch_skip_solib_resolver (gdbarch,
6737 ecs->event_thread->suspend.stop_pc);
c906108c 6738
527159b7 6739 if (debug_infrun)
3e43a32a
MS
6740 fprintf_unfiltered (gdb_stdlog,
6741 "infrun: stepped into dynsym resolve code\n");
527159b7 6742
488f131b
JB
6743 if (pc_after_resolver)
6744 {
6745 /* Set up a step-resume breakpoint at the address
6746 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6747 symtab_and_line sr_sal;
488f131b 6748 sr_sal.pc = pc_after_resolver;
6c95b8df 6749 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6750
a6d9a66e
UW
6751 insert_step_resume_breakpoint_at_sal (gdbarch,
6752 sr_sal, null_frame_id);
c5aa993b 6753 }
c906108c 6754
488f131b
JB
6755 keep_going (ecs);
6756 return;
6757 }
c906108c 6758
1d509aa6
MM
6759 /* Step through an indirect branch thunk. */
6760 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6761 && gdbarch_in_indirect_branch_thunk (gdbarch,
6762 ecs->event_thread->suspend.stop_pc))
1d509aa6
MM
6763 {
6764 if (debug_infrun)
6765 fprintf_unfiltered (gdb_stdlog,
6766 "infrun: stepped into indirect branch thunk\n");
6767 keep_going (ecs);
6768 return;
6769 }
6770
16c381f0
JK
6771 if (ecs->event_thread->control.step_range_end != 1
6772 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6773 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6774 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6775 {
527159b7 6776 if (debug_infrun)
3e43a32a
MS
6777 fprintf_unfiltered (gdb_stdlog,
6778 "infrun: stepped into signal trampoline\n");
42edda50 6779 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6780 a signal trampoline (either by a signal being delivered or by
6781 the signal handler returning). Just single-step until the
6782 inferior leaves the trampoline (either by calling the handler
6783 or returning). */
488f131b
JB
6784 keep_going (ecs);
6785 return;
6786 }
c906108c 6787
14132e89
MR
6788 /* If we're in the return path from a shared library trampoline,
6789 we want to proceed through the trampoline when stepping. */
6790 /* macro/2012-04-25: This needs to come before the subroutine
6791 call check below as on some targets return trampolines look
6792 like subroutine calls (MIPS16 return thunks). */
6793 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6794 ecs->event_thread->suspend.stop_pc,
6795 ecs->stop_func_name)
14132e89
MR
6796 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6797 {
6798 /* Determine where this trampoline returns. */
f2ffa92b
PA
6799 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6800 CORE_ADDR real_stop_pc
6801 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89
MR
6802
6803 if (debug_infrun)
6804 fprintf_unfiltered (gdb_stdlog,
6805 "infrun: stepped into solib return tramp\n");
6806
6807 /* Only proceed through if we know where it's going. */
6808 if (real_stop_pc)
6809 {
6810 /* And put the step-breakpoint there and go until there. */
51abb421 6811 symtab_and_line sr_sal;
14132e89
MR
6812 sr_sal.pc = real_stop_pc;
6813 sr_sal.section = find_pc_overlay (sr_sal.pc);
6814 sr_sal.pspace = get_frame_program_space (frame);
6815
6816 /* Do not specify what the fp should be when we stop since
6817 on some machines the prologue is where the new fp value
6818 is established. */
6819 insert_step_resume_breakpoint_at_sal (gdbarch,
6820 sr_sal, null_frame_id);
6821
6822 /* Restart without fiddling with the step ranges or
6823 other state. */
6824 keep_going (ecs);
6825 return;
6826 }
6827 }
6828
c17eaafe
DJ
6829 /* Check for subroutine calls. The check for the current frame
6830 equalling the step ID is not necessary - the check of the
6831 previous frame's ID is sufficient - but it is a common case and
6832 cheaper than checking the previous frame's ID.
14e60db5
DJ
6833
6834 NOTE: frame_id_eq will never report two invalid frame IDs as
6835 being equal, so to get into this block, both the current and
6836 previous frame must have valid frame IDs. */
005ca36a
JB
6837 /* The outer_frame_id check is a heuristic to detect stepping
6838 through startup code. If we step over an instruction which
6839 sets the stack pointer from an invalid value to a valid value,
6840 we may detect that as a subroutine call from the mythical
6841 "outermost" function. This could be fixed by marking
6842 outermost frames as !stack_p,code_p,special_p. Then the
6843 initial outermost frame, before sp was valid, would
ce6cca6d 6844 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6845 for more. */
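  /* Restated: the current frame is not the frame the step started in,
     but its caller is; and either that starting frame is not the
     heuristic "outermost" frame, or the stop PC is now in a different
     function than the one the step started in.  */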
edb3359d 6846 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6847 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6848 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6849 ecs->event_thread->control.step_stack_frame_id)
6850 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6851 outer_frame_id)
885eeb5b 6852 || (ecs->event_thread->control.step_start_function
f2ffa92b 6853 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6854 {
f2ffa92b 6855 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6856 CORE_ADDR real_stop_pc;
8fb3e588 6857
527159b7 6858 if (debug_infrun)
8a9de0e4 6859 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
527159b7 6860
b7a084be 6861 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6862 {
6863 /* I presume that step_over_calls is only 0 when we're
6864 supposed to be stepping at the assembly language level
6865 ("stepi"). Just stop. */
388a8562 6866 /* And this works the same backward as frontward. MVS */
bdc36728 6867 end_stepping_range (ecs);
95918acb
AC
6868 return;
6869 }
8fb3e588 6870
388a8562
MS
6871 /* Reverse stepping through solib trampolines. */
6872
6873 if (execution_direction == EXEC_REVERSE
16c381f0 6874 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6875 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6876 || (ecs->stop_func_start == 0
6877 && in_solib_dynsym_resolve_code (stop_pc))))
6878 {
6879 /* Any solib trampoline code can be handled in reverse
6880 by simply continuing to single-step. We have already
6881 executed the solib function (backwards), and a few
6882 steps will take us back through the trampoline to the
6883 caller. */
6884 keep_going (ecs);
6885 return;
6886 }
6887
16c381f0 6888 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6889 {
b2175913
MS
6890 /* We're doing a "next".
6891
6892 Normal (forward) execution: set a breakpoint at the
6893 callee's return address (the address at which the caller
6894 will resume).
6895
 6896 Reverse (backward) execution: set the step-resume
6897 breakpoint at the start of the function that we just
6898 stepped into (backwards), and continue to there. When we
6130d0b7 6899 get there, we'll need to single-step back to the caller. */
b2175913
MS
6900
6901 if (execution_direction == EXEC_REVERSE)
6902 {
acf9414f
JK
6903 /* If we're already at the start of the function, we've either
6904 just stepped backward into a single instruction function,
6905 or stepped back out of a signal handler to the first instruction
6906 of the function. Just keep going, which will single-step back
6907 to the caller. */
58c48e72 6908 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6909 {
acf9414f 6910 /* Normal function call return (static or dynamic). */
51abb421 6911 symtab_and_line sr_sal;
acf9414f
JK
6912 sr_sal.pc = ecs->stop_func_start;
6913 sr_sal.pspace = get_frame_program_space (frame);
6914 insert_step_resume_breakpoint_at_sal (gdbarch,
6915 sr_sal, null_frame_id);
6916 }
b2175913
MS
6917 }
6918 else
568d6575 6919 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6920
8567c30f
AC
6921 keep_going (ecs);
6922 return;
6923 }
a53c66de 6924
95918acb 6925 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6926 calling routine and the real function), locate the real
6927 function. That's what tells us (a) whether we want to step
6928 into it at all, and (b) what prologue we want to run to the
6929 end of, if we do step into it. */
568d6575 6930 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6931 if (real_stop_pc == 0)
568d6575 6932 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6933 if (real_stop_pc != 0)
6934 ecs->stop_func_start = real_stop_pc;
8fb3e588 6935
db5f024e 6936 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6937 {
51abb421 6938 symtab_and_line sr_sal;
1b2bfbb9 6939 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6940 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6941
a6d9a66e
UW
6942 insert_step_resume_breakpoint_at_sal (gdbarch,
6943 sr_sal, null_frame_id);
8fb3e588
AC
6944 keep_going (ecs);
6945 return;
1b2bfbb9
RC
6946 }
6947
95918acb 6948 /* If we have line number information for the function we are
1bfeeb0f
JL
6949 thinking of stepping into and the function isn't on the skip
6950 list, step into it.
95918acb 6951
8fb3e588
AC
6952 If there are several symtabs at that PC (e.g. with include
6953 files), just want to know whether *any* of them have line
6954 numbers. find_pc_line handles this. */
95918acb
AC
6955 {
6956 struct symtab_and_line tmp_sal;
8fb3e588 6957
95918acb 6958 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6959 if (tmp_sal.line != 0
85817405 6960 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6961 tmp_sal)
6962 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6963 {
b2175913 6964 if (execution_direction == EXEC_REVERSE)
568d6575 6965 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6966 else
568d6575 6967 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6968 return;
6969 }
6970 }
6971
6972 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6973 set, we stop the step so that the user has a chance to switch
 6974 to assembly mode. */
16c381f0 6975 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6976 && step_stop_if_no_debug)
95918acb 6977 {
bdc36728 6978 end_stepping_range (ecs);
95918acb
AC
6979 return;
6980 }
6981
b2175913
MS
6982 if (execution_direction == EXEC_REVERSE)
6983 {
acf9414f
JK
6984 /* If we're already at the start of the function, we've either just
6985 stepped backward into a single instruction function without line
6986 number info, or stepped back out of a signal handler to the first
6987 instruction of the function without line number info. Just keep
6988 going, which will single-step back to the caller. */
6989 if (ecs->stop_func_start != stop_pc)
6990 {
6991 /* Set a breakpoint at callee's start address.
6992 From there we can step once and be back in the caller. */
51abb421 6993 symtab_and_line sr_sal;
acf9414f
JK
6994 sr_sal.pc = ecs->stop_func_start;
6995 sr_sal.pspace = get_frame_program_space (frame);
6996 insert_step_resume_breakpoint_at_sal (gdbarch,
6997 sr_sal, null_frame_id);
6998 }
b2175913
MS
6999 }
7000 else
7001 /* Set a breakpoint at callee's return address (the address
7002 at which the caller will resume). */
568d6575 7003 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7004
95918acb 7005 keep_going (ecs);
488f131b 7006 return;
488f131b 7007 }
c906108c 7008
fdd654f3
MS
7009 /* Reverse stepping through solib trampolines. */
7010
7011 if (execution_direction == EXEC_REVERSE
16c381f0 7012 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 7013 {
f2ffa92b
PA
7014 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7015
fdd654f3
MS
7016 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7017 || (ecs->stop_func_start == 0
7018 && in_solib_dynsym_resolve_code (stop_pc)))
7019 {
7020 /* Any solib trampoline code can be handled in reverse
7021 by simply continuing to single-step. We have already
7022 executed the solib function (backwards), and a few
7023 steps will take us back through the trampoline to the
7024 caller. */
7025 keep_going (ecs);
7026 return;
7027 }
7028 else if (in_solib_dynsym_resolve_code (stop_pc))
7029 {
7030 /* Stepped backward into the solib dynsym resolver.
7031 Set a breakpoint at its start and continue, then
7032 one more step will take us out. */
51abb421 7033 symtab_and_line sr_sal;
fdd654f3 7034 sr_sal.pc = ecs->stop_func_start;
9d1807c3 7035 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
7036 insert_step_resume_breakpoint_at_sal (gdbarch,
7037 sr_sal, null_frame_id);
7038 keep_going (ecs);
7039 return;
7040 }
7041 }
7042
8c95582d
AB
7043 /* This always returns the sal for the inner-most frame when we are in a
7044 stack of inlined frames, even if GDB actually believes that it is in a
7045 more outer frame. This is checked for below by calls to
7046 inline_skipped_frames. */
f2ffa92b 7047 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 7048
1b2bfbb9
RC
7049 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7050 the trampoline processing logic, however, there are some trampolines
7051 that have no names, so we should do trampoline handling first. */
16c381f0 7052 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 7053 && ecs->stop_func_name == NULL
2afb61aa 7054 && stop_pc_sal.line == 0)
1b2bfbb9 7055 {
527159b7 7056 if (debug_infrun)
3e43a32a
MS
7057 fprintf_unfiltered (gdb_stdlog,
7058 "infrun: stepped into undebuggable function\n");
527159b7 7059
1b2bfbb9 7060 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
7061 undebuggable function (where there is no debugging information
7062 and no line number corresponding to the address where the
1b2bfbb9
RC
7063 inferior stopped). Since we want to skip this kind of code,
7064 we keep going until the inferior returns from this
14e60db5
DJ
7065 function - unless the user has asked us not to (via
7066 set step-mode) or we no longer know how to get back
7067 to the call site. */
7068 if (step_stop_if_no_debug
c7ce8faa 7069 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
7070 {
7071 /* If we have no line number and the step-stop-if-no-debug
7072 is set, we stop the step so that the user has a chance to
 7073 switch to assembly mode. */
bdc36728 7074 end_stepping_range (ecs);
1b2bfbb9
RC
7075 return;
7076 }
7077 else
7078 {
7079 /* Set a breakpoint at callee's return address (the address
7080 at which the caller will resume). */
568d6575 7081 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
7082 keep_going (ecs);
7083 return;
7084 }
7085 }
7086
16c381f0 7087 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
7088 {
7089 /* It is stepi or nexti. We always want to stop stepping after
7090 one instruction. */
527159b7 7091 if (debug_infrun)
8a9de0e4 7092 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
bdc36728 7093 end_stepping_range (ecs);
1b2bfbb9
RC
7094 return;
7095 }
7096
2afb61aa 7097 if (stop_pc_sal.line == 0)
488f131b
JB
7098 {
7099 /* We have no line number information. That means to stop
7100 stepping (does this always happen right after one instruction,
7101 when we do "s" in a function with no line numbers,
7102 or can this happen as a result of a return or longjmp?). */
527159b7 7103 if (debug_infrun)
8a9de0e4 7104 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
bdc36728 7105 end_stepping_range (ecs);
488f131b
JB
7106 return;
7107 }
c906108c 7108
edb3359d
DJ
7109 /* Look for "calls" to inlined functions, part one. If the inline
7110 frame machinery detected some skipped call sites, we have entered
7111 a new inline function. */
7112
7113 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7114 ecs->event_thread->control.step_frame_id)
00431a78 7115 && inline_skipped_frames (ecs->event_thread))
edb3359d 7116 {
edb3359d
DJ
7117 if (debug_infrun)
7118 fprintf_unfiltered (gdb_stdlog,
7119 "infrun: stepped into inlined function\n");
7120
51abb421 7121 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 7122
16c381f0 7123 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
7124 {
7125 /* For "step", we're going to stop. But if the call site
7126 for this inlined function is on the same source line as
7127 we were previously stepping, go down into the function
7128 first. Otherwise stop at the call site. */
7129
7130 if (call_sal.line == ecs->event_thread->current_line
7131 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7132 {
7133 step_into_inline_frame (ecs->event_thread);
7134 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7135 {
7136 keep_going (ecs);
7137 return;
7138 }
7139 }
edb3359d 7140
bdc36728 7141 end_stepping_range (ecs);
edb3359d
DJ
7142 return;
7143 }
7144 else
7145 {
7146 /* For "next", we should stop at the call site if it is on a
7147 different source line. Otherwise continue through the
7148 inlined function. */
7149 if (call_sal.line == ecs->event_thread->current_line
7150 && call_sal.symtab == ecs->event_thread->current_symtab)
7151 keep_going (ecs);
7152 else
bdc36728 7153 end_stepping_range (ecs);
edb3359d
DJ
7154 return;
7155 }
7156 }
7157
7158 /* Look for "calls" to inlined functions, part two. If we are still
7159 in the same real function we were stepping through, but we have
7160 to go further up to find the exact frame ID, we are stepping
7161 through a more inlined call beyond its call site. */
7162
7163 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7164 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7165 ecs->event_thread->control.step_frame_id)
edb3359d 7166 && stepped_in_from (get_current_frame (),
16c381f0 7167 ecs->event_thread->control.step_frame_id))
edb3359d
DJ
7168 {
7169 if (debug_infrun)
7170 fprintf_unfiltered (gdb_stdlog,
7171 "infrun: stepping through inlined function\n");
7172
4a4c04f1
BE
7173 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7174 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7175 keep_going (ecs);
7176 else
bdc36728 7177 end_stepping_range (ecs);
edb3359d
DJ
7178 return;
7179 }
7180
8c95582d 7181 bool refresh_step_info = true;
f2ffa92b 7182 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
7183 && (ecs->event_thread->current_line != stop_pc_sal.line
7184 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7185 {
8c95582d
AB
7186 if (stop_pc_sal.is_stmt)
7187 {
7188 /* We are at the start of a different line. So stop. Note that
7189 we don't stop if we step into the middle of a different line.
7190 That is said to make things like for (;;) statements work
7191 better. */
7192 if (debug_infrun)
7193 fprintf_unfiltered (gdb_stdlog,
7194 "infrun: stepped to a different line\n");
7195 end_stepping_range (ecs);
7196 return;
7197 }
7198 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7199 ecs->event_thread->control.step_frame_id))
7200 {
7201 /* We are at the start of a different line, however, this line is
7202 not marked as a statement, and we have not changed frame. We
7203 ignore this line table entry, and continue stepping forward,
7204 looking for a better place to stop. */
7205 refresh_step_info = false;
7206 if (debug_infrun)
7207 fprintf_unfiltered (gdb_stdlog,
7208 "infrun: stepped to a different line, but "
7209 "it's not the start of a statement\n");
7210 }
488f131b 7211 }
c906108c 7212
488f131b 7213 /* We aren't done stepping.
c906108c 7214
488f131b
JB
7215 Optimize by setting the stepping range to the line.
7216 (We might not be in the original line, but if we entered a
7217 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7218 things like for(;;) statements work better.)
7219
7220 If we entered a SAL that indicates a non-statement line table entry,
7221 then we update the stepping range, but we don't update the step info,
7222 which includes things like the line number we are stepping away from.
7223 This means we will stop when we find a line table entry that is marked
7224 as is-statement, even if it matches the non-statement one we just
7225 stepped into. */
c906108c 7226
16c381f0
JK
7227 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7228 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7229 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7230 if (refresh_step_info)
7231 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7232
527159b7 7233 if (debug_infrun)
8a9de0e4 7234 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
488f131b 7235 keep_going (ecs);
104c1213
JM
7236}
7237
c447ac0b
PA
7238/* In all-stop mode, if we're currently stepping but have stopped in
7239 some other thread, we may need to switch back to the stepped
 7240 thread. Returns true if we set the inferior running, false if we left
7241 it stopped (and the event needs further processing). */
7242
7243static int
7244switch_back_to_stepped_thread (struct execution_control_state *ecs)
7245{
fbea99ea 7246 if (!target_is_non_stop_p ())
c447ac0b 7247 {
99619bea
PA
7248 struct thread_info *stepping_thread;
7249
7250 /* If any thread is blocked on some internal breakpoint, and we
7251 simply need to step over that breakpoint to get it going
7252 again, do that first. */
7253
7254 /* However, if we see an event for the stepping thread, then we
7255 know all other threads have been moved past their breakpoints
7256 already. Let the caller check whether the step is finished,
7257 etc., before deciding to move it past a breakpoint. */
7258 if (ecs->event_thread->control.step_range_end != 0)
7259 return 0;
7260
7261 /* Check if the current thread is blocked on an incomplete
7262 step-over, interrupted by a random signal. */
7263 if (ecs->event_thread->control.trap_expected
7264 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7265 {
99619bea
PA
7266 if (debug_infrun)
7267 {
7268 fprintf_unfiltered (gdb_stdlog,
7269 "infrun: need to finish step-over of [%s]\n",
a068643d 7270 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea
PA
7271 }
7272 keep_going (ecs);
7273 return 1;
7274 }
2adfaa28 7275
99619bea
PA
7276 /* Check if the current thread is blocked by a single-step
7277 breakpoint of another thread. */
7278 if (ecs->hit_singlestep_breakpoint)
7279 {
7280 if (debug_infrun)
7281 {
7282 fprintf_unfiltered (gdb_stdlog,
7283 "infrun: need to step [%s] over single-step "
7284 "breakpoint\n",
a068643d 7285 target_pid_to_str (ecs->ptid).c_str ());
99619bea
PA
7286 }
7287 keep_going (ecs);
7288 return 1;
7289 }
7290
4d9d9d04
PA
7291 /* If this thread needs yet another step-over (e.g., stepping
7292 through a delay slot), do it first before moving on to
7293 another thread. */
7294 if (thread_still_needs_step_over (ecs->event_thread))
7295 {
7296 if (debug_infrun)
7297 {
7298 fprintf_unfiltered (gdb_stdlog,
7299 "infrun: thread [%s] still needs step-over\n",
a068643d 7300 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04
PA
7301 }
7302 keep_going (ecs);
7303 return 1;
7304 }
70509625 7305
483805cf
PA
7306 /* If scheduler locking applies even if not stepping, there's no
7307 need to walk over threads. Above we've checked whether the
7308 current thread is stepping. If some other thread not the
7309 event thread is stepping, then it must be that scheduler
7310 locking is not in effect. */
856e7dd6 7311 if (schedlock_applies (ecs->event_thread))
483805cf
PA
7312 return 0;
7313
4d9d9d04
PA
7314 /* Otherwise, we no longer expect a trap in the current thread.
7315 Clear the trap_expected flag before switching back -- this is
7316 what keep_going does as well, if we call it. */
7317 ecs->event_thread->control.trap_expected = 0;
7318
7319 /* Likewise, clear the signal if it should not be passed. */
7320 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7321 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7322
7323 /* Do all pending step-overs before actually proceeding with
483805cf 7324 step/next/etc. */
4d9d9d04
PA
7325 if (start_step_over ())
7326 {
7327 prepare_to_wait (ecs);
7328 return 1;
7329 }
7330
7331 /* Look for the stepping/nexting thread. */
483805cf 7332 stepping_thread = NULL;
4d9d9d04 7333
08036331 7334 for (thread_info *tp : all_non_exited_threads ())
483805cf 7335 {
f3f8ece4
PA
7336 switch_to_thread_no_regs (tp);
7337
fbea99ea
PA
7338 /* Ignore threads of processes the caller is not
7339 resuming. */
483805cf 7340 if (!sched_multi
5b6d1e4f
PA
7341 && (tp->inf->process_target () != ecs->target
7342 || tp->inf->pid != ecs->ptid.pid ()))
483805cf
PA
7343 continue;
7344
7345 /* When stepping over a breakpoint, we lock all threads
7346 except the one that needs to move past the breakpoint.
7347 If a non-event thread has this set, the "incomplete
7348 step-over" check above should have caught it earlier. */
372316f1
PA
7349 if (tp->control.trap_expected)
7350 {
7351 internal_error (__FILE__, __LINE__,
7352 "[%s] has inconsistent state: "
7353 "trap_expected=%d\n",
a068643d 7354 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
7355 tp->control.trap_expected);
7356 }
483805cf
PA
7357
7358 /* Did we find the stepping thread? */
7359 if (tp->control.step_range_end)
7360 {
 7361 /* Yep. There should be only one, though. */
7362 gdb_assert (stepping_thread == NULL);
7363
7364 /* The event thread is handled at the top, before we
7365 enter this loop. */
7366 gdb_assert (tp != ecs->event_thread);
7367
7368 /* If some thread other than the event thread is
7369 stepping, then scheduler locking can't be in effect,
7370 otherwise we wouldn't have resumed the current event
7371 thread in the first place. */
856e7dd6 7372 gdb_assert (!schedlock_applies (tp));
483805cf
PA
7373
7374 stepping_thread = tp;
7375 }
99619bea
PA
7376 }
7377
483805cf 7378 if (stepping_thread != NULL)
99619bea 7379 {
c447ac0b
PA
7380 if (debug_infrun)
7381 fprintf_unfiltered (gdb_stdlog,
7382 "infrun: switching back to stepped thread\n");
7383
2ac7589c
PA
7384 if (keep_going_stepped_thread (stepping_thread))
7385 {
7386 prepare_to_wait (ecs);
7387 return 1;
7388 }
7389 }
f3f8ece4
PA
7390
7391 switch_to_thread (ecs->event_thread);
2ac7589c 7392 }
2adfaa28 7393
2ac7589c
PA
7394 return 0;
7395}
2adfaa28 7396
2ac7589c
PA
7397/* Set a previously stepped thread back to stepping. Returns true on
7398 success, false if the resume is not possible (e.g., the thread
7399 vanished). */
7400
7401static int
7402keep_going_stepped_thread (struct thread_info *tp)
7403{
7404 struct frame_info *frame;
2ac7589c
PA
7405 struct execution_control_state ecss;
7406 struct execution_control_state *ecs = &ecss;
2adfaa28 7407
2ac7589c
PA
7408 /* If the stepping thread exited, then don't try to switch back and
7409 resume it, which could fail in several different ways depending
7410 on the target. Instead, just keep going.
2adfaa28 7411
2ac7589c
PA
7412 We can find a stepping dead thread in the thread list in two
7413 cases:
2adfaa28 7414
2ac7589c
PA
7415 - The target supports thread exit events, and when the target
7416 tries to delete the thread from the thread list, inferior_ptid
7417 pointed at the exiting thread. In such case, calling
7418 delete_thread does not really remove the thread from the list;
7419 instead, the thread is left listed, with 'exited' state.
64ce06e4 7420
2ac7589c
PA
7421 - The target's debug interface does not support thread exit
7422 events, and so we have no idea whatsoever if the previously
7423 stepping thread is still alive. For that reason, we need to
7424 synchronously query the target now. */
2adfaa28 7425
00431a78 7426 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c
PA
7427 {
7428 if (debug_infrun)
7429 fprintf_unfiltered (gdb_stdlog,
7430 "infrun: not resuming previously "
7431 "stepped thread, it has vanished\n");
7432
00431a78 7433 delete_thread (tp);
2ac7589c 7434 return 0;
c447ac0b 7435 }
2ac7589c
PA
7436
7437 if (debug_infrun)
7438 fprintf_unfiltered (gdb_stdlog,
7439 "infrun: resuming previously stepped thread\n");
7440
7441 reset_ecs (ecs, tp);
00431a78 7442 switch_to_thread (tp);
2ac7589c 7443
f2ffa92b 7444 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7445 frame = get_current_frame ();
2ac7589c
PA
7446
7447 /* If the PC of the thread we were trying to single-step has
7448 changed, then that thread has trapped or been signaled, but the
7449 event has not been reported to GDB yet. Re-poll the target
7450 looking for this particular thread's event (i.e. temporarily
7451 enable schedlock) by:
7452
7453 - setting a break at the current PC
7454 - resuming that particular thread, only (by setting trap
7455 expected)
7456
7457 This prevents us continuously moving the single-step breakpoint
7458 forward, one instruction at a time, overstepping. */
7459
f2ffa92b 7460 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7461 {
7462 ptid_t resume_ptid;
7463
7464 if (debug_infrun)
7465 fprintf_unfiltered (gdb_stdlog,
7466 "infrun: expected thread advanced also (%s -> %s)\n",
7467 paddress (target_gdbarch (), tp->prev_pc),
f2ffa92b 7468 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7469
7470 /* Clear the info of the previous step-over, as it's no longer
7471 valid (if the thread was trying to step over a breakpoint, it
7472 has already succeeded). It's what keep_going would do too,
7473 if we called it. Do this before trying to insert the sss
7474 breakpoint, otherwise if we were previously trying to step
7475 over this exact address in another thread, the breakpoint is
7476 skipped. */
7477 clear_step_over_info ();
7478 tp->control.trap_expected = 0;
7479
7480 insert_single_step_breakpoint (get_frame_arch (frame),
7481 get_frame_address_space (frame),
f2ffa92b 7482 tp->suspend.stop_pc);
2ac7589c 7483
719546c4 7484 tp->resumed = true;
fbea99ea 7485 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7486 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7487 }
7488 else
7489 {
7490 if (debug_infrun)
7491 fprintf_unfiltered (gdb_stdlog,
7492 "infrun: expected thread still hasn't advanced\n");
7493
7494 keep_going_pass_signal (ecs);
7495 }
7496 return 1;
c447ac0b
PA
7497}
7498
8b061563
PA
7499/* Is thread TP in the middle of (software or hardware)
7500 single-stepping? (Note the result of this function must never be
7501 passed directly as target_resume's STEP parameter.) */
104c1213 7502
a289b8f6 7503static int
b3444185 7504currently_stepping (struct thread_info *tp)
a7212384 7505{
8358c15c
JK
7506 return ((tp->control.step_range_end
7507 && tp->control.step_resume_breakpoint == NULL)
7508 || tp->control.trap_expected
af48d08f 7509 || tp->stepped_breakpoint
8358c15c 7510 || bpstat_should_step ());
a7212384
UW
7511}
7512
b2175913
MS
7513/* Inferior has stepped into a subroutine call with source code that
 7514 we should not step over. Step to the first line of code in
7515 it. */
c2c6d25f
JM
7516
7517static void
568d6575
UW
7518handle_step_into_function (struct gdbarch *gdbarch,
7519 struct execution_control_state *ecs)
c2c6d25f 7520{
7e324e48
GB
7521 fill_in_stop_func (gdbarch, ecs);
7522
f2ffa92b
PA
7523 compunit_symtab *cust
7524 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7525 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7526 ecs->stop_func_start
7527 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7528
51abb421 7529 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7530 /* Use the step_resume_break to step until the end of the prologue,
7531 even if that involves jumps (as it seems to on the vax under
7532 4.2). */
7533 /* If the prologue ends in the middle of a source line, continue to
7534 the end of that source line (if it is still within the function).
7535 Otherwise, just go to end of prologue. */
2afb61aa
PA
7536 if (stop_func_sal.end
7537 && stop_func_sal.pc != ecs->stop_func_start
7538 && stop_func_sal.end < ecs->stop_func_end)
7539 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7540
2dbd5e30
KB
7541 /* Architectures which require breakpoint adjustment might not be able
7542 to place a breakpoint at the computed address. If so, the test
7543 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7544 ecs->stop_func_start to an address at which a breakpoint may be
7545 legitimately placed.
8fb3e588 7546
2dbd5e30
KB
7547 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7548 made, GDB will enter an infinite loop when stepping through
7549 optimized code consisting of VLIW instructions which contain
7550 subinstructions corresponding to different source lines. On
7551 FR-V, it's not permitted to place a breakpoint on any but the
7552 first subinstruction of a VLIW instruction. When a breakpoint is
7553 set, GDB will adjust the breakpoint address to the beginning of
7554 the VLIW instruction. Thus, we need to make the corresponding
7555 adjustment here when computing the stop address. */
8fb3e588 7556
568d6575 7557 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7558 {
7559 ecs->stop_func_start
568d6575 7560 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7561 ecs->stop_func_start);
2dbd5e30
KB
7562 }
7563
f2ffa92b 7564 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7565 {
7566 /* We are already there: stop now. */
bdc36728 7567 end_stepping_range (ecs);
c2c6d25f
JM
7568 return;
7569 }
7570 else
7571 {
7572 /* Put the step-breakpoint there and go until there. */
51abb421 7573 symtab_and_line sr_sal;
c2c6d25f
JM
7574 sr_sal.pc = ecs->stop_func_start;
7575 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7576 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7577
c2c6d25f 7578 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7579 some machines the prologue is where the new fp value is
7580 established. */
a6d9a66e 7581 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7582
7583 /* And make sure stepping stops right away then. */
16c381f0
JK
7584 ecs->event_thread->control.step_range_end
7585 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7586 }
7587 keep_going (ecs);
7588}
d4f3574e 7589
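/* Illustrative example (not part of the original source): from the
   user's point of view, the machinery above is what makes a "step"
   that enters a function stop after the prologue instead of on the
   first prologue instruction, e.g.:

       (gdb) step
       callee (x=42) at callee.c:12
       12        int sum = x + 1;

   The function, file and line shown are made up for the example.  */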
b2175913
MS
7590/* Inferior has stepped backward into a subroutine call with source
7591 code that we should not step over. Do step to the beginning of the
7592 last line of code in it. */
7593
7594static void
568d6575
UW
7595handle_step_into_function_backward (struct gdbarch *gdbarch,
7596 struct execution_control_state *ecs)
b2175913 7597{
43f3e411 7598 struct compunit_symtab *cust;
167e4384 7599 struct symtab_and_line stop_func_sal;
b2175913 7600
7e324e48
GB
7601 fill_in_stop_func (gdbarch, ecs);
7602
f2ffa92b 7603 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7604 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7605 ecs->stop_func_start
7606 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7607
f2ffa92b 7608 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7609
7610 /* OK, we're just going to keep stepping here. */
f2ffa92b 7611 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7612 {
7613 /* We're there already. Just stop stepping now. */
bdc36728 7614 end_stepping_range (ecs);
b2175913
MS
7615 }
7616 else
7617 {
7618 /* Else just reset the step range and keep going.
7619 No step-resume breakpoint, they don't work for
7620 epilogues, which can have multiple entry paths. */
16c381f0
JK
7621 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7622 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7623 keep_going (ecs);
7624 }
7625 return;
7626}
7627
d3169d93 7628/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7629 This is used both to skip over functions and to skip over code. */
7630
7631static void
2c03e5be
PA
7632insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7633 struct symtab_and_line sr_sal,
7634 struct frame_id sr_id,
7635 enum bptype sr_type)
44cbf7b5 7636{
611c83ae
PA
7637 /* There should never be more than one step-resume or longjmp-resume
7638 breakpoint per thread, so we should never be setting a new
44cbf7b5 7639 step_resume_breakpoint when one is already active. */
8358c15c 7640 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7641 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93
DJ
7642
7643 if (debug_infrun)
7644 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7645 "infrun: inserting step-resume breakpoint at %s\n",
7646 paddress (gdbarch, sr_sal.pc));
d3169d93 7647
8358c15c 7648 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7649 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7650}
7651
9da8c2a0 7652void
2c03e5be
PA
7653insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7654 struct symtab_and_line sr_sal,
7655 struct frame_id sr_id)
7656{
7657 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7658 sr_sal, sr_id,
7659 bp_step_resume);
44cbf7b5 7660}
7ce450bd 7661
2c03e5be
PA
7662/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7663 This is used to skip a potential signal handler.
7ce450bd 7664
14e60db5
DJ
7665 This is called with the interrupted function's frame. The signal
7666 handler, when it returns, will resume the interrupted function at
7667 RETURN_FRAME.pc. */
d303a6c7
AC
7668
7669static void
2c03e5be 7670insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7671{
f4c1edd8 7672 gdb_assert (return_frame != NULL);
d303a6c7 7673
51abb421
PA
7674 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7675
7676 symtab_and_line sr_sal;
568d6575 7677 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7678 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7679 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7680
2c03e5be
PA
7681 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7682 get_stack_frame_id (return_frame),
7683 bp_hp_step_resume);
d303a6c7
AC
7684}
7685
2c03e5be
PA
7686/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7687 is used to skip a function after stepping into it (for "next" or if
7688 the called function has no debugging information).
14e60db5
DJ
7689
7690 The current function has almost always been reached by single
7691 stepping a call or return instruction. NEXT_FRAME belongs to the
7692 current function, and the breakpoint will be set at the caller's
7693 resume address.
7694
7695 This is a separate function rather than reusing
2c03e5be 7696 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7697 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7698 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7699
7700static void
7701insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7702{
14e60db5
DJ
7703 /* We shouldn't have gotten here if we don't know where the call site
7704 is. */
c7ce8faa 7705 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7706
51abb421 7707 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7708
51abb421 7709 symtab_and_line sr_sal;
c7ce8faa
DJ
7710 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7711 frame_unwind_caller_pc (next_frame));
14e60db5 7712 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7713 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7714
a6d9a66e 7715 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7716 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7717}
7718
611c83ae
PA
7719/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7720 new breakpoint at the target of a jmp_buf. The handling of
7721 longjmp-resume uses the same mechanisms used for handling
7722 "step-resume" breakpoints. */
7723
7724static void
a6d9a66e 7725insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7726{
e81a37f7
TT
7727 /* There should never be more than one longjmp-resume breakpoint per
7728 thread, so we should never be setting a new
611c83ae 7729 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7730 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae
PA
7731
7732 if (debug_infrun)
7733 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7734 "infrun: inserting longjmp-resume breakpoint at %s\n",
7735 paddress (gdbarch, pc));
611c83ae 7736
e81a37f7 7737 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7738 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7739}
7740
186c406b
TT
7741/* Insert an exception resume breakpoint. TP is the thread throwing
7742 the exception. The block B is the block of the unwinder debug hook
7743 function. FRAME is the frame corresponding to the call to this
7744 function. SYM is the symbol of the function argument holding the
7745 target PC of the exception. */
7746
7747static void
7748insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7749 const struct block *b,
186c406b
TT
7750 struct frame_info *frame,
7751 struct symbol *sym)
7752{
a70b8144 7753 try
186c406b 7754 {
63e43d3a 7755 struct block_symbol vsym;
186c406b
TT
7756 struct value *value;
7757 CORE_ADDR handler;
7758 struct breakpoint *bp;
7759
987012b8 7760 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7761 b, VAR_DOMAIN);
63e43d3a 7762 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7763 /* If the value was optimized out, revert to the old behavior. */
7764 if (! value_optimized_out (value))
7765 {
7766 handler = value_as_address (value);
7767
7768 if (debug_infrun)
7769 fprintf_unfiltered (gdb_stdlog,
7770 "infrun: exception resume at %lx\n",
7771 (unsigned long) handler);
7772
7773 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7774 handler,
7775 bp_exception_resume).release ();
c70a6932
JK
7776
7777 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7778 frame = NULL;
7779
5d5658a1 7780 bp->thread = tp->global_num;
186c406b
TT
7781 inferior_thread ()->control.exception_resume_breakpoint = bp;
7782 }
7783 }
230d2906 7784 catch (const gdb_exception_error &e)
492d29ea
PA
7785 {
7786 /* We want to ignore errors here. */
7787 }
186c406b
TT
7788}
7789
28106bc2
SDJ
7790/* A helper for check_exception_resume that sets an
7791 exception-breakpoint based on a SystemTap probe. */
7792
7793static void
7794insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7795 const struct bound_probe *probe,
28106bc2
SDJ
7796 struct frame_info *frame)
7797{
7798 struct value *arg_value;
7799 CORE_ADDR handler;
7800 struct breakpoint *bp;
7801
7802 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7803 if (!arg_value)
7804 return;
7805
7806 handler = value_as_address (arg_value);
7807
7808 if (debug_infrun)
7809 fprintf_unfiltered (gdb_stdlog,
7810 "infrun: exception resume at %s\n",
6bac7473 7811 paddress (get_objfile_arch (probe->objfile),
28106bc2
SDJ
7812 handler));
7813
7814 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7815 handler, bp_exception_resume).release ();
5d5658a1 7816 bp->thread = tp->global_num;
28106bc2
SDJ
7817 inferior_thread ()->control.exception_resume_breakpoint = bp;
7818}
7819
186c406b
TT
7820/* This is called when an exception has been intercepted. Check to
7821 see whether the exception's destination is of interest, and if so,
7822 set an exception resume breakpoint there. */
7823
7824static void
7825check_exception_resume (struct execution_control_state *ecs,
28106bc2 7826 struct frame_info *frame)
186c406b 7827{
729662a5 7828 struct bound_probe probe;
28106bc2
SDJ
7829 struct symbol *func;
7830
7831 /* First see if this exception unwinding breakpoint was set via a
7832 SystemTap probe point. If so, the probe has two arguments: the
7833 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7834 set a breakpoint there. */
6bac7473 7835 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7836 if (probe.prob)
28106bc2 7837 {
729662a5 7838 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7839 return;
7840 }
7841
7842 func = get_frame_function (frame);
7843 if (!func)
7844 return;
186c406b 7845
a70b8144 7846 try
186c406b 7847 {
3977b71f 7848 const struct block *b;
8157b174 7849 struct block_iterator iter;
186c406b
TT
7850 struct symbol *sym;
7851 int argno = 0;
7852
7853 /* The exception breakpoint is a thread-specific breakpoint on
7854 the unwinder's debug hook, declared as:
7855
7856 void _Unwind_DebugHook (void *cfa, void *handler);
7857
7858 The CFA argument indicates the frame to which control is
7859 about to be transferred. HANDLER is the destination PC.
7860
7861 We ignore the CFA and set a temporary breakpoint at HANDLER.
7862 This is not extremely efficient but it avoids issues in gdb
7863 with computing the DWARF CFA, and it also works even in weird
7864 cases such as throwing an exception from inside a signal
7865 handler. */
7866
7867 b = SYMBOL_BLOCK_VALUE (func);
7868 ALL_BLOCK_SYMBOLS (b, iter, sym)
7869 {
7870 if (!SYMBOL_IS_ARGUMENT (sym))
7871 continue;
7872
7873 if (argno == 0)
7874 ++argno;
7875 else
7876 {
7877 insert_exception_resume_breakpoint (ecs->event_thread,
7878 b, frame, sym);
7879 break;
7880 }
7881 }
7882 }
230d2906 7883 catch (const gdb_exception_error &e)
492d29ea
PA
7884 {
7885 }
186c406b
TT
7886}
7887
104c1213 7888static void
22bcd14b 7889stop_waiting (struct execution_control_state *ecs)
104c1213 7890{
527159b7 7891 if (debug_infrun)
22bcd14b 7892 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
527159b7 7893
cd0fc7c3
SS
7894 /* Let callers know we don't want to wait for the inferior anymore. */
7895 ecs->wait_some_more = 0;
fbea99ea
PA
7896
7897 /* If all-stop, but the target is always in non-stop mode, stop all
7898 threads now that we're presenting the stop to the user. */
7899 if (!non_stop && target_is_non_stop_p ())
7900 stop_all_threads ();
cd0fc7c3
SS
7901}
7902
4d9d9d04
PA
7903/* Like keep_going, but passes the signal to the inferior, even if the
7904 signal is set to nopass. */
d4f3574e
SS
7905
7906static void
4d9d9d04 7907keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7908{
d7e15655 7909 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7910 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7911
d4f3574e 7912 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7913 ecs->event_thread->prev_pc
00431a78 7914 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
d4f3574e 7915
4d9d9d04 7916 if (ecs->event_thread->control.trap_expected)
d4f3574e 7917 {
4d9d9d04
PA
7918 struct thread_info *tp = ecs->event_thread;
7919
7920 if (debug_infrun)
7921 fprintf_unfiltered (gdb_stdlog,
7922 "infrun: %s has trap_expected set, "
7923 "resuming to collect trap\n",
a068643d 7924 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7925
a9ba6bae
PA
7926 /* We haven't yet gotten our trap, and either: intercepted a
7927 non-signal event (e.g., a fork); or took a signal which we
7928 are supposed to pass through to the inferior. Simply
7929 continue. */
64ce06e4 7930 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7931 }
372316f1
PA
7932 else if (step_over_info_valid_p ())
7933 {
7934 /* Another thread is stepping over a breakpoint in-line. If
7935 this thread needs a step-over too, queue the request. In
7936 either case, this resume must be deferred for later. */
7937 struct thread_info *tp = ecs->event_thread;
7938
7939 if (ecs->hit_singlestep_breakpoint
7940 || thread_still_needs_step_over (tp))
7941 {
7942 if (debug_infrun)
7943 fprintf_unfiltered (gdb_stdlog,
7944 "infrun: step-over already in progress: "
7945 "step-over for %s deferred\n",
a068643d 7946 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
7947 thread_step_over_chain_enqueue (tp);
7948 }
7949 else
7950 {
7951 if (debug_infrun)
7952 fprintf_unfiltered (gdb_stdlog,
7953 "infrun: step-over in progress: "
7954 "resume of %s deferred\n",
a068643d 7955 target_pid_to_str (tp->ptid).c_str ());
372316f1 7956 }
372316f1 7957 }
d4f3574e
SS
7958 else
7959 {
31e77af2 7960 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7961 int remove_bp;
7962 int remove_wps;
8d297bbf 7963 step_over_what step_what;
31e77af2 7964
d4f3574e 7965 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7966 anyway (if we got a signal, the user asked it be passed to
7967 the child)
7968 -- or --
7969 We got our expected trap, but decided we should resume from
7970 it.
d4f3574e 7971
a9ba6bae 7972 We're going to run this baby now!
d4f3574e 7973
c36b740a
VP
7974 Note that insert_breakpoints won't try to re-insert
7975 already inserted breakpoints. Therefore, we don't
7976 care if breakpoints were already inserted, or not. */
a9ba6bae 7977
31e77af2
PA
7978 /* If we need to step over a breakpoint, and we're not using
7979 displaced stepping to do so, insert all breakpoints
7980 (watchpoints, etc.) but the one we're stepping over, step one
7981 instruction, and then re-insert the breakpoint when that step
7982 is finished. */
963f9c80 7983
6c4cfb24
PA
7984 step_what = thread_still_needs_step_over (ecs->event_thread);
7985
963f9c80 7986 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7987 || (step_what & STEP_OVER_BREAKPOINT));
7988 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7989
cb71640d
PA
7990 /* We can't use displaced stepping if we need to step past a
7991 watchpoint. The instruction copied to the scratch pad would
7992 still trigger the watchpoint. */
7993 if (remove_bp
3fc8eb30 7994 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7995 {
a01bda52 7996 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7997 regcache_read_pc (regcache), remove_wps,
7998 ecs->event_thread->global_num);
45e8c884 7999 }
963f9c80 8000 else if (remove_wps)
21edc42f 8001 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
8002
8003 /* If we now need to do an in-line step-over, we need to stop
8004 all other threads. Note this must be done before
8005 insert_breakpoints below, because that removes the breakpoint
8006 we're about to step over, otherwise other threads could miss
8007 it. */
fbea99ea 8008 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 8009 stop_all_threads ();
abbb1732 8010
31e77af2 8011 /* Stop stepping if inserting breakpoints fails. */
a70b8144 8012 try
31e77af2
PA
8013 {
8014 insert_breakpoints ();
8015 }
230d2906 8016 catch (const gdb_exception_error &e)
31e77af2
PA
8017 {
8018 exception_print (gdb_stderr, e);
22bcd14b 8019 stop_waiting (ecs);
bdf2a94a 8020 clear_step_over_info ();
31e77af2 8021 return;
d4f3574e
SS
8022 }
8023
963f9c80 8024 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 8025
64ce06e4 8026 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
8027 }
8028
488f131b 8029 prepare_to_wait (ecs);
d4f3574e
SS
8030}
8031
4d9d9d04
PA
8032/* Called when we should continue running the inferior, because the
8033 current event doesn't cause a user visible stop. This does the
8034 resuming part; waiting for the next event is done elsewhere. */
8035
8036static void
8037keep_going (struct execution_control_state *ecs)
8038{
8039 if (ecs->event_thread->control.trap_expected
8040 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
8041 ecs->event_thread->control.trap_expected = 0;
8042
8043 if (!signal_program[ecs->event_thread->suspend.stop_signal])
8044 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
8045 keep_going_pass_signal (ecs);
8046}
8047
104c1213
JM
8048/* This function normally comes after a resume, before
8049 handle_inferior_event exits. It takes care of any last bits of
8050 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 8051
104c1213
JM
8052static void
8053prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 8054{
527159b7 8055 if (debug_infrun)
8a9de0e4 8056 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
104c1213 8057
104c1213 8058 ecs->wait_some_more = 1;
0b333c5e
PA
8059
8060 if (!target_is_async_p ())
8061 mark_infrun_async_event_handler ();
c906108c 8062}
11cf8741 8063
fd664c91 8064/* We are done with the step range of a step/next/si/ni command.
b57bacec 8065 Called once for each n of a "step n" operation. */
fd664c91
PA
8066
8067static void
bdc36728 8068end_stepping_range (struct execution_control_state *ecs)
fd664c91 8069{
bdc36728 8070 ecs->event_thread->control.stop_step = 1;
bdc36728 8071 stop_waiting (ecs);
fd664c91
PA
8072}
8073
33d62d64
JK
8074/* Several print_*_reason functions to print why the inferior has stopped.
8075 We always print something when the inferior exits, or receives a signal.
8076 The rest of the cases are dealt with later on in normal_stop and
8077 print_it_typical. Ideally there should be a call to one of these
8078 print_*_reason functions from handle_inferior_event each time
22bcd14b 8079 stop_waiting is called.
33d62d64 8080
fd664c91
PA
8081 Note that we don't call these directly, instead we delegate that to
8082 the interpreters, through observers. Interpreters then call these
8083 with whatever uiout is right. */
33d62d64 8084
fd664c91
PA
8085void
8086print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 8087{
fd664c91 8088 /* For CLI-like interpreters, print nothing. */
33d62d64 8089
112e8700 8090 if (uiout->is_mi_like_p ())
fd664c91 8091 {
112e8700 8092 uiout->field_string ("reason",
fd664c91
PA
8093 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8094 }
8095}
33d62d64 8096
fd664c91
PA
8097void
8098print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 8099{
33d62d64 8100 annotate_signalled ();
112e8700
SM
8101 if (uiout->is_mi_like_p ())
8102 uiout->field_string
8103 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
8104 uiout->text ("\nProgram terminated with signal ");
33d62d64 8105 annotate_signal_name ();
112e8700 8106 uiout->field_string ("signal-name",
2ea28649 8107 gdb_signal_to_name (siggnal));
33d62d64 8108 annotate_signal_name_end ();
112e8700 8109 uiout->text (", ");
33d62d64 8110 annotate_signal_string ();
112e8700 8111 uiout->field_string ("signal-meaning",
2ea28649 8112 gdb_signal_to_string (siggnal));
33d62d64 8113 annotate_signal_string_end ();
112e8700
SM
8114 uiout->text (".\n");
8115 uiout->text ("The program no longer exists.\n");
33d62d64
JK
8116}
8117
fd664c91
PA
8118void
8119print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 8120{
fda326dd 8121 struct inferior *inf = current_inferior ();
a068643d 8122 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 8123
33d62d64
JK
8124 annotate_exited (exitstatus);
8125 if (exitstatus)
8126 {
112e8700
SM
8127 if (uiout->is_mi_like_p ())
8128 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
8129 std::string exit_code_str
8130 = string_printf ("0%o", (unsigned int) exitstatus);
8131 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
8132 plongest (inf->num), pidstr.c_str (),
8133 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
8134 }
8135 else
11cf8741 8136 {
112e8700
SM
8137 if (uiout->is_mi_like_p ())
8138 uiout->field_string
8139 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
8140 uiout->message ("[Inferior %s (%s) exited normally]\n",
8141 plongest (inf->num), pidstr.c_str ());
33d62d64 8142 }
33d62d64
JK
8143}
8144
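/* Illustrative note (not part of the original source): with the format
   strings above, a CLI user sees output along the lines of

       [Inferior 1 (process 4242) exited normally]
       [Inferior 1 (process 4242) exited with code 01]

   (the exit code is printed in octal), while an MI front end receives
   the "exited-normally" or "exited" reason plus an "exit-code" field.
   The process id shown is made up.  */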
012b3a21
WT
8145/* Some targets/architectures can do extra processing/display of
8146 segmentation faults. E.g., Intel MPX boundary faults.
8147 Call the architecture dependent function to handle the fault. */
8148
8149static void
8150handle_segmentation_fault (struct ui_out *uiout)
8151{
8152 struct regcache *regcache = get_current_regcache ();
ac7936df 8153 struct gdbarch *gdbarch = regcache->arch ();
012b3a21
WT
8154
8155 if (gdbarch_handle_segmentation_fault_p (gdbarch))
8156 gdbarch_handle_segmentation_fault (gdbarch, uiout);
8157}
8158
fd664c91
PA
8159void
8160print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 8161{
f303dbd6
PA
8162 struct thread_info *thr = inferior_thread ();
8163
33d62d64
JK
8164 annotate_signal ();
8165
112e8700 8166 if (uiout->is_mi_like_p ())
f303dbd6
PA
8167 ;
8168 else if (show_thread_that_caused_stop ())
33d62d64 8169 {
f303dbd6 8170 const char *name;
33d62d64 8171
112e8700 8172 uiout->text ("\nThread ");
33eca680 8173 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
8174
8175 name = thr->name != NULL ? thr->name : target_thread_name (thr);
8176 if (name != NULL)
8177 {
112e8700 8178 uiout->text (" \"");
33eca680 8179 uiout->field_string ("name", name);
112e8700 8180 uiout->text ("\"");
f303dbd6 8181 }
33d62d64 8182 }
f303dbd6 8183 else
112e8700 8184 uiout->text ("\nProgram");
f303dbd6 8185
112e8700
SM
8186 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8187 uiout->text (" stopped");
33d62d64
JK
8188 else
8189 {
112e8700 8190 uiout->text (" received signal ");
8b93c638 8191 annotate_signal_name ();
112e8700
SM
8192 if (uiout->is_mi_like_p ())
8193 uiout->field_string
8194 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8195 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8196 annotate_signal_name_end ();
112e8700 8197 uiout->text (", ");
8b93c638 8198 annotate_signal_string ();
112e8700 8199 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
8200
8201 if (siggnal == GDB_SIGNAL_SEGV)
8202 handle_segmentation_fault (uiout);
8203
8b93c638 8204 annotate_signal_string_end ();
33d62d64 8205 }
112e8700 8206 uiout->text (".\n");
33d62d64 8207}
252fbfc8 8208
fd664c91
PA
8209void
8210print_no_history_reason (struct ui_out *uiout)
33d62d64 8211{
112e8700 8212 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8213}
43ff13b4 8214
0c7e1a46
PA
8215/* Print current location without a level number, if we have changed
8216 functions or hit a breakpoint. Print source line if we have one.
8217 bpstat_print contains the logic deciding in detail what to print,
8218 based on the event(s) that just occurred. */
8219
243a9253
PA
8220static void
8221print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8222{
8223 int bpstat_ret;
f486487f 8224 enum print_what source_flag;
0c7e1a46
PA
8225 int do_frame_printing = 1;
8226 struct thread_info *tp = inferior_thread ();
8227
8228 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8229 switch (bpstat_ret)
8230 {
8231 case PRINT_UNKNOWN:
8232 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8233 should) carry around the function and does (or should) use
8234 that when doing a frame comparison. */
8235 if (tp->control.stop_step
8236 && frame_id_eq (tp->control.step_frame_id,
8237 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8238 && (tp->control.step_start_function
8239 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8240 {
8241 /* Finished step, just print source line. */
8242 source_flag = SRC_LINE;
8243 }
8244 else
8245 {
8246 /* Print location and source line. */
8247 source_flag = SRC_AND_LOC;
8248 }
8249 break;
8250 case PRINT_SRC_AND_LOC:
8251 /* Print location and source line. */
8252 source_flag = SRC_AND_LOC;
8253 break;
8254 case PRINT_SRC_ONLY:
8255 source_flag = SRC_LINE;
8256 break;
8257 case PRINT_NOTHING:
8258 /* Something bogus. */
8259 source_flag = SRC_LINE;
8260 do_frame_printing = 0;
8261 break;
8262 default:
8263 internal_error (__FILE__, __LINE__, _("Unknown value."));
8264 }
8265
8266 /* The behavior of this routine with respect to the source
8267 flag is:
8268 SRC_LINE: Print only source line
8269 LOCATION: Print only location
8270 SRC_AND_LOC: Print location and source line. */
8271 if (do_frame_printing)
8272 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8273}
8274
243a9253
PA
8275/* See infrun.h. */
8276
8277void
4c7d57e7 8278print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8279{
243a9253 8280 struct target_waitstatus last;
243a9253
PA
8281 struct thread_info *tp;
8282
5b6d1e4f 8283 get_last_target_status (nullptr, nullptr, &last);
243a9253 8284
67ad9399
TT
8285 {
8286 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8287
67ad9399 8288 print_stop_location (&last);
243a9253 8289
67ad9399 8290 /* Display the auto-display expressions. */
4c7d57e7
TT
8291 if (displays)
8292 do_displays ();
67ad9399 8293 }
243a9253
PA
8294
8295 tp = inferior_thread ();
8296 if (tp->thread_fsm != NULL
46e3ed7f 8297 && tp->thread_fsm->finished_p ())
243a9253
PA
8298 {
8299 struct return_value_info *rv;
8300
46e3ed7f 8301 rv = tp->thread_fsm->return_value ();
243a9253
PA
8302 if (rv != NULL)
8303 print_return_value (uiout, rv);
8304 }
0c7e1a46
PA
8305}
8306
388a7084
PA
8307/* See infrun.h. */
8308
8309void
8310maybe_remove_breakpoints (void)
8311{
8312 if (!breakpoints_should_be_inserted_now () && target_has_execution)
8313 {
8314 if (remove_breakpoints ())
8315 {
223ffa71 8316 target_terminal::ours_for_output ();
388a7084
PA
8317 printf_filtered (_("Cannot remove breakpoints because "
8318 "program is no longer writable.\nFurther "
8319 "execution is probably impossible.\n"));
8320 }
8321 }
8322}
8323
4c2f2a79
PA
8324/* The execution context that just caused a normal stop. */
8325
8326struct stop_context
8327{
2d844eaf
TT
8328 stop_context ();
8329 ~stop_context ();
8330
8331 DISABLE_COPY_AND_ASSIGN (stop_context);
8332
8333 bool changed () const;
8334
4c2f2a79
PA
8335 /* The stop ID. */
8336 ULONGEST stop_id;
c906108c 8337
4c2f2a79 8338 /* The event PTID. */
c906108c 8339
4c2f2a79
PA
8340 ptid_t ptid;
8341
8342 /* If stopp for a thread event, this is the thread that caused the
8343 stop. */
8344 struct thread_info *thread;
8345
8346 /* The inferior that caused the stop. */
8347 int inf_num;
8348};
8349
2d844eaf 8350/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8351 takes a strong reference to the thread. */
8352
2d844eaf 8353stop_context::stop_context ()
4c2f2a79 8354{
2d844eaf
TT
8355 stop_id = get_stop_id ();
8356 ptid = inferior_ptid;
8357 inf_num = current_inferior ()->num;
4c2f2a79 8358
d7e15655 8359 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8360 {
8361 /* Take a strong reference so that the thread can't be deleted
8362 yet. */
2d844eaf
TT
8363 thread = inferior_thread ();
8364 thread->incref ();
4c2f2a79
PA
8365 }
8366 else
2d844eaf 8367 thread = NULL;
4c2f2a79
PA
8368}
8369
8370/* Release a stop context previously created with save_stop_context.
8371 Releases the strong reference to the thread as well. */
8372
2d844eaf 8373stop_context::~stop_context ()
4c2f2a79 8374{
2d844eaf
TT
8375 if (thread != NULL)
8376 thread->decref ();
4c2f2a79
PA
8377}
8378
8379/* Return true if the current context no longer matches the saved stop
8380 context. */
8381
2d844eaf
TT
8382bool
8383stop_context::changed () const
8384{
8385 if (ptid != inferior_ptid)
8386 return true;
8387 if (inf_num != current_inferior ()->num)
8388 return true;
8389 if (thread != NULL && thread->state != THREAD_STOPPED)
8390 return true;
8391 if (get_stop_id () != stop_id)
8392 return true;
8393 return false;
4c2f2a79
PA
8394}
8395
8396/* See infrun.h. */
8397
8398int
96baa820 8399normal_stop (void)
c906108c 8400{
73b65bb0 8401 struct target_waitstatus last;
73b65bb0 8402
5b6d1e4f 8403 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8404
4c2f2a79
PA
8405 new_stop_id ();
8406
29f49a6a
PA
8407 /* If an exception is thrown from this point on, make sure to
8408 propagate GDB's knowledge of the executing state to the
8409 frontend/user running state. A QUIT is an easy exception to see
8410 here, so do this before any filtered output. */
731f534f 8411
5b6d1e4f 8412 ptid_t finish_ptid = null_ptid;
731f534f 8413
c35b1492 8414 if (!non_stop)
5b6d1e4f 8415 finish_ptid = minus_one_ptid;
e1316e60
PA
8416 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8417 || last.kind == TARGET_WAITKIND_EXITED)
8418 {
8419 /* On some targets, we may still have live threads in the
8420 inferior when we get a process exit event. E.g., for
8421 "checkpoint", when the current checkpoint/fork exits,
8422 linux-fork.c automatically switches to another fork from
8423 within target_mourn_inferior. */
731f534f 8424 if (inferior_ptid != null_ptid)
5b6d1e4f 8425 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8426 }
8427 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8428 finish_ptid = inferior_ptid;
8429
8430 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8431 if (finish_ptid != null_ptid)
8432 {
8433 maybe_finish_thread_state.emplace
8434 (user_visible_resume_target (finish_ptid), finish_ptid);
8435 }
29f49a6a 8436
b57bacec
PA
8437 /* As we're presenting a stop, and potentially removing breakpoints,
8438 update the thread list so we can tell whether there are threads
8439 running on the target. With target remote, for example, we can
8440 only learn about new threads when we explicitly update the thread
8441 list. Do this before notifying the interpreters about signal
8442 stops, end of stepping ranges, etc., so that the "new thread"
8443 output is emitted before e.g., "Program received signal FOO",
8444 instead of after. */
8445 update_thread_list ();
8446
8447 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8448 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8449
c906108c
SS
8450 /* As with the notification of thread events, we want to delay
8451 notifying the user that we've switched thread context until
8452 the inferior actually stops.
8453
73b65bb0
DJ
8454 There's no point in saying anything if the inferior has exited.
8455 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8456 "received a signal".
8457
8458 Also skip saying anything in non-stop mode. In that mode, as we
8459 don't want GDB to switch threads behind the user's back, to avoid
8460 races where the user is typing a command to apply to thread x,
8461 but GDB switches to thread y before the user finishes entering
8462 the command, fetch_inferior_event installs a cleanup to restore
8463 the current thread back to the thread the user had selected right
8464 after this event is handled, so we're not really switching, only
8465 informing of a stop. */
4f8d22e3 8466 if (!non_stop
731f534f 8467 && previous_inferior_ptid != inferior_ptid
73b65bb0
DJ
8468 && target_has_execution
8469 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8470 && last.kind != TARGET_WAITKIND_EXITED
8471 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8472 {
0e454242 8473 SWITCH_THRU_ALL_UIS ()
3b12939d 8474 {
223ffa71 8475 target_terminal::ours_for_output ();
3b12939d 8476 printf_filtered (_("[Switching to %s]\n"),
a068643d 8477 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8478 annotate_thread_changed ();
8479 }
39f77062 8480 previous_inferior_ptid = inferior_ptid;
c906108c 8481 }
c906108c 8482
0e5bf2a8
PA
8483 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8484 {
0e454242 8485 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8486 if (current_ui->prompt_state == PROMPT_BLOCKED)
8487 {
223ffa71 8488 target_terminal::ours_for_output ();
3b12939d
PA
8489 printf_filtered (_("No unwaited-for children left.\n"));
8490 }
0e5bf2a8
PA
8491 }
8492
b57bacec 8493 /* Note: this depends on the update_thread_list call above. */
388a7084 8494 maybe_remove_breakpoints ();
c906108c 8495
c906108c
SS
8496 /* If an auto-display called a function and that got a signal,
8497 delete that auto-display to avoid an infinite recursion. */
8498
8499 if (stopped_by_random_signal)
8500 disable_current_display ();
8501
0e454242 8502 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8503 {
8504 async_enable_stdin ();
8505 }
c906108c 8506
388a7084 8507 /* Let the user/frontend see the threads as stopped. */
731f534f 8508 maybe_finish_thread_state.reset ();
388a7084
PA
8509
8510 /* Select innermost stack frame - i.e., current frame is frame 0,
8511 and current location is based on that. Handle the case where the
8512 dummy call is returning after being stopped. E.g. the dummy call
8513 previously hit a breakpoint. (If the dummy call returns
8514 normally, we won't reach here.) Do this before the stop hook is
8515 run, so that it doesn't get to see the temporary dummy frame,
8516 which is not where we'll present the stop. */
8517 if (has_stack_frames ())
8518 {
8519 if (stop_stack_dummy == STOP_STACK_DUMMY)
8520 {
8521 /* Pop the empty frame that contains the stack dummy. This
8522 also restores inferior state prior to the call (struct
8523 infcall_suspend_state). */
8524 struct frame_info *frame = get_current_frame ();
8525
8526 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8527 frame_pop (frame);
8528 /* frame_pop calls reinit_frame_cache as the last thing it
8529 does which means there's now no selected frame. */
8530 }
8531
8532 select_frame (get_current_frame ());
8533
8534 /* Set the current source location. */
8535 set_current_sal_from_frame (get_current_frame ());
8536 }
dd7e2d2b
PA
8537
8538 /* Look up the hook_stop and run it (CLI internally handles problem
8539 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8540 if (stop_command != NULL)
8541 {
2d844eaf 8542 stop_context saved_context;
4c2f2a79 8543
a70b8144 8544 try
bf469271
PA
8545 {
8546 execute_cmd_pre_hook (stop_command);
8547 }
230d2906 8548 catch (const gdb_exception &ex)
bf469271
PA
8549 {
8550 exception_fprintf (gdb_stderr, ex,
8551 "Error while running hook_stop:\n");
8552 }
4c2f2a79
PA
8553
8554 /* If the stop hook resumes the target, then there's no point in
8555 trying to notify about the previous stop; its context is
8556 gone. Likewise if the command switches thread or inferior --
8557 the observers would print a stop for the wrong
8558 thread/inferior. */
2d844eaf
TT
8559 if (saved_context.changed ())
8560 return 1;
4c2f2a79 8561 }
dd7e2d2b 8562
388a7084
PA
8563 /* Notify observers about the stop. This is where the interpreters
8564 print the stop event. */
d7e15655 8565 if (inferior_ptid != null_ptid)
76727919 8566 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8567 stop_print_frame);
8568 else
76727919 8569 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8570
243a9253
PA
8571 annotate_stopped ();
8572
48844aa6
PA
8573 if (target_has_execution)
8574 {
8575 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8576 && last.kind != TARGET_WAITKIND_EXITED
8577 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8578 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8579 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8580 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8581 }
6c95b8df
PA
8582
8583 /* Try to get rid of automatically added inferiors that are no
8584 longer needed. Keeping those around slows down things linearly.
8585 Note that this never removes the current inferior. */
8586 prune_inferiors ();
4c2f2a79
PA
8587
8588 return 0;
c906108c 8589}
c906108c 8590\f
c5aa993b 8591int
96baa820 8592signal_stop_state (int signo)
c906108c 8593{
d6b48e9c 8594 return signal_stop[signo];
c906108c
SS
8595}
8596
c5aa993b 8597int
96baa820 8598signal_print_state (int signo)
c906108c
SS
8599{
8600 return signal_print[signo];
8601}
8602
c5aa993b 8603int
96baa820 8604signal_pass_state (int signo)
c906108c
SS
8605{
8606 return signal_program[signo];
8607}
8608
2455069d
UW
8609static void
8610signal_cache_update (int signo)
8611{
8612 if (signo == -1)
8613 {
a493e3e2 8614 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8615 signal_cache_update (signo);
8616
8617 return;
8618 }
8619
8620 signal_pass[signo] = (signal_stop[signo] == 0
8621 && signal_print[signo] == 0
ab04a2af
TT
8622 && signal_program[signo] == 1
8623 && signal_catch[signo] == 0);
2455069d
UW
8624}
8625
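/* Illustrative note (not part of the original source): signal_pass[] is
   a pure cache derived from the four arrays tested above, so after e.g.

       signal_stop[GDB_SIGNAL_USR1] = 0;
       signal_print[GDB_SIGNAL_USR1] = 0;
       signal_program[GDB_SIGNAL_USR1] = 1;
       signal_catch[GDB_SIGNAL_USR1] = 0;
       signal_cache_update (GDB_SIGNAL_USR1);

   signal_pass[GDB_SIGNAL_USR1] ends up 1, meaning the target may pass
   that signal straight to the inferior without reporting it.  */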
488f131b 8626int
7bda5e4a 8627signal_stop_update (int signo, int state)
d4f3574e
SS
8628{
8629 int ret = signal_stop[signo];
abbb1732 8630
d4f3574e 8631 signal_stop[signo] = state;
2455069d 8632 signal_cache_update (signo);
d4f3574e
SS
8633 return ret;
8634}
8635
488f131b 8636int
7bda5e4a 8637signal_print_update (int signo, int state)
d4f3574e
SS
8638{
8639 int ret = signal_print[signo];
abbb1732 8640
d4f3574e 8641 signal_print[signo] = state;
2455069d 8642 signal_cache_update (signo);
d4f3574e
SS
8643 return ret;
8644}
8645
488f131b 8646int
7bda5e4a 8647signal_pass_update (int signo, int state)
d4f3574e
SS
8648{
8649 int ret = signal_program[signo];
abbb1732 8650
d4f3574e 8651 signal_program[signo] = state;
2455069d 8652 signal_cache_update (signo);
d4f3574e
SS
8653 return ret;
8654}
8655
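/* Illustrative sketch (not part of the original source): a caller that
   wants to temporarily let a signal go straight through to the inferior
   might pair the update functions above like this (error handling
   omitted; the variable names are made up):

       int old_stop = signal_stop_update (GDB_SIGNAL_USR1, 0);
       int old_print = signal_print_update (GDB_SIGNAL_USR1, 0);
       ...
       signal_stop_update (GDB_SIGNAL_USR1, old_stop);
       signal_print_update (GDB_SIGNAL_USR1, old_print);

   Each call refreshes the signal_pass[] cache via signal_cache_update.  */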
ab04a2af
TT
8656/* Update the global 'signal_catch' from INFO and notify the
8657 target. */
8658
8659void
8660signal_catch_update (const unsigned int *info)
8661{
8662 int i;
8663
8664 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8665 signal_catch[i] = info[i] > 0;
8666 signal_cache_update (-1);
adc6a863 8667 target_pass_signals (signal_pass);
ab04a2af
TT
8668}
8669
c906108c 8670static void
96baa820 8671sig_print_header (void)
c906108c 8672{
3e43a32a
MS
8673 printf_filtered (_("Signal Stop\tPrint\tPass "
8674 "to program\tDescription\n"));
c906108c
SS
8675}
8676
8677static void
2ea28649 8678sig_print_info (enum gdb_signal oursig)
c906108c 8679{
2ea28649 8680 const char *name = gdb_signal_to_name (oursig);
c906108c 8681 int name_padding = 13 - strlen (name);
96baa820 8682
c906108c
SS
8683 if (name_padding <= 0)
8684 name_padding = 0;
8685
8686 printf_filtered ("%s", name);
488f131b 8687 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8688 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8689 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8690 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8691 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8692}
8693
8694/* Specify how various signals in the inferior should be handled. */
8695
8696static void
0b39b52e 8697handle_command (const char *args, int from_tty)
c906108c 8698{
c906108c 8699 int digits, wordlen;
b926417a 8700 int sigfirst, siglast;
2ea28649 8701 enum gdb_signal oursig;
c906108c 8702 int allsigs;
c906108c
SS
8703
8704 if (args == NULL)
8705 {
e2e0b3e5 8706 error_no_arg (_("signal to handle"));
c906108c
SS
8707 }
8708
1777feb0 8709 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8710
adc6a863
PA
8711 const size_t nsigs = GDB_SIGNAL_LAST;
8712 unsigned char sigs[nsigs] {};
c906108c 8713
1777feb0 8714 /* Break the command line up into args. */
c906108c 8715
773a1edc 8716 gdb_argv built_argv (args);
c906108c
SS
8717
8718 /* Walk through the args, looking for signal oursigs, signal names, and
8719 actions. Signal numbers and signal names may be interspersed with
8720 actions, with the actions being performed for all signals cumulatively
1777feb0 8721 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8722
773a1edc 8723 for (char *arg : built_argv)
c906108c 8724 {
773a1edc
TT
8725 wordlen = strlen (arg);
8726 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8727 {;
8728 }
8729 allsigs = 0;
8730 sigfirst = siglast = -1;
8731
773a1edc 8732 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8733 {
8734 /* Apply action to all signals except those used by the
1777feb0 8735 debugger. Silently skip those. */
c906108c
SS
8736 allsigs = 1;
8737 sigfirst = 0;
8738 siglast = nsigs - 1;
8739 }
773a1edc 8740 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8741 {
8742 SET_SIGS (nsigs, sigs, signal_stop);
8743 SET_SIGS (nsigs, sigs, signal_print);
8744 }
773a1edc 8745 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8746 {
8747 UNSET_SIGS (nsigs, sigs, signal_program);
8748 }
773a1edc 8749 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8750 {
8751 SET_SIGS (nsigs, sigs, signal_print);
8752 }
773a1edc 8753 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8754 {
8755 SET_SIGS (nsigs, sigs, signal_program);
8756 }
773a1edc 8757 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8758 {
8759 UNSET_SIGS (nsigs, sigs, signal_stop);
8760 }
773a1edc 8761 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8762 {
8763 SET_SIGS (nsigs, sigs, signal_program);
8764 }
773a1edc 8765 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8766 {
8767 UNSET_SIGS (nsigs, sigs, signal_print);
8768 UNSET_SIGS (nsigs, sigs, signal_stop);
8769 }
773a1edc 8770 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8771 {
8772 UNSET_SIGS (nsigs, sigs, signal_program);
8773 }
8774 else if (digits > 0)
8775 {
8776 /* It is numeric. The numeric signal refers to our own
8777 internal signal numbering from target.h, not to host/target
8778 signal number. This is a feature; users really should be
8779 using symbolic names anyway, and the common ones like
8780 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8781
8782 sigfirst = siglast = (int)
773a1edc
TT
8783 gdb_signal_from_command (atoi (arg));
8784 if (arg[digits] == '-')
c906108c
SS
8785 {
8786 siglast = (int)
773a1edc 8787 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8788 }
8789 if (sigfirst > siglast)
8790 {
1777feb0 8791 /* Bet he didn't figure we'd think of this case... */
b926417a 8792 std::swap (sigfirst, siglast);
c906108c
SS
8793 }
8794 }
8795 else
8796 {
773a1edc 8797 oursig = gdb_signal_from_name (arg);
a493e3e2 8798 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8799 {
8800 sigfirst = siglast = (int) oursig;
8801 }
8802 else
8803 {
8804 /* Not a number and not a recognized flag word => complain. */
773a1edc 8805 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8806 }
8807 }
8808
8809 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8810 which signals to apply actions to. */
c906108c 8811
b926417a 8812 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8813 {
2ea28649 8814 switch ((enum gdb_signal) signum)
c906108c 8815 {
a493e3e2
PA
8816 case GDB_SIGNAL_TRAP:
8817 case GDB_SIGNAL_INT:
c906108c
SS
8818 if (!allsigs && !sigs[signum])
8819 {
9e2f0ad4 8820 if (query (_("%s is used by the debugger.\n\
3e43a32a 8821Are you sure you want to change it? "),
2ea28649 8822 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8823 {
8824 sigs[signum] = 1;
8825 }
8826 else
c119e040 8827 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8828 }
8829 break;
a493e3e2
PA
8830 case GDB_SIGNAL_0:
8831 case GDB_SIGNAL_DEFAULT:
8832 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8833 /* Make sure that "all" doesn't print these. */
8834 break;
8835 default:
8836 sigs[signum] = 1;
8837 break;
8838 }
8839 }
c906108c
SS
8840 }
8841
b926417a 8842 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8843 if (sigs[signum])
8844 {
2455069d 8845 signal_cache_update (-1);
adc6a863
PA
8846 target_pass_signals (signal_pass);
8847 target_program_signals (signal_program);
c906108c 8848
3a031f65
PA
8849 if (from_tty)
8850 {
8851 /* Show the results. */
8852 sig_print_header ();
8853 for (; signum < nsigs; signum++)
8854 if (sigs[signum])
aead7601 8855 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8856 }
8857
8858 break;
8859 }
c906108c
SS
8860}
8861
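/* Illustrative example (not part of the original source): the parser
   above accepts invocations such as

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle 5-7 print
       (gdb) handle all nopass

   i.e. any mix of signal names, GDB signal numbers (1-15), numeric
   ranges and the keyword actions, applied cumulatively from left to
   right.  */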
de0bea00
MF
8862/* Complete the "handle" command. */
8863
eb3ff9a5 8864static void
de0bea00 8865handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8866 completion_tracker &tracker,
6f937416 8867 const char *text, const char *word)
de0bea00 8868{
de0bea00
MF
8869 static const char * const keywords[] =
8870 {
8871 "all",
8872 "stop",
8873 "ignore",
8874 "print",
8875 "pass",
8876 "nostop",
8877 "noignore",
8878 "noprint",
8879 "nopass",
8880 NULL,
8881 };
8882
eb3ff9a5
PA
8883 signal_completer (ignore, tracker, text, word);
8884 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8885}
8886
2ea28649
PA
8887enum gdb_signal
8888gdb_signal_from_command (int num)
ed01b82c
PA
8889{
8890 if (num >= 1 && num <= 15)
2ea28649 8891 return (enum gdb_signal) num;
ed01b82c
PA
8892 error (_("Only signals 1-15 are valid as numeric signals.\n\
8893Use \"info signals\" for a list of symbolic signals."));
8894}
8895
c906108c
SS
8896/* Print current contents of the tables set by the handle command.
8897 It is possible we should just be printing signals actually used
8898 by the current target (but for things to work right when switching
8899 targets, all signals should be in the signal tables). */
8900
8901static void
1d12d88f 8902info_signals_command (const char *signum_exp, int from_tty)
c906108c 8903{
2ea28649 8904 enum gdb_signal oursig;
abbb1732 8905
c906108c
SS
8906 sig_print_header ();
8907
8908 if (signum_exp)
8909 {
8910 /* First see if this is a symbol name. */
2ea28649 8911 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8912 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8913 {
8914 /* No, try numeric. */
8915 oursig =
2ea28649 8916 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8917 }
8918 sig_print_info (oursig);
8919 return;
8920 }
8921
8922 printf_filtered ("\n");
8923 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8924 for (oursig = GDB_SIGNAL_FIRST;
8925 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8926 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8927 {
8928 QUIT;
8929
a493e3e2
PA
8930 if (oursig != GDB_SIGNAL_UNKNOWN
8931 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8932 sig_print_info (oursig);
8933 }
8934
3e43a32a
MS
8935 printf_filtered (_("\nUse the \"handle\" command "
8936 "to change these tables.\n"));
c906108c 8937}
4aa995e1
PA
8938
8939/* The $_siginfo convenience variable is a bit special. We don't know
8940 for sure the type of the value until we actually have a chance to
7a9dd1b2 8941 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8942 also dependent on which thread you have selected.  We handle this by:
8943
8944 1. making $_siginfo be an internalvar that creates a new value on
8945 access.
8946
8947 2. making the value of $_siginfo be an lval_computed value. */
8948
8949/* This function implements the lval_computed support for reading a
8950 $_siginfo value. */
8951
8952static void
8953siginfo_value_read (struct value *v)
8954{
8955 LONGEST transferred;
8956
a911d87a
PA
8957 /* If we can access registers, so can we access $_siginfo. Likewise
8958 vice versa. */
8959 validate_registers_access ();
c709acd1 8960
4aa995e1 8961 transferred =
8b88a78e 8962 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8963 NULL,
8964 value_contents_all_raw (v),
8965 value_offset (v),
8966 TYPE_LENGTH (value_type (v)));
8967
8968 if (transferred != TYPE_LENGTH (value_type (v)))
8969 error (_("Unable to read siginfo"));
8970}
8971
8972/* This function implements the lval_computed support for writing a
8973 $_siginfo value. */
8974
8975static void
8976siginfo_value_write (struct value *v, struct value *fromval)
8977{
8978 LONGEST transferred;
8979
a911d87a
PA
8980 /* If we can access registers, so can we access $_siginfo. Likewise
8981 vice versa. */
8982 validate_registers_access ();
c709acd1 8983
8b88a78e 8984 transferred = target_write (current_top_target (),
4aa995e1
PA
8985 TARGET_OBJECT_SIGNAL_INFO,
8986 NULL,
8987 value_contents_all_raw (fromval),
8988 value_offset (v),
8989 TYPE_LENGTH (value_type (fromval)));
8990
8991 if (transferred != TYPE_LENGTH (value_type (fromval)))
8992 error (_("Unable to write siginfo"));
8993}
8994
c8f2448a 8995static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8996 {
8997 siginfo_value_read,
8998 siginfo_value_write
8999 };
9000
9001/* Return a new value with the correct type for the siginfo object of
78267919
UW
9002 the current thread using architecture GDBARCH. Return a void value
9003 if there's no object available. */
4aa995e1 9004
2c0b251b 9005static struct value *
22d2b532
SDJ
9006siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9007 void *ignore)
4aa995e1 9008{
4aa995e1 9009 if (target_has_stack
d7e15655 9010 && inferior_ptid != null_ptid
78267919 9011 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 9012 {
78267919 9013 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 9014
78267919 9015 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
9016 }
9017
78267919 9018 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
9019}
9020
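/* Illustrative example (not part of the original source): on a target
   whose gdbarch provides a siginfo type (e.g. GNU/Linux), the machinery
   above is what makes commands like

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr
       (gdb) set $_siginfo.si_signo = 11

   read and write the signal information through
   TARGET_OBJECT_SIGNAL_INFO.  The member names shown belong to the
   Linux siginfo layout and are not defined in this file.  */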
c906108c 9021\f
16c381f0
JK
9022/* infcall_suspend_state contains state about the program itself like its
9023 registers and any signal it received when it last stopped.
9024 This state must be restored regardless of how the inferior function call
9025 ends (either successfully, or after it hits a breakpoint or signal)
9026 if the program is to properly continue where it left off. */
9027
6bf78e29 9028class infcall_suspend_state
7a292a7a 9029{
6bf78e29
AB
9030public:
9031 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9032 once the inferior function call has finished. */
9033 infcall_suspend_state (struct gdbarch *gdbarch,
9034 const struct thread_info *tp,
9035 struct regcache *regcache)
9036 : m_thread_suspend (tp->suspend),
9037 m_registers (new readonly_detached_regcache (*regcache))
9038 {
9039 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
9040
9041 if (gdbarch_get_siginfo_type_p (gdbarch))
9042 {
9043 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9044 size_t len = TYPE_LENGTH (type);
9045
9046 siginfo_data.reset ((gdb_byte *) xmalloc (len));
9047
9048 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
9049 siginfo_data.get (), 0, len) != len)
9050 {
9051 /* Errors ignored. */
9052 siginfo_data.reset (nullptr);
9053 }
9054 }
9055
9056 if (siginfo_data)
9057 {
9058 m_siginfo_gdbarch = gdbarch;
9059 m_siginfo_data = std::move (siginfo_data);
9060 }
9061 }
9062
9063 /* Return a pointer to the stored register state. */
16c381f0 9064
6bf78e29
AB
9065 readonly_detached_regcache *registers () const
9066 {
9067 return m_registers.get ();
9068 }
9069
9070 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9071
9072 void restore (struct gdbarch *gdbarch,
9073 struct thread_info *tp,
9074 struct regcache *regcache) const
9075 {
9076 tp->suspend = m_thread_suspend;
9077
9078 if (m_siginfo_gdbarch == gdbarch)
9079 {
9080 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9081
9082 /* Errors ignored. */
9083 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
9084 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
9085 }
9086
9087 /* The inferior can be gone if the user types "print exit(0)"
9088 (and perhaps other times). */
9089 if (target_has_execution)
9090 /* NB: The register write goes through to the target. */
9091 regcache->restore (registers ());
9092 }
9093
9094private:
9095 /* How the current thread stopped before the inferior function call was
9096 executed. */
9097 struct thread_suspend_state m_thread_suspend;
9098
9099 /* The registers before the inferior function call was executed. */
9100 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 9101
35515841 9102 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 9103 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
9104
9105 /* The saved siginfo data. Its format depends on M_SIGINFO_GDBARCH and
9106 its length is TYPE_LENGTH (gdbarch_get_siginfo_type ()); for a
9107 different gdbarch the content would be invalid. */
6bf78e29 9108 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
9109};
9110
cb524840
TT
9111infcall_suspend_state_up
9112save_infcall_suspend_state ()
b89667eb 9113{
b89667eb 9114 struct thread_info *tp = inferior_thread ();
1736ad11 9115 struct regcache *regcache = get_current_regcache ();
ac7936df 9116 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 9117
6bf78e29
AB
9118 infcall_suspend_state_up inf_state
9119 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 9120
6bf78e29
AB
9121 /* Having saved the current state, adjust the thread state, discarding
9122 any stop signal information. The stop signal is not useful when
9123 starting an inferior function call, and run_inferior_call will not use
9124 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 9125 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 9126
b89667eb
DE
9127 return inf_state;
9128}
9129
9130/* Restore inferior session state to INF_STATE. */
9131
9132void
16c381f0 9133restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
9134{
9135 struct thread_info *tp = inferior_thread ();
1736ad11 9136 struct regcache *regcache = get_current_regcache ();
ac7936df 9137 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 9138
6bf78e29 9139 inf_state->restore (gdbarch, tp, regcache);
16c381f0 9140 discard_infcall_suspend_state (inf_state);
b89667eb
DE
9141}
9142
b89667eb 9143void
16c381f0 9144discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 9145{
dd848631 9146 delete inf_state;
b89667eb
DE
9147}
9148
daf6667d 9149readonly_detached_regcache *
16c381f0 9150get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 9151{
6bf78e29 9152 return inf_state->registers ();
b89667eb
DE
9153}
9154
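/* Editorial sketch (not part of infrun.c): pairing save_infcall_suspend_state
   with get_infcall_suspend_state_regcache to look at the registers captured
   at save time.  The commented read is indicative only; it assumes the usual
   readable_regcache accessors.  */

static void
sketch_inspect_saved_registers ()
{
  /* Capture the current thread's machine state.  */
  infcall_suspend_state_up state = save_infcall_suspend_state ();

  /* The detached regcache holds the registers as they were at save time,
     independent of anything the target does afterwards.  */
  readonly_detached_regcache *saved_regs
    = get_infcall_suspend_state_regcache (state.get ());

  /* saved_regs->cooked_read (regnum, buf) or a similar accessor (assumption:
     the standard readable_regcache interface) can now recover the pre-call
     register contents.  Whether the saved state is later restored or
     discarded is up to the caller; see restore_infcall_suspend_state and
     discard_infcall_suspend_state above.  */
}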
16c381f0
JK
9155/* infcall_control_state contains state regarding gdb's control of the
9156 inferior itself like stepping control. It also contains session state like
9157 the user's currently selected frame. */
b89667eb 9158
16c381f0 9159struct infcall_control_state
b89667eb 9160{
16c381f0
JK
9161 struct thread_control_state thread_control;
9162 struct inferior_control_state inferior_control;
d82142e2
JK
9163
9164 /* Other fields: */
ee841dd8
TT
9165 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
9166 int stopped_by_random_signal = 0;
7a292a7a 9167
b89667eb 9168 /* ID of the selected frame when the inferior function call was made. */
ee841dd8 9169 struct frame_id selected_frame_id {};
7a292a7a
SS
9170};
9171
c906108c 9172/* Save all of the information associated with the inferior<==>gdb
b89667eb 9173 connection. */
c906108c 9174
cb524840
TT
9175infcall_control_state_up
9176save_infcall_control_state ()
c906108c 9177{
cb524840 9178 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 9179 struct thread_info *tp = inferior_thread ();
d6b48e9c 9180 struct inferior *inf = current_inferior ();
7a292a7a 9181
16c381f0
JK
9182 inf_status->thread_control = tp->control;
9183 inf_status->inferior_control = inf->control;
d82142e2 9184
8358c15c 9185 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9186 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9187
16c381f0
JK
9188 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9189 chain. If caller's caller is walking the chain, they'll be happier if we
9190 hand them back the original chain when restore_infcall_control_state is
9191 called. */
9192 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
9193
9194 /* Other fields: */
9195 inf_status->stop_stack_dummy = stop_stack_dummy;
9196 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9197
206415a3 9198 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 9199
7a292a7a 9200 return inf_status;
c906108c
SS
9201}
9202
bf469271
PA
9203static void
9204restore_selected_frame (const frame_id &fid)
c906108c 9205{
bf469271 9206 frame_info *frame = frame_find_by_id (fid);
c906108c 9207
aa0cd9c1
AC
9208 /* If no frame with the saved ID can be found, the previously
9209 selected frame can no longer be restored. */
101dcfbe 9210 if (frame == NULL)
c906108c 9211 {
8a3fe4f8 9212 warning (_("Unable to restore previously selected frame."));
bf469271 9213 return;
c906108c
SS
9214 }
9215
0f7d239c 9216 select_frame (frame);
c906108c
SS
9217}
9218
b89667eb
DE
9219/* Restore inferior session state to INF_STATUS. */
9220
c906108c 9221void
16c381f0 9222restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9223{
4e1c45ea 9224 struct thread_info *tp = inferior_thread ();
d6b48e9c 9225 struct inferior *inf = current_inferior ();
4e1c45ea 9226
8358c15c
JK
9227 if (tp->control.step_resume_breakpoint)
9228 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9229
5b79abe7
TT
9230 if (tp->control.exception_resume_breakpoint)
9231 tp->control.exception_resume_breakpoint->disposition
9232 = disp_del_at_next_stop;
9233
d82142e2 9234 /* Free the bpstat copy made by save_infcall_control_state. */
16c381f0 9235 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9236
16c381f0
JK
9237 tp->control = inf_status->thread_control;
9238 inf->control = inf_status->inferior_control;
d82142e2
JK
9239
9240 /* Other fields: */
9241 stop_stack_dummy = inf_status->stop_stack_dummy;
9242 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9243
b89667eb 9244 if (target_has_stack)
c906108c 9245 {
bf469271 9246 /* The point of the try/catch is that if the stack is clobbered,
101dcfbe
AC
9247 walking the stack might encounter a garbage pointer and
9248 error() trying to dereference it. */
a70b8144 9249 try
bf469271
PA
9250 {
9251 restore_selected_frame (inf_status->selected_frame_id);
9252 }
230d2906 9253 catch (const gdb_exception_error &ex)
bf469271
PA
9254 {
9255 exception_fprintf (gdb_stderr, ex,
9256 "Unable to restore previously selected frame:\n");
9257 /* Error in restoring the selected frame. Select the
9258 innermost frame. */
9259 select_frame (get_current_frame ());
9260 }
c906108c 9261 }
c906108c 9262
ee841dd8 9263 delete inf_status;
7a292a7a 9264}
c906108c
SS
9265
9266void
16c381f0 9267discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9268{
8358c15c
JK
9269 if (inf_status->thread_control.step_resume_breakpoint)
9270 inf_status->thread_control.step_resume_breakpoint->disposition
9271 = disp_del_at_next_stop;
9272
5b79abe7
TT
9273 if (inf_status->thread_control.exception_resume_breakpoint)
9274 inf_status->thread_control.exception_resume_breakpoint->disposition
9275 = disp_del_at_next_stop;
9276
1777feb0 9277 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9278 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9279
ee841dd8 9280 delete inf_status;
7a292a7a 9281}
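/* Editorial sketch (not part of infrun.c): how infcall code is expected to
   pair the control-state and suspend-state helpers around an inferior
   function call.  run_the_call () is a hypothetical stand-in for the code
   that actually executes the call, and the exact ordering in GDB's real
   infcall path may differ.  */

static void
sketch_infcall_state_handling ()
{
  /* Save GDB's control state (stepping, bpstat, selected frame) and the
     program's machine state (registers, stop signal).  */
  infcall_control_state_up control = save_infcall_control_state ();
  infcall_suspend_state_up suspend = save_infcall_suspend_state ();

  bool call_completed = run_the_call ();	/* hypothetical helper */

  if (call_completed)
    {
      /* Normal completion: put GDB's control state back and drop the saved
	 machine state, which is now stale.  */
      restore_infcall_control_state (control.release ());
      discard_infcall_suspend_state (suspend.release ());
    }
  else
    {
      /* The call was interrupted or aborted: also restore the registers and
	 stop state so the program can continue where it left off.  */
      restore_infcall_suspend_state (suspend.release ());
      restore_infcall_control_state (control.release ());
    }
}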
b89667eb 9282\f
7f89fd65 9283/* See infrun.h. */
0c557179
SDJ
9284
9285void
9286clear_exit_convenience_vars (void)
9287{
9288 clear_internalvar (lookup_internalvar ("_exitsignal"));
9289 clear_internalvar (lookup_internalvar ("_exitcode"));
9290}
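/* Illustrative usage (editorial, not part of infrun.c): the convenience
   variables cleared above are the ones users inspect after the inferior
   terminates:

       (gdb) print $_exitcode
       (gdb) print $_exitsignal

   Only one of the two is defined for a given termination: $_exitcode for a
   normal exit, $_exitsignal when the inferior was killed by a signal.  */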
c5aa993b 9291\f
488f131b 9292
b2175913
MS
9293/* User interface for reverse debugging:
9294 Set exec-direction / show exec-direction commands
9295 (returns error unless target implements to_set_exec_direction method). */
9296
170742de 9297enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
9298static const char exec_forward[] = "forward";
9299static const char exec_reverse[] = "reverse";
9300static const char *exec_direction = exec_forward;
40478521 9301static const char *const exec_direction_names[] = {
b2175913
MS
9302 exec_forward,
9303 exec_reverse,
9304 NULL
9305};
9306
9307static void
eb4c3f4a 9308set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
9309 struct cmd_list_element *cmd)
9310{
9311 if (target_can_execute_reverse)
9312 {
9313 if (!strcmp (exec_direction, exec_forward))
9314 execution_direction = EXEC_FORWARD;
9315 else if (!strcmp (exec_direction, exec_reverse))
9316 execution_direction = EXEC_REVERSE;
9317 }
8bbed405
MS
9318 else
9319 {
9320 exec_direction = exec_forward;
9321 error (_("Target does not support this operation."));
9322 }
b2175913
MS
9323}
9324
9325static void
9326show_exec_direction_func (struct ui_file *out, int from_tty,
9327 struct cmd_list_element *cmd, const char *value)
9328{
9329 switch (execution_direction) {
9330 case EXEC_FORWARD:
9331 fprintf_filtered (out, _("Forward.\n"));
9332 break;
9333 case EXEC_REVERSE:
9334 fprintf_filtered (out, _("Reverse.\n"));
9335 break;
b2175913 9336 default:
d8b34453
PA
9337 internal_error (__FILE__, __LINE__,
9338 _("bogus execution_direction value: %d"),
9339 (int) execution_direction);
b2175913
MS
9340 }
9341}
9342
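/* Illustrative usage (editorial, not part of infrun.c): the exec-direction
   setting registered in _initialize_infrun below only takes effect on targets
   that can execute in reverse, e.g. under process record:

       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) continue
       (gdb) set exec-direction forward

   On other targets set_exec_direction_func raises the "Target does not
   support this operation." error seen above.  */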
d4db2f36
PA
9343static void
9344show_schedule_multiple (struct ui_file *file, int from_tty,
9345 struct cmd_list_element *c, const char *value)
9346{
3e43a32a
MS
9347 fprintf_filtered (file, _("Resuming the execution of threads "
9348 "of all processes is %s.\n"), value);
d4db2f36 9349}
ad52ddc6 9350
22d2b532
SDJ
9351/* Implementation of `siginfo' variable. */
9352
9353static const struct internalvar_funcs siginfo_funcs =
9354{
9355 siginfo_make_value,
9356 NULL,
9357 NULL
9358};
9359
372316f1
PA
9360/* Callback for infrun's target events source. This is marked when a
9361 thread has a pending status to process. */
9362
9363static void
9364infrun_async_inferior_event_handler (gdb_client_data data)
9365{
372316f1
PA
9366 inferior_event_handler (INF_REG_EVENT, NULL);
9367}
9368
6c265988 9369void _initialize_infrun ();
c906108c 9370void
6c265988 9371_initialize_infrun ()
c906108c 9372{
de0bea00 9373 struct cmd_list_element *c;
c906108c 9374
372316f1
PA
9375 /* Register extra event sources in the event loop. */
9376 infrun_async_inferior_event_token
9377 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
9378
11db9430 9379 add_info ("signals", info_signals_command, _("\
1bedd215
AC
9380What debugger does when program gets various signals.\n\
9381Specify a signal as argument to print info on that signal only."));
c906108c
SS
9382 add_info_alias ("handle", "signals", 0);
9383
de0bea00 9384 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9385Specify how to handle signals.\n\
486c7739 9386Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9387Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9388If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
9389will be displayed instead.\n\
9390\n\
c906108c
SS
9391Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9392from 1-15 are allowed for compatibility with old versions of GDB.\n\
9393Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9394The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9395used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9396\n\
1bedd215 9397Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
9398\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9399Stop means reenter debugger if this signal happens (implies print).\n\
9400Print means print a message if this signal happens.\n\
9401Pass means let program see this signal; otherwise program doesn't know.\n\
9402Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
9403Pass and Stop may be combined.\n\
9404\n\
9405Multiple signals may be specified. Signal numbers and signal names\n\
9406may be interspersed with actions, with the actions being performed for\n\
9407all signals cumulatively specified."));
de0bea00 9408 set_cmd_completer (c, handle_completer);
486c7739 9409
c906108c 9410 if (!dbx_commands)
1a966eab
AC
9411 stop_command = add_cmd ("stop", class_obscure,
9412 not_just_help_class_command, _("\
9413There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9414This allows you to set a list of commands to be run each time execution\n\
1a966eab 9415of the program stops."), &cmdlist);
c906108c 9416
ccce17b0 9417 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
9418Set inferior debugging."), _("\
9419Show inferior debugging."), _("\
9420When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
9421 NULL,
9422 show_debug_infrun,
9423 &setdebuglist, &showdebuglist);
527159b7 9424
3e43a32a
MS
9425 add_setshow_boolean_cmd ("displaced", class_maintenance,
9426 &debug_displaced, _("\
237fc4c9
PA
9427Set displaced stepping debugging."), _("\
9428Show displaced stepping debugging."), _("\
9429When non-zero, displaced stepping specific debugging is enabled."),
9430 NULL,
9431 show_debug_displaced,
9432 &setdebuglist, &showdebuglist);
9433
ad52ddc6
PA
9434 add_setshow_boolean_cmd ("non-stop", no_class,
9435 &non_stop_1, _("\
9436Set whether gdb controls the inferior in non-stop mode."), _("\
9437Show whether gdb controls the inferior in non-stop mode."), _("\
9438When debugging a multi-threaded program and this setting is\n\
9439off (the default, also called all-stop mode), when one thread stops\n\
9440(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9441all other threads in the program while you interact with the thread of\n\
9442interest. When you continue or step a thread, you can allow the other\n\
9443threads to run, or have them remain stopped, but while you inspect any\n\
9444thread's state, all threads stop.\n\
9445\n\
9446In non-stop mode, when one thread stops, other threads can continue\n\
9447to run freely. You'll be able to step each thread independently,\n\
9448leave it stopped or free to run as needed."),
9449 set_non_stop,
9450 show_non_stop,
9451 &setlist,
9452 &showlist);
9453
adc6a863 9454 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
9455 {
9456 signal_stop[i] = 1;
9457 signal_print[i] = 1;
9458 signal_program[i] = 1;
ab04a2af 9459 signal_catch[i] = 0;
c906108c
SS
9460 }
9461
4d9d9d04
PA
9462 /* Signals caused by debugger's own actions should not be given to
9463 the program afterwards.
9464
9465 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9466 explicitly specifies that it should be delivered to the target
9467 program. Typically, that would occur when a user is debugging a
9468 target monitor on a simulator: the target monitor sets a
9469 breakpoint; the simulator encounters this breakpoint and halts
9470 the simulation handing control to GDB; GDB, noting that the stop
9471 address doesn't map to any known breakpoint, returns control back
9472 to the simulator; the simulator then delivers the hardware
9473 equivalent of a GDB_SIGNAL_TRAP to the program being
9474 debugged. */
a493e3e2
PA
9475 signal_program[GDB_SIGNAL_TRAP] = 0;
9476 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9477
9478 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9479 signal_stop[GDB_SIGNAL_ALRM] = 0;
9480 signal_print[GDB_SIGNAL_ALRM] = 0;
9481 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9482 signal_print[GDB_SIGNAL_VTALRM] = 0;
9483 signal_stop[GDB_SIGNAL_PROF] = 0;
9484 signal_print[GDB_SIGNAL_PROF] = 0;
9485 signal_stop[GDB_SIGNAL_CHLD] = 0;
9486 signal_print[GDB_SIGNAL_CHLD] = 0;
9487 signal_stop[GDB_SIGNAL_IO] = 0;
9488 signal_print[GDB_SIGNAL_IO] = 0;
9489 signal_stop[GDB_SIGNAL_POLL] = 0;
9490 signal_print[GDB_SIGNAL_POLL] = 0;
9491 signal_stop[GDB_SIGNAL_URG] = 0;
9492 signal_print[GDB_SIGNAL_URG] = 0;
9493 signal_stop[GDB_SIGNAL_WINCH] = 0;
9494 signal_print[GDB_SIGNAL_WINCH] = 0;
9495 signal_stop[GDB_SIGNAL_PRIO] = 0;
9496 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9497
cd0fc7c3
SS
9498 /* These signals are used internally by user-level thread
9499 implementations. (See signal(5) on Solaris.) Like the above
9500 signals, a healthy program receives and handles them as part of
9501 its normal operation. */
a493e3e2
PA
9502 signal_stop[GDB_SIGNAL_LWP] = 0;
9503 signal_print[GDB_SIGNAL_LWP] = 0;
9504 signal_stop[GDB_SIGNAL_WAITING] = 0;
9505 signal_print[GDB_SIGNAL_WAITING] = 0;
9506 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9507 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9508 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9509 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9510
2455069d
UW
9511 /* Update cached state. */
9512 signal_cache_update (-1);
9513
85c07804
AC
9514 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9515 &stop_on_solib_events, _("\
9516Set stopping for shared library events."), _("\
9517Show stopping for shared library events."), _("\
c906108c
SS
9518If nonzero, gdb will give control to the user when the dynamic linker\n\
9519notifies gdb of shared library events. The most common event of interest\n\
85c07804 9520to the user would be loading/unloading of a new library."),
f9e14852 9521 set_stop_on_solib_events,
920d2a44 9522 show_stop_on_solib_events,
85c07804 9523 &setlist, &showlist);
c906108c 9524
7ab04401
AC
9525 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9526 follow_fork_mode_kind_names,
9527 &follow_fork_mode_string, _("\
9528Set debugger response to a program call of fork or vfork."), _("\
9529Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9530A fork or vfork creates a new process. follow-fork-mode can be:\n\
9531 parent - the original process is debugged after a fork\n\
9532 child - the new process is debugged after a fork\n\
ea1dd7bc 9533The unfollowed process will continue to run.\n\
7ab04401
AC
9534By default, the debugger will follow the parent process."),
9535 NULL,
920d2a44 9536 show_follow_fork_mode_string,
7ab04401
AC
9537 &setlist, &showlist);
9538
6c95b8df
PA
9539 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9540 follow_exec_mode_names,
9541 &follow_exec_mode_string, _("\
9542Set debugger response to a program call of exec."), _("\
9543Show debugger response to a program call of exec."), _("\
9544An exec call replaces the program image of a process.\n\
9545\n\
9546follow-exec-mode can be:\n\
9547\n\
cce7e648 9548 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9549to this new inferior. The program the process was running before\n\
9550the exec call can be restarted afterwards by restarting the original\n\
9551inferior.\n\
9552\n\
9553 same - the debugger keeps the process bound to the same inferior.\n\
9554The new executable image replaces the previous executable loaded in\n\
9555the inferior. Restarting the inferior after the exec call restarts\n\
9556the executable the process was running after the exec call.\n\
9557\n\
9558By default, the debugger will use the same inferior."),
9559 NULL,
9560 show_follow_exec_mode_string,
9561 &setlist, &showlist);
9562
7ab04401
AC
9563 add_setshow_enum_cmd ("scheduler-locking", class_run,
9564 scheduler_enums, &scheduler_mode, _("\
9565Set mode for locking scheduler during execution."), _("\
9566Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9567off == no locking (threads may preempt at any time)\n\
9568on == full locking (no thread except the current thread may run)\n\
9569 This applies to both normal execution and replay mode.\n\
9570step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9571 In this mode, other threads may run during other commands.\n\
9572 This applies to both normal execution and replay mode.\n\
9573replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9574 set_schedlock_func, /* traps on target vector */
920d2a44 9575 show_scheduler_mode,
7ab04401 9576 &setlist, &showlist);
5fbbeb29 9577
d4db2f36
PA
9578 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9579Set mode for resuming threads of all processes."), _("\
9580Show mode for resuming threads of all processes."), _("\
9581When on, execution commands (such as 'continue' or 'next') resume all\n\
9582threads of all processes. When off (which is the default), execution\n\
9583commands only resume the threads of the current process. The set of\n\
9584threads that are resumed is further refined by the scheduler-locking\n\
9585mode (see help set scheduler-locking)."),
9586 NULL,
9587 show_schedule_multiple,
9588 &setlist, &showlist);
9589
5bf193a2
AC
9590 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9591Set mode of the step operation."), _("\
9592Show mode of the step operation."), _("\
9593When set, doing a step over a function without debug line information\n\
9594will stop at the first instruction of that function. Otherwise, the\n\
9595function is skipped and the step command stops at a different source line."),
9596 NULL,
920d2a44 9597 show_step_stop_if_no_debug,
5bf193a2 9598 &setlist, &showlist);
ca6724c1 9599
72d0e2c5
YQ
9600 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9601 &can_use_displaced_stepping, _("\
237fc4c9
PA
9602Set debugger's willingness to use displaced stepping."), _("\
9603Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9604If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9605supported by the target architecture. If off, gdb will not use displaced\n\
9606stepping to step over breakpoints, even if such is supported by the target\n\
9607architecture. If auto (which is the default), gdb will use displaced stepping\n\
9608if the target architecture supports it and non-stop mode is active, but will not\n\
9609use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9610 NULL,
9611 show_can_use_displaced_stepping,
9612 &setlist, &showlist);
237fc4c9 9613
b2175913
MS
9614 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9615 &exec_direction, _("Set direction of execution.\n\
9616Options are 'forward' or 'reverse'."),
9617 _("Show direction of execution (forward/reverse)."),
9618 _("Tells gdb whether to execute forward or backward."),
9619 set_exec_direction_func, show_exec_direction_func,
9620 &setlist, &showlist);
9621
6c95b8df
PA
9622 /* Set/show detach-on-fork: user-settable mode. */
9623
9624 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9625Set whether gdb will detach the child of a fork."), _("\
9626Show whether gdb will detach the child of a fork."), _("\
9627Tells gdb whether to detach the child of a fork."),
9628 NULL, NULL, &setlist, &showlist);
9629
03583c20
UW
9630 /* Set/show disable address space randomization mode. */
9631
9632 add_setshow_boolean_cmd ("disable-randomization", class_support,
9633 &disable_randomization, _("\
9634Set disabling of debuggee's virtual address space randomization."), _("\
9635Show disabling of debuggee's virtual address space randomization."), _("\
9636When this mode is on (which is the default), randomization of the virtual\n\
9637address space is disabled. Standalone programs run with the randomization\n\
9638enabled by default on some platforms."),
9639 &set_disable_randomization,
9640 &show_disable_randomization,
9641 &setlist, &showlist);
9642
ca6724c1 9643 /* ptid initializations */
ca6724c1
KB
9644 inferior_ptid = null_ptid;
9645 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9646
76727919
TT
9647 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
9648 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
9649 gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
9650 gdb::observers::inferior_exit.attach (infrun_inferior_exit);
4aa995e1
PA
9651
9652 /* Explicitly create without lookup, since that tries to create a
9653 value with a void typed value, and when we get here, gdbarch
9654 isn't initialized yet. At this point, we're quite sure there
9655 isn't another convenience variable of the same name. */
22d2b532 9656 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9657
9658 add_setshow_boolean_cmd ("observer", no_class,
9659 &observer_mode_1, _("\
9660Set whether gdb controls the inferior in observer mode."), _("\
9661Show whether gdb controls the inferior in observer mode."), _("\
9662In observer mode, GDB can get data from the inferior, but not\n\
9663affect its execution. Registers and memory may not be changed,\n\
9664breakpoints may not be set, and the program cannot be interrupted\n\
9665or signalled."),
9666 set_observer_mode,
9667 show_observer_mode,
9668 &setlist,
9669 &showlist);
c906108c 9670}
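/* Illustrative usage (editorial, not part of infrun.c): a few of the commands
   and settings registered above, as a user would type them:

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) info signals SIGUSR1
       (gdb) set follow-fork-mode child
       (gdb) set scheduler-locking step
       (gdb) set non-stop on
       (gdb) print $_siginfo

   Each of these maps directly onto an add_com, add_info, or add_setshow_*
   registration performed by _initialize_infrun.  */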