[gdb/testsuite] Fix info-types-c.exp
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2021 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "breakpoint.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include "inf-loop.h"
38 #include "regcache.h"
39 #include "value.h"
40 #include "observable.h"
41 #include "language.h"
42 #include "solib.h"
43 #include "main.h"
44 #include "block.h"
45 #include "mi/mi-common.h"
46 #include "event-top.h"
47 #include "record.h"
48 #include "record-full.h"
49 #include "inline-frame.h"
50 #include "jit.h"
51 #include "tracepoint.h"
52 #include "skip.h"
53 #include "probe.h"
54 #include "objfiles.h"
55 #include "completer.h"
56 #include "target-descriptions.h"
57 #include "target-dcache.h"
58 #include "terminal.h"
59 #include "solist.h"
60 #include "gdbsupport/event-loop.h"
61 #include "thread-fsm.h"
62 #include "gdbsupport/enum-flags.h"
63 #include "progspace-and-thread.h"
64 #include "gdbsupport/gdb_optional.h"
65 #include "arch-utils.h"
66 #include "gdbsupport/scope-exit.h"
67 #include "gdbsupport/forward-scope-exit.h"
68 #include "gdbsupport/gdb_select.h"
69 #include <unordered_map>
70 #include "async-event.h"
71 #include "gdbsupport/selftest.h"
72 #include "scoped-mock-context.h"
73 #include "test-target.h"
74 #include "gdbsupport/common-debug.h"
75
76 /* Prototypes for local functions */
77
78 static void sig_print_info (enum gdb_signal);
79
80 static void sig_print_header (void);
81
82 static void follow_inferior_reset_breakpoints (void);
83
84 static bool currently_stepping (struct thread_info *tp);
85
86 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
87
88 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
89
90 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
91
92 static bool maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
93
94 static void resume (gdb_signal sig);
95
96 static void wait_for_inferior (inferior *inf);
97
98 /* Asynchronous signal handler registered as event loop source for
99 when we have pending events ready to be passed to the core. */
100 static struct async_event_handler *infrun_async_inferior_event_token;
101
102 /* Stores whether infrun_async was previously enabled or disabled.
103 Starts off as -1, indicating "never enabled/disabled". */
104 static int infrun_is_async = -1;
105
106 /* See infrun.h. */
107
108 void
109 infrun_async (int enable)
110 {
111 if (infrun_is_async != enable)
112 {
113 infrun_is_async = enable;
114
115 infrun_debug_printf ("enable=%d", enable);
116
117 if (enable)
118 mark_async_event_handler (infrun_async_inferior_event_token);
119 else
120 clear_async_event_handler (infrun_async_inferior_event_token);
121 }
122 }
123
124 /* See infrun.h. */
125
void
mark_infrun_async_event_handler (void)
{
  /* Flag infrun's async event handler so the event loop calls back
     into infrun to process pending inferior events.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
131
132 /* When set, stop the 'step' command if we enter a function which has
133 no line number information. The normal behavior is that we step
134 over such function. */
135 bool step_stop_if_no_debug = false;
/* "show" callback for the "step-mode" setting; reports the current
   value of step_stop_if_no_debug.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}
142
143 /* proceed and normal_stop use this to notify the user when the
144 inferior stopped in a different thread than it had been running
145 in. */
146
147 static ptid_t previous_inferior_ptid;
148
149 /* If set (default for legacy reasons), when following a fork, GDB
150 will detach from one of the fork branches, child or parent.
151 Exactly which branch is detached depends on 'set follow-fork-mode'
152 setting. */
153
154 static bool detach_fork = true;
155
156 bool debug_infrun = false;
/* "show" callback for the "debug infrun" setting.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}
163
164 /* Support for disabling address space randomization. */
165
166 bool disable_randomization = true;
167
168 static void
169 show_disable_randomization (struct ui_file *file, int from_tty,
170 struct cmd_list_element *c, const char *value)
171 {
172 if (target_supports_disable_randomization ())
173 fprintf_filtered (file,
174 _("Disabling randomization of debuggee's "
175 "virtual address space is %s.\n"),
176 value);
177 else
178 fputs_filtered (_("Disabling randomization of debuggee's "
179 "virtual address space is unsupported on\n"
180 "this platform.\n"), file);
181 }
182
183 static void
184 set_disable_randomization (const char *args, int from_tty,
185 struct cmd_list_element *c)
186 {
187 if (!target_supports_disable_randomization ())
188 error (_("Disabling randomization of debuggee's "
189 "virtual address space is unsupported on\n"
190 "this platform."));
191 }
192
193 /* User interface for non-stop mode. */
194
195 bool non_stop = false;
196 static bool non_stop_1 = false;
197
198 static void
199 set_non_stop (const char *args, int from_tty,
200 struct cmd_list_element *c)
201 {
202 if (target_has_execution ())
203 {
204 non_stop_1 = non_stop;
205 error (_("Cannot change this setting while the inferior is running."));
206 }
207
208 non_stop = non_stop_1;
209 }
210
/* "show" callback for the "non-stop" setting.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}
219
220 /* "Observer mode" is somewhat like a more extreme version of
221 non-stop, in which all GDB operations that might affect the
222 target's execution have been disabled. */
223
224 static bool observer_mode = false;
225 static bool observer_mode_1 = false;
226
227 static void
228 set_observer_mode (const char *args, int from_tty,
229 struct cmd_list_element *c)
230 {
231 if (target_has_execution ())
232 {
233 observer_mode_1 = observer_mode;
234 error (_("Cannot change this setting while the inferior is running."));
235 }
236
237 observer_mode = observer_mode_1;
238
239 may_write_registers = !observer_mode;
240 may_write_memory = !observer_mode;
241 may_insert_breakpoints = !observer_mode;
242 may_insert_tracepoints = !observer_mode;
243 /* We can insert fast tracepoints in or out of observer mode,
244 but enable them if we're going into this mode. */
245 if (observer_mode)
246 may_insert_fast_tracepoints = true;
247 may_stop = !observer_mode;
248 update_target_permissions ();
249
250 /* Going *into* observer mode we must force non-stop, then
251 going out we leave it that way. */
252 if (observer_mode)
253 {
254 pagination_enabled = 0;
255 non_stop = non_stop_1 = true;
256 }
257
258 if (from_tty)
259 printf_filtered (_("Observer mode is now %s.\n"),
260 (observer_mode ? "on" : "off"));
261 }
262
/* "show" callback for the "observer" setting.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}
269
270 /* This updates the value of observer mode based on changes in
271 permissions. Note that we are deliberately ignoring the values of
272 may-write-registers and may-write-memory, since the user may have
273 reason to enable these during a session, for instance to turn on a
274 debugging-related global. */
275
276 void
277 update_observer_mode (void)
278 {
279 bool newval = (!may_insert_breakpoints
280 && !may_insert_tracepoints
281 && may_insert_fast_tracepoints
282 && !may_stop
283 && non_stop);
284
285 /* Let the user know if things change. */
286 if (newval != observer_mode)
287 printf_filtered (_("Observer mode is now %s.\n"),
288 (newval ? "on" : "off"));
289
290 observer_mode = observer_mode_1 = newval;
291 }
292
293 /* Tables of how to react to signals; the user sets them. */
294
295 static unsigned char signal_stop[GDB_SIGNAL_LAST];
296 static unsigned char signal_print[GDB_SIGNAL_LAST];
297 static unsigned char signal_program[GDB_SIGNAL_LAST];
298
299 /* Table of signals that are registered with "catch signal". A
300 non-zero entry indicates that the signal is caught by some "catch
301 signal" command. */
302 static unsigned char signal_catch[GDB_SIGNAL_LAST];
303
304 /* Table of signals that the target may silently handle.
305 This is automatically determined from the flags above,
306 and simply cached here. */
307 static unsigned char signal_pass[GDB_SIGNAL_LAST];
308
309 #define SET_SIGS(nsigs,sigs,flags) \
310 do { \
311 int signum = (nsigs); \
312 while (signum-- > 0) \
313 if ((sigs)[signum]) \
314 (flags)[signum] = 1; \
315 } while (0)
316
317 #define UNSET_SIGS(nsigs,sigs,flags) \
318 do { \
319 int signum = (nsigs); \
320 while (signum-- > 0) \
321 if ((sigs)[signum]) \
322 (flags)[signum] = 0; \
323 } while (0)
324
325 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
326 this function is to avoid exporting `signal_program'. */
327
void
update_signals_program_target (void)
{
  /* Push the current signal_program table down to the target layer.  */
  target_program_signals (signal_program);
}
333
334 /* Value to pass to target_resume() to cause all threads to resume. */
335
336 #define RESUME_ALL minus_one_ptid
337
338 /* Command list pointer for the "stop" placeholder. */
339
340 static struct cmd_list_element *stop_command;
341
342 /* Nonzero if we want to give control to the user when we're notified
343 of shared library events by the dynamic linker. */
344 int stop_on_solib_events;
345
346 /* Enable or disable optional shared library event breakpoints
347 as appropriate when the above flag is changed. */
348
/* "set" callback for "stop-on-solib-events": the optional solib event
   breakpoints depend on the flag, so re-sync them on every change.  */
static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}
355
/* "show" callback for the "stop-on-solib-events" setting.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}
363
364 /* True after stop if current stack frame should be printed. */
365
366 static bool stop_print_frame;
367
368 /* This is a cached copy of the target/ptid/waitstatus of the last
369 event returned by target_wait()/deprecated_target_wait_hook().
370 This information is returned by get_last_target_status(). */
371 static process_stratum_target *target_last_proc_target;
372 static ptid_t target_last_wait_ptid;
373 static struct target_waitstatus target_last_waitstatus;
374
375 void init_thread_stepping_state (struct thread_info *tss);
376
377 static const char follow_fork_mode_child[] = "child";
378 static const char follow_fork_mode_parent[] = "parent";
379
380 static const char *const follow_fork_mode_kind_names[] = {
381 follow_fork_mode_child,
382 follow_fork_mode_parent,
383 NULL
384 };
385
386 static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* "show" callback for the "follow-fork-mode" setting.  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}
396 \f
397
398 /* Handle changes to the inferior list based on the type of fork,
399 which process is being followed, and whether the other process
400 should be detached. On entry inferior_ptid must be the ptid of
401 the fork parent. At return inferior_ptid is the ptid of the
402 followed inferior. */
403
static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  /* The pending_follow record of the event thread tells us whether
     this was a fork or a vfork, and who the other process is.  */
  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("[Detaching after %s from child %s]\n"),
				has_vforked ? "vfork" : "fork",
				target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  struct inferior *parent_inf, *child_inf;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  scoped_restore_current_pspace_and_thread restore_pspace_thread;

	  set_current_inferior (child_inf);
	  switch_to_no_thread ();
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  child_inf->push_target (parent_inf->process_target ());
	  thread_info *child_thr
	    = add_thread_silent (child_inf->process_target (), child_ptid);

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork ();

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;

	      /* Now that the inferiors and program spaces are all
		 wired up, we can switch to the child thread (which
		 switches inferior and program space too).  */
	      switch_to_thread (child_thr);
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = new program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* solib_create_inferior_hook relies on the current
		 thread.  */
	      switch_to_thread (child_thr);

	      /* Let the shared library layer (e.g., solib-svr4) learn
		 about this new process, relocate the cloned exec, pull
		 in shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }
	}

      if (has_vforked)
	{
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  fprintf_filtered (gdb_stdlog,
			    _("[Attaching after %s %s to child %s]\n"),
			    parent_pid.c_str (),
			    has_vforked ? "vfork" : "fork",
			    child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
	/* Hold a strong reference to the target while (maybe)
	   detaching the parent.  Otherwise detaching could close the
	   target.  */
	auto target_ref = target_ops_ref::new_reference (target);

	/* If we're vforking, we want to hold on to the parent until
	   the child exits or execs.  At child exec or exit time we
	   can remove the old breakpoints from the parent and detach
	   or resume debugging it.  Otherwise, detach the parent now;
	   we'll want to reuse it's program/address spaces, but we
	   can't set them to the child before removing breakpoints
	   from the parent, otherwise, the breakpoints module could
	   decide to remove breakpoints from the wrong process (since
	   they'd be assigned to the same address space).  */

	if (has_vforked)
	  {
	    gdb_assert (child_inf->vfork_parent == NULL);
	    gdb_assert (parent_inf->vfork_child == NULL);
	    child_inf->vfork_parent = parent_inf;
	    child_inf->pending_detach = 0;
	    parent_inf->vfork_child = child_inf;
	    parent_inf->pending_detach = detach_fork;
	    parent_inf->waiting_for_vfork_done = 0;
	  }
	else if (detach_fork)
	  {
	    if (print_inferior_events)
	      {
		/* Ensure that we have a process ptid.  */
		ptid_t process_ptid = ptid_t (parent_ptid.pid ());

		target_terminal::ours_for_output ();
		fprintf_filtered (gdb_stdlog,
				  _("[Detaching after fork from "
				    "parent %s]\n"),
				  target_pid_to_str (process_ptid).c_str ());
	      }

	    target_detach (parent_inf, 0);
	    parent_inf = NULL;
	  }

	/* Note that the detach above makes PARENT_INF dangling.  */

	/* Add the child thread to the appropriate lists, and switch
	   to this new thread, before cloning the program space, and
	   informing the solib layer about this new process.  */

	set_current_inferior (child_inf);
	child_inf->push_target (target);
      }

      thread_info *child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;

	  exec_on_vfork ();
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = new program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (e.g., solib-svr4) learn
	     about this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}

      switch_to_thread (child_thr);
    }

  /* Let the target do its per-fork bookkeeping for the branch we
     chose to follow.  */
  target_follow_fork (follow_child, detach_fork);

  return false;
}
669
670 /* Tell the target to follow the fork we're stopped at. Returns true
671 if the inferior should be resumed; false, if the target for some
672 reason decided it's best not to resume. */
673
static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
	 do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
	  && wait_status.kind != TARGET_WAITKIND_VFORKED)
	return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
	 reported.  */
      if (wait_ptid != minus_one_ptid
	  && (current_inferior ()->process_target () != wait_target
	      || inferior_ptid != wait_ptid))
	{
	  /* We did.  Switch back to WAIT_PTID thread, to tell the
	     target to follow it (in either direction).  We'll
	     afterwards refuse to resume, and inform the user what
	     happened.  */
	  thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
	  switch_to_thread (wait_thread);
	  should_resume = false;
	}
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;

	/* If the user did a next/step, etc, over a fork call,
	   preserve the stepping state in the fork child.  */
	if (follow_child && should_resume)
	  {
	    step_resume_breakpoint = clone_momentary_breakpoint
					 (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    current_line = tp->current_line;
	    current_symtab = tp->current_symtab;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    thread_fsm = tp->thread_fsm;

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	    tp->thread_fsm = NULL;
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.value.related_pid;

	process_stratum_target *parent_targ = tp->inf->process_target ();
	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = 0;
	  }
	else
	  {
	    /* This pending follow fork event is now handled, one way
	       or another.  The previous selected thread may be gone
	       from the lists by now, but if it is still around, need
	       to clear the pending follow request.  */
	    tp = find_thread_ptid (parent_targ, parent);
	    if (tp)
	      tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

	    /* This makes sure we don't try to apply the "Switched
	       over from WAIT_PID" logic above.  */
	    nullify_last_target_wait_ptid ();

	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		thread_info *child_thr = find_thread_ptid (parent_targ, child);
		switch_to_thread (child_thr);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp = inferior_thread ();
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->current_line = current_line;
		    tp->current_symtab = current_symtab;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->thread_fsm = thread_fsm;
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but, the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child."));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
		      "Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}
843
/* After following a fork to the child, fix up breakpoint state so
   the cloned step/exception-resume breakpoints and the user's
   breakpoints apply to the (newly current) child thread.  */
static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
883
884 /* The child has exited or execed: resume threads of the parent the
885 user wanted to be executing. */
886
887 static int
888 proceed_after_vfork_done (struct thread_info *thread,
889 void *arg)
890 {
891 int pid = * (int *) arg;
892
893 if (thread->ptid.pid () == pid
894 && thread->state == THREAD_RUNNING
895 && !thread->executing
896 && !thread->stop_requested
897 && thread->suspend.stop_signal == GDB_SIGNAL_0)
898 {
899 infrun_debug_printf ("resuming vfork parent thread %s",
900 target_pid_to_str (thread->ptid).c_str ());
901
902 switch_to_thread (thread);
903 clear_proceed_status (0);
904 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
905 }
906
907 return 0;
908 }
909
910 /* Called whenever we notice an exec or exit event, to handle
911 detaching or resuming a vfork parent. */
912
static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* PID of the vfork parent to resume afterwards, or -1 if the
	 parent should not be resumed from here.  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = 0;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent->pid;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to no-thread while running clone_program_space, so
	     that clone_program_space doesn't want to read the
	     selected frame of a dead process.  */
	  scoped_restore_current_thread restore_thread;
	  switch_to_no_thread ();

	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent->pid;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);
	}
    }
}
1042
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

/* The current "follow-exec-mode" setting; points at one of the
   entries of FOLLOW_EXEC_MODE_NAMES above.  Defaults to "same".  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* "show" callback for the "follow-exec-mode" setting; prints the
   current VALUE.  */
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}
1061
/* Follow an exec event: inferior PTID has exec'd the program
   EXEC_FILE_TARGET (a path in the target's filesystem).  Refresh
   GDB's threads, breakpoints, symbols, and program/address spaces
   accordingly.  EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (process_ptid).c_str (),
		     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  struct inferior *inf = current_inferior ();

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      inferior *new_inferior = add_inferior_with_spaces ();

      swap_terminal_info (new_inferior, inf);
      exit_inferior_silent (inf);

      new_inferior->pid = pid;
      target_follow_exec (new_inferior, ptid, exec_file_target);

      /* We continue with the new inferior.  */
      inf = new_inferior;
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
      target_follow_exec (inf, ptid, exec_file_target);
    }

  gdb_assert (current_inferior () == inf);
  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1235
/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *global_thread_step_over_chain_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1258
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The thread's global number.  -1 means no thread is stepping over
     anything (see clear_step_over_info).  */
  int thread = -1;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given that threads that can't run code in the same address space as
   the breakpoint's can't really miss the breakpoint, GDB could be
   taught to step-over at most one breakpoint per address space (so
   this info could move to the address space object if/when GDB is
   extended).  The set of breakpoints being stepped over will normally
   be much smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1303
1304 /* Record the address of the breakpoint/instruction we're currently
1305 stepping over.
1306 N.B. We record the aspace and address now, instead of say just the thread,
1307 because when we need the info later the thread may be running. */
1308
1309 static void
1310 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1311 int nonsteppable_watchpoint_p,
1312 int thread)
1313 {
1314 step_over_info.aspace = aspace;
1315 step_over_info.address = address;
1316 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1317 step_over_info.thread = thread;
1318 }
1319
1320 /* Called when we're not longer stepping over a breakpoint / an
1321 instruction, so all breakpoints are free to be (re)inserted. */
1322
1323 static void
1324 clear_step_over_info (void)
1325 {
1326 infrun_debug_printf ("clearing step over info");
1327 step_over_info.aspace = NULL;
1328 step_over_info.address = 0;
1329 step_over_info.nonsteppable_watchpoint_p = 0;
1330 step_over_info.thread = -1;
1331 }
1332
1333 /* See infrun.h. */
1334
1335 int
1336 stepping_past_instruction_at (struct address_space *aspace,
1337 CORE_ADDR address)
1338 {
1339 return (step_over_info.aspace != NULL
1340 && breakpoint_address_match (aspace, address,
1341 step_over_info.aspace,
1342 step_over_info.address));
1343 }
1344
1345 /* See infrun.h. */
1346
1347 int
1348 thread_is_stepping_over_breakpoint (int thread)
1349 {
1350 return (step_over_info.thread != -1
1351 && thread == step_over_info.thread);
1352 }
1353
1354 /* See infrun.h. */
1355
1356 int
1357 stepping_past_nonsteppable_watchpoint (void)
1358 {
1359 return step_over_info.nonsteppable_watchpoint_p;
1360 }
1361
1362 /* Returns true if step-over info is valid. */
1363
1364 static bool
1365 step_over_info_valid_p (void)
1366 {
1367 return (step_over_info.aspace != NULL
1368 || stepping_past_nonsteppable_watchpoint ());
1369 }
1370
1371 \f
1372 /* Displaced stepping. */
1373
1374 /* In non-stop debugging mode, we must take special care to manage
1375 breakpoints properly; in particular, the traditional strategy for
1376 stepping a thread past a breakpoint it has hit is unsuitable.
1377 'Displaced stepping' is a tactic for stepping one thread past a
1378 breakpoint it has hit while ensuring that other threads running
1379 concurrently will hit the breakpoint as they should.
1380
1381 The traditional way to step a thread T off a breakpoint in a
1382 multi-threaded program in all-stop mode is as follows:
1383
1384 a0) Initially, all threads are stopped, and breakpoints are not
1385 inserted.
1386 a1) We single-step T, leaving breakpoints uninserted.
1387 a2) We insert breakpoints, and resume all threads.
1388
1389 In non-stop debugging, however, this strategy is unsuitable: we
1390 don't want to have to stop all threads in the system in order to
1391 continue or step T past a breakpoint. Instead, we use displaced
1392 stepping:
1393
1394 n0) Initially, T is stopped, other threads are running, and
1395 breakpoints are inserted.
1396 n1) We copy the instruction "under" the breakpoint to a separate
1397 location, outside the main code stream, making any adjustments
1398 to the instruction, register, and memory state as directed by
1399 T's architecture.
1400 n2) We single-step T over the instruction at its new location.
1401 n3) We adjust the resulting register and memory state as directed
1402 by T's architecture. This includes resetting T's PC to point
1403 back into the main instruction stream.
1404 n4) We resume T.
1405
1406 This approach depends on the following gdbarch methods:
1407
1408 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1409 indicate where to copy the instruction, and how much space must
1410 be reserved there. We use these in step n1.
1411
1412 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1413 address, and makes any necessary adjustments to the instruction,
1414 register contents, and memory. We use this in step n1.
1415
1416 - gdbarch_displaced_step_fixup adjusts registers and memory after
1417 we have successfully single-stepped the instruction, to yield the
1418 same effect the instruction would have had if we had executed it
1419 at its original address. We use this in step n3.
1420
1421 The gdbarch_displaced_step_copy_insn and
1422 gdbarch_displaced_step_fixup functions must be written so that
1423 copying an instruction with gdbarch_displaced_step_copy_insn,
1424 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1426 thread's memory and registers as stepping the instruction in place
1427 would have. Exactly which responsibilities fall to the copy and
1428 which fall to the fixup is up to the author of those functions.
1429
1430 See the comments in gdbarch.sh for details.
1431
1432 Note that displaced stepping and software single-step cannot
1433 currently be used in combination, although with some care I think
1434 they could be made to. Software single-step works by placing
1435 breakpoints on all possible subsequent instructions; if the
1436 displaced instruction is a PC-relative jump, those breakpoints
1437 could fall in very strange places --- on pages that aren't
1438 executable, or at addresses that are not proper instruction
1439 boundaries. (We do generally let other threads run while we wait
1440 to hit the software single-step breakpoint, and they might
1441 encounter such a corrupted instruction.) One way to work around
1442 this would be to have gdbarch_displaced_step_copy_insn fully
1443 simulate the effect of PC-relative instructions (and return NULL)
1444 on architectures that use software single-stepping.
1445
1446 In non-stop mode, we can have independent and simultaneous step
1447 requests, so more than one thread may need to simultaneously step
1448 over a breakpoint. The current implementation assumes there is
1449 only one scratch space per process. In this case, we have to
1450 serialize access to the scratch space. If thread A wants to step
1451 over a breakpoint, but we are currently waiting for some other
1452 thread to complete a displaced step, we leave thread A stopped and
1453 place it in the displaced_step_request_queue. Whenever a displaced
1454 step finishes, we pick the next thread in the queue and start a new
1455 displaced step operation on it. See displaced_step_prepare and
1456 displaced_step_finish for details. */
1457
1458 /* Return true if THREAD is doing a displaced step. */
1459
1460 static bool
1461 displaced_step_in_progress_thread (thread_info *thread)
1462 {
1463 gdb_assert (thread != NULL);
1464
1465 return thread->displaced_step_state.in_progress ();
1466 }
1467
1468 /* Return true if INF has a thread doing a displaced step. */
1469
1470 static bool
1471 displaced_step_in_progress (inferior *inf)
1472 {
1473 return inf->displaced_step_state.in_progress_count > 0;
1474 }
1475
1476 /* Return true if any thread is doing a displaced step. */
1477
1478 static bool
1479 displaced_step_in_progress_any_thread ()
1480 {
1481 for (inferior *inf : all_non_exited_inferiors ())
1482 {
1483 if (displaced_step_in_progress (inf))
1484 return true;
1485 }
1486
1487 return false;
1488 }
1489
/* Discard INF's per-inferior displaced stepping state -- NOTE(review):
   from the name, presumably registered as an inferior_exit observer;
   confirm at the registration site.  */
static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
}
1495
static void
infrun_inferior_execd (inferior *inf)
{
  /* If some threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing thread
     was doing a displaced step, we don't want to do any fixup nor restore
     displaced stepping buffer bytes.  */
  inf->displaced_step_state.reset ();

  for (thread_info *thread : inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();
}
1513
/* Backing variable for the "set displaced-stepping" command.

   If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   whether the target works in a non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1522
1523 static void
1524 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1525 struct cmd_list_element *c,
1526 const char *value)
1527 {
1528 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1529 fprintf_filtered (file,
1530 _("Debugger's willingness to use displaced stepping "
1531 "to step over breakpoints is %s (currently %s).\n"),
1532 value, target_is_non_stop_p () ? "on" : "off");
1533 else
1534 fprintf_filtered (file,
1535 _("Debugger's willingness to use displaced stepping "
1536 "to step over breakpoints is %s.\n"), value);
1537 }
1538
1539 /* Return true if the gdbarch implements the required methods to use
1540 displaced stepping. */
1541
1542 static bool
1543 gdbarch_supports_displaced_stepping (gdbarch *arch)
1544 {
1545 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1546 that if `prepare` is provided, so is `finish`. */
1547 return gdbarch_displaced_step_prepare_p (arch);
1548 }
1549
1550 /* Return non-zero if displaced stepping can/should be used to step
1551 over breakpoints of thread TP. */
1552
1553 static bool
1554 use_displaced_stepping (thread_info *tp)
1555 {
1556 /* If the user disabled it explicitly, don't use displaced stepping. */
1557 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1558 return false;
1559
1560 /* If "auto", only use displaced stepping if the target operates in a non-stop
1561 way. */
1562 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1563 && !target_is_non_stop_p ())
1564 return false;
1565
1566 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1567
1568 /* If the architecture doesn't implement displaced stepping, don't use
1569 it. */
1570 if (!gdbarch_supports_displaced_stepping (gdbarch))
1571 return false;
1572
1573 /* If recording, don't use displaced stepping. */
1574 if (find_record_target () != nullptr)
1575 return false;
1576
1577 /* If displaced stepping failed before for this inferior, don't bother trying
1578 again. */
1579 if (tp->inf->displaced_step_state.failed_before)
1580 return false;
1581
1582 return true;
1583 }
1584
/* Simple function wrapper around displaced_step_thread_state::reset,
   so it can be used as the function of the FORWARD_SCOPE_EXIT
   below.  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1597
1598 /* See infrun.h. */
1599
1600 std::string
1601 displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
1602 {
1603 std::string ret;
1604
1605 for (size_t i = 0; i < len; i++)
1606 {
1607 if (i == 0)
1608 ret += string_printf ("%02x", buf[i]);
1609 else
1610 ret += string_printf (" %02x", buf[i]);
1611 }
1612
1613 return ret;
1614 }
1615
/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
   thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
   if displaced stepping this thread got queued; or
   DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
   stepped.  */

static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
	 it is likely that it will return unavailable, so don't bother asking.  */

      displaced_debug_printf ("deferring step of %s",
			      target_pid_to_str (tp->ptid).c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
			  target_pid_to_str (tp->ptid).c_str ());

  /* Make TP the selected thread while calling into the gdbarch;
     restored automatically when this scope exits.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  displaced_step_prepare_status status
    = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
			      target_pid_to_str (tp->ptid).c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
	 request by placing it in the queue.  */

      displaced_debug_printf ("not enough resources available, "
			      "deferring step of %s",
			      target_pid_to_str (tp->ptid).c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
			  "original_pc=%s, displaced_pc=%s",
			  target_pid_to_str (tp->ptid).c_str (),
			  paddress (gdbarch, original_pc),
			  paddress (gdbarch, displaced_pc));

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
1721
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping if we get a memory error.  */

static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Only memory/not-supported errors disable displaced stepping;
	 anything else propagates to the caller.  */
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
			   ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts.  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
1758
/* If we displaced stepped an instruction successfully, adjust registers and
   memory to yield the same effect the instruction would have had if we had
   executed it at its original address, and return
   DISPLACED_STEP_FINISH_STATUS_OK.  If the instruction didn't complete,
   relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.

   If the thread wasn't displaced stepping, return
   DISPLACED_STEP_FINISH_STATUS_OK as well.  */

static displaced_step_finish_status
displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
{
  displaced_step_thread_state *displaced = &event_thread->displaced_step_state;

  /* Was this thread performing a displaced step?  */
  if (!displaced->in_progress ())
    return DISPLACED_STEP_FINISH_STATUS_OK;

  gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
  event_thread->inf->displaced_step_state.in_progress_count--;

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread, and displaced_step_restore performs ptid-dependent
     memory accesses using current_inferior().  */
  switch_to_thread (event_thread);

  /* Reset the thread's displaced-stepping state on scope exit, whether
     gdbarch_displaced_step_finish returns normally or throws.  */
  displaced_step_reset_cleanup cleanup (displaced);

  /* Do the fixup, and release the resources acquired to do the displaced
     step. */
  return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
					event_thread, signal);
}
1793
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* NOTE(review): presumably the target and ptid the event was
     reported for -- confirm at the fill sites (e.g. reset_ecs).  */
  process_stratum_target *target;
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status of the event.  */
  struct target_waitstatus ws;
  /* Non-zero once the stop_func_* fields below have been filled in.  */
  int stop_func_filled_in;
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
1817
1818 /* Clear ECS and set it to point at TP. */
1819
1820 static void
1821 reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1822 {
1823 memset (ecs, 0, sizeof (*ecs));
1824 ecs->event_thread = tp;
1825 ecs->ptid = tp->ptid;
1826 }
1827
1828 static void keep_going_pass_signal (struct execution_control_state *ecs);
1829 static void prepare_to_wait (struct execution_control_state *ecs);
1830 static bool keep_going_stepped_thread (struct thread_info *tp);
1831 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
1832
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static bool
start_step_over (void)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  thread_info *next;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return false;

  /* Steal the global thread step over chain.  As we try to initiate displaced
     steps, threads will be enqueued in the global chain if no buffers are
     available.  If we iterated on the global chain directly, we might iterate
     indefinitely.  */
  thread_info *threads_to_step = global_thread_step_over_chain_head;
  global_thread_step_over_chain_head = NULL;

  infrun_debug_printf ("stealing global queue of threads to step, length = %d",
		       thread_step_over_chain_length (threads_to_step));

  bool started = false;

  /* On scope exit (whatever the reason, return or exception), if there are
     threads left in the THREADS_TO_STEP chain, put back these threads in the
     global list.  */
  SCOPE_EXIT
    {
      if (threads_to_step == nullptr)
	infrun_debug_printf ("step-over queue now empty");
      else
	{
	  infrun_debug_printf ("putting back %d threads to step in global queue",
			       thread_step_over_chain_length (threads_to_step));

	  global_thread_step_over_chain_enqueue_chain (threads_to_step);
	}
    };

  for (thread_info *tp = threads_to_step; tp != NULL; tp = next)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      /* Grab the successor now; TP may be unlinked from the chain
	 below.  */
      next = thread_step_over_chain_next (threads_to_step, tp);

      if (tp->inf->displaced_step_state.unavailable)
	{
	  /* The arch told us to not even try preparing another displaced step
	     for this inferior.  Just leave the thread in THREADS_TO_STEP, it
	     will get moved to the global chain on scope exit.  */
	  continue;
	}

      /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
	 while we try to prepare the displaced step, we don't add it back to
	 the global step over chain.  This is to avoid a thread staying in the
	 step over chain indefinitely if something goes wrong when resuming it.
	 If the error is intermittent and it still needs a step over, it will
	 get enqueued again when we try to resume it normally.  */
      thread_step_over_chain_remove (&threads_to_step, tp);

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_thread ())
	{
	  global_thread_step_over_chain_enqueue (tp);
	  continue;
	}

      /* A thread queued for a step-over must be fully stopped from
	 GDB's point of view; anything else indicates corrupted run
	 control state.  */
      if (tp->control.trap_expected
	  || tp->resumed
	  || tp->executing)
	{
	  internal_error (__FILE__, __LINE__,
			  "[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  target_pid_to_str (tp->ptid).c_str (),
			  tp->control.trap_expected,
			  tp->resumed,
			  tp->executing);
	}

      infrun_debug_printf ("resuming [%s] for step-over",
			   target_pid_to_str (tp->ptid).c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      reset_ecs (ecs, tp);
      keep_going_pass_signal (ecs);

      if (!ecs->wait_some_more)
	error (_("Command aborted."));

      /* If the thread's step over could not be initiated because no buffers
	 were available, it was re-added to the global step over chain.  */
      if (tp->resumed)
	{
	  infrun_debug_printf ("[%s] was resumed.",
			       target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (!thread_is_in_step_over_chain (tp));
	}
      else
	{
	  infrun_debug_printf ("[%s] was NOT resumed.",
			       target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (thread_is_in_step_over_chain (tp));
	}

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  started = true;
	  break;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  started = true;
	  break;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process. */
    }

  return started;
}
1994
1995 /* Update global variables holding ptids to hold NEW_PTID if they were
1996 holding OLD_PTID. */
1997 static void
1998 infrun_thread_ptid_changed (process_stratum_target *target,
1999 ptid_t old_ptid, ptid_t new_ptid)
2000 {
2001 if (inferior_ptid == old_ptid
2002 && current_inferior ()->process_target () == target)
2003 inferior_ptid = new_ptid;
2004 }
2005
2006 \f
2007
/* The possible values of the "scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";

/* Enumeration of the values above, for the setting's command list
   (NULL-terminated).  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  NULL
};

/* The currently selected scheduler-locking mode; defaults to
   "replay".  */
static const char *scheduler_mode = schedlock_replay;
/* Implement the "show scheduler-locking" command: print VALUE, the
   current mode, to FILE.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for locking scheduler "
		      "during execution is \"%s\".\n"),
		    value);
}
2029
/* Hook run when the user changes the "scheduler-locking" setting.
   Rejects the change if the current target cannot lock the
   scheduler.  */
static void
set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
{
  if (!target_can_lock_scheduler ())
    {
      /* NOTE(review): presumably the "set" machinery has already
	 stored the user's new value into SCHEDULER_MODE before
	 calling this hook, so force it back to a supported value
	 before erroring out — confirm against the setshow code.  */
      scheduler_mode = schedlock_off;
      error (_("Target '%s' cannot support this command."),
	     target_shortname ());
    }
}
2040
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Consulted by user_visible_resume_ptid and
   user_visible_resume_target below.  */
bool sched_multi = false;
2045
2046 /* Try to setup for software single stepping over the specified location.
2047 Return true if target_resume() should use hardware single step.
2048
2049 GDBARCH the current gdbarch.
2050 PC the location to step over. */
2051
2052 static bool
2053 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2054 {
2055 bool hw_step = true;
2056
2057 if (execution_direction == EXEC_FORWARD
2058 && gdbarch_software_single_step_p (gdbarch))
2059 hw_step = !insert_single_step_breakpoints (gdbarch);
2060
2061 return hw_step;
2062 }
2063
2064 /* See infrun.h. */
2065
2066 ptid_t
2067 user_visible_resume_ptid (int step)
2068 {
2069 ptid_t resume_ptid;
2070
2071 if (non_stop)
2072 {
2073 /* With non-stop mode on, threads are always handled
2074 individually. */
2075 resume_ptid = inferior_ptid;
2076 }
2077 else if ((scheduler_mode == schedlock_on)
2078 || (scheduler_mode == schedlock_step && step))
2079 {
2080 /* User-settable 'scheduler' mode requires solo thread
2081 resume. */
2082 resume_ptid = inferior_ptid;
2083 }
2084 else if ((scheduler_mode == schedlock_replay)
2085 && target_record_will_replay (minus_one_ptid, execution_direction))
2086 {
2087 /* User-settable 'scheduler' mode requires solo thread resume in replay
2088 mode. */
2089 resume_ptid = inferior_ptid;
2090 }
2091 else if (!sched_multi && target_supports_multi_process ())
2092 {
2093 /* Resume all threads of the current process (and none of other
2094 processes). */
2095 resume_ptid = ptid_t (inferior_ptid.pid ());
2096 }
2097 else
2098 {
2099 /* Resume all threads of all processes. */
2100 resume_ptid = RESUME_ALL;
2101 }
2102
2103 return resume_ptid;
2104 }
2105
2106 /* See infrun.h. */
2107
2108 process_stratum_target *
2109 user_visible_resume_target (ptid_t resume_ptid)
2110 {
2111 return (resume_ptid == minus_one_ptid && sched_multi
2112 ? NULL
2113 : current_inferior ()->process_target ());
2114 }
2115
2116 /* Return a ptid representing the set of threads that we will resume,
2117 in the perspective of the target, assuming run control handling
2118 does not require leaving some threads stopped (e.g., stepping past
2119 breakpoint). USER_STEP indicates whether we're about to start the
2120 target for a stepping command. */
2121
2122 static ptid_t
2123 internal_resume_ptid (int user_step)
2124 {
2125 /* In non-stop, we always control threads individually. Note that
2126 the target may always work in non-stop mode even with "set
2127 non-stop off", in which case user_visible_resume_ptid could
2128 return a wildcard ptid. */
2129 if (target_is_non_stop_p ())
2130 return inferior_ptid;
2131 else
2132 return user_visible_resume_ptid (user_step);
2133 }
2134
/* Wrapper for target_resume, that handles infrun-specific
   bookkeeping: terminal modes, silent-signal advice, and kicking the
   event loop.  RESUME_PTID/STEP/SIG are passed through to
   target_resume.  */

static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make displaced_step_finish
     distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
       scratch pad as frame older than the signal handler, instead of
       the real mainline code.

     - when the thread is later resumed, the signal handler would
       return to the scratch pad area, which would no longer be
       valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  target_resume (resume_ptid, step, sig);

  /* Make sure the event loop will pick up the target's replies.  */
  if (target_can_async_p ())
    target_async (1);
}
2182
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();
  struct thread_info *tp = inferior_thread ();
  const address_space *aspace = regcache->aspace ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  bool step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  /* If the thread already has an event to report, don't bother the
     target; mark the thread resumed and poke the event loop so the
     pending status is consumed.  */
  if (tp->suspend.waitstatus_pending_p)
    {
      infrun_debug_printf
	("thread %s has pending wait "
	 "status %s (currently_stepping=%d).",
	 target_pid_to_str (tp->ptid).c_str (),
	 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
	 currently_stepping (tp));

      tp->inf->process_target ()->threads_executing = true;
      tp->resumed = true;

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   target_pid_to_str (tp->ptid).c_str ());
	}

      tp->suspend.stop_signal = GDB_SIGNAL_0;

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have an event to process. */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  /* Cleared here; possibly set again further down if we end up
     stepping an inserted breakpoint instruction.  */
  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      infrun_debug_printf ("resume : clear step");
      step = false;
    }

  CORE_ADDR pc = regcache_read_pc (regcache);

  infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
		       "current thread [%s] at %s",
		       step, gdb_signal_to_symbol_string (sig),
		       tp->control.trap_expected,
		       target_pid_to_str (inferior_ptid).c_str (),
		       paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  infrun_debug_printf ("resume: skipping permanent breakpoint, "
			       "deliver signal first");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  infrun_debug_printf ("skipping permanent breakpoint");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
	      tp->resumed = true;
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      displaced_step_prepare_status prepare_status
	= displaced_step_prepare (tp);

      if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
	{
	  /* No displaced-stepping buffer was free; the thread was
	     queued for later, so don't resume it now.  */
	  infrun_debug_printf ("Got placed in step-over queue");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ();

	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), 0, tp->global_num);

	  step = maybe_software_singlestep (gdbarch, pc);

	  insert_breakpoints ();
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
	{
	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  step = gdbarch_displaced_step_hw_singlestep (gdbarch);
	}
      else
	gdb_assert_not_reached (_("Invalid displaced_step_prepare_status "
				  "value."));
    }

  /* Do we need to do it the hard way, w/temp breakpoints? */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would take us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      infrun_debug_printf ("resume: [%s] stepped breakpoint",
			   target_pid_to_str (tp->ptid).c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = false;
    }

  /* When "debug displaced" is on, dump the bytes about to execute in
     the scratch pad.  */
  if (debug_displaced
      && tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp);
      struct gdbarch *resume_gdbarch = resume_regcache->arch ();
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      read_memory (actual_pc, buf, sizeof (buf));
      displaced_debug_printf ("run %s: %s",
			      paddress (resume_gdbarch, actual_pc),
			      displaced_step_dump_bytes
				(buf, sizeof (buf)).c_str ());
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);

  /* Record that the thread is now running.  */
  tp->resumed = true;
}
2551
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
   rolls back state on error.  */

static void
resume (gdb_signal sig)
{
  try
    {
      resume_1 (sig);
    }
  catch (const gdb_exception &ex)
    {
      /* If resuming is being aborted for any reason, delete any
	 single-step breakpoint resume_1 may have created, to avoid
	 confusing the following resumption, and to avoid leaving
	 single-step breakpoints perturbing other threads, in case
	 we're running in non-stop mode.  */
      if (inferior_ptid != null_ptid)
	delete_single_step_breakpoints (inferior_thread ());
      /* Re-throw so the caller sees the original error.  */
      throw;
    }
}
2575
2576 \f
2577 /* Proceeding. */
2578
2579 /* See infrun.h. */
2580
/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  Incremented by new_stop_id, exposed via
   get_stop_id.  */
static ULONGEST current_stop_id;
2589
2590 /* See infrun.h. */
2591
2592 ULONGEST
2593 get_stop_id (void)
2594 {
2595 return current_stop_id;
2596 }
2597
2598 /* Called when we report a user visible stop. */
2599
2600 static void
2601 new_stop_id (void)
2602 {
2603 current_stop_id++;
2604 }
2605
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  infrun_debug_printf ("%s", target_pid_to_str (tp->ptid).c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->suspend.waitstatus_pending_p)
    {
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  infrun_debug_printf ("pending event of %s was a finished step. "
			       "Discarding.",
			       target_pid_to_str (tp->ptid).c_str ());

	  tp->suspend.waitstatus_pending_p = 0;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	{
	  /* Any other pending status is kept; just log it.  */
	  infrun_debug_printf
	    ("thread %s has pending wait status %s (currently_stepping=%d).",
	     target_pid_to_str (tp->ptid).c_str (),
	     target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
	     currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->suspend.stop_signal))
    tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Discard the state machine driving the previous command, if any.  */
  delete tp->thread_fsm;
  tp->thread_fsm = NULL;

  /* Reset all run-control knobs back to "plain continue".  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2664
/* Clear the proceed status of every thread that is about to be
   resumed, and reset the current inferior's stop_soon.  STEP says
   whether the upcoming resume is for a stepping command (it affects
   which threads user_visible_resume_ptid considers resumed).  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  /* Let interested parties know we're about to run.  */
  gdb::observers::about_to_proceed.notify ();
}
2709
2710 /* Returns true if TP is still stopped at a breakpoint that needs
2711 stepping-over in order to make progress. If the breakpoint is gone
2712 meanwhile, we can skip the whole step-over dance. */
2713
2714 static bool
2715 thread_still_needs_step_over_bp (struct thread_info *tp)
2716 {
2717 if (tp->stepping_over_breakpoint)
2718 {
2719 struct regcache *regcache = get_thread_regcache (tp);
2720
2721 if (breakpoint_here_p (regcache->aspace (),
2722 regcache_read_pc (regcache))
2723 == ordinary_breakpoint_here)
2724 return true;
2725
2726 tp->stepping_over_breakpoint = 0;
2727 }
2728
2729 return false;
2730 }
2731
2732 /* Check whether thread TP still needs to start a step-over in order
2733 to make progress when resumed. Returns an bitwise or of enum
2734 step_over_what bits, indicating what needs to be stepped over. */
2735
2736 static step_over_what
2737 thread_still_needs_step_over (struct thread_info *tp)
2738 {
2739 step_over_what what = 0;
2740
2741 if (thread_still_needs_step_over_bp (tp))
2742 what |= STEP_OVER_BREAKPOINT;
2743
2744 if (tp->stepping_over_watchpoint
2745 && !target_have_steppable_watchpoint ())
2746 what |= STEP_OVER_WATCHPOINT;
2747
2748 return what;
2749 }
2750
2751 /* Returns true if scheduler locking applies. STEP indicates whether
2752 we're about to do a step/next-like command to a thread. */
2753
2754 static bool
2755 schedlock_applies (struct thread_info *tp)
2756 {
2757 return (scheduler_mode == schedlock_on
2758 || (scheduler_mode == schedlock_step
2759 && tp->control.stepping_command)
2760 || (scheduler_mode == schedlock_replay
2761 && target_record_will_replay (minus_one_ptid,
2762 execution_direction)));
2763 }
2764
/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
   stacks that have threads executing and don't have threads with
   pending events.  */

static void
maybe_set_commit_resumed_all_targets ()
{
  /* switch_to_inferior_no_thread below changes the current inferior;
     restore the user's selection on exit.  */
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (proc_target->commit_resumed_state)
	{
	  /* We already set this in a previous iteration, via another
	     inferior sharing the process_stratum target.  */
	  continue;
	}

      /* If the target has no resumed threads, it would be useless to
	 ask it to commit the resumed threads.  */
      if (!proc_target->threads_executing)
	{
	  infrun_debug_printf ("not requesting commit-resumed for target "
			       "%s, no resumed threads",
			       proc_target->shortname ());
	  continue;
	}

      /* As an optimization, if a thread from this target has some
	 status to report, handle it before requiring the target to
	 commit its resumed threads: handling the status might lead to
	 resuming more threads.  */
      bool has_thread_with_pending_status = false;
      for (thread_info *thread : all_non_exited_threads (proc_target))
	if (thread->resumed && thread->suspend.waitstatus_pending_p)
	  {
	    has_thread_with_pending_status = true;
	    break;
	  }

      if (has_thread_with_pending_status)
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, a"
			       " thread has a pending waitstatus",
			       proc_target->shortname ());
	  continue;
	}

      switch_to_inferior_no_thread (inf);

      if (target_has_pending_events ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, "
			       "target has pending events",
			       proc_target->shortname ());
	  continue;
	}

      infrun_debug_printf ("enabling commit-resumed for target %s",
			   proc_target->shortname ());

      proc_target->commit_resumed_state = true;
    }
}
2831
2832 /* See infrun.h. */
2833
2834 void
2835 maybe_call_commit_resumed_all_targets ()
2836 {
2837 scoped_restore_current_thread restore_thread;
2838
2839 for (inferior *inf : all_non_exited_inferiors ())
2840 {
2841 process_stratum_target *proc_target = inf->process_target ();
2842
2843 if (!proc_target->commit_resumed_state)
2844 continue;
2845
2846 switch_to_inferior_no_thread (inf);
2847
2848 infrun_debug_printf ("calling commit_resumed for target %s",
2849 proc_target->shortname());
2850
2851 target_commit_resumed ();
2852 }
2853 }
2854
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable
   commit-resumed.  True when no scoped_disable_commit_resumed
   instance is currently live; each instance saves the previous value
   in its constructor and restores it in reset.  */
static bool enable_commit_resumed = true;
2859
/* See infrun.h.

   Disables commit-resumed on all process targets; the saved
   M_PREV_ENABLE_COMMIT_RESUMED tells reset whether this instance is
   the outermost one.  */

scoped_disable_commit_resumed::scoped_disable_commit_resumed
  (const char *reason)
  : m_reason (reason),
    m_prev_enable_commit_resumed (enable_commit_resumed)
{
  infrun_debug_printf ("reason=%s", m_reason);

  enable_commit_resumed = false;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (m_prev_enable_commit_resumed)
	{
	  /* This is the outermost instance: force all
	     COMMIT_RESUMED_STATE to false.  */
	  proc_target->commit_resumed_state = false;
	}
      else
	{
	  /* This is not the outermost instance, we expect
	     COMMIT_RESUMED_STATE to have been cleared by the
	     outermost instance.  */
	  gdb_assert (!proc_target->commit_resumed_state);
	}
    }
}
2890
/* See infrun.h.

   Idempotent: only the first call has any effect, guarded by
   M_RESET.  Restores the previous value of enable_commit_resumed,
   and, if this was the outermost instance, re-enables
   COMMIT_RESUMED_STATE where possible.  */

void
scoped_disable_commit_resumed::reset ()
{
  if (m_reset)
    return;
  m_reset = true;

  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (!enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (m_prev_enable_commit_resumed)
    {
      /* This is the outermost instance, re-enable
	 COMMIT_RESUMED_STATE on the targets where it's possible.  */
      maybe_set_commit_resumed_all_targets ();
    }
  else
    {
      /* This is not the outermost instance, we expect
	 COMMIT_RESUMED_STATE to still be false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  gdb_assert (!proc_target->commit_resumed_state);
	}
    }
}
2923
/* See infrun.h.

   Note: the destructor only resets the state; it does not ask the
   targets to commit.  Use reset_and_commit for that.  */

scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
  reset ();
}
2930
/* See infrun.h.

   Like reset, but additionally asks every target whose
   COMMIT_RESUMED_STATE is set to actually commit its resumed
   threads.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  reset ();
  maybe_call_commit_resumed_all_targets ();
}
2939
/* See infrun.h.

   Temporarily force commit-resumed on, committing immediately; the
   destructor restores the previous state.  */

scoped_enable_commit_resumed::scoped_enable_commit_resumed
  (const char *reason)
  : m_reason (reason),
    m_prev_enable_commit_resumed (enable_commit_resumed)
{
  infrun_debug_printf ("reason=%s", m_reason);

  if (!enable_commit_resumed)
    {
      enable_commit_resumed = true;

      /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
	 possible.  */
      maybe_set_commit_resumed_all_targets ();

      maybe_call_commit_resumed_all_targets ();
    }
}
2960
/* See infrun.h.

   Restore the enable_commit_resumed state saved by the constructor;
   if it was disabled before, clear COMMIT_RESUMED_STATE again on all
   targets.  */

scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
{
  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (!enable_commit_resumed)
    {
      /* Force all COMMIT_RESUMED_STATE back to false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  proc_target->commit_resumed_state = false;
	}
    }
}
2981
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  /* Only a wildcard resume (RESUME_TARGET == nullptr) in all-stop can
     span multiple connections; any other resume is trivially OK.  */
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors ())
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution ())
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* More than one connection is involved and at least one
		 of them is an all-stop target: report the error
		 against the offending (all-stop) connection.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
3036
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  bool started;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();

  pc = regcache_read_pc_protected (regcache);

  thread_info *cur_thr = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  /* Compute which threads the user-visible resume covers, and on
     which target(s).  */
  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == cur_thr->suspend.stop_pc
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->suspend.stop_signal = siggnal;

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
		       gdb_signal_to_symbol_string (siggnal));

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  infrun_debug_printf ("need to step-over [%s] first",
			       target_pid_to_str (tp->ptid).c_str ());

	  global_thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    global_thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    /* Batch the resumptions below: commit them to the target(s) only
       once all step-overs and implicit resumes have been queued.  */
    scoped_disable_commit_resumed disable_commit_resumed ("proceeding");

    started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	INFRUN_SCOPED_DEBUG_START_END
	  ("resuming threads, all-stop-on-top-of-non-stop");

	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);

	    if (!tp->inf->has_execution ())
	      {
		infrun_debug_printf ("[%s] target has no execution",
				     target_pid_to_str (tp->ptid).c_str ());
		continue;
	      }

	    if (tp->resumed)
	      {
		infrun_debug_printf ("[%s] resumed",
				     target_pid_to_str (tp->ptid).c_str ());
		gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
		continue;
	      }

	    if (thread_is_in_step_over_chain (tp))
	      {
		infrun_debug_printf ("[%s] needs step-over",
				     target_pid_to_str (tp->ptid).c_str ());
		continue;
	      }

	    infrun_debug_printf ("resuming %s",
				 target_pid_to_str (tp->ptid).c_str ());

	    reset_ecs (ecs, tp);
	    switch_to_thread (tp);
	    keep_going_pass_signal (ecs);
	    if (!ecs->wait_some_more)
	      error (_("Command aborted."));
	  }
      }
    else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
      {
	/* The thread wasn't started, and isn't queued, run it now.  */
	reset_ecs (ecs, cur_thr);
	switch_to_thread (cur_thr);
	keep_going_pass_signal (ecs);
	if (!ecs->wait_some_more)
	  error (_("Command aborted."));
      }

    disable_commit_resumed.reset_and_commit ();
  }

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3287 \f
3288
/* Start remote-debugging of a machine over a serial link.  Blocks
   until the remote target reports a stop, then does post-creation
   bookkeeping and presents the stop to the user.  */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();
  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (from_tty);

  normal_stop ();
}
3320
/* Initialize static vars when a new inferior begins.  Resets
   breakpoint state, proceed status, and the cached last-wait ptid
   so that stale state from a previous inferior isn't consulted.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  breakpoint_init_inferior (inf_starting);

  clear_proceed_status (0);

  nullify_last_target_wait_ptid ();

  previous_inferior_ptid = inferior_ptid;
}
3336
3337 \f
3338
3339 static void handle_inferior_event (struct execution_control_state *ecs);
3340
3341 static void handle_step_into_function (struct gdbarch *gdbarch,
3342 struct execution_control_state *ecs);
3343 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3344 struct execution_control_state *ecs);
3345 static void handle_signal_stop (struct execution_control_state *ecs);
3346 static void check_exception_resume (struct execution_control_state *,
3347 struct frame_info *);
3348
3349 static void end_stepping_range (struct execution_control_state *ecs);
3350 static void stop_waiting (struct execution_control_state *ecs);
3351 static void keep_going (struct execution_control_state *ecs);
3352 static void process_event_stop_test (struct execution_control_state *ecs);
3353 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3354
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Skip threads the frontend already sees as stopped, and
	 threads still physically running (the target will report
	 their stop itself).  */
      if (tp->state != THREAD_RUNNING)
	continue;
      if (tp->executing)
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->suspend.waitstatus_pending_p)
	{
	  tp->suspend.waitstatus_pending_p = 1;
	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
	  tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->resumed = true;
    }
}
3409
/* Handle a thread exiting: if TP is the thread we last reported a
   target_wait event for, forget that cached ptid so it isn't used
   after the thread is gone.  NOTE(review): presumably attached as a
   thread-exit observer — registration is elsewhere in this file.
   SILENT is unused here.  */

static void
infrun_thread_thread_exit (struct thread_info *tp, int silent)
{
  if (target_last_proc_target == tp->inf->process_target ()
      && target_last_wait_ptid == tp->ptid)
    nullify_last_target_wait_ptid ();
}
3417
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are all the infrun-internal breakpoints
   associated with a single thread's in-flight execution control.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3428
3429 /* If the target still has execution, call FUNC for each thread that
3430 just stopped. In all-stop, that's all the non-exited threads; in
3431 non-stop, that's the current thread, only. */
3432
3433 typedef void (*for_each_just_stopped_thread_callback_func)
3434 (struct thread_info *tp);
3435
3436 static void
3437 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3438 {
3439 if (!target_has_execution () || inferior_ptid == null_ptid)
3440 return;
3441
3442 if (target_is_non_stop_p ())
3443 {
3444 /* If in non-stop mode, only the current thread stopped. */
3445 func (inferior_thread ());
3446 }
3447 else
3448 {
3449 /* In all-stop mode, all threads have stopped. */
3450 for (thread_info *tp : all_non_exited_threads ())
3451 func (tp);
3452 }
3453 }
3454
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3463
/* Delete the single-step breakpoints of the threads that just
   stopped.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3472
/* See infrun.h.

   Emits the waited-on and resulting ptids plus the decoded wait
   status to the infrun debug log, in both numeric and symbolic
   form.  */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus *ws)
{
  infrun_debug_printf ("target_wait (%d.%ld.%ld [%s], status) =",
		       waiton_ptid.pid (),
		       waiton_ptid.lwp (),
		       waiton_ptid.tid (),
		       target_pid_to_str (waiton_ptid).c_str ());
  infrun_debug_printf ("  %d.%ld.%ld [%s],",
		       result_ptid.pid (),
		       result_ptid.lwp (),
		       result_ptid.tid (),
		       target_pid_to_str (result_ptid).c_str ());
  infrun_debug_printf ("  %s", target_waitstatus_to_string (ws).c_str ());
}
3491
3492 /* Select a thread at random, out of those which are resumed and have
3493 had events. */
3494
3495 static struct thread_info *
3496 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3497 {
3498 int num_events = 0;
3499
3500 auto has_event = [&] (thread_info *tp)
3501 {
3502 return (tp->ptid.matches (waiton_ptid)
3503 && tp->resumed
3504 && tp->suspend.waitstatus_pending_p);
3505 };
3506
3507 /* First see how many events we have. Count only resumed threads
3508 that have an event pending. */
3509 for (thread_info *tp : inf->non_exited_threads ())
3510 if (has_event (tp))
3511 num_events++;
3512
3513 if (num_events == 0)
3514 return NULL;
3515
3516 /* Now randomly pick a thread out of those that have had events. */
3517 int random_selector = (int) ((num_events * (double) rand ())
3518 / (RAND_MAX + 1.0));
3519
3520 if (num_events > 1)
3521 infrun_debug_printf ("Found %d events, selecting #%d",
3522 num_events, random_selector);
3523
3524 /* Select the Nth thread that has had an event. */
3525 for (thread_info *tp : inf->non_exited_threads ())
3526 if (has_event (tp))
3527 if (random_selector-- == 0)
3528 return tp;
3529
3530 gdb_assert_not_reached ("event thread not found");
3531 }
3532
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  Returns the ptid of the thread the reported event belongs to,
   filling in *STATUS.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, target_wait_flags options)
{
  ptid_t event_ptid;
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake.  */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      infrun_debug_printf ("Waiting for specific thread %s.",
			   target_pid_to_str (ptid).c_str ());

      /* We have a specific thread to check.  */
      tp = find_thread_ptid (inf, ptid);
      gdb_assert (tp != NULL);
      if (!tp->suspend.waitstatus_pending_p)
	tp = NULL;
    }

  /* A pending breakpoint stop may have become stale: either the
     thread's PC moved since the event was recorded, or the breakpoint
     was deleted.  In that case the pending event is discarded.  */
  if (tp != NULL
      && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->suspend.stop_pc)
	{
	  infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
			       target_pid_to_str (tp->ptid).c_str (),
			       paddress (gdbarch, tp->suspend.stop_pc),
			       paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  infrun_debug_printf ("previous breakpoint of %s, at %s gone",
			       target_pid_to_str (tp->ptid).c_str (),
			       paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  infrun_debug_printf ("pending event of %s cancelled.",
			       target_pid_to_str (tp->ptid).c_str ());

	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  if (tp != NULL)
    {
      infrun_debug_printf ("Using pending wait status %s for %s.",
			   target_waitstatus_to_string
			     (&tp->suspend.waitstatus).c_str (),
			   target_pid_to_str (tp->ptid).c_str ());

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself).  */
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
      *status = tp->suspend.waitstatus;
      tp->suspend.waitstatus_pending_p = 0;

      /* Wake up the event loop again, until all pending events are
	 processed.  */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  /* We can't ask a non-async target to do a non-blocking wait, so this will be
     a blocking wait.  */
  if (!target_can_async_p ())
    options &= ~TARGET_WNOHANG;

  if (deprecated_target_wait_hook)
    event_ptid = deprecated_target_wait_hook (ptid, status, options);
  else
    event_ptid = target_wait (ptid, status, options);

  return event_ptid;
}
3662
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.
   Returns true and fills ECS when some inferior reported an event;
   returns false (with ECS->ws.kind set to TARGET_WAITKIND_IGNORE)
   when no inferior matched or none had an event.  */

static bool
do_target_wait (ptid_t wait_ptid, execution_control_state *ecs,
		target_wait_flags options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once.  */

  auto inferior_matches = [&wait_ptid] (inferior *inf)
    {
      return (inf->process_target () != NULL
	      && ptid_t (inf->pid).matches (wait_ptid));
    };

  /* First see how many matching inferiors we have.  */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.kind = TARGET_WAITKIND_IGNORE;
      return false;
    }

  /* Now randomly pick an inferior out of those that matched.  */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (num_inferiors > 1)
    infrun_debug_printf ("Found %d inferiors, starting at #%d",
			 num_inferiors, random_selector);

  /* Select the Nth inferior that matched.  */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one.  */

  auto do_wait = [&] (inferior *inf)
    {
      ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
      ecs->target = inf->process_target ();
      return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
    };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events.  */
  scoped_restore_current_thread restore_thread;

  /* Poll from SELECTED to the end of the list, then wrap around and
     poll from the start up to (but not including) SELECTED, so every
     matching inferior is polled exactly once.  */
  int inf_num = selected->num;
  for (inferior *inf = selected; inf != NULL; inf = inf->next)
    if (inferior_matches (inf))
      if (do_wait (inf))
	return true;

  for (inferior *inf = inferior_list;
       inf != NULL && inf->num < inf_num;
       inf = inf->next)
    if (inferior_matches (inf))
      if (do_wait (inf))
	return true;

  ecs->ws.kind = TARGET_WAITKIND_IGNORE;
  return false;
}
3747
/* An event reported by wait_one.  Bundles the event's waitstatus
   together with the target and thread it came from.  */

struct wait_one_event
{
  /* The target the event came out of.  */
  process_stratum_target *target;

  /* The PTID the event was for.  */
  ptid_t ptid;

  /* The waitstatus describing the event.  */
  target_waitstatus ws;
};
3761
3762 static bool handle_one (const wait_one_event &event);
3763 static void restart_threads (struct thread_info *event_thread);
3764
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);
  scoped_restore_current_thread restore_thread;

  /* Mark INF as detaching for the duration of this function; the
     flag is restored automatically on scope exit.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Remove all threads of INF from the global step-over chain.  We
     want to stop any ongoing step-over, not start any new one.  */
  thread_info *next;
  for (thread_info *tp = global_thread_step_over_chain_head;
       tp != nullptr;
       tp = next)
    {
      /* Fetch the successor before a possible removal unlinks TP.  */
      next = global_thread_step_over_chain_next (tp);
      if (tp->inf == inf)
	global_thread_step_over_chain_remove (tp);
    }

  /* If we were already in the middle of an inline step-over, and the
     thread stepping belongs to the inferior we're detaching, we need
     to restart the threads of other inferiors.  */
  if (step_over_info.thread != -1)
    {
      infrun_debug_printf ("inline step-over in-process while detaching");

      thread_info *thr = find_thread_global_id (step_over_info.thread);
      if (thr->inf == inf)
	{
	  /* Since we removed threads of INF from the step-over chain,
	     we know this won't start a step-over for INF.  */
	  clear_step_over_info ();

	  if (target_is_non_stop_p ())
	    {
	      /* Start a new step-over in another thread if there's
		 one that needs it.  */
	      start_step_over ();

	      /* Restart all other threads (except the
		 previously-stepping thread, since that one is still
		 running).  */
	      if (!step_over_info_valid_p ())
		restart_threads (thr);
	    }
	}
    }

  if (displaced_step_in_progress (inf))
    {
      infrun_debug_printf ("displaced-stepping in-process while detaching");

      /* Stop threads currently displaced stepping, aborting it.  */

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  if (thr->displaced_step_state.in_progress ())
	    {
	      if (thr->executing)
		{
		  /* Ask the target to stop the thread; the resulting
		     stop event is consumed by the wait loop below.  */
		  if (!thr->stop_requested)
		    {
		      target_stop (thr->ptid);
		      thr->stop_requested = true;
		    }
		}
	      else
		thr->resumed = false;
	    }
	}

      /* Drain events until no thread of INF is in the middle of a
	 displaced step anymore.  */
      while (displaced_step_in_progress (inf))
	{
	  wait_one_event event;

	  event.target = inf->process_target ();
	  event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);

	  if (debug_infrun)
	    print_target_wait_results (pid_ptid, event.ptid, &event.ws);

	  handle_one (event);
	}

      /* It's OK to leave some of the threads of INF stopped, since
	 they'll be detached shortly.  */
    }
}
3860
3861 /* Wait for control to return from inferior to debugger.
3862
3863 If inferior gets a signal, we may decide to start it up again
3864 instead of returning. That is why there is a loop in this function.
3865 When this function actually returns it means the inferior
3866 should be left stopped and GDB should read more commands. */
3867
3868 static void
3869 wait_for_inferior (inferior *inf)
3870 {
3871 infrun_debug_printf ("wait_for_inferior ()");
3872
3873 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
3874
3875 /* If an error happens while handling the event, propagate GDB's
3876 knowledge of the executing state to the frontend/user running
3877 state. */
3878 scoped_finish_thread_state finish_state
3879 (inf->process_target (), minus_one_ptid);
3880
3881 while (1)
3882 {
3883 struct execution_control_state ecss;
3884 struct execution_control_state *ecs = &ecss;
3885
3886 memset (ecs, 0, sizeof (*ecs));
3887
3888 overlay_cache_invalid = 1;
3889
3890 /* Flush target cache before starting to handle each event.
3891 Target was running and cache could be stale. This is just a
3892 heuristic. Running threads may modify target memory, but we
3893 don't get any event. */
3894 target_dcache_invalidate ();
3895
3896 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3897 ecs->target = inf->process_target ();
3898
3899 if (debug_infrun)
3900 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
3901
3902 /* Now figure out what to do with the result of the result. */
3903 handle_inferior_event (ecs);
3904
3905 if (!ecs->wait_some_more)
3906 break;
3907 }
3908
3909 /* No error, don't finish the state yet. */
3910 finish_state.release ();
3911 }
3912
3913 /* Cleanup that reinstalls the readline callback handler, if the
3914 target is running in the background. If while handling the target
3915 event something triggered a secondary prompt, like e.g., a
3916 pagination prompt, we'll have removed the callback handler (see
3917 gdb_readline_wrapper_line). Need to do this as we go back to the
3918 event loop, ready to process further input. Note this has no
3919 effect if the handler hasn't actually been removed, because calling
3920 rl_callback_handler_install resets the line buffer, thus losing
3921 input. */
3922
3923 static void
3924 reinstall_readline_callback_handler_cleanup ()
3925 {
3926 struct ui *ui = current_ui;
3927
3928 if (!ui->async)
3929 {
3930 /* We're not going back to the top level event loop yet. Don't
3931 install the readline callback, as it'd prep the terminal,
3932 readline-style (raw, noecho) (e.g., --batch). We'll install
3933 it the next time the prompt is displayed, when we're ready
3934 for input. */
3935 return;
3936 }
3937
3938 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
3939 gdb_rl_callback_handler_reinstall ();
3940 }
3941
3942 /* Clean up the FSMs of threads that are now stopped. In non-stop,
3943 that's just the event thread. In all-stop, that's all threads. */
3944
3945 static void
3946 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3947 {
3948 if (ecs->event_thread != NULL
3949 && ecs->event_thread->thread_fsm != NULL)
3950 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
3951
3952 if (!non_stop)
3953 {
3954 for (thread_info *thr : all_non_exited_threads ())
3955 {
3956 if (thr->thread_fsm == NULL)
3957 continue;
3958 if (thr == ecs->event_thread)
3959 continue;
3960
3961 switch_to_thread (thr);
3962 thr->thread_fsm->clean_up (thr);
3963 }
3964
3965 if (ecs->event_thread != NULL)
3966 switch_to_thread (ecs->event_thread);
3967 }
3968 }
3969
3970 /* Helper for all_uis_check_sync_execution_done that works on the
3971 current UI. */
3972
3973 static void
3974 check_curr_ui_sync_execution_done (void)
3975 {
3976 struct ui *ui = current_ui;
3977
3978 if (ui->prompt_state == PROMPT_NEEDED
3979 && ui->async
3980 && !gdb_in_secondary_prompt_p (ui))
3981 {
3982 target_terminal::ours ();
3983 gdb::observers::sync_execution_done.notify ();
3984 ui_register_input_event_handler (ui);
3985 }
3986 }
3987
3988 /* See infrun.h. */
3989
3990 void
3991 all_uis_check_sync_execution_done (void)
3992 {
3993 SWITCH_THRU_ALL_UIS ()
3994 {
3995 check_curr_ui_sync_execution_done ();
3996 }
3997 }
3998
3999 /* See infrun.h. */
4000
4001 void
4002 all_uis_on_sync_execution_starting (void)
4003 {
4004 SWITCH_THRU_ALL_UIS ()
4005 {
4006 if (current_ui->prompt_state == PROMPT_NEEDED)
4007 async_disable_stdin ();
4008 }
4009 }
4010
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  */

void
fetch_inferior_event ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  /* Set when this event completed an execution command.  */
  int cmd_done = 0;

  memset (ecs, 0, sizeof (*ecs));

  /* Events are always processed with the main UI as current UI.  This
     way, warnings, debug output, etc. are always consistently sent to
     the main console.  */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* Temporarily disable pagination.  Otherwise, the user would be
     given an option to press 'q' to quit, which would cause an early
     exit and could leave GDB in a half-baked state.  */
  scoped_restore save_pagination
    = make_scoped_restore (&pagination_enabled, false);

  /* End up with readline processing input, if necessary.  */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging.  If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event.  */
    gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events.  Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands.  */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event.  Target
       was running and cache could be stale.  This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event.  */
    target_dcache_invalidate ();

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Allow targets to pause their resumed threads while we handle
       the event.  */
    scoped_disable_commit_resumed disable_commit_resumed ("handling event");

    /* Non-blocking poll; bail out if nothing is ready yet.  */
    if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
      {
	infrun_debug_printf ("do_target_wait returned no event");
	disable_commit_resumed.reset_and_commit ();
	return;
      }

    gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);

    /* Switch to the target that generated the event, so we can do
       target calls.  */
    switch_to_target_no_thread (ecs->target);

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state.  */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
    scoped_finish_thread_state finish_state (ecs->target, finish_ptid);

    /* Get executed before scoped_restore_current_thread above to apply
       still for the thread which has thrown the exception.  */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Now figure out what to do with the result of the result.  */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      {
	struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
	bool should_stop = true;
	struct thread_info *thr = ecs->event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	/* Let the event thread's finite-state machine (e.g. a "step"
	   or "finish" command) decide whether this stop is final.  */
	if (thr != NULL)
	  {
	    struct thread_fsm *thread_fsm = thr->thread_fsm;

	    if (thread_fsm != NULL)
	      should_stop = thread_fsm->should_stop (thr);
	  }

	if (!should_stop)
	  {
	    keep_going (ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    int proceeded = 0;

	    clean_up_just_stopped_threads_fsms (ecs);

	    if (thr != NULL && thr->thread_fsm != NULL)
	      should_notify_stop = thr->thread_fsm->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit.  */
		if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    /* normal_stop may have resumed the target; only report
	       command completion if it did not.  */
	    if (!proceeded)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone.  We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited).  We chose the
	       later, just because that's what GDB used to do.  After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.".  */
	    if (!non_stop
		&& cmd_done
		&& ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet.  */
    finish_state.release ();

    disable_commit_resumed.reset_and_commit ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here.  */
  }

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input).  */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    printf_unfiltered (_("completed.\n"));
}
4191
4192 /* See infrun.h. */
4193
4194 void
4195 set_step_info (thread_info *tp, struct frame_info *frame,
4196 struct symtab_and_line sal)
4197 {
4198 /* This can be removed once this function no longer implicitly relies on the
4199 inferior_ptid value. */
4200 gdb_assert (inferior_ptid == tp->ptid);
4201
4202 tp->control.step_frame_id = get_frame_id (frame);
4203 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4204
4205 tp->current_symtab = sal.symtab;
4206 tp->current_line = sal.line;
4207 }
4208
4209 /* Clear context switchable stepping state. */
4210
4211 void
4212 init_thread_stepping_state (struct thread_info *tss)
4213 {
4214 tss->stepped_breakpoint = 0;
4215 tss->stepping_over_breakpoint = 0;
4216 tss->stepping_over_watchpoint = 0;
4217 tss->step_after_step_resume_breakpoint = 0;
4218 }
4219
4220 /* See infrun.h. */
4221
4222 void
4223 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4224 target_waitstatus status)
4225 {
4226 target_last_proc_target = target;
4227 target_last_wait_ptid = ptid;
4228 target_last_waitstatus = status;
4229 }
4230
4231 /* See infrun.h. */
4232
4233 void
4234 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4235 target_waitstatus *status)
4236 {
4237 if (target != nullptr)
4238 *target = target_last_proc_target;
4239 if (ptid != nullptr)
4240 *ptid = target_last_wait_ptid;
4241 if (status != nullptr)
4242 *status = target_last_waitstatus;
4243 }
4244
4245 /* See infrun.h. */
4246
4247 void
4248 nullify_last_target_wait_ptid (void)
4249 {
4250 target_last_proc_target = nullptr;
4251 target_last_wait_ptid = minus_one_ptid;
4252 target_last_waitstatus = {};
4253 }
4254
4255 /* Switch thread contexts. */
4256
4257 static void
4258 context_switch (execution_control_state *ecs)
4259 {
4260 if (ecs->ptid != inferior_ptid
4261 && (inferior_ptid == null_ptid
4262 || ecs->event_thread != inferior_thread ()))
4263 {
4264 infrun_debug_printf ("Switching context from %s to %s",
4265 target_pid_to_str (inferior_ptid).c_str (),
4266 target_pid_to_str (ecs->ptid).c_str ());
4267 }
4268
4269 switch_to_thread (ecs->event_thread);
4270 }
4271
/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint.  If so,
   adjust the PC, per gdbarch_decr_pc_after_break.  */

static void
adjust_pc_after_break (struct thread_info *thread,
		       struct target_waitstatus *ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws->kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ws->value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
	    PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      /* Don't let the PC write below be recorded by the full record
	 target as an inferior-driven change.  */
      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
4418
4419 static bool
4420 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4421 {
4422 for (frame = get_prev_frame (frame);
4423 frame != NULL;
4424 frame = get_prev_frame (frame))
4425 {
4426 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4427 return true;
4428
4429 if (get_frame_type (frame) != INLINE_FRAME)
4430 break;
4431 }
4432
4433 return false;
4434 }
4435
4436 /* Look for an inline frame that is marked for skip.
4437 If PREV_FRAME is TRUE start at the previous frame,
4438 otherwise start at the current frame. Stop at the
4439 first non-inline frame, or at the frame where the
4440 step started. */
4441
4442 static bool
4443 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4444 {
4445 struct frame_info *frame = get_current_frame ();
4446
4447 if (prev_frame)
4448 frame = get_prev_frame (frame);
4449
4450 for (; frame != NULL; frame = get_prev_frame (frame))
4451 {
4452 const char *fn = NULL;
4453 symtab_and_line sal;
4454 struct symbol *sym;
4455
4456 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4457 break;
4458 if (get_frame_type (frame) != INLINE_FRAME)
4459 break;
4460
4461 sal = find_frame_sal (frame);
4462 sym = get_frame_function (frame);
4463
4464 if (sym != NULL)
4465 fn = sym->print_name ();
4466
4467 if (sal.line != 0
4468 && function_name_is_marked_for_skip (fn, sal))
4469 return true;
4470 }
4471
4472 return false;
4473 }
4474
4475 /* If the event thread has the stop requested flag set, pretend it
4476 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4477 target_stop). */
4478
4479 static bool
4480 handle_stop_requested (struct execution_control_state *ecs)
4481 {
4482 if (ecs->event_thread->stop_requested)
4483 {
4484 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4485 ecs->ws.value.sig = GDB_SIGNAL_0;
4486 handle_signal_stop (ecs);
4487 return true;
4488 }
4489 return false;
4490 }
4491
/* Auxiliary function that handles syscall entry/return events.
   It returns true if the inferior should keep going (and GDB
   should ignore the event), or false if the event deserves to be
   processed.  */

static bool
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.value.syscall_number;
  ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);

  /* Only consult the breakpoint machinery if some syscall catchpoint
     could match this syscall number.  */
  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      infrun_debug_printf ("syscall number=%d", syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (regcache->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      /* An explicit stop request takes precedence over the
	 catchpoint check below.  */
      if (handle_stop_requested (ecs))
	return false;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return false;
	}
    }

  /* Also honor a stop request on the path where no syscall
     catchpoint applied (the check above is only reached inside the
     catchpoint branch).  */
  if (handle_stop_requested (ecs))
    return false;

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);

  return true;
}
4537
4538 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4539
4540 static void
4541 fill_in_stop_func (struct gdbarch *gdbarch,
4542 struct execution_control_state *ecs)
4543 {
4544 if (!ecs->stop_func_filled_in)
4545 {
4546 const block *block;
4547 const general_symbol_info *gsi;
4548
4549 /* Don't care about return value; stop_func_start and stop_func_name
4550 will both be 0 if it doesn't work. */
4551 find_pc_partial_function_sym (ecs->event_thread->suspend.stop_pc,
4552 &gsi,
4553 &ecs->stop_func_start,
4554 &ecs->stop_func_end,
4555 &block);
4556 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
4557
4558 /* The call to find_pc_partial_function, above, will set
4559 stop_func_start and stop_func_end to the start and end
4560 of the range containing the stop pc. If this range
4561 contains the entry pc for the block (which is always the
4562 case for contiguous blocks), advance stop_func_start past
4563 the function's start offset and entrypoint. Note that
4564 stop_func_start is NOT advanced when in a range of a
4565 non-contiguous block that does not contain the entry pc. */
4566 if (block != nullptr
4567 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4568 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4569 {
4570 ecs->stop_func_start
4571 += gdbarch_deprecated_function_start_offset (gdbarch);
4572
4573 if (gdbarch_skip_entrypoint_p (gdbarch))
4574 ecs->stop_func_start
4575 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4576 }
4577
4578 ecs->stop_func_filled_in = 1;
4579 }
4580 }
4581
4582
4583 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4584
4585 static enum stop_kind
4586 get_inferior_stop_soon (execution_control_state *ecs)
4587 {
4588 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4589
4590 gdb_assert (inf != NULL);
4591 return inf->control.stop_soon;
4592 }
4593
4594 /* Poll for one event out of the current target. Store the resulting
4595 waitstatus in WS, and return the event ptid. Does not block. */
4596
4597 static ptid_t
4598 poll_one_curr_target (struct target_waitstatus *ws)
4599 {
4600 ptid_t event_ptid;
4601
4602 overlay_cache_invalid = 1;
4603
4604 /* Flush target cache before starting to handle each event.
4605 Target was running and cache could be stale. This is just a
4606 heuristic. Running threads may modify target memory, but we
4607 don't get any event. */
4608 target_dcache_invalidate ();
4609
4610 if (deprecated_target_wait_hook)
4611 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
4612 else
4613 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
4614
4615 if (debug_infrun)
4616 print_target_wait_results (minus_one_ptid, event_ptid, ws);
4617
4618 return event_ptid;
4619 }
4620
/* Wait for one event out of any target.  Alternates between a
   non-blocking poll of every async target with executing threads and
   a blocking select on those targets' event file descriptors, until
   some target reports a real event (or nothing is left to wait
   for).  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      /* Phase 1: poll each candidate target without blocking.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (0);
	    }
	  else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Phase 2: gather each candidate target's event descriptor for
	 the select below.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
	}

      QUIT;

      int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
      if (numfds < 0)
	{
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
      /* Some descriptor became readable; loop back and poll again.  */
    }
}
4691
/* Save the thread's event and stop reason to process it later.
   Records WS as a pending waitstatus on TP and, for SIGTRAP stops,
   classifies why the thread stopped (watchpoint, software/hardware
   breakpoint, or single-step) into TP->suspend.stop_reason so the
   stop can be presented correctly when it is finally processed.  */

static void
save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
{
  infrun_debug_printf ("saving status %s for %d.%ld.%ld",
		       target_waitstatus_to_string (ws).c_str (),
		       tp->ptid.pid (),
		       tp->ptid.lwp (),
		       tp->ptid.tid ());

  /* Record for later.  */
  tp->suspend.waitstatus = *ws;
  tp->suspend.waitstatus_pending_p = 1;

  /* Only a SIGTRAP stop needs further classification; other events
     carry their meaning in the waitstatus itself.  */
  if (ws->kind == TARGET_WAITKIND_STOPPED
      && ws->value.sig == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = regcache->aspace ();
      CORE_ADDR pc = regcache_read_pc (regcache);

      /* Undo any PC adjustment made for a breakpoint trap; note this
	 may rewrite the saved waitstatus.  */
      adjust_pc_after_break (tp, &tp->suspend.waitstatus);

      /* The target_stopped_by_* queries below operate on the current
	 thread, so temporarily switch to TP.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      if (target_stopped_by_watchpoint ())
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_WATCHPOINT;
	}
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  /* The target cannot report the stop reason itself; fall
	     back on GDB's own breakpoint bookkeeping at PC.  */
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SINGLE_STEP;
	}
    }
}
4758
4759 /* Mark the non-executing threads accordingly. In all-stop, all
4760 threads of all processes are stopped when we get any event
4761 reported. In non-stop mode, only the event thread stops. */
4762
4763 static void
4764 mark_non_executing_threads (process_stratum_target *target,
4765 ptid_t event_ptid,
4766 struct target_waitstatus ws)
4767 {
4768 ptid_t mark_ptid;
4769
4770 if (!target_is_non_stop_p ())
4771 mark_ptid = minus_one_ptid;
4772 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4773 || ws.kind == TARGET_WAITKIND_EXITED)
4774 {
4775 /* If we're handling a process exit in non-stop mode, even
4776 though threads haven't been deleted yet, one would think
4777 that there is nothing to do, as threads of the dead process
4778 will be soon deleted, and threads of any other process were
4779 left running. However, on some targets, threads survive a
4780 process exit event. E.g., for the "checkpoint" command,
4781 when the current checkpoint/fork exits, linux-fork.c
4782 automatically switches to another fork from within
4783 target_mourn_inferior, by associating the same
4784 inferior/thread to another fork. We haven't mourned yet at
4785 this point, but we must mark any threads left in the
4786 process as not-executing so that finish_thread_state marks
4787 them stopped (in the user's perspective) if/when we present
4788 the stop to the user. */
4789 mark_ptid = ptid_t (event_ptid.pid ());
4790 }
4791 else
4792 mark_ptid = event_ptid;
4793
4794 set_executing (target, mark_ptid, false);
4795
4796 /* Likewise the resumed flag. */
4797 set_resumed (target, mark_ptid, false);
4798 }
4799
/* Handle one event after stopping threads.  If the eventing thread
   reports back any interesting event, we leave it pending.  If the
   eventing thread was in the middle of a displaced step, we
   cancel/finish it, and unless the thread's inferior is being
   detached, put the thread back in the step-over chain.  Returns true
   if there are no resumed threads left in the target (thus there's no
   point in waiting further), false otherwise.  */

static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", target_waitstatus_to_string (&event.ws).c_str (),
     target_pid_to_str (event.ptid).c_str ());

  if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
	   || event.ws.kind == TARGET_WAITKIND_EXITED
	   || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
	 the first non-exited thread.  */
      if (event.ptid.is_pid ())
	{
	  int pid = event.ptid.pid ();
	  inferior *inf = find_inferior_pid (event.target, pid);
	  /* Grab the first non-exited thread of the inferior, if
	     any.  */
	  for (thread_info *tp : inf->non_exited_threads ())
	    {
	      t = tp;
	      break;
	    }

	  /* If there is no available thread, the event would
	     have to be appended to a per-inferior event list,
	     which does not exist (and if it did, we'd have
	     to adjust run control command to be able to
	     resume such an inferior).  We assert here instead
	     of going into an infinite loop.  */
	  gdb_assert (t != nullptr);

	  infrun_debug_printf
	    ("using %s", target_pid_to_str (t->ptid).c_str ());
	}
      else
	{
	  t = find_thread_ptid (event.target, event.ptid);
	  /* Check if this is the first time we see this thread.
	     Don't bother adding if it individually exited.  */
	  if (t == nullptr
	      && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
	    t = add_thread (event.target, event.ptid);
	}

      if (t != nullptr)
	{
	  /* Set the threads as non-executing to avoid
	     another stop attempt on them.  */
	  switch_to_thread_no_regs (t);
	  mark_non_executing_threads (event.target, event.ptid,
				      event.ws);
	  save_waitstatus (t, &event.ws);
	  t->stop_requested = false;
	}
    }
  else
    {
      /* A thread stopped with some other event; record it as pending
	 on the thread.  */
      thread_info *t = find_thread_ptid (event.target, event.ptid);
      if (t == NULL)
	t = add_thread (event.target, event.ptid);

      /* The thread is no longer running; clear all resumption-related
	 state.  */
      t->stop_requested = 0;
      t->executing = 0;
      t->resumed = false;
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
	 a stop.  */
      inferior *inf = find_inferior_ptid (event.target, event.ptid);
      if (inf->needs_setup)
	{
	  switch_to_thread_no_regs (t);
	  setup_inferior (0);
	}

      if (event.ws.kind == TARGET_WAITKIND_STOPPED
	  && event.ws.value.sig == GDB_SIGNAL_0)
	{
	  /* We caught the event that we intended to catch, so
	     there's no event pending.  */
	  t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
	  t->suspend.waitstatus_pending_p = 0;

	  if (displaced_step_finish (t, GDB_SIGNAL_0)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      infrun_debug_printf
		("displaced-step of %s canceled",
		 target_pid_to_str (t->ptid).c_str ());

	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }
	}
      else
	{
	  enum gdb_signal sig;
	  struct regcache *regcache;

	  infrun_debug_printf
	    ("target_wait %s, saving status for %d.%ld.%ld",
	     target_waitstatus_to_string (&event.ws).c_str (),
	     t->ptid.pid (), t->ptid.lwp (), t->ptid.tid ());

	  /* Record for later.  */
	  save_waitstatus (t, &event.ws);

	  /* Pass the stop signal (if any) through to the displaced
	     step finish logic; other event kinds carry no signal.  */
	  sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
		 ? event.ws.value.sig : GDB_SIGNAL_0);

	  if (displaced_step_finish (t, sig)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }

	  /* Record the stop PC after displaced stepping is finished,
	     so the PC reflects the fixed-up location.  */
	  regcache = get_thread_regcache (t);
	  t->suspend.stop_pc = regcache_read_pc (regcache);

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (target_gdbarch (),
					 t->suspend.stop_pc),
			       target_pid_to_str (t->ptid).c_str (),
			       currently_stepping (t));
	}
    }

  return false;
}
4952
/* See infrun.h.

   Stop every executing thread of every non-stop target, waiting for
   the corresponding stop notifications and leaving any interesting
   events pending on their threads.  */

void
stop_all_threads (void)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  infrun_debug_printf ("starting");

  scoped_restore_current_thread restore_thread;

  /* Enable thread events of all targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events of all targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      /* Use debug_prefixed_printf directly to get a meaningful function
	 name.  */
      if (debug_infrun)
	debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
	{
	  int waits_needed = 0;

	  /* Refresh each target's thread list so the loop below sees
	     threads spawned since the previous iteration.  */
	  for (auto *target : all_non_exited_process_targets ())
	    {
	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing)
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      infrun_debug_printf (" %s executing, need stop",
					   target_pid_to_str (t->ptid).c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      infrun_debug_printf (" %s executing, already stopping",
					   target_pid_to_str (t->ptid).c_str ());
		    }

		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  infrun_debug_printf (" %s not executing",
				       target_pid_to_str (t->ptid).c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->resumed = false;
		}
	    }

	  /* Nothing left to wait for on this iteration; move on to
	     the next pass.  */
	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();
	      /* handle_one returns true when no resumed threads are
		 left at all -- no point in waiting further.  */
	      if (handle_one (event))
		break;
	    }
	}
    }
}
5072
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Returns true if the
   event was consumed here (the caller just keeps waiting), false if
   it should be reported to the user.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      /* If no UI is synchronously blocked waiting for an execution
	 command to finish, a background no-resumed event is of no
	 interest.  */
      bool any_sync = false;

      for (ui *ui : all_uis ())
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = true;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
	  prepare_to_wait (ecs);
	  return true;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	    -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	    -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and report to the user that no
     unwaited-for children are left.  */

  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;

  /* Refresh every target's thread list, so the executing/resumed
     checks below operate on up-to-date state.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      switch_to_target_no_thread (target);
      update_thread_list ();
    }

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      /* Hand the terminal to the first inferior (other than the
	 current one) found with an executing thread.  */
      if (swap_terminal && thread->executing)
	{
	  if (thread->inf != curr_inf)
	    {
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event
	  && (thread->executing
	      || thread->suspend.waitstatus_pending_p))
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
			       "(ignoring: found resumed)");

	  ignore_event = true;
	}

      /* Both decisions made -- no need to scan further.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}
5207
5208 /* Given an execution control state that has been freshly filled in by
5209 an event from the inferior, figure out what it means and take
5210 appropriate action.
5211
5212 The alternatives are:
5213
5214 1) stop_waiting and return; to really stop and return to the
5215 debugger.
5216
5217 2) keep_going and return; to wait for the next event (set
5218 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5219 once). */
5220
5221 static void
5222 handle_inferior_event (struct execution_control_state *ecs)
5223 {
5224 /* Make sure that all temporary struct value objects that were
5225 created during the handling of the event get deleted at the
5226 end. */
5227 scoped_value_mark free_values;
5228
5229 infrun_debug_printf ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
5230
5231 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5232 {
5233 /* We had an event in the inferior, but we are not interested in
5234 handling it at this level. The lower layers have already
5235 done what needs to be done, if anything.
5236
5237 One of the possible circumstances for this is when the
5238 inferior produces output for the console. The inferior has
5239 not stopped, and we are ignoring the event. Another possible
5240 circumstance is any event which the lower level knows will be
5241 reported multiple times without an intervening resume. */
5242 prepare_to_wait (ecs);
5243 return;
5244 }
5245
5246 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5247 {
5248 prepare_to_wait (ecs);
5249 return;
5250 }
5251
5252 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
5253 && handle_no_resumed (ecs))
5254 return;
5255
5256 /* Cache the last target/ptid/waitstatus. */
5257 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
5258
5259 /* Always clear state belonging to the previous time we stopped. */
5260 stop_stack_dummy = STOP_NONE;
5261
5262 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5263 {
5264 /* No unwaited-for children left. IOW, all resumed children
5265 have exited. */
5266 stop_print_frame = false;
5267 stop_waiting (ecs);
5268 return;
5269 }
5270
5271 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
5272 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
5273 {
5274 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
5275 /* If it's a new thread, add it to the thread database. */
5276 if (ecs->event_thread == NULL)
5277 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
5278
5279 /* Disable range stepping. If the next step request could use a
5280 range, this will be end up re-enabled then. */
5281 ecs->event_thread->control.may_range_step = 0;
5282 }
5283
5284 /* Dependent on valid ECS->EVENT_THREAD. */
5285 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
5286
5287 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5288 reinit_frame_cache ();
5289
5290 breakpoint_retire_moribund ();
5291
5292 /* First, distinguish signals caused by the debugger from signals
5293 that have to do with the program's own actions. Note that
5294 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5295 on the operating system version. Here we detect when a SIGILL or
5296 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5297 something similar for SIGSEGV, since a SIGSEGV will be generated
5298 when we're trying to execute a breakpoint instruction on a
5299 non-executable stack. This happens for call dummy breakpoints
5300 for architectures like SPARC that place call dummies on the
5301 stack. */
5302 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
5303 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5304 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5305 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
5306 {
5307 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
5308
5309 if (breakpoint_inserted_here_p (regcache->aspace (),
5310 regcache_read_pc (regcache)))
5311 {
5312 infrun_debug_printf ("Treating signal as SIGTRAP");
5313 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
5314 }
5315 }
5316
5317 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
5318
5319 switch (ecs->ws.kind)
5320 {
5321 case TARGET_WAITKIND_LOADED:
5322 {
5323 context_switch (ecs);
5324 /* Ignore gracefully during startup of the inferior, as it might
5325 be the shell which has just loaded some objects, otherwise
5326 add the symbols for the newly loaded objects. Also ignore at
5327 the beginning of an attach or remote session; we will query
5328 the full list of libraries once the connection is
5329 established. */
5330
5331 stop_kind stop_soon = get_inferior_stop_soon (ecs);
5332 if (stop_soon == NO_STOP_QUIETLY)
5333 {
5334 struct regcache *regcache;
5335
5336 regcache = get_thread_regcache (ecs->event_thread);
5337
5338 handle_solib_event ();
5339
5340 ecs->event_thread->control.stop_bpstat
5341 = bpstat_stop_status (regcache->aspace (),
5342 ecs->event_thread->suspend.stop_pc,
5343 ecs->event_thread, &ecs->ws);
5344
5345 if (handle_stop_requested (ecs))
5346 return;
5347
5348 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5349 {
5350 /* A catchpoint triggered. */
5351 process_event_stop_test (ecs);
5352 return;
5353 }
5354
5355 /* If requested, stop when the dynamic linker notifies
5356 gdb of events. This allows the user to get control
5357 and place breakpoints in initializer routines for
5358 dynamically loaded objects (among other things). */
5359 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5360 if (stop_on_solib_events)
5361 {
5362 /* Make sure we print "Stopped due to solib-event" in
5363 normal_stop. */
5364 stop_print_frame = true;
5365
5366 stop_waiting (ecs);
5367 return;
5368 }
5369 }
5370
5371 /* If we are skipping through a shell, or through shared library
5372 loading that we aren't interested in, resume the program. If
5373 we're running the program normally, also resume. */
5374 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5375 {
5376 /* Loading of shared libraries might have changed breakpoint
5377 addresses. Make sure new breakpoints are inserted. */
5378 if (stop_soon == NO_STOP_QUIETLY)
5379 insert_breakpoints ();
5380 resume (GDB_SIGNAL_0);
5381 prepare_to_wait (ecs);
5382 return;
5383 }
5384
5385 /* But stop if we're attaching or setting up a remote
5386 connection. */
5387 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5388 || stop_soon == STOP_QUIETLY_REMOTE)
5389 {
5390 infrun_debug_printf ("quietly stopped");
5391 stop_waiting (ecs);
5392 return;
5393 }
5394
5395 internal_error (__FILE__, __LINE__,
5396 _("unhandled stop_soon: %d"), (int) stop_soon);
5397 }
5398
5399 case TARGET_WAITKIND_SPURIOUS:
5400 if (handle_stop_requested (ecs))
5401 return;
5402 context_switch (ecs);
5403 resume (GDB_SIGNAL_0);
5404 prepare_to_wait (ecs);
5405 return;
5406
5407 case TARGET_WAITKIND_THREAD_CREATED:
5408 if (handle_stop_requested (ecs))
5409 return;
5410 context_switch (ecs);
5411 if (!switch_back_to_stepped_thread (ecs))
5412 keep_going (ecs);
5413 return;
5414
5415 case TARGET_WAITKIND_EXITED:
5416 case TARGET_WAITKIND_SIGNALLED:
5417 {
5418 /* Depending on the system, ecs->ptid may point to a thread or
5419 to a process. On some targets, target_mourn_inferior may
5420 need to have access to the just-exited thread. That is the
5421 case of GNU/Linux's "checkpoint" support, for example.
5422 Call the switch_to_xxx routine as appropriate. */
5423 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5424 if (thr != nullptr)
5425 switch_to_thread (thr);
5426 else
5427 {
5428 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5429 switch_to_inferior_no_thread (inf);
5430 }
5431 }
5432 handle_vfork_child_exec_or_exit (0);
5433 target_terminal::ours (); /* Must do this before mourn anyway. */
5434
5435 /* Clearing any previous state of convenience variables. */
5436 clear_exit_convenience_vars ();
5437
5438 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5439 {
5440 /* Record the exit code in the convenience variable $_exitcode, so
5441 that the user can inspect this again later. */
5442 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5443 (LONGEST) ecs->ws.value.integer);
5444
5445 /* Also record this in the inferior itself. */
5446 current_inferior ()->has_exit_code = 1;
5447 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
5448
5449 /* Support the --return-child-result option. */
5450 return_child_result_value = ecs->ws.value.integer;
5451
5452 gdb::observers::exited.notify (ecs->ws.value.integer);
5453 }
5454 else
5455 {
5456 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
5457
5458 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5459 {
5460 /* Set the value of the internal variable $_exitsignal,
5461 which holds the signal uncaught by the inferior. */
5462 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5463 gdbarch_gdb_signal_to_target (gdbarch,
5464 ecs->ws.value.sig));
5465 }
5466 else
5467 {
5468 /* We don't have access to the target's method used for
5469 converting between signal numbers (GDB's internal
5470 representation <-> target's representation).
5471 Therefore, we cannot do a good job at displaying this
5472 information to the user. It's better to just warn
5473 her about it (if infrun debugging is enabled), and
5474 give up. */
5475 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5476 "signal number.");
5477 }
5478
5479 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
5480 }
5481
5482 gdb_flush (gdb_stdout);
5483 target_mourn_inferior (inferior_ptid);
5484 stop_print_frame = false;
5485 stop_waiting (ecs);
5486 return;
5487
5488 case TARGET_WAITKIND_FORKED:
5489 case TARGET_WAITKIND_VFORKED:
5490 /* Check whether the inferior is displaced stepping. */
5491 {
5492 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
5493 struct gdbarch *gdbarch = regcache->arch ();
5494 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
5495
5496 /* If this is a fork (child gets its own address space copy)
5497 and some displaced step buffers were in use at the time of
5498 the fork, restore the displaced step buffer bytes in the
5499 child process.
5500
5501 Architectures which support displaced stepping and fork
5502 events must supply an implementation of
5503 gdbarch_displaced_step_restore_all_in_ptid. This is not
5504 enforced during gdbarch validation to support architectures
5505 which support displaced stepping but not forks. */
5506 if (ecs->ws.kind == TARGET_WAITKIND_FORKED
5507 && gdbarch_supports_displaced_stepping (gdbarch))
5508 gdbarch_displaced_step_restore_all_in_ptid
5509 (gdbarch, parent_inf, ecs->ws.value.related_pid);
5510
5511 /* If displaced stepping is supported, and thread ecs->ptid is
5512 displaced stepping. */
5513 if (displaced_step_in_progress_thread (ecs->event_thread))
5514 {
5515 struct regcache *child_regcache;
5516 CORE_ADDR parent_pc;
5517
5518 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5519 indicating that the displaced stepping of syscall instruction
5520 has been done. Perform cleanup for parent process here. Note
5521 that this operation also cleans up the child process for vfork,
5522 because their pages are shared. */
5523 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
5524 /* Start a new step-over in another thread if there's one
5525 that needs it. */
5526 start_step_over ();
5527
5528 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5529 the child's PC is also within the scratchpad. Set the child's PC
5530 to the parent's PC value, which has already been fixed up.
5531 FIXME: we use the parent's aspace here, although we're touching
5532 the child, because the child hasn't been added to the inferior
5533 list yet at this point. */
5534
5535 child_regcache
5536 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5537 ecs->ws.value.related_pid,
5538 gdbarch,
5539 parent_inf->aspace);
5540 /* Read PC value of parent process. */
5541 parent_pc = regcache_read_pc (regcache);
5542
5543 displaced_debug_printf ("write child pc from %s to %s",
5544 paddress (gdbarch,
5545 regcache_read_pc (child_regcache)),
5546 paddress (gdbarch, parent_pc));
5547
5548 regcache_write_pc (child_regcache, parent_pc);
5549 }
5550 }
5551
5552 context_switch (ecs);
5553
5554 /* Immediately detach breakpoints from the child before there's
5555 any chance of letting the user delete breakpoints from the
5556 breakpoint lists. If we don't do this early, it's easy to
5557 leave left over traps in the child, vis: "break foo; catch
5558 fork; c; <fork>; del; c; <child calls foo>". We only follow
5559 the fork on the last `continue', and by that time the
5560 breakpoint at "foo" is long gone from the breakpoint table.
5561 If we vforked, then we don't need to unpatch here, since both
5562 parent and child are sharing the same memory pages; we'll
5563 need to unpatch at follow/detach time instead to be certain
5564 that new breakpoints added between catchpoint hit time and
5565 vfork follow are detached. */
5566 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5567 {
5568 /* This won't actually modify the breakpoint list, but will
5569 physically remove the breakpoints from the child. */
5570 detach_breakpoints (ecs->ws.value.related_pid);
5571 }
5572
5573 delete_just_stopped_threads_single_step_breakpoints ();
5574
5575 /* In case the event is caught by a catchpoint, remember that
5576 the event is to be followed at the next resume of the thread,
5577 and not immediately. */
5578 ecs->event_thread->pending_follow = ecs->ws;
5579
5580 ecs->event_thread->suspend.stop_pc
5581 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
5582
5583 ecs->event_thread->control.stop_bpstat
5584 = bpstat_stop_status (get_current_regcache ()->aspace (),
5585 ecs->event_thread->suspend.stop_pc,
5586 ecs->event_thread, &ecs->ws);
5587
5588 if (handle_stop_requested (ecs))
5589 return;
5590
5591 /* If no catchpoint triggered for this, then keep going. Note
5592 that we're interested in knowing the bpstat actually causes a
5593 stop, not just if it may explain the signal. Software
5594 watchpoints, for example, always appear in the bpstat. */
5595 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5596 {
5597 bool follow_child
5598 = (follow_fork_mode_string == follow_fork_mode_child);
5599
5600 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5601
5602 process_stratum_target *targ
5603 = ecs->event_thread->inf->process_target ();
5604
5605 bool should_resume = follow_fork ();
5606
5607 /* Note that one of these may be an invalid pointer,
5608 depending on detach_fork. */
5609 thread_info *parent = ecs->event_thread;
5610 thread_info *child
5611 = find_thread_ptid (targ, ecs->ws.value.related_pid);
5612
5613 /* At this point, the parent is marked running, and the
5614 child is marked stopped. */
5615
5616 /* If not resuming the parent, mark it stopped. */
5617 if (follow_child && !detach_fork && !non_stop && !sched_multi)
5618 parent->set_running (false);
5619
5620 /* If resuming the child, mark it running. */
5621 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
5622 child->set_running (true);
5623
5624 /* In non-stop mode, also resume the other branch. */
5625 if (!detach_fork && (non_stop
5626 || (sched_multi && target_is_non_stop_p ())))
5627 {
5628 if (follow_child)
5629 switch_to_thread (parent);
5630 else
5631 switch_to_thread (child);
5632
5633 ecs->event_thread = inferior_thread ();
5634 ecs->ptid = inferior_ptid;
5635 keep_going (ecs);
5636 }
5637
5638 if (follow_child)
5639 switch_to_thread (child);
5640 else
5641 switch_to_thread (parent);
5642
5643 ecs->event_thread = inferior_thread ();
5644 ecs->ptid = inferior_ptid;
5645
5646 if (should_resume)
5647 keep_going (ecs);
5648 else
5649 stop_waiting (ecs);
5650 return;
5651 }
5652 process_event_stop_test (ecs);
5653 return;
5654
5655 case TARGET_WAITKIND_VFORK_DONE:
5656 /* Done with the shared memory region. Re-insert breakpoints in
5657 the parent, and keep going. */
5658
5659 context_switch (ecs);
5660
5661 current_inferior ()->waiting_for_vfork_done = 0;
5662 current_inferior ()->pspace->breakpoints_not_allowed = 0;
5663
5664 if (handle_stop_requested (ecs))
5665 return;
5666
5667 /* This also takes care of reinserting breakpoints in the
5668 previously locked inferior. */
5669 keep_going (ecs);
5670 return;
5671
5672 case TARGET_WAITKIND_EXECD:
5673
5674 /* Note we can't read registers yet (the stop_pc), because we
5675 don't yet know the inferior's post-exec architecture.
5676 'stop_pc' is explicitly read below instead. */
5677 switch_to_thread_no_regs (ecs->event_thread);
5678
5679 /* Do whatever is necessary to the parent branch of the vfork. */
5680 handle_vfork_child_exec_or_exit (1);
5681
5682 /* This causes the eventpoints and symbol table to be reset.
5683 Must do this now, before trying to determine whether to
5684 stop. */
5685 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
5686
5687 /* In follow_exec we may have deleted the original thread and
5688 created a new one. Make sure that the event thread is the
5689 execd thread for that case (this is a nop otherwise). */
5690 ecs->event_thread = inferior_thread ();
5691
5692 ecs->event_thread->suspend.stop_pc
5693 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
5694
5695 ecs->event_thread->control.stop_bpstat
5696 = bpstat_stop_status (get_current_regcache ()->aspace (),
5697 ecs->event_thread->suspend.stop_pc,
5698 ecs->event_thread, &ecs->ws);
5699
5700 /* Note that this may be referenced from inside
5701 bpstat_stop_status above, through inferior_has_execd. */
5702 xfree (ecs->ws.value.execd_pathname);
5703 ecs->ws.value.execd_pathname = NULL;
5704
5705 if (handle_stop_requested (ecs))
5706 return;
5707
5708 /* If no catchpoint triggered for this, then keep going. */
5709 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5710 {
5711 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5712 keep_going (ecs);
5713 return;
5714 }
5715 process_event_stop_test (ecs);
5716 return;
5717
5718 /* Be careful not to try to gather much state about a thread
5719 that's in a syscall. It's frequently a losing proposition. */
5720 case TARGET_WAITKIND_SYSCALL_ENTRY:
5721 /* Getting the current syscall number. */
5722 if (handle_syscall_event (ecs) == 0)
5723 process_event_stop_test (ecs);
5724 return;
5725
5726 /* Before examining the threads further, step this thread to
5727 get it entirely out of the syscall. (We get notice of the
5728 event when the thread is just on the verge of exiting a
5729 syscall. Stepping one instruction seems to get it back
5730 into user code.) */
5731 case TARGET_WAITKIND_SYSCALL_RETURN:
5732 if (handle_syscall_event (ecs) == 0)
5733 process_event_stop_test (ecs);
5734 return;
5735
5736 case TARGET_WAITKIND_STOPPED:
5737 handle_signal_stop (ecs);
5738 return;
5739
5740 case TARGET_WAITKIND_NO_HISTORY:
5741 /* Reverse execution: target ran out of history info. */
5742
5743 /* Switch to the stopped thread. */
5744 context_switch (ecs);
5745 infrun_debug_printf ("stopped");
5746
5747 delete_just_stopped_threads_single_step_breakpoints ();
5748 ecs->event_thread->suspend.stop_pc
5749 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
5750
5751 if (handle_stop_requested (ecs))
5752 return;
5753
5754 gdb::observers::no_history.notify ();
5755 stop_waiting (ecs);
5756 return;
5757 }
5758 }
5759
/* Restart threads back to what they were trying to do back when we
   paused them for an in-line step-over.  The EVENT_THREAD thread is
   ignored.

   Note this switches the current thread as a side effect (via
   switch_to_thread_no_regs / switch_to_thread) and does not restore
   it before returning -- callers that care must save/restore the
   current thread themselves.  */

static void
restart_threads (struct thread_info *event_thread)
{
  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      /* Don't touch threads of an inferior that is being detached;
	 detaching takes care of them.  */
      if (tp->inf->detaching)
	{
	  infrun_debug_printf ("restart threads: [%s] inferior detaching",
			       target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      /* Make TP's inferior/target current before querying or resuming
	 it; no register access is needed for the checks below.  */
      switch_to_thread_no_regs (tp);

      if (tp == event_thread)
	{
	  infrun_debug_printf ("restart threads: [%s] is event thread",
			       target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      /* Only restart threads the user meant to be running (or that
	 are executing an inferior function call on the user's
	 behalf).  */
      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  infrun_debug_printf ("restart threads: [%s] not meant to be running",
			       target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      /* Already resumed: either really executing, or stopped with an
	 event we have yet to report.  Either way, nothing to do.  */
      if (tp->resumed)
	{
	  infrun_debug_printf ("restart threads: [%s] resumed",
			      target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
	  continue;
	}

      /* A thread queued for its own step-over must not be resumed
	 here; start_step_over handles it.  */
      if (thread_is_in_step_over_chain (tp))
	{
	  infrun_debug_printf ("restart threads: [%s] needs step-over",
			       target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (!tp->resumed);
	  continue;
	}


      /* A pending event to report: don't resume the thread for real,
	 just mark it resumed so do_target_wait will consider (and
	 consume) the pending status.  */
      if (tp->suspend.waitstatus_pending_p)
	{
	  infrun_debug_printf ("restart threads: [%s] has pending status",
			       target_pid_to_str (tp->ptid).c_str ());
	  tp->resumed = true;
	  continue;
	}

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error (__FILE__, __LINE__,
			  "thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  target_pid_to_str (tp->ptid).c_str ());
	}

      if (currently_stepping (tp))
	{
	  /* Resume the thread preserving its in-progress
	     step/next.  */
	  infrun_debug_printf ("restart threads: [%s] was stepping",
			       target_pid_to_str (tp->ptid).c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  struct execution_control_state ecss;
	  struct execution_control_state *ecs = &ecss;

	  infrun_debug_printf ("restart threads: [%s] continuing",
			       target_pid_to_str (tp->ptid).c_str ());
	  /* Build a fresh ecs for TP and continue it, re-delivering
	     any signal it stopped with (keep_going_pass_signal).  */
	  reset_ecs (ecs, tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (ecs);
	}
    }
}
5852
5853 /* Callback for iterate_over_threads. Find a resumed thread that has
5854 a pending waitstatus. */
5855
5856 static int
5857 resumed_thread_with_pending_status (struct thread_info *tp,
5858 void *arg)
5859 {
5860 return (tp->resumed
5861 && tp->suspend.waitstatus_pending_p);
5862 }
5863
/* Called when we get an event that may finish an in-line or
   out-of-line (displaced stepping) step-over started previously.
   Return true if the event is processed and we should go back to the
   event loop; false if the caller should continue processing the
   event.  */

static int
finish_step_over (struct execution_control_state *ecs)
{
  /* Clean up any displaced-stepping state for the event thread (this
     may adjust the thread's PC back from the scratch pad, so it must
     happen before the caller reads/sets stop_pc).  */
  displaced_step_finish (ecs->event_thread,
			 ecs->event_thread->suspend.stop_signal);

  /* Remember whether an in-line step-over was in progress before we
     clear it; used below to decide whether to restart the other
     threads.  */
  bool had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      clear_step_over_info ();
    }

  /* In all-stop, nothing below applies: no other thread was running
     during the step-over, so there is nothing to restart.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      NULL);
      if (pending != NULL)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  infrun_debug_printf ("found resumed threads with "
			       "pending events, saving status");

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, &ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->resumed = true;

	  gdb_assert (!tp->executing);

	  /* Record the stop PC now, while the event thread's
	     registers are still those of the just-reported stop.  */
	  regcache = get_thread_regcache (tp);
	  tp->suspend.stop_pc = regcache_read_pc (regcache);

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (target_gdbarch (),
					 tp->suspend.stop_pc),
			       target_pid_to_str (tp->ptid).c_str (),
			       currently_stepping (tp));

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  return 0;
}
5975
/* Come here when the program has stopped with a signal.  Classify the
   stop: decide whether the signal is explained by something GDB did
   (breakpoint, single-step, watchpoint, step-over) or is a "random"
   signal belonging to the program, and either stop, keep going, or
   hand off to process_event_stop_test.  */

static void
handle_signal_stop (struct execution_control_state *ecs)
{
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  int stopped_by_watchpoint;
  enum stop_kind stop_soon;
  int random_signal;

  gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);

  /* Record the signal the thread stopped with.  */
  ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;

  /* Do we need to clean up the state of a thread that has
     completed a displaced single-step?  (Doing so usually affects
     the PC, so do it here, before we set stop_pc.)  */
  if (finish_step_over (ecs))
    return;

  /* If we either finished a single-step or hit a breakpoint, but
     the user wanted this thread to be stopped, pretend we got a
     SIG0 (generic unsignaled stop).  */
  if (ecs->event_thread->stop_requested
      && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
    ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

  ecs->event_thread->suspend.stop_pc
    = regcache_read_pc (get_thread_regcache (ecs->event_thread));

  context_switch (ecs);

  if (deprecated_context_hook)
    deprecated_context_hook (ecs->event_thread->global_num);

  /* Debug tracing of the stop PC and any watchpoint data address.  */
  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);
      struct gdbarch *reg_gdbarch = regcache->arch ();

      infrun_debug_printf ("stop_pc=%s",
			   paddress (reg_gdbarch,
				     ecs->event_thread->suspend.stop_pc));
      if (target_stopped_by_watchpoint ())
	{
	  CORE_ADDR addr;

	  infrun_debug_printf ("stopped by watchpoint");

	  if (target_stopped_data_address (current_inferior ()->top_target (),
					   &addr))
	    infrun_debug_printf ("stopped data address=%s",
				 paddress (reg_gdbarch, addr));
	  else
	    infrun_debug_printf ("(no data address available)");
	}
    }

  /* This is originated from start_remote(), start_inferior() and
     shared libraries hook functions.  */
  stop_soon = get_inferior_stop_soon (ecs);
  if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
    {
      infrun_debug_printf ("quietly stopped");
      stop_print_frame = true;
      stop_waiting (ecs);
      return;
    }

  /* This originates from attach_command().  We need to overwrite
     the stop_signal here, because some kernels don't ignore a
     SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
     See more comments in inferior.h.  On the other hand, if we
     get a non-SIGSTOP, report it to the user - assume the backend
     will handle the SIGSTOP if it should show up later.

     Also consider that the attach is complete when we see a
     SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
     target extended-remote report it instead of a SIGSTOP
     (e.g. gdbserver).  We already rely on SIGTRAP being our
     signal, so this is no exception.

     Also consider that the attach is complete when we see a
     GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
     the target to stop all threads of the inferior, in case the
     low level attach operation doesn't stop them implicitly.  If
     they weren't stopped implicitly, then the stub will report a
     GDB_SIGNAL_0, meaning: stopped for no particular reason
     other than GDB's request.  */
  if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
      && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
    {
      stop_print_frame = true;
      stop_waiting (ecs);
      ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
      return;
    }

  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Pull the single step breakpoints out of the target.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache;
      CORE_ADDR pc;

      regcache = get_thread_regcache (ecs->event_thread);
      const address_space *aspace = regcache->aspace ();

      pc = regcache_read_pc (regcache);

      /* However, before doing so, if this single-step breakpoint was
	 actually for another thread, set this thread up for moving
	 past it.  */
      if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
						   aspace, pc))
	{
	  if (single_step_breakpoint_inserted_here_p (aspace, pc))
	    {
	      infrun_debug_printf ("[%s] hit another thread's single-step "
				   "breakpoint",
				   target_pid_to_str (ecs->ptid).c_str ());
	      ecs->hit_singlestep_breakpoint = 1;
	    }
	}
      else
	{
	  infrun_debug_printf ("[%s] hit its single-step breakpoint",
			       target_pid_to_str (ecs->ptid).c_str ());
	}
    }
  delete_just_stopped_threads_single_step_breakpoints ();

  /* While stepping over a watchpointed instruction, the SIGTRAP is
     for the step, not for the watchpoint -- don't ask the target.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && ecs->event_thread->stepping_over_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint ()
	  || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
	 attempted to write to a piece of memory under control of
	 a watchpoint.  The instruction hasn't actually executed
	 yet.  If we were to evaluate the watchpoint expression
	 now, we would get the old value, and therefore no change
	 would seem to have occurred.

	 In order to make watchpoints work `right', we really need
	 to complete the memory write, and then evaluate the
	 watchpoint expression.  We do this by single-stepping the
	 target.

	 It may not be necessary to disable the watchpoint to step over
	 it.  For example, the PA can (with some kernel cooperation)
	 single step over a watchpoint without disabling the watchpoint.

	 It is far more common to need to disable a watchpoint to step
	 the inferior over it.  If we have non-steppable watchpoints,
	 we must disable the current watchpoint; it's simplest to
	 disable all watchpoints.

	 Any breakpoint at PC must also be stepped over -- if there's
	 one, it will have already triggered before the watchpoint
	 triggered, and we either already reported it to the user, or
	 it didn't cause a stop and we called keep_going.  In either
	 case, if there was a breakpoint at PC, we must be trying to
	 step past it.  */
      ecs->event_thread->stepping_over_watchpoint = 1;
      keep_going (ecs);
      return;
    }

  /* Reset per-stop state before classifying the event.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;
  bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  ecs->event_thread->control.stop_step = 0;
  stop_print_frame = true;
  stopped_by_random_signal = 0;
  bpstat stop_chain = NULL;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->control.step_range_end != 1)
    {
      const address_space *aspace
	= get_thread_regcache (ecs->event_thread)->aspace ();

      /* skip_inline_frames is expensive, so we avoid it if we can
	 determine that the address is one where functions cannot have
	 been inlined.  This improves performance with inferiors that
	 load a lot of shared libraries, because the solib event
	 breakpoint is defined as the address of a function (i.e. not
	 inline).  Note that we have to check the previous PC as well
	 as the current one to catch cases when we have just
	 single-stepped off a breakpoint prior to reinstating it.
	 Note that we're assuming that the code we single-step to is
	 not inline, but that's not definitive: there's nothing
	 preventing the event breakpoint function from containing
	 inlined code, and the single-step ending up there.  If the
	 user had set a breakpoint on that inlined code, the missing
	 skip_inline_frames call would break things.  Fortunately
	 that's an extremely unlikely scenario.  */
      if (!pc_at_non_inline_function (aspace,
				      ecs->event_thread->suspend.stop_pc,
				      &ecs->ws)
	  && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	       && ecs->event_thread->control.trap_expected
	       && pc_at_non_inline_function (aspace,
					     ecs->event_thread->prev_pc,
					     &ecs->ws)))
	{
	  /* Build the bpstat chain once here so it can be reused by
	     bpstat_stop_status below instead of being rebuilt.  */
	  stop_chain = build_bpstat_chain (aspace,
					   ecs->event_thread->suspend.stop_pc,
					   &ecs->ws);
	  skip_inline_frames (ecs->event_thread, stop_chain);

	  /* Re-fetch current thread's frame in case that invalidated
	     the frame cache.  */
	  frame = get_current_frame ();
	  gdbarch = get_frame_arch (frame);
	}
    }

  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
	 also on an instruction that needs to be stepped multiple
	 times before it's been fully executing.  E.g., architectures
	 with a delay slot.  It needs to be stepped twice, once for
	 the instruction and once for the delay slot.  */
      int step_through_delay
	= gdbarch_single_step_through_delay (gdbarch, frame);

      if (step_through_delay)
	infrun_debug_printf ("step through delay");

      if (ecs->event_thread->control.step_range_end == 0
	  && step_through_delay)
	{
	  /* The user issued a continue when stopped at a breakpoint.
	     Set up for another trap and get out of here.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      else if (step_through_delay)
	{
	  /* The user issued a step when stopped at a breakpoint.
	     Maybe we should stop, maybe we should not - the delay
	     slot *might* correspond to a line of source.  In any
	     case, don't decide that here, just set
	     ecs->stepping_over_breakpoint, making sure we
	     single-step again before breakpoints are re-inserted.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	}
    }

  /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
     handles this event.  */
  ecs->event_thread->control.stop_bpstat
    = bpstat_stop_status (get_current_regcache ()->aspace (),
			  ecs->event_thread->suspend.stop_pc,
			  ecs->event_thread, &ecs->ws, stop_chain);

  /* Following in case break condition called a
     function.  */
  stop_print_frame = true;

  /* This is where we handle "moribund" watchpoints.  Unlike
     software breakpoints traps, hardware watchpoint traps are
     always distinguishable from random traps.  If no high-level
     watchpoint is associated with the reported stop data address
     anymore, then the bpstat does not explain the signal ---
     simply make sure to ignore it if `stopped_by_watchpoint' is
     set.  */

  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
				  GDB_SIGNAL_TRAP)
      && stopped_by_watchpoint)
    {
      infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
			   "ignoring");
    }

  /* NOTE: cagney/2003-03-29: These checks for a random signal
     at one stage in the past included checks for an inferior
     function call's call dummy's return breakpoint.  The original
     comment, that went with the test, read:

     ``End of a stack dummy.  Some systems (e.g. Sony news) give
     another signal besides SIGTRAP, so check here as well as
     above.''

     If someone ever tries to get call dummys on a
     non-executable stack to work (where the target would stop
     with something like a SIGSEGV), then those tests might need
     to be re-instated.  Given, however, that the tests were only
     enabled when momentary breakpoints were not being used, I
     suspect that it won't be the case.

     NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
     be necessary for call dummies on a non-executable stack on
     SPARC.  */

  /* See if the breakpoints module can explain the signal.  */
  random_signal
    = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
			       ecs->event_thread->suspend.stop_signal);

  /* Maybe this was a trap for a software breakpoint that has since
     been removed.  */
  if (random_signal && target_stopped_by_sw_breakpoint ())
    {
      if (gdbarch_program_breakpoint_here_p (gdbarch,
					     ecs->event_thread->suspend.stop_pc))
	{
	  struct regcache *regcache;
	  int decr_pc;

	  /* Re-adjust PC to what the program would see if GDB was not
	     debugging it.  */
	  regcache = get_thread_regcache (ecs->event_thread);
	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      gdb::optional<scoped_restore_tmpl<int>>
		restore_operation_disable;

	      /* Don't let the process record target log this PC
		 rewrite as an inferior operation.  */
	      if (record_full_is_used ())
		restore_operation_disable.emplace
		  (record_full_gdb_operation_disable_set ());

	      regcache_write_pc (regcache,
				 ecs->event_thread->suspend.stop_pc + decr_pc);
	    }
	}
      else
	{
	  /* A delayed software breakpoint event.  Ignore the trap.  */
	  infrun_debug_printf ("delayed software breakpoint trap, ignoring");
	  random_signal = 0;
	}
    }

  /* Maybe this was a trap for a hardware breakpoint/watchpoint that
     has since been removed.  */
  if (random_signal && target_stopped_by_hw_breakpoint ())
    {
      /* A delayed hardware breakpoint event.  Ignore the trap.  */
      infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
			   "trap, ignoring");
      random_signal = 0;
    }

  /* If not, perhaps stepping/nexting can.  */
  if (random_signal)
    random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
		      && currently_stepping (ecs->event_thread));

  /* Perhaps the thread hit a single-step breakpoint of _another_
     thread.  Single-step breakpoints are transparent to the
     breakpoints module.  */
  if (random_signal)
    random_signal = !ecs->hit_singlestep_breakpoint;

  /* No?  Perhaps we got a moribund watchpoint.  */
  if (random_signal)
    random_signal = !stopped_by_watchpoint;

  /* Always stop if the user explicitly requested this thread to
     remain stopped.  */
  if (ecs->event_thread->stop_requested)
    {
      random_signal = 1;
      infrun_debug_printf ("user-requested stop");
    }

  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (random_signal)
    {
      /* Signal not for debugging purposes.  */
      enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;

      infrun_debug_printf ("random signal (%s)",
			   gdb_signal_to_symbol_string (stop_signal));

      stopped_by_random_signal = 1;

      /* Always stop on signals if we're either just gaining control
	 of the program, or the user explicitly requested this thread
	 to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
	  || ecs->event_thread->stop_requested
	  || signal_stop_state (ecs->event_thread->suspend.stop_signal))
	{
	  stop_waiting (ecs);
	  return;
	}

      /* Notify observers the signal has "handle print" set.  Note we
	 returned early above if stopping; normal_stop handles the
	 printing in that case.  */
      if (signal_print[ecs->event_thread->suspend.stop_signal])
	{
	  /* The signal table tells us to print about this signal.  */
	  target_terminal::ours_for_output ();
	  gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
	  target_terminal::inferior ();
	}

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
	  && ecs->event_thread->control.trap_expected
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* We were just starting a new sequence, attempting to
	     single-step off of a breakpoint and expecting a SIGTRAP.
	     Instead this signal arrives.  This signal will take us out
	     of the stepping range so GDB needs to remember to, when
	     the signal handler returns, resume stepping off that
	     breakpoint.  */
	  /* To simplify things, "continue" is forced to use the same
	     code paths as single-step - set a breakpoint at the
	     signal return address and then, once hit, step off that
	     breakpoint.  */
	  infrun_debug_printf ("signal arrived while stepping over breakpoint");

	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;

	  /* If we were nexting/stepping some other thread, switch to
	     it, so that we don't continue it, losing control.  */
	  if (!switch_back_to_stepped_thread (ecs))
	    keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
	  && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
				       ecs->event_thread)
	      || ecs->event_thread->control.step_range_end == 1)
	  && frame_id_eq (get_stack_frame_id (frame),
			  ecs->event_thread->control.step_stack_frame_id)
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* The inferior is about to take a signal that will take it
	     out of the single step range.  Set a breakpoint at the
	     current PC (which is presumably where the signal handler
	     will eventually return) and then allow the inferior to
	     run free.

	     Note that this is only needed for a signal delivered
	     while in the single-step range.  Nested signals aren't a
	     problem as they eventually all return.  */
	  infrun_debug_printf ("signal may take us out of single-step range");

	  clear_step_over_info ();
	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;
	  keep_going (ecs);
	  return;
	}

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
	 when either there's a nested signal, or when there's a
	 pending signal enabled just as the signal handler returns
	 (leaving the inferior at the step-resume-breakpoint without
	 actually executing it).  Either way continue until the
	 breakpoint is really hit.  */

      if (!switch_back_to_stepped_thread (ecs))
	{
	  infrun_debug_printf ("random signal, keep going");

	  keep_going (ecs);
	}
      return;
    }

  /* The signal is explained by something GDB did; decide whether it
     should actually cause a user-visible stop.  */
  process_event_stop_test (ecs);
}
6482
6483 /* Come here when we've got some debug event / signal we can explain
6484 (IOW, not a random signal), and test whether it should cause a
6485 stop, or whether we should resume the inferior (transparently).
6486 E.g., could be a breakpoint whose condition evaluates false; we
6487 could be still stepping within the line; etc. */
6488
static void
process_event_stop_test (struct execution_control_state *ecs)
{
  struct symtab_and_line stop_pc_sal;
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  CORE_ADDR jmp_buf_pc;
  struct bpstat_what what;

  /* Handle cases caused by hitting a breakpoint.  */

  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  what = bpstat_what (ecs->event_thread->control.stop_bpstat);

  if (what.call_dummy)
    {
      /* Record that this stop involves a call-dummy frame; consumed
	 later by the stop machinery outside this function.  */
      stop_stack_dummy = what.call_dummy;
    }

  /* A few breakpoint types have callbacks associated (e.g.,
     bp_jit_event).  Run them now.  */
  bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);

  /* If we hit an internal event that triggers symbol changes, the
     current frame will be invalidated within bpstat_what (e.g., if we
     hit an internal solib event).  Re-fetch it.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  switch (what.main_action)
    {
    case BPSTAT_WHAT_SET_LONGJMP_RESUME:
      /* If we hit the breakpoint at longjmp while stepping, we
	 install a momentary breakpoint at the target of the
	 jmp_buf.  */

      infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");

      ecs->event_thread->stepping_over_breakpoint = 1;

      if (what.is_longjmp)
	{
	  struct value *arg_value;

	  /* If we set the longjmp breakpoint via a SystemTap probe,
	     then use it to extract the arguments.  The destination PC
	     is the third argument to the probe.  */
	  arg_value = probe_safe_evaluate_at_pc (frame, 2);
	  if (arg_value)
	    {
	      jmp_buf_pc = value_as_address (arg_value);
	      jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
	    }
	  else if (!gdbarch_get_longjmp_target_p (gdbarch)
		   || !gdbarch_get_longjmp_target (gdbarch,
						   frame, &jmp_buf_pc))
	    {
	      /* No way to determine the longjmp destination; just
		 resume and let execution continue normally.  */
	      infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
				   "(!gdbarch_get_longjmp_target)");
	      keep_going (ecs);
	      return;
	    }

	  /* Insert a breakpoint at resume address.  */
	  insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
	}
      else
	check_exception_resume (ecs, frame);
      keep_going (ecs);
      return;

    case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
      {
	struct frame_info *init_frame;

	/* There are several cases to consider.

	   1. The initiating frame no longer exists.  In this case we
	   must stop, because the exception or longjmp has gone too
	   far.

	   2. The initiating frame exists, and is the same as the
	   current frame.  We stop, because the exception or longjmp
	   has been caught.

	   3. The initiating frame exists and is different from the
	   current frame.  This means the exception or longjmp has
	   been caught beneath the initiating frame, so keep going.

	   4. longjmp breakpoint has been placed just to protect
	   against stale dummy frames and user is not interested in
	   stopping around longjmps.  */

	infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");

	gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
		    != NULL);
	delete_exception_resume_breakpoint (ecs->event_thread);

	if (what.is_longjmp)
	  {
	    check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);

	    if (!frame_id_p (ecs->event_thread->initiating_frame))
	      {
		/* Case 4.  */
		keep_going (ecs);
		return;
	      }
	  }

	init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);

	if (init_frame)
	  {
	    struct frame_id current_id
	      = get_frame_id (get_current_frame ());
	    if (frame_id_eq (current_id,
			     ecs->event_thread->initiating_frame))
	      {
		/* Case 2.  Fall through.  */
	      }
	    else
	      {
		/* Case 3.  */
		keep_going (ecs);
		return;
	      }
	  }

	/* For Cases 1 and 2, remove the step-resume breakpoint, if it
	   exists.  */
	delete_step_resume_breakpoint (ecs->event_thread);

	end_stepping_range (ecs);
      }
      return;

    case BPSTAT_WHAT_SINGLE:
      infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
      ecs->event_thread->stepping_over_breakpoint = 1;
      /* Still need to check other stuff, at least the case where we
	 are stepping and step out of the right range.  */
      break;

    case BPSTAT_WHAT_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->control.proceed_to_finish
	  && execution_direction == EXEC_REVERSE)
	{
	  struct thread_info *tp = ecs->event_thread;

	  /* We are finishing a function in reverse, and just hit the
	     step-resume breakpoint at the start address of the
	     function, and we're almost there -- just need to back up
	     by one more single-step, which should take us back to the
	     function call.  */
	  tp->control.step_range_start = tp->control.step_range_end = 1;
	  keep_going (ecs);
	  return;
	}
      fill_in_stop_func (gdbarch, ecs);
      if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	{
	  /* We are stepping over a function call in reverse, and just
	     hit the step-resume breakpoint at the start address of
	     the function.  Go back to single-stepping, which should
	     take us back to the function call.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_STOP_NOISY:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
      stop_print_frame = true;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;

      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_STOP_SILENT:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
      stop_print_frame = false;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;
      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_HP_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->step_after_step_resume_breakpoint)
	{
	  /* Back when the step-resume breakpoint was inserted, we
	     were trying to single-step off a breakpoint.  Go back to
	     doing that.  */
	  ecs->event_thread->step_after_step_resume_breakpoint = 0;
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_KEEP_CHECKING:
      break;
    }

  /* If we stepped a permanent breakpoint and we had a high priority
     step-resume breakpoint for the address we stepped, but we didn't
     hit it, then we must have stepped into the signal handler.  The
     step-resume was only necessary to catch the case of _not_
     stepping into the handler, so delete it, and fall through to
     checking whether the step finished.  */
  if (ecs->event_thread->stepped_breakpoint)
    {
      struct breakpoint *sr_bp
	= ecs->event_thread->control.step_resume_breakpoint;

      if (sr_bp != NULL
	  && sr_bp->loc->permanent
	  && sr_bp->type == bp_hp_step_resume
	  && sr_bp->loc->address == ecs->event_thread->prev_pc)
	{
	  infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
	  delete_step_resume_breakpoint (ecs->event_thread);
	  ecs->event_thread->step_after_step_resume_breakpoint = 0;
	}
    }

  /* We come here if we hit a breakpoint but should not stop for it.
     Possibly we also were stepping and should stop for that.  So fall
     through and test for stepping.  But, if not stepping, do not
     stop.  */

  /* In all-stop mode, if we're currently stepping but have stopped in
     some other thread, we need to switch back to the stepped thread.  */
  if (switch_back_to_stepped_thread (ecs))
    return;

  if (ecs->event_thread->control.step_resume_breakpoint)
    {
      infrun_debug_printf ("step-resume breakpoint is inserted");

      /* Having a step-resume breakpoint overrides anything
	 else having to do with stepping commands until
	 that breakpoint is reached.  */
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end == 0)
    {
      infrun_debug_printf ("no stepping, continue");
      /* Likewise if we aren't even stepping.  */
      keep_going (ecs);
      return;
    }

  /* Re-fetch current thread's frame in case the code above caused
     the frame cache to be re-initialized, making our FRAME variable
     a dangling pointer.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);
  fill_in_stop_func (gdbarch, ecs);

  /* If stepping through a line, keep going if still within it.

     Note that step_range_end is the address of the first instruction
     beyond the step range, and NOT the address of the last instruction
     within it!

     Note also that during reverse execution, we may be stepping
     through a function epilogue and therefore must detect when
     the current-frame changes in the middle of a line.  */

  if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
			       ecs->event_thread)
      && (execution_direction != EXEC_REVERSE
	  || frame_id_eq (get_frame_id (frame),
			  ecs->event_thread->control.step_frame_id)))
    {
      infrun_debug_printf
	("stepping inside range [%s-%s]",
	 paddress (gdbarch, ecs->event_thread->control.step_range_start),
	 paddress (gdbarch, ecs->event_thread->control.step_range_end));

      /* Tentatively re-enable range stepping; `resume' disables it if
	 necessary (e.g., if we're stepping over a breakpoint or we
	 have software watchpoints).  */
      ecs->event_thread->control.may_range_step = 1;

      /* When stepping backward, stop at beginning of line range
	 (unless it's the function entry point, in which case
	 keep going back to the call point).  */
      CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
      if (stop_pc == ecs->event_thread->control.step_range_start
	  && stop_pc != ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	end_stepping_range (ecs);
      else
	keep_going (ecs);

      return;
    }

  /* We stepped out of the stepping range.  */

  /* If we are stepping at the source level and entered the runtime
     loader dynamic symbol resolution code...

     EXEC_FORWARD: we keep on single stepping until we exit the run
     time loader code and reach the callee's address.

     EXEC_REVERSE: we've already executed the callee (backward), and
     the runtime loader code is handled just like any other
     undebuggable function call.  Now we need only keep stepping
     backward through the trampoline code, and that's handled further
     down, so there is nothing for us to do here.  */

  if (execution_direction != EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
    {
      CORE_ADDR pc_after_resolver =
	gdbarch_skip_solib_resolver (gdbarch,
				     ecs->event_thread->suspend.stop_pc);

      infrun_debug_printf ("stepped into dynsym resolve code");

      if (pc_after_resolver)
	{
	  /* Set up a step-resume breakpoint at the address
	     indicated by SKIP_SOLIB_RESOLVER.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = pc_after_resolver;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	}

      keep_going (ecs);
      return;
    }

  /* Step through an indirect branch thunk.  */
  if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
      && gdbarch_in_indirect_branch_thunk (gdbarch,
					   ecs->event_thread->suspend.stop_pc))
    {
      infrun_debug_printf ("stepped into indirect branch thunk");
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end != 1
      && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
      && get_frame_type (frame) == SIGTRAMP_FRAME)
    {
      infrun_debug_printf ("stepped into signal trampoline");
      /* The inferior, while doing a "step" or "next", has ended up in
	 a signal trampoline (either by a signal being delivered or by
	 the signal handler returning).  Just single-step until the
	 inferior leaves the trampoline (either by calling the handler
	 or returning).  */
      keep_going (ecs);
      return;
    }

  /* If we're in the return path from a shared library trampoline,
     we want to proceed through the trampoline when stepping.  */
  /* macro/2012-04-25: This needs to come before the subroutine
     call check below as on some targets return trampolines look
     like subroutine calls (MIPS16 return thunks).  */
  if (gdbarch_in_solib_return_trampoline (gdbarch,
					  ecs->event_thread->suspend.stop_pc,
					  ecs->stop_func_name)
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      /* Determine where this trampoline returns.  */
      CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
      CORE_ADDR real_stop_pc
	= gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);

      infrun_debug_printf ("stepped into solib return tramp");

      /* Only proceed through if we know where it's going.  */
      if (real_stop_pc)
	{
	  /* And put the step-breakpoint there and go until there.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = real_stop_pc;
	  sr_sal.section = find_pc_overlay (sr_sal.pc);
	  sr_sal.pspace = get_frame_program_space (frame);

	  /* Do not specify what the fp should be when we stop since
	     on some machines the prologue is where the new fp value
	     is established.  */
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);

	  /* Restart without fiddling with the step ranges or
	     other state.  */
	  keep_going (ecs);
	  return;
	}
    }

  /* Check for subroutine calls.  The check for the current frame
     equalling the step ID is not necessary - the check of the
     previous frame's ID is sufficient - but it is a common case and
     cheaper than checking the previous frame's ID.

     NOTE: frame_id_eq will never report two invalid frame IDs as
     being equal, so to get into this block, both the current and
     previous frame must have valid frame IDs.  */
  /* The outer_frame_id check is a heuristic to detect stepping
     through startup code.  If we step over an instruction which
     sets the stack pointer from an invalid value to a valid value,
     we may detect that as a subroutine call from the mythical
     "outermost" function.  This could be fixed by marking
     outermost frames as !stack_p,code_p,special_p.  Then the
     initial outermost frame, before sp was valid, would
     have code_addr == &_start.  See the comment in frame_id_eq
     for more.  */
  if (!frame_id_eq (get_stack_frame_id (frame),
		    ecs->event_thread->control.step_stack_frame_id)
      && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
		       ecs->event_thread->control.step_stack_frame_id)
	  && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
			    outer_frame_id)
	      || (ecs->event_thread->control.step_start_function
		  != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
    {
      CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
      CORE_ADDR real_stop_pc;

      infrun_debug_printf ("stepped into subroutine");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
	{
	  /* I presume that step_over_calls is only 0 when we're
	     supposed to be stepping at the assembly language level
	     ("stepi").  Just stop.  */
	  /* And this works the same backward as frontward.  MVS */
	  end_stepping_range (ecs);
	  return;
	}

      /* Reverse stepping through solib trampolines.  */

      if (execution_direction == EXEC_REVERSE
	  && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
	  && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	      || (ecs->stop_func_start == 0
		  && in_solib_dynsym_resolve_code (stop_pc))))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
	{
	  /* We're doing a "next".

	     Normal (forward) execution: set a breakpoint at the
	     callee's return address (the address at which the caller
	     will resume).

	     Reverse (backward) execution.  set the step-resume
	     breakpoint at the start of the function that we just
	     stepped into (backwards), and continue to there.  When we
	     get there, we'll need to single-step back to the caller.  */

	  if (execution_direction == EXEC_REVERSE)
	    {
	      /* If we're already at the start of the function, we've either
		 just stepped backward into a single instruction function,
		 or stepped back out of a signal handler to the first instruction
		 of the function.  Just keep going, which will single-step back
		 to the caller.  */
	      if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
		{
		  /* Normal function call return (static or dynamic).  */
		  symtab_and_line sr_sal;
		  sr_sal.pc = ecs->stop_func_start;
		  sr_sal.pspace = get_frame_program_space (frame);
		  insert_step_resume_breakpoint_at_sal (gdbarch,
							sr_sal, null_frame_id);
		}
	    }
	  else
	    insert_step_resume_breakpoint_at_caller (frame);

	  keep_going (ecs);
	  return;
	}

      /* If we are in a function call trampoline (a stub between the
	 calling routine and the real function), locate the real
	 function.  That's what tells us (a) whether we want to step
	 into it at all, and (b) what prologue we want to run to the
	 end of, if we do step into it.  */
      real_stop_pc = skip_language_trampoline (frame, stop_pc);
      if (real_stop_pc == 0)
	real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
      if (real_stop_pc != 0)
	ecs->stop_func_start = real_stop_pc;

      if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
	{
	  symtab_and_line sr_sal;
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}

      /* If we have line number information for the function we are
	 thinking of stepping into and the function isn't on the skip
	 list, step into it.

	 If there are several symtabs at that PC (e.g. with include
	 files), just want to know whether *any* of them have line
	 numbers.  find_pc_line handles this.  */
      {
	struct symtab_and_line tmp_sal;

	tmp_sal = find_pc_line (ecs->stop_func_start, 0);
	if (tmp_sal.line != 0
	    && !function_name_is_marked_for_skip (ecs->stop_func_name,
						  tmp_sal)
	    && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
	  {
	    if (execution_direction == EXEC_REVERSE)
	      handle_step_into_function_backward (gdbarch, ecs);
	    else
	      handle_step_into_function (gdbarch, ecs);
	    return;
	  }
      }

      /* If we have no line number and the step-stop-if-no-debug is
	 set, we stop the step so that the user has a chance to switch
	 in assembly mode.  */
      if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  && step_stop_if_no_debug)
	{
	  end_stepping_range (ecs);
	  return;
	}

      if (execution_direction == EXEC_REVERSE)
	{
	  /* If we're already at the start of the function, we've either just
	     stepped backward into a single instruction function without line
	     number info, or stepped back out of a signal handler to the first
	     instruction of the function without line number info.  Just keep
	     going, which will single-step back to the caller.  */
	  if (ecs->stop_func_start != stop_pc)
	    {
	      /* Set a breakpoint at callee's start address.
		 From there we can step once and be back in the caller.  */
	      symtab_and_line sr_sal;
	      sr_sal.pc = ecs->stop_func_start;
	      sr_sal.pspace = get_frame_program_space (frame);
	      insert_step_resume_breakpoint_at_sal (gdbarch,
						    sr_sal, null_frame_id);
	    }
	}
      else
	/* Set a breakpoint at callee's return address (the address
	   at which the caller will resume).  */
	insert_step_resume_breakpoint_at_caller (frame);

      keep_going (ecs);
      return;
    }

  /* Reverse stepping through solib trampolines.  */

  if (execution_direction == EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;

      if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	  || (ecs->stop_func_start == 0
	      && in_solib_dynsym_resolve_code (stop_pc)))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}
      else if (in_solib_dynsym_resolve_code (stop_pc))
	{
	  /* Stepped backward into the solib dynsym resolver.
	     Set a breakpoint at its start and continue, then
	     one more step will take us out.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}
    }

  /* This always returns the sal for the inner-most frame when we are in a
     stack of inlined frames, even if GDB actually believes that it is in a
     more outer frame.  This is checked for below by calls to
     inline_skipped_frames.  */
  stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);

  /* NOTE: tausq/2004-05-24: This if block used to be done before all
     the trampoline processing logic, however, there are some trampolines
     that have no names, so we should do trampoline handling first.  */
  if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && ecs->stop_func_name == NULL
      && stop_pc_sal.line == 0)
    {
      infrun_debug_printf ("stepped into undebuggable function");

      /* The inferior just stepped into, or returned to, an
	 undebuggable function (where there is no debugging information
	 and no line number corresponding to the address where the
	 inferior stopped).  Since we want to skip this kind of code,
	 we keep going until the inferior returns from this
	 function - unless the user has asked us not to (via
	 set step-mode) or we no longer know how to get back
	 to the call site.  */
      if (step_stop_if_no_debug
	  || !frame_id_p (frame_unwind_caller_id (frame)))
	{
	  /* If we have no line number and the step-stop-if-no-debug
	     is set, we stop the step so that the user has a chance to
	     switch in assembly mode.  */
	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* Set a breakpoint at callee's return address (the address
	     at which the caller will resume).  */
	  insert_step_resume_breakpoint_at_caller (frame);
	  keep_going (ecs);
	  return;
	}
    }

  if (ecs->event_thread->control.step_range_end == 1)
    {
      /* It is stepi or nexti.  We always want to stop stepping after
	 one instruction.  */
      infrun_debug_printf ("stepi/nexti");
      end_stepping_range (ecs);
      return;
    }

  if (stop_pc_sal.line == 0)
    {
      /* We have no line number information.  That means to stop
	 stepping (does this always happen right after one instruction,
	 when we do "s" in a function with no line numbers,
	 or can this happen as a result of a return or longjmp?).  */
      infrun_debug_printf ("line number info");
      end_stepping_range (ecs);
      return;
    }

  /* Look for "calls" to inlined functions, part one.  If the inline
     frame machinery detected some skipped call sites, we have entered
     a new inline function.  */

  if (frame_id_eq (get_frame_id (get_current_frame ()),
		   ecs->event_thread->control.step_frame_id)
      && inline_skipped_frames (ecs->event_thread))
    {
      infrun_debug_printf ("stepped into inlined function");

      symtab_and_line call_sal = find_frame_sal (get_current_frame ());

      if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
	{
	  /* For "step", we're going to stop.  But if the call site
	     for this inlined function is on the same source line as
	     we were previously stepping, go down into the function
	     first.  Otherwise stop at the call site.  */

	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    {
	      step_into_inline_frame (ecs->event_thread);
	      if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
		{
		  keep_going (ecs);
		  return;
		}
	    }

	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* For "next", we should stop at the call site if it is on a
	     different source line.  Otherwise continue through the
	     inlined function.  */
	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    keep_going (ecs);
	  else
	    end_stepping_range (ecs);
	  return;
	}
    }

  /* Look for "calls" to inlined functions, part two.  If we are still
     in the same real function we were stepping through, but we have
     to go further up to find the exact frame ID, we are stepping
     through a more inlined call beyond its call site.  */

  if (get_frame_type (get_current_frame ()) == INLINE_FRAME
      && !frame_id_eq (get_frame_id (get_current_frame ()),
		       ecs->event_thread->control.step_frame_id)
      && stepped_in_from (get_current_frame (),
			  ecs->event_thread->control.step_frame_id))
    {
      infrun_debug_printf ("stepping through inlined function");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
	  || inline_frame_is_marked_for_skip (false, ecs->event_thread))
	keep_going (ecs);
      else
	end_stepping_range (ecs);
      return;
    }

  bool refresh_step_info = true;
  if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
      && (ecs->event_thread->current_line != stop_pc_sal.line
	  || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
    {
      /* We are at a different line.  */

      if (stop_pc_sal.is_stmt)
	{
	  /* We are at the start of a statement.

	     So stop.  Note that we don't stop if we step into the middle of a
	     statement.  That is said to make things like for (;;) statements
	     work better.  */
	  infrun_debug_printf ("stepped to a different line");
	  end_stepping_range (ecs);
	  return;
	}
      else if (frame_id_eq (get_frame_id (get_current_frame ()),
			    ecs->event_thread->control.step_frame_id))
	{
	  /* We are not at the start of a statement, and we have not changed
	     frame.

	     We ignore this line table entry, and continue stepping forward,
	     looking for a better place to stop.  */
	  refresh_step_info = false;
	  infrun_debug_printf ("stepped to a different line, but "
			       "it's not the start of a statement");
	}
      else
	{
	  /* We are not the start of a statement, and we have changed frame.

	     We ignore this line table entry, and continue stepping forward,
	     looking for a better place to stop.  Keep refresh_step_info at
	     true to note that the frame has changed, but ignore the line
	     number to make sure we don't ignore a subsequent entry with the
	     same line number.  */
	  stop_pc_sal.line = 0;
	  infrun_debug_printf ("stepped to a different frame, but "
			       "it's not the start of a statement");
	}
    }

  /* We aren't done stepping.

     Optimize by setting the stepping range to the line.
     (We might not be in the original line, but if we entered a
     new line in mid-statement, we continue stepping.  This makes
     things like for(;;) statements work better.)

     If we entered a SAL that indicates a non-statement line table entry,
     then we update the stepping range, but we don't update the step info,
     which includes things like the line number we are stepping away from.
     This means we will stop when we find a line table entry that is marked
     as is-statement, even if it matches the non-statement one we just
     stepped into.  */

  ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
  ecs->event_thread->control.step_range_end = stop_pc_sal.end;
  ecs->event_thread->control.may_range_step = 1;
  if (refresh_step_info)
    set_step_info (ecs->event_thread, frame, stop_pc_sal);

  infrun_debug_printf ("keep going");
  keep_going (ecs);
}
7324
7325 static bool restart_stepped_thread (process_stratum_target *resume_target,
7326 ptid_t resume_ptid);
7327
7328 /* In all-stop mode, if we're currently stepping but have stopped in
7329 some other thread, we may need to switch back to the stepped
7330 thread. Returns true we set the inferior running, false if we left
7331 it stopped (and the event needs further processing). */
7332
7333 static bool
7334 switch_back_to_stepped_thread (struct execution_control_state *ecs)
7335 {
7336 if (!target_is_non_stop_p ())
7337 {
7338 /* If any thread is blocked on some internal breakpoint, and we
7339 simply need to step over that breakpoint to get it going
7340 again, do that first. */
7341
7342 /* However, if we see an event for the stepping thread, then we
7343 know all other threads have been moved past their breakpoints
7344 already. Let the caller check whether the step is finished,
7345 etc., before deciding to move it past a breakpoint. */
7346 if (ecs->event_thread->control.step_range_end != 0)
7347 return false;
7348
7349 /* Check if the current thread is blocked on an incomplete
7350 step-over, interrupted by a random signal. */
7351 if (ecs->event_thread->control.trap_expected
7352 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
7353 {
7354 infrun_debug_printf
7355 ("need to finish step-over of [%s]",
7356 target_pid_to_str (ecs->event_thread->ptid).c_str ());
7357 keep_going (ecs);
7358 return true;
7359 }
7360
7361 /* Check if the current thread is blocked by a single-step
7362 breakpoint of another thread. */
7363 if (ecs->hit_singlestep_breakpoint)
7364 {
7365 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7366 target_pid_to_str (ecs->ptid).c_str ());
7367 keep_going (ecs);
7368 return true;
7369 }
7370
7371 /* If this thread needs yet another step-over (e.g., stepping
7372 through a delay slot), do it first before moving on to
7373 another thread. */
7374 if (thread_still_needs_step_over (ecs->event_thread))
7375 {
7376 infrun_debug_printf
7377 ("thread [%s] still needs step-over",
7378 target_pid_to_str (ecs->event_thread->ptid).c_str ());
7379 keep_going (ecs);
7380 return true;
7381 }
7382
7383 /* If scheduler locking applies even if not stepping, there's no
7384 need to walk over threads. Above we've checked whether the
7385 current thread is stepping. If some other thread not the
7386 event thread is stepping, then it must be that scheduler
7387 locking is not in effect. */
7388 if (schedlock_applies (ecs->event_thread))
7389 return false;
7390
7391 /* Otherwise, we no longer expect a trap in the current thread.
7392 Clear the trap_expected flag before switching back -- this is
7393 what keep_going does as well, if we call it. */
7394 ecs->event_thread->control.trap_expected = 0;
7395
7396 /* Likewise, clear the signal if it should not be passed. */
7397 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7398 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7399
7400 if (restart_stepped_thread (ecs->target, ecs->ptid))
7401 {
7402 prepare_to_wait (ecs);
7403 return true;
7404 }
7405
7406 switch_to_thread (ecs->event_thread);
7407 }
7408
7409 return false;
7410 }
7411
7412 /* Look for the thread that was stepping, and resume it.
7413 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7414 is resuming. Return true if a thread was started, false
7415 otherwise. */
7416
7417 static bool
7418 restart_stepped_thread (process_stratum_target *resume_target,
7419 ptid_t resume_ptid)
7420 {
7421 /* Do all pending step-overs before actually proceeding with
7422 step/next/etc. */
7423 if (start_step_over ())
7424 return true;
7425
7426 for (thread_info *tp : all_threads_safe ())
7427 {
7428 if (tp->state == THREAD_EXITED)
7429 continue;
7430
7431 if (tp->suspend.waitstatus_pending_p)
7432 continue;
7433
7434 /* Ignore threads of processes the caller is not
7435 resuming. */
7436 if (!sched_multi
7437 && (tp->inf->process_target () != resume_target
7438 || tp->inf->pid != resume_ptid.pid ()))
7439 continue;
7440
7441 if (tp->control.trap_expected)
7442 {
7443 infrun_debug_printf ("switching back to stepped thread (step-over)");
7444
7445 if (keep_going_stepped_thread (tp))
7446 return true;
7447 }
7448 }
7449
7450 for (thread_info *tp : all_threads_safe ())
7451 {
7452 if (tp->state == THREAD_EXITED)
7453 continue;
7454
7455 if (tp->suspend.waitstatus_pending_p)
7456 continue;
7457
7458 /* Ignore threads of processes the caller is not
7459 resuming. */
7460 if (!sched_multi
7461 && (tp->inf->process_target () != resume_target
7462 || tp->inf->pid != resume_ptid.pid ()))
7463 continue;
7464
7465 /* Did we find the stepping thread? */
7466 if (tp->control.step_range_end)
7467 {
7468 infrun_debug_printf ("switching back to stepped thread (stepping)");
7469
7470 if (keep_going_stepped_thread (tp))
7471 return true;
7472 }
7473 }
7474
7475 return false;
7476 }
7477
7478 /* See infrun.h. */
7479
7480 void
7481 restart_after_all_stop_detach (process_stratum_target *proc_target)
7482 {
7483 /* Note we don't check target_is_non_stop_p() here, because the
7484 current inferior may no longer have a process_stratum target
7485 pushed, as we just detached. */
7486
7487 /* See if we have a THREAD_RUNNING thread that need to be
7488 re-resumed. If we have any thread that is already executing,
7489 then we don't need to resume the target -- it is already been
7490 resumed. With the remote target (in all-stop), it's even
7491 impossible to issue another resumption if the target is already
7492 resumed, until the target reports a stop. */
7493 for (thread_info *thr : all_threads (proc_target))
7494 {
7495 if (thr->state != THREAD_RUNNING)
7496 continue;
7497
7498 /* If we have any thread that is already executing, then we
7499 don't need to resume the target -- it is already been
7500 resumed. */
7501 if (thr->executing)
7502 return;
7503
7504 /* If we have a pending event to process, skip resuming the
7505 target and go straight to processing it. */
7506 if (thr->resumed && thr->suspend.waitstatus_pending_p)
7507 return;
7508 }
7509
7510 /* Alright, we need to re-resume the target. If a thread was
7511 stepping, we need to restart it stepping. */
7512 if (restart_stepped_thread (proc_target, minus_one_ptid))
7513 return;
7514
7515 /* Otherwise, find the first THREAD_RUNNING thread and resume
7516 it. */
7517 for (thread_info *thr : all_threads (proc_target))
7518 {
7519 if (thr->state != THREAD_RUNNING)
7520 continue;
7521
7522 execution_control_state ecs;
7523 reset_ecs (&ecs, thr);
7524 switch_to_thread (thr);
7525 keep_going (&ecs);
7526 return;
7527 }
7528 }
7529
7530 /* Set a previously stepped thread back to stepping. Returns true on
7531 success, false if the resume is not possible (e.g., the thread
7532 vanished). */
7533
static bool
keep_going_stepped_thread (struct thread_info *tp)
{
  struct frame_info *frame;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      infrun_debug_printf ("not resuming previously stepped thread, it has "
			   "vanished");

      delete_thread (tp);
      return false;
    }

  infrun_debug_printf ("resuming previously stepped thread");

  /* Make TP the event/current thread before poking at its regcache
     and frames below.  */
  reset_ecs (ecs, tp);
  switch_to_thread (tp);

  tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
       expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->suspend.stop_pc != tp->prev_pc)
    {
      ptid_t resume_ptid;

      infrun_debug_printf ("expected thread advanced also (%s -> %s)",
			   paddress (target_gdbarch (), tp->prev_pc),
			   paddress (target_gdbarch (), tp->suspend.stop_pc));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->suspend.stop_pc);

      tp->resumed = true;
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
    }
  else
    {
      infrun_debug_printf ("expected thread still hasn't advanced");

      /* The thread hasn't moved; resume it the normal way,
	 re-establishing any pending step-over bookkeeping.  */
      keep_going_pass_signal (ecs);
    }

  return true;
}
7624
7625 /* Is thread TP in the middle of (software or hardware)
7626 single-stepping? (Note the result of this function must never be
7627 passed directly as target_resume's STEP parameter.) */
7628
7629 static bool
7630 currently_stepping (struct thread_info *tp)
7631 {
7632 return ((tp->control.step_range_end
7633 && tp->control.step_resume_breakpoint == NULL)
7634 || tp->control.trap_expected
7635 || tp->stepped_breakpoint
7636 || bpstat_should_step ());
7637 }
7638
7639 /* Inferior has stepped into a subroutine call with source code that
7640 we should not step over. Do step to the first line of code in
7641 it. */
7642
7643 static void
7644 handle_step_into_function (struct gdbarch *gdbarch,
7645 struct execution_control_state *ecs)
7646 {
7647 fill_in_stop_func (gdbarch, ecs);
7648
7649 compunit_symtab *cust
7650 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
7651 if (cust != NULL && compunit_language (cust) != language_asm)
7652 ecs->stop_func_start
7653 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
7654
7655 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
7656 /* Use the step_resume_break to step until the end of the prologue,
7657 even if that involves jumps (as it seems to on the vax under
7658 4.2). */
7659 /* If the prologue ends in the middle of a source line, continue to
7660 the end of that source line (if it is still within the function).
7661 Otherwise, just go to end of prologue. */
7662 if (stop_func_sal.end
7663 && stop_func_sal.pc != ecs->stop_func_start
7664 && stop_func_sal.end < ecs->stop_func_end)
7665 ecs->stop_func_start = stop_func_sal.end;
7666
7667 /* Architectures which require breakpoint adjustment might not be able
7668 to place a breakpoint at the computed address. If so, the test
7669 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7670 ecs->stop_func_start to an address at which a breakpoint may be
7671 legitimately placed.
7672
7673 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7674 made, GDB will enter an infinite loop when stepping through
7675 optimized code consisting of VLIW instructions which contain
7676 subinstructions corresponding to different source lines. On
7677 FR-V, it's not permitted to place a breakpoint on any but the
7678 first subinstruction of a VLIW instruction. When a breakpoint is
7679 set, GDB will adjust the breakpoint address to the beginning of
7680 the VLIW instruction. Thus, we need to make the corresponding
7681 adjustment here when computing the stop address. */
7682
7683 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
7684 {
7685 ecs->stop_func_start
7686 = gdbarch_adjust_breakpoint_address (gdbarch,
7687 ecs->stop_func_start);
7688 }
7689
7690 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
7691 {
7692 /* We are already there: stop now. */
7693 end_stepping_range (ecs);
7694 return;
7695 }
7696 else
7697 {
7698 /* Put the step-breakpoint there and go until there. */
7699 symtab_and_line sr_sal;
7700 sr_sal.pc = ecs->stop_func_start;
7701 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
7702 sr_sal.pspace = get_frame_program_space (get_current_frame ());
7703
7704 /* Do not specify what the fp should be when we stop since on
7705 some machines the prologue is where the new fp value is
7706 established. */
7707 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
7708
7709 /* And make sure stepping stops right away then. */
7710 ecs->event_thread->control.step_range_end
7711 = ecs->event_thread->control.step_range_start;
7712 }
7713 keep_going (ecs);
7714 }
7715
7716 /* Inferior has stepped backward into a subroutine call with source
7717 code that we should not step over. Do step to the beginning of the
7718 last line of code in it. */
7719
7720 static void
7721 handle_step_into_function_backward (struct gdbarch *gdbarch,
7722 struct execution_control_state *ecs)
7723 {
7724 struct compunit_symtab *cust;
7725 struct symtab_and_line stop_func_sal;
7726
7727 fill_in_stop_func (gdbarch, ecs);
7728
7729 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
7730 if (cust != NULL && compunit_language (cust) != language_asm)
7731 ecs->stop_func_start
7732 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
7733
7734 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7735
7736 /* OK, we're just going to keep stepping here. */
7737 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
7738 {
7739 /* We're there already. Just stop stepping now. */
7740 end_stepping_range (ecs);
7741 }
7742 else
7743 {
7744 /* Else just reset the step range and keep going.
7745 No step-resume breakpoint, they don't work for
7746 epilogues, which can have multiple entry paths. */
7747 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7748 ecs->event_thread->control.step_range_end = stop_func_sal.end;
7749 keep_going (ecs);
7750 }
7751 return;
7752 }
7753
/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both when stepping through functions and to skip over
   spans of code.  */
7756
7757 static void
7758 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7759 struct symtab_and_line sr_sal,
7760 struct frame_id sr_id,
7761 enum bptype sr_type)
7762 {
7763 /* There should never be more than one step-resume or longjmp-resume
7764 breakpoint per thread, so we should never be setting a new
7765 step_resume_breakpoint when one is already active. */
7766 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
7767 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
7768
7769 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7770 paddress (gdbarch, sr_sal.pc));
7771
7772 inferior_thread ()->control.step_resume_breakpoint
7773 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
7774 }
7775
7776 void
7777 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7778 struct symtab_and_line sr_sal,
7779 struct frame_id sr_id)
7780 {
7781 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7782 sr_sal, sr_id,
7783 bp_step_resume);
7784 }
7785
7786 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7787 This is used to skip a potential signal handler.
7788
7789 This is called with the interrupted function's frame. The signal
7790 handler, when it returns, will resume the interrupted function at
7791 RETURN_FRAME.pc. */
7792
7793 static void
7794 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
7795 {
7796 gdb_assert (return_frame != NULL);
7797
7798 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7799
7800 symtab_and_line sr_sal;
7801 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
7802 sr_sal.section = find_pc_overlay (sr_sal.pc);
7803 sr_sal.pspace = get_frame_program_space (return_frame);
7804
7805 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7806 get_stack_frame_id (return_frame),
7807 bp_hp_step_resume);
7808 }
7809
7810 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7811 is used to skip a function after stepping into it (for "next" or if
7812 the called function has no debugging information).
7813
7814 The current function has almost always been reached by single
7815 stepping a call or return instruction. NEXT_FRAME belongs to the
7816 current function, and the breakpoint will be set at the caller's
7817 resume address.
7818
7819 This is a separate function rather than reusing
7820 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7821 get_prev_frame, which may stop prematurely (see the implementation
7822 of frame_unwind_caller_id for an example). */
7823
7824 static void
7825 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7826 {
7827 /* We shouldn't have gotten here if we don't know where the call site
7828 is. */
7829 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
7830
7831 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
7832
7833 symtab_and_line sr_sal;
7834 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7835 frame_unwind_caller_pc (next_frame));
7836 sr_sal.section = find_pc_overlay (sr_sal.pc);
7837 sr_sal.pspace = frame_unwind_program_space (next_frame);
7838
7839 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
7840 frame_unwind_caller_id (next_frame));
7841 }
7842
7843 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7844 new breakpoint at the target of a jmp_buf. The handling of
7845 longjmp-resume uses the same mechanisms used for handling
7846 "step-resume" breakpoints. */
7847
7848 static void
7849 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
7850 {
7851 /* There should never be more than one longjmp-resume breakpoint per
7852 thread, so we should never be setting a new
7853 longjmp_resume_breakpoint when one is already active. */
7854 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
7855
7856 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7857 paddress (gdbarch, pc));
7858
7859 inferior_thread ()->control.exception_resume_breakpoint =
7860 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
7861 }
7862
7863 /* Insert an exception resume breakpoint. TP is the thread throwing
7864 the exception. The block B is the block of the unwinder debug hook
7865 function. FRAME is the frame corresponding to the call to this
7866 function. SYM is the symbol of the function argument holding the
7867 target PC of the exception. */
7868
7869 static void
7870 insert_exception_resume_breakpoint (struct thread_info *tp,
7871 const struct block *b,
7872 struct frame_info *frame,
7873 struct symbol *sym)
7874 {
7875 try
7876 {
7877 struct block_symbol vsym;
7878 struct value *value;
7879 CORE_ADDR handler;
7880 struct breakpoint *bp;
7881
7882 vsym = lookup_symbol_search_name (sym->search_name (),
7883 b, VAR_DOMAIN);
7884 value = read_var_value (vsym.symbol, vsym.block, frame);
7885 /* If the value was optimized out, revert to the old behavior. */
7886 if (! value_optimized_out (value))
7887 {
7888 handler = value_as_address (value);
7889
7890 infrun_debug_printf ("exception resume at %lx",
7891 (unsigned long) handler);
7892
7893 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7894 handler,
7895 bp_exception_resume).release ();
7896
7897 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7898 frame = NULL;
7899
7900 bp->thread = tp->global_num;
7901 inferior_thread ()->control.exception_resume_breakpoint = bp;
7902 }
7903 }
7904 catch (const gdb_exception_error &e)
7905 {
7906 /* We want to ignore errors here. */
7907 }
7908 }
7909
7910 /* A helper for check_exception_resume that sets an
7911 exception-breakpoint based on a SystemTap probe. */
7912
7913 static void
7914 insert_exception_resume_from_probe (struct thread_info *tp,
7915 const struct bound_probe *probe,
7916 struct frame_info *frame)
7917 {
7918 struct value *arg_value;
7919 CORE_ADDR handler;
7920 struct breakpoint *bp;
7921
7922 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7923 if (!arg_value)
7924 return;
7925
7926 handler = value_as_address (arg_value);
7927
7928 infrun_debug_printf ("exception resume at %s",
7929 paddress (probe->objfile->arch (), handler));
7930
7931 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7932 handler, bp_exception_resume).release ();
7933 bp->thread = tp->global_num;
7934 inferior_thread ()->control.exception_resume_breakpoint = bp;
7935 }
7936
7937 /* This is called when an exception has been intercepted. Check to
7938 see whether the exception's destination is of interest, and if so,
7939 set an exception resume breakpoint there. */
7940
static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe; fall back to reading the debug hook's arguments from
     its debug info.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  /* Skip the first argument (the CFA); the second argument
	     symbol is HANDLER, which we set the breakpoint from.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Failing to find/read the handler is not fatal; just don't
	 set the breakpoint.  */
    }
}
8004
8005 static void
8006 stop_waiting (struct execution_control_state *ecs)
8007 {
8008 infrun_debug_printf ("stop_waiting");
8009
8010 /* Let callers know we don't want to wait for the inferior anymore. */
8011 ecs->wait_some_more = 0;
8012
8013 /* If all-stop, but there exists a non-stop target, stop all
8014 threads now that we're presenting the stop to the user. */
8015 if (!non_stop && exists_non_stop_target ())
8016 stop_all_threads ();
8017 }
8018
8019 /* Like keep_going, but passes the signal to the inferior, even if the
8020 signal is set to nopass. */
8021
static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      infrun_debug_printf ("%s has trap_expected set, "
			   "resuming to collect trap",
			   target_pid_to_str (tp->ptid).c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->suspend.stop_signal);
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  infrun_debug_printf ("step-over already in progress: "
			       "step-over for %s deferred",
			       target_pid_to_str (tp->ptid).c_str ());
	  global_thread_step_over_chain_enqueue (tp);
	}
      else
	{
	  infrun_debug_printf ("step-over in progress: resume of %s deferred",
			       target_pid_to_str (tp->ptid).c_str ());
	}
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      /* Whether a breakpoint and/or a watchpoint must be removed for
	 this thread to make progress.  */
      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ();

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      /* Expect the step-over trap iff we are actually removing
	 something to step over.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
8140
8141 /* Called when we should continue running the inferior, because the
8142 current event doesn't cause a user visible stop. This does the
8143 resuming part; waiting for the next event is done elsewhere. */
8144
8145 static void
8146 keep_going (struct execution_control_state *ecs)
8147 {
8148 if (ecs->event_thread->control.trap_expected
8149 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
8150 ecs->event_thread->control.trap_expected = 0;
8151
8152 if (!signal_program[ecs->event_thread->suspend.stop_signal])
8153 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
8154 keep_going_pass_signal (ecs);
8155 }
8156
8157 /* This function normally comes after a resume, before
8158 handle_inferior_event exits. It takes care of any last bits of
8159 housekeeping, and sets the all-important wait_some_more flag. */
8160
static void
prepare_to_wait (struct execution_control_state *ecs)
{
  infrun_debug_printf ("prepare_to_wait");

  /* Tell the caller (ultimately fetch_inferior_event) that more
     events are expected before presenting a stop.  */
  ecs->wait_some_more = 1;

  /* If the target can't async, emulate it by marking the infrun event
     handler such that as soon as we get back to the event-loop, we
     immediately end up in fetch_inferior_event again calling
     target_wait.  */
  if (!target_can_async_p ())
    mark_infrun_async_event_handler ();
}
8175
8176 /* We are done with the step range of a step/next/si/ni command.
8177 Called once for each n of a "step n" operation. */
8178
static void
end_stepping_range (struct execution_control_state *ecs)
{
  /* Mark the step as finished before presenting the stop, so
     observers see stop_step set.  */
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}
8185
8186 /* Several print_*_reason functions to print why the inferior has stopped.
8187 We always print something when the inferior exits, or receives a signal.
8188 The rest of the cases are dealt with later on in normal_stop and
8189 print_it_typical. Ideally there should be a call to one of these
8190 print_*_reason functions functions from handle_inferior_event each time
8191 stop_waiting is called.
8192
8193 Note that we don't call these directly, instead we delegate that to
8194 the interpreters, through observers. Interpreters then call these
8195 with whatever uiout is right. */
8196
8197 void
8198 print_end_stepping_range_reason (struct ui_out *uiout)
8199 {
8200 /* For CLI-like interpreters, print nothing. */
8201
8202 if (uiout->is_mi_like_p ())
8203 {
8204 uiout->field_string ("reason",
8205 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8206 }
8207 }
8208
/* Print why the inferior terminated: killed by signal SIGGNAL.
   Emits the MI "reason" field for MI-like interpreters, plus the
   user-visible signal name and meaning.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
8229
/* Print why the inferior exited.  EXITSTATUS non-zero means an
   abnormal exit; the code is printed in octal with a leading "0".  */

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
8256
/* Print that the program received signal SIGGNAL (or simply stopped,
   for GDB_SIGNAL_0).  CLI output optionally names the stopping
   thread; MI output carries "reason"/"signal-name"/"signal-meaning"
   fields instead of the leading text.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  annotate_signal ();

  /* MI prints no leading "Thread"/"Program" text.  */
  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      const char *name;

      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      /* Prefer the user-visible thread name; fall back to asking the
	 target.  */
      name = thr->name != NULL ? thr->name : target_thread_name (thr);
      if (name != NULL)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Give the architecture a chance to report extra information
	 about the signal.  */
      struct regcache *regcache = get_current_regcache ();
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
8308
/* Print the message shown when reverse execution runs out of
   recorded history.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  uiout->text ("\nNo more reverse-execution history.\n");
}
8314
8315 /* Print current location without a level number, if we have changed
8316 functions or hit a breakpoint. Print source line if we have one.
8317 bpstat_print contains the logic deciding in detail what to print,
8318 based on the event(s) that just occurred. */
8319
static void
print_stop_location (struct target_waitstatus *ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let bpstat_print decide; it returns PRINT_UNKNOWN if no
     breakpoint-specific message applied.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->suspend.stop_pc)))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
}
8374
8375 /* See infrun.h. */
8376
void
print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    /* Route all output of the stop presentation through UIOUT.  */
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (&last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  /* If the thread's command state machine (if any) has finished and
     produced a return value, print it as well.  */
  tp = inferior_thread ();
  if (tp->thread_fsm != NULL
      && tp->thread_fsm->finished_p ())
    {
      struct return_value_info *rv;

      rv = tp->thread_fsm->return_value ();
      if (rv != NULL)
	print_return_value (uiout, rv);
    }
}
8406
8407 /* See infrun.h. */
8408
8409 void
8410 maybe_remove_breakpoints (void)
8411 {
8412 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
8413 {
8414 if (remove_breakpoints ())
8415 {
8416 target_terminal::ours_for_output ();
8417 printf_filtered (_("Cannot remove breakpoints because "
8418 "program is no longer writable.\nFurther "
8419 "execution is probably impossible.\n"));
8420 }
8421 }
8422 }
8423
8424 /* The execution context that just caused a normal stop. */
8425
struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current execution context no longer matches
     the state captured when this object was constructed.  */
  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
8448
8449 /* Initializes a new stop context. If stopped for a thread event, this
8450 takes a strong reference to the thread. */
8451
stop_context::stop_context ()
{
  /* Record the global stop counter plus the current ptid and inferior
     so changed () can later detect a resume or a context switch.  */
  stop_id = get_stop_id ();
  ptid = inferior_ptid;
  inf_num = current_inferior ()->num;

  if (inferior_ptid != null_ptid)
    {
      /* Take a strong reference so that the thread can't be deleted
	 yet.  */
      thread = thread_info_ref::new_reference (inferior_thread ());
    }
}
8465
8466 /* Return true if the current context no longer matches the saved stop
8467 context. */
8468
8469 bool
8470 stop_context::changed () const
8471 {
8472 if (ptid != inferior_ptid)
8473 return true;
8474 if (inf_num != current_inferior ()->num)
8475 return true;
8476 if (thread != NULL && thread->state != THREAD_STOPPED)
8477 return true;
8478 if (get_stop_id () != stop_id)
8479 return true;
8480 return false;
8481 }
8482
8483 /* See infrun.h. */
8484
int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  /* Every normal stop gets a fresh stop ID; stop_context relies on it
     to detect re-resumption from within the stop hook below.  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind == TARGET_WAITKIND_SIGNALLED
	   || last.kind == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  /* Marks the selected ptid range as stopped when destroyed/reset.  */
  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution ()
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  printf_filtered (_("[Switching to %s]\n"),
			   target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    printf_filtered (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  struct frame_info *frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command != NULL)
    {
      /* Snapshot the context so we can tell whether the stop hook
	 resumed the target or switched thread/inferior.  */
      stop_context saved_context;

      try
	{
	  execute_cmd_pre_hook (stop_command);
	}
      catch (const gdb_exception &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Error while running hook_stop:\n");
	}

      /* If the stop hook resumes the target, then there's no point in
	 trying to notify about the previous stop; its context is
	 gone.  Likewise if the command switches thread or inferior --
	 the observers would print a stop for the wrong
	 thread/inferior.  */
      if (saved_context.changed ())
	return 1;
    }

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED
	  && last.kind != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  return 0;
}
8677 \f
8678 int
8679 signal_stop_state (int signo)
8680 {
8681 return signal_stop[signo];
8682 }
8683
8684 int
8685 signal_print_state (int signo)
8686 {
8687 return signal_print[signo];
8688 }
8689
8690 int
8691 signal_pass_state (int signo)
8692 {
8693 return signal_program[signo];
8694 }
8695
8696 static void
8697 signal_cache_update (int signo)
8698 {
8699 if (signo == -1)
8700 {
8701 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
8702 signal_cache_update (signo);
8703
8704 return;
8705 }
8706
8707 signal_pass[signo] = (signal_stop[signo] == 0
8708 && signal_print[signo] == 0
8709 && signal_program[signo] == 1
8710 && signal_catch[signo] == 0);
8711 }
8712
8713 int
8714 signal_stop_update (int signo, int state)
8715 {
8716 int ret = signal_stop[signo];
8717
8718 signal_stop[signo] = state;
8719 signal_cache_update (signo);
8720 return ret;
8721 }
8722
8723 int
8724 signal_print_update (int signo, int state)
8725 {
8726 int ret = signal_print[signo];
8727
8728 signal_print[signo] = state;
8729 signal_cache_update (signo);
8730 return ret;
8731 }
8732
8733 int
8734 signal_pass_update (int signo, int state)
8735 {
8736 int ret = signal_program[signo];
8737
8738 signal_program[signo] = state;
8739 signal_cache_update (signo);
8740 return ret;
8741 }
8742
8743 /* Update the global 'signal_catch' from INFO and notify the
8744 target. */
8745
8746 void
8747 signal_catch_update (const unsigned int *info)
8748 {
8749 int i;
8750
8751 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8752 signal_catch[i] = info[i] > 0;
8753 signal_cache_update (-1);
8754 target_pass_signals (signal_pass);
8755 }
8756
8757 static void
8758 sig_print_header (void)
8759 {
8760 printf_filtered (_("Signal Stop\tPrint\tPass "
8761 "to program\tDescription\n"));
8762 }
8763
8764 static void
8765 sig_print_info (enum gdb_signal oursig)
8766 {
8767 const char *name = gdb_signal_to_name (oursig);
8768 int name_padding = 13 - strlen (name);
8769
8770 if (name_padding <= 0)
8771 name_padding = 0;
8772
8773 printf_filtered ("%s", name);
8774 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
8775 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8776 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8777 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
8778 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
8779 }
8780
8781 /* Specify how various signals in the inferior should be handled. */
8782
static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      /* Count leading digits so numeric arguments (and the low end of
	 a <LOW>-<HIGH> range) can be recognized below.
	 NOTE(review): passing a plain char to isdigit is undefined for
	 negative values; consider casting to unsigned char.  */
      for (digits = 0; isdigit (arg[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      /* Keyword matching is by unambiguous prefix: the minimum prefix
	 length in each strncmp guard disambiguates e.g. "p" (print
	 vs. pass needs 2 chars) and "no..." variants.  */
      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (arg));
	  if (arg[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi (arg + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      std::swap (sigfirst, siglast);
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (arg);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* These two are used by GDB itself; ask before changing
		 them unless "all" was given or they were explicitly
		 requested already.  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    printf_unfiltered (_("Not confirmed, unchanged.\n"));
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}
    }

  /* If at least one signal was affected, refresh the cache, push the
     new tables to the target, and optionally show the results.  */
  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals (signal_pass);
	target_program_signals (signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info ((enum gdb_signal) signum);
	  }

	break;
      }
}
8948
8949 /* Complete the "handle" command. */
8950
static void
handle_completer (struct cmd_list_element *ignore,
		  completion_tracker &tracker,
		  const char *text, const char *word)
{
  /* The action keywords accepted by the "handle" command.  */
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      NULL,
    };

  /* Offer both signal names and the action keywords above.  */
  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}
8973
8974 enum gdb_signal
8975 gdb_signal_from_command (int num)
8976 {
8977 if (num >= 1 && num <= 15)
8978 return (enum gdb_signal) num;
8979 error (_("Only signals 1-15 are valid as numeric signals.\n\
8980 Use \"info signals\" for a list of symbolic signals."));
8981 }
8982
8983 /* Print current contents of the tables set by the handle command.
8984 It is possible we should just be printing signals actually used
8985 by the current target (but for things to work right when switching
8986 targets, all signals should be in the signal tables). */
8987
8988 static void
8989 info_signals_command (const char *signum_exp, int from_tty)
8990 {
8991 enum gdb_signal oursig;
8992
8993 sig_print_header ();
8994
8995 if (signum_exp)
8996 {
8997 /* First see if this is a symbol name. */
8998 oursig = gdb_signal_from_name (signum_exp);
8999 if (oursig == GDB_SIGNAL_UNKNOWN)
9000 {
9001 /* No, try numeric. */
9002 oursig =
9003 gdb_signal_from_command (parse_and_eval_long (signum_exp));
9004 }
9005 sig_print_info (oursig);
9006 return;
9007 }
9008
9009 printf_filtered ("\n");
9010 /* These ugly casts brought to you by the native VAX compiler. */
9011 for (oursig = GDB_SIGNAL_FIRST;
9012 (int) oursig < (int) GDB_SIGNAL_LAST;
9013 oursig = (enum gdb_signal) ((int) oursig + 1))
9014 {
9015 QUIT;
9016
9017 if (oursig != GDB_SIGNAL_UNKNOWN
9018 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9019 sig_print_info (oursig);
9020 }
9021
9022 printf_filtered (_("\nUse the \"handle\" command "
9023 "to change these tables.\n"));
9024 }
9025
9026 /* The $_siginfo convenience variable is a bit special. We don't know
9027 for sure the type of the value until we actually have a chance to
9028 fetch the data. The type can change depending on gdbarch, so it is
9029 also dependent on which thread you have selected.
9030
9031 1. making $_siginfo be an internalvar that creates a new value on
9032 access.
9033
9034 2. making the value of $_siginfo be an lval_computed value. */
9035
9036 /* This function implements the lval_computed support for reading a
9037 $_siginfo value. */
9038
static void
siginfo_value_read (struct value *v)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  /* Fetch the raw siginfo bytes from the target into V's buffer,
     honoring the value's offset.  */
  transferred =
    target_read (current_inferior ()->top_target (),
		 TARGET_OBJECT_SIGNAL_INFO,
		 NULL,
		 value_contents_all_raw (v),
		 value_offset (v),
		 TYPE_LENGTH (value_type (v)));

  /* A short transfer would leave part of the value undefined, so
     treat it as an error.  */
  if (transferred != TYPE_LENGTH (value_type (v)))
    error (_("Unable to read siginfo"));
}
9059
9060 /* This function implements the lval_computed support for writing a
9061 $_siginfo value. */
9062
static void
siginfo_value_write (struct value *v, struct value *fromval)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  /* Push FROMVAL's bytes into the target's siginfo object at the
     offset described by V.  */
  transferred = target_write (current_inferior ()->top_target (),
			      TARGET_OBJECT_SIGNAL_INFO,
			      NULL,
			      value_contents_all_raw (fromval),
			      value_offset (v),
			      TYPE_LENGTH (value_type (fromval)));

  /* A short transfer means the target object was only partially
     updated, so treat it as an error.  */
  if (transferred != TYPE_LENGTH (value_type (fromval)))
    error (_("Unable to write siginfo"));
}
9082
/* lval_computed callbacks backing $_siginfo values; only read and
   write hooks are provided, the remaining hooks default.  */
static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
9088
9089 /* Return a new value with the correct type for the siginfo object of
9090 the current thread using architecture GDBARCH. Return a void value
9091 if there's no object available. */
9092
9093 static struct value *
9094 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9095 void *ignore)
9096 {
9097 if (target_has_stack ()
9098 && inferior_ptid != null_ptid
9099 && gdbarch_get_siginfo_type_p (gdbarch))
9100 {
9101 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9102
9103 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
9104 }
9105
9106 return allocate_value (builtin_type (gdbarch)->builtin_void);
9107 }
9108
9109 \f
9110 /* infcall_suspend_state contains state about the program itself like its
9111 registers and any signal it received when it last stopped.
9112 This state must be restored regardless of how the inferior function call
9113 ends (either successfully, or after it hits a breakpoint or signal)
9114 if the program is to properly continue where it left off. */
9115
class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  TP's suspend state
     and a read-only copy of REGCACHE are always captured; the target's
     siginfo bytes are captured as well when GDBARCH describes a
     siginfo type (read failures are silently ignored).  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_thread_suspend (tp->suspend),
      m_registers (new readonly_detached_regcache (*regcache))
  {
    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = TYPE_LENGTH (type);

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, NULL,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  The
     saved siginfo is only written back when GDBARCH matches the
     gdbarch the state was captured with.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->suspend = m_thread_suspend;

    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, NULL,
		      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
9200
9201 infcall_suspend_state_up
9202 save_infcall_suspend_state ()
9203 {
9204 struct thread_info *tp = inferior_thread ();
9205 struct regcache *regcache = get_current_regcache ();
9206 struct gdbarch *gdbarch = regcache->arch ();
9207
9208 infcall_suspend_state_up inf_state
9209 (new struct infcall_suspend_state (gdbarch, tp, regcache));
9210
9211 /* Having saved the current state, adjust the thread state, discarding
9212 any stop signal information. The stop signal is not useful when
9213 starting an inferior function call, and run_inferior_call will not use
9214 the signal due to its `proceed' call with GDB_SIGNAL_0. */
9215 tp->suspend.stop_signal = GDB_SIGNAL_0;
9216
9217 return inf_state;
9218 }
9219
9220 /* Restore inferior session state to INF_STATE. */
9221
9222 void
9223 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
9224 {
9225 struct thread_info *tp = inferior_thread ();
9226 struct regcache *regcache = get_current_regcache ();
9227 struct gdbarch *gdbarch = regcache->arch ();
9228
9229 inf_state->restore (gdbarch, tp, regcache);
9230 discard_infcall_suspend_state (inf_state);
9231 }
9232
/* Free INF_STATE without restoring anything from it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
9238
9239 readonly_detached_regcache *
9240 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
9241 {
9242 return inf_state->registers ();
9243 }
9244
9245 /* infcall_control_state contains state regarding gdb's control of the
9246 inferior itself like stepping control. It also contains session state like
9247 the user's currently selected frame. */
9248
struct infcall_control_state
{
  /* Saved copy of the thread's control state (stepping state,
     breakpoints, bpstat chain).  */
  struct thread_control_state thread_control;
  /* Saved copy of the inferior's control state.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  /* Saved copies of the corresponding infrun globals.  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};
9263
9264 /* Save all of the information associated with the inferior<==>gdb
9265 connection. */
9266
infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* Clear the thread's breakpoint pointers; the originals now live in
     the copy held by INF_STATUS.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  /* Remember which frame the user had selected so it can be
     re-selected on restore.  */
  save_selected_frame (&inf_status->selected_frame_id,
		       &inf_status->selected_frame_level);

  return inf_status;
}
9295
9296 /* Restore inferior session state to INF_STATUS. */
9297
void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* The breakpoints the thread currently owns are about to be
     replaced by the saved ones; schedule them for deletion.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
			      inf_status->selected_frame_level);
    }

  /* Ownership of INF_STATUS was transferred to us; dispose of it.  */
  delete inf_status;
}
9329
9330 void
9331 discard_infcall_control_state (struct infcall_control_state *inf_status)
9332 {
9333 if (inf_status->thread_control.step_resume_breakpoint)
9334 inf_status->thread_control.step_resume_breakpoint->disposition
9335 = disp_del_at_next_stop;
9336
9337 if (inf_status->thread_control.exception_resume_breakpoint)
9338 inf_status->thread_control.exception_resume_breakpoint->disposition
9339 = disp_del_at_next_stop;
9340
9341 /* See save_infcall_control_state for info on stop_bpstat. */
9342 bpstat_clear (&inf_status->thread_control.stop_bpstat);
9343
9344 delete inf_status;
9345 }
9346 \f
9347 /* See infrun.h. */
9348
9349 void
9350 clear_exit_convenience_vars (void)
9351 {
9352 clear_internalvar (lookup_internalvar ("_exitsignal"));
9353 clear_internalvar (lookup_internalvar ("_exitcode"));
9354 }
9355 \f
9356
9357 /* User interface for reverse debugging:
9358 Set exec-direction / show exec-direction commands
9359 (returns error unless target implements to_set_exec_direction method). */
9360
/* The direction in which execution commands currently run the
   inferior.  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;

/* The two value strings accepted by "set exec-direction".  */
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";

/* Backing variable for the "set exec-direction" command; always
   points at one of the strings above.  */
static const char *exec_direction = exec_forward;

/* Valid-value list handed to the command machinery.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
9370
9371 static void
9372 set_exec_direction_func (const char *args, int from_tty,
9373 struct cmd_list_element *cmd)
9374 {
9375 if (target_can_execute_reverse ())
9376 {
9377 if (!strcmp (exec_direction, exec_forward))
9378 execution_direction = EXEC_FORWARD;
9379 else if (!strcmp (exec_direction, exec_reverse))
9380 execution_direction = EXEC_REVERSE;
9381 }
9382 else
9383 {
9384 exec_direction = exec_forward;
9385 error (_("Target does not support this operation."));
9386 }
9387 }
9388
9389 static void
9390 show_exec_direction_func (struct ui_file *out, int from_tty,
9391 struct cmd_list_element *cmd, const char *value)
9392 {
9393 switch (execution_direction) {
9394 case EXEC_FORWARD:
9395 fprintf_filtered (out, _("Forward.\n"));
9396 break;
9397 case EXEC_REVERSE:
9398 fprintf_filtered (out, _("Reverse.\n"));
9399 break;
9400 default:
9401 internal_error (__FILE__, __LINE__,
9402 _("bogus execution_direction value: %d"),
9403 (int) execution_direction);
9404 }
9405 }
9406
9407 static void
9408 show_schedule_multiple (struct ui_file *file, int from_tty,
9409 struct cmd_list_element *c, const char *value)
9410 {
9411 fprintf_filtered (file, _("Resuming the execution of threads "
9412 "of all processes is %s.\n"), value);
9413 }
9414
9415 /* Implementation of `siginfo' variable. */
9416
/* Only a value-construction hook is needed for $_siginfo; the
   remaining internalvar hooks are left unset (NULL).  */
static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
9423
9424 /* Callback for infrun's target events source. This is marked when a
9425 thread has a pending status to process. */
9426
static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* Clear the pending marker before processing, so that events
     becoming pending while this one is handled can mark it again.
     NOTE(review): inferred from the clear-before-handle ordering;
     confirm against the async event handler contract.  */
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}
9433
#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->gdbarch;

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    /* Two mock targets, each with one inferior/thread carrying the
       same ptid.  */
    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    /* The current inferior belongs to the target whose thread is
       renamed below.  */
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* inferior_ptid referred to the renamed thread, so it must follow
       the change.  */
    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    /* This time the current inferior is on target2, while the renamed
       thread lives on target1.  */
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* inferior_ptid referred to target2's thread, which did not change,
       so it must be left alone.  */
    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */
9500
/* Module initializer: registers infrun's event sources, commands,
   set/show knobs, default signal dispositions, and observers.  */
void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
				  "infrun");

  /* "info signals" / "info handle" — display signal dispositions.  */
  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  /* "handle" — change signal dispositions.  */
  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  /* "stop" exists only as a hookable pseudo-command (unless dbx mode
     provides its own).  */
  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
			    not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* "set/show debug infrun".  */
  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     NULL, show_debug_infrun, &setdebuglist, &showdebuglist);

  /* "set/show non-stop".  */
  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Start out with every signal stopped, printed, and passed, and no
     signals caught.  */
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  /* "set/show stop-on-solib-events".  */
  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  /* "set/show follow-fork-mode".  */
  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  /* "set/show follow-exec-mode".  */
  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  /* "set/show scheduler-locking".  */
  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
	  This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
	  In this mode, other threads may run during other commands.\n\
	  This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  /* "set/show schedule-multiple".  */
  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  /* "set/show step-mode".  */
  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  /* "set/show displaced-stepping".  */
  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  /* "set/show exec-direction" (see exec_direction_names above).  */
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Hook infrun into thread/inferior lifecycle notifications.  */
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
					      "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
						"infrun");
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit, "infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  /* "set/show observer".  */
  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}