[gdb] Fix stepping over fork with follow-fork-mode child and gcc-8
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2020 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdbcore.h"
29 #include "gdbcmd.h"
30 #include "target.h"
31 #include "target-connection.h"
32 #include "gdbthread.h"
33 #include "annotate.h"
34 #include "symfile.h"
35 #include "top.h"
36 #include "inf-loop.h"
37 #include "regcache.h"
38 #include "value.h"
39 #include "observable.h"
40 #include "language.h"
41 #include "solib.h"
42 #include "main.h"
43 #include "block.h"
44 #include "mi/mi-common.h"
45 #include "event-top.h"
46 #include "record.h"
47 #include "record-full.h"
48 #include "inline-frame.h"
49 #include "jit.h"
50 #include "tracepoint.h"
51 #include "skip.h"
52 #include "probe.h"
53 #include "objfiles.h"
54 #include "completer.h"
55 #include "target-descriptions.h"
56 #include "target-dcache.h"
57 #include "terminal.h"
58 #include "solist.h"
59 #include "gdbsupport/event-loop.h"
60 #include "thread-fsm.h"
61 #include "gdbsupport/enum-flags.h"
62 #include "progspace-and-thread.h"
63 #include "gdbsupport/gdb_optional.h"
64 #include "arch-utils.h"
65 #include "gdbsupport/scope-exit.h"
66 #include "gdbsupport/forward-scope-exit.h"
67 #include "gdbsupport/gdb_select.h"
68 #include <unordered_map>
69 #include "async-event.h"
70
71 /* Prototypes for local functions */
72
73 static void sig_print_info (enum gdb_signal);
74
75 static void sig_print_header (void);
76
77 static void follow_inferior_reset_breakpoints (void);
78
79 static int currently_stepping (struct thread_info *tp);
80
81 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
82
83 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
84
85 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
86
87 static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
88
89 static void resume (gdb_signal sig);
90
91 static void wait_for_inferior (inferior *inf);
92
93 /* Asynchronous signal handler registered as event loop source for
94 when we have pending events ready to be passed to the core. */
95 static struct async_event_handler *infrun_async_inferior_event_token;
96
97 /* Stores whether infrun_async was previously enabled or disabled.
98 Starts off as -1, indicating "never enabled/disabled". */
99 static int infrun_is_async = -1;
100
101 /* See infrun.h. */
102
103 void
104 infrun_async (int enable)
105 {
106 if (infrun_is_async != enable)
107 {
108 infrun_is_async = enable;
109
110 if (debug_infrun)
111 fprintf_unfiltered (gdb_stdlog,
112 "infrun: infrun_async(%d)\n",
113 enable);
114
115 if (enable)
116 mark_async_event_handler (infrun_async_inferior_event_token);
117 else
118 clear_async_event_handler (infrun_async_inferior_event_token);
119 }
120 }
121
/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  /* Flag infrun's async event handler so the event loop invokes its
     callback on the next pass, even without a fresh target event.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
129
130 /* When set, stop the 'step' command if we enter a function which has
131 no line number information. The normal behavior is that we step
132 over such function. */
133 bool step_stop_if_no_debug = false;
134 static void
135 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
139 }
140
141 /* proceed and normal_stop use this to notify the user when the
142 inferior stopped in a different thread than it had been running
143 in. */
144
145 static ptid_t previous_inferior_ptid;
146
147 /* If set (default for legacy reasons), when following a fork, GDB
148 will detach from one of the fork branches, child or parent.
149 Exactly which branch is detached depends on 'set follow-fork-mode'
150 setting. */
151
152 static bool detach_fork = true;
153
154 bool debug_displaced = false;
155 static void
156 show_debug_displaced (struct ui_file *file, int from_tty,
157 struct cmd_list_element *c, const char *value)
158 {
159 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
160 }
161
162 unsigned int debug_infrun = 0;
163 static void
164 show_debug_infrun (struct ui_file *file, int from_tty,
165 struct cmd_list_element *c, const char *value)
166 {
167 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
168 }
169
170
171 /* Support for disabling address space randomization. */
172
173 bool disable_randomization = true;
174
175 static void
176 show_disable_randomization (struct ui_file *file, int from_tty,
177 struct cmd_list_element *c, const char *value)
178 {
179 if (target_supports_disable_randomization ())
180 fprintf_filtered (file,
181 _("Disabling randomization of debuggee's "
182 "virtual address space is %s.\n"),
183 value);
184 else
185 fputs_filtered (_("Disabling randomization of debuggee's "
186 "virtual address space is unsupported on\n"
187 "this platform.\n"), file);
188 }
189
190 static void
191 set_disable_randomization (const char *args, int from_tty,
192 struct cmd_list_element *c)
193 {
194 if (!target_supports_disable_randomization ())
195 error (_("Disabling randomization of debuggee's "
196 "virtual address space is unsupported on\n"
197 "this platform."));
198 }
199
200 /* User interface for non-stop mode. */
201
202 bool non_stop = false;
203 static bool non_stop_1 = false;
204
205 static void
206 set_non_stop (const char *args, int from_tty,
207 struct cmd_list_element *c)
208 {
209 if (target_has_execution)
210 {
211 non_stop_1 = non_stop;
212 error (_("Cannot change this setting while the inferior is running."));
213 }
214
215 non_stop = non_stop_1;
216 }
217
218 static void
219 show_non_stop (struct ui_file *file, int from_tty,
220 struct cmd_list_element *c, const char *value)
221 {
222 fprintf_filtered (file,
223 _("Controlling the inferior in non-stop mode is %s.\n"),
224 value);
225 }
226
227 /* "Observer mode" is somewhat like a more extreme version of
228 non-stop, in which all GDB operations that might affect the
229 target's execution have been disabled. */
230
231 bool observer_mode = false;
232 static bool observer_mode_1 = false;
233
234 static void
235 set_observer_mode (const char *args, int from_tty,
236 struct cmd_list_element *c)
237 {
238 if (target_has_execution)
239 {
240 observer_mode_1 = observer_mode;
241 error (_("Cannot change this setting while the inferior is running."));
242 }
243
244 observer_mode = observer_mode_1;
245
246 may_write_registers = !observer_mode;
247 may_write_memory = !observer_mode;
248 may_insert_breakpoints = !observer_mode;
249 may_insert_tracepoints = !observer_mode;
250 /* We can insert fast tracepoints in or out of observer mode,
251 but enable them if we're going into this mode. */
252 if (observer_mode)
253 may_insert_fast_tracepoints = true;
254 may_stop = !observer_mode;
255 update_target_permissions ();
256
257 /* Going *into* observer mode we must force non-stop, then
258 going out we leave it that way. */
259 if (observer_mode)
260 {
261 pagination_enabled = 0;
262 non_stop = non_stop_1 = true;
263 }
264
265 if (from_tty)
266 printf_filtered (_("Observer mode is now %s.\n"),
267 (observer_mode ? "on" : "off"));
268 }
269
270 static void
271 show_observer_mode (struct ui_file *file, int from_tty,
272 struct cmd_list_element *c, const char *value)
273 {
274 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
275 }
276
277 /* This updates the value of observer mode based on changes in
278 permissions. Note that we are deliberately ignoring the values of
279 may-write-registers and may-write-memory, since the user may have
280 reason to enable these during a session, for instance to turn on a
281 debugging-related global. */
282
283 void
284 update_observer_mode (void)
285 {
286 bool newval = (!may_insert_breakpoints
287 && !may_insert_tracepoints
288 && may_insert_fast_tracepoints
289 && !may_stop
290 && non_stop);
291
292 /* Let the user know if things change. */
293 if (newval != observer_mode)
294 printf_filtered (_("Observer mode is now %s.\n"),
295 (newval ? "on" : "off"));
296
297 observer_mode = observer_mode_1 = newval;
298 }
299
300 /* Tables of how to react to signals; the user sets them. */
301
302 static unsigned char signal_stop[GDB_SIGNAL_LAST];
303 static unsigned char signal_print[GDB_SIGNAL_LAST];
304 static unsigned char signal_program[GDB_SIGNAL_LAST];
305
306 /* Table of signals that are registered with "catch signal". A
307 non-zero entry indicates that the signal is caught by some "catch
308 signal" command. */
309 static unsigned char signal_catch[GDB_SIGNAL_LAST];
310
311 /* Table of signals that the target may silently handle.
312 This is automatically determined from the flags above,
313 and simply cached here. */
314 static unsigned char signal_pass[GDB_SIGNAL_LAST];
315
/* For the first NSIGS signal numbers, set FLAGS[signum] = 1 for every
   signal marked in SIGS.  Entries not marked in SIGS are left
   untouched.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* For the first NSIGS signal numbers, clear FLAGS[signum] for every
   signal marked in SIGS.  Entries not marked in SIGS are left
   untouched.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
331
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  /* Push the whole pass/no-pass table down to the target in one
     call.  */
  target_program_signals (signal_program);
}
340
341 /* Value to pass to target_resume() to cause all threads to resume. */
342
343 #define RESUME_ALL minus_one_ptid
344
345 /* Command list pointer for the "stop" placeholder. */
346
347 static struct cmd_list_element *stop_command;
348
349 /* Nonzero if we want to give control to the user when we're notified
350 of shared library events by the dynamic linker. */
351 int stop_on_solib_events;
352
353 /* Enable or disable optional shared library event breakpoints
354 as appropriate when the above flag is changed. */
355
static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  /* The flag itself (stop_on_solib_events) has already been updated
     by the "set" machinery; just resync the solib breakpoints.  */
  update_solib_breakpoints ();
}
362
363 static void
364 show_stop_on_solib_events (struct ui_file *file, int from_tty,
365 struct cmd_list_element *c, const char *value)
366 {
367 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
368 value);
369 }
370
371 /* Nonzero after stop if current stack frame should be printed. */
372
373 static int stop_print_frame;
374
375 /* This is a cached copy of the target/ptid/waitstatus of the last
376 event returned by target_wait()/deprecated_target_wait_hook().
377 This information is returned by get_last_target_status(). */
378 static process_stratum_target *target_last_proc_target;
379 static ptid_t target_last_wait_ptid;
380 static struct target_waitstatus target_last_waitstatus;
381
382 void init_thread_stepping_state (struct thread_info *tss);
383
384 static const char follow_fork_mode_child[] = "child";
385 static const char follow_fork_mode_parent[] = "parent";
386
387 static const char *const follow_fork_mode_kind_names[] = {
388 follow_fork_mode_child,
389 follow_fork_mode_parent,
390 NULL
391 };
392
393 static const char *follow_fork_mode_string = follow_fork_mode_parent;
394 static void
395 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
396 struct cmd_list_element *c, const char *value)
397 {
398 fprintf_filtered (file,
399 _("Debugger response to a program "
400 "call of fork or vfork is \"%s\".\n"),
401 value);
402 }
403 \f
404
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  A nonzero result tells the caller (follow_fork)
   that the inferior should NOT be resumed, either because resuming
   would hang the session (see the vfork check below) or because the
   target refused the follow request.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  /* The current thread is the fork parent; its pending_follow records
     the fork kind and the child's ptid.  */
  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* We're staying with the parent.  */

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("[Detaching after %s from child %s]\n"),
				has_vforked ? "vfork" : "fork",
				target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Staying attached to both: create a second inferior for
	     the child.  */
	  struct inferior *parent_inf, *child_inf;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  /* Temporarily switch to the child to wire it up; restored
	     on scope exit since we're following the parent.  */
	  scoped_restore_current_pspace_and_thread restore_pspace_thread;

	  set_current_inferior (child_inf);
	  switch_to_no_thread ();
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  push_target (parent_inf->process_target ());
	  add_thread_silent (child_inf->process_target (), child_ptid);
	  inferior_ptid = child_ptid;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork ();

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      /* Plain fork: give the child its own program/address
		 spaces, cloned from the parent's.  */
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = new program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (e.g., solib-svr4) learn
		 about this new process, relocate the cloned exec, pull
		 in shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }
	}

      if (has_vforked)
	{
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  fprintf_filtered (gdb_stdlog,
			    _("[Attaching after %s %s to child %s]\n"),
			    parent_pid.c_str (),
			    has_vforked ? "vfork" : "fork",
			    child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
	/* Hold a strong reference to the target while (maybe)
	   detaching the parent.  Otherwise detaching could close the
	   target.  */
	auto target_ref = target_ops_ref::new_reference (target);

	/* If we're vforking, we want to hold on to the parent until
	   the child exits or execs.  At child exec or exit time we
	   can remove the old breakpoints from the parent and detach
	   or resume debugging it.  Otherwise, detach the parent now;
	   we'll want to reuse it's program/address spaces, but we
	   can't set them to the child before removing breakpoints
	   from the parent, otherwise, the breakpoints module could
	   decide to remove breakpoints from the wrong process (since
	   they'd be assigned to the same address space).  */

	if (has_vforked)
	  {
	    gdb_assert (child_inf->vfork_parent == NULL);
	    gdb_assert (parent_inf->vfork_child == NULL);
	    child_inf->vfork_parent = parent_inf;
	    child_inf->pending_detach = 0;
	    parent_inf->vfork_child = child_inf;
	    parent_inf->pending_detach = detach_fork;
	    parent_inf->waiting_for_vfork_done = 0;
	  }
	else if (detach_fork)
	  {
	    if (print_inferior_events)
	      {
		/* Ensure that we have a process ptid.  */
		ptid_t process_ptid = ptid_t (parent_ptid.pid ());

		target_terminal::ours_for_output ();
		fprintf_filtered (gdb_stdlog,
				  _("[Detaching after fork from "
				    "parent %s]\n"),
				  target_pid_to_str (process_ptid).c_str ());
	      }

	    target_detach (parent_inf, 0);
	    parent_inf = NULL;
	  }

	/* Note that the detach above makes PARENT_INF dangling.  */

	/* Add the child thread to the appropriate lists, and switch
	   to this new thread, before cloning the program space, and
	   informing the solib layer about this new process.  */

	set_current_inferior (child_inf);
	push_target (target);
      }

      add_thread_silent (target, child_ptid);
      inferior_ptid = child_ptid;

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;

	  exec_on_vfork ();
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = new program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (e.g., solib-svr4) learn
	     about this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}
    }

  /* Finally let the target do its own follow bookkeeping; its result
     becomes ours (nonzero = don't resume).  */
  return target_follow_fork (follow_child, detach_fork);
}
664
665 /* Tell the target to follow the fork we're stopped at. Returns true
666 if the inferior should be resumed; false, if the target for some
667 reason decided it's best not to resume. */
668
669 static bool
670 follow_fork ()
671 {
672 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
673 bool should_resume = true;
674 struct thread_info *tp;
675
676 /* Copy user stepping state to the new inferior thread. FIXME: the
677 followed fork child thread should have a copy of most of the
678 parent thread structure's run control related fields, not just these.
679 Initialized to avoid "may be used uninitialized" warnings from gcc. */
680 struct breakpoint *step_resume_breakpoint = NULL;
681 struct breakpoint *exception_resume_breakpoint = NULL;
682 CORE_ADDR step_range_start = 0;
683 CORE_ADDR step_range_end = 0;
684 int current_line = 0;
685 symtab *current_symtab = NULL;
686 struct frame_id step_frame_id = { 0 };
687 struct thread_fsm *thread_fsm = NULL;
688
689 if (!non_stop)
690 {
691 process_stratum_target *wait_target;
692 ptid_t wait_ptid;
693 struct target_waitstatus wait_status;
694
695 /* Get the last target status returned by target_wait(). */
696 get_last_target_status (&wait_target, &wait_ptid, &wait_status);
697
698 /* If not stopped at a fork event, then there's nothing else to
699 do. */
700 if (wait_status.kind != TARGET_WAITKIND_FORKED
701 && wait_status.kind != TARGET_WAITKIND_VFORKED)
702 return 1;
703
704 /* Check if we switched over from WAIT_PTID, since the event was
705 reported. */
706 if (wait_ptid != minus_one_ptid
707 && (current_inferior ()->process_target () != wait_target
708 || inferior_ptid != wait_ptid))
709 {
710 /* We did. Switch back to WAIT_PTID thread, to tell the
711 target to follow it (in either direction). We'll
712 afterwards refuse to resume, and inform the user what
713 happened. */
714 thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
715 switch_to_thread (wait_thread);
716 should_resume = false;
717 }
718 }
719
720 tp = inferior_thread ();
721
722 /* If there were any forks/vforks that were caught and are now to be
723 followed, then do so now. */
724 switch (tp->pending_follow.kind)
725 {
726 case TARGET_WAITKIND_FORKED:
727 case TARGET_WAITKIND_VFORKED:
728 {
729 ptid_t parent, child;
730
731 /* If the user did a next/step, etc, over a fork call,
732 preserve the stepping state in the fork child. */
733 if (follow_child && should_resume)
734 {
735 step_resume_breakpoint = clone_momentary_breakpoint
736 (tp->control.step_resume_breakpoint);
737 step_range_start = tp->control.step_range_start;
738 step_range_end = tp->control.step_range_end;
739 current_line = tp->current_line;
740 current_symtab = tp->current_symtab;
741 step_frame_id = tp->control.step_frame_id;
742 exception_resume_breakpoint
743 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
744 thread_fsm = tp->thread_fsm;
745
746 /* For now, delete the parent's sr breakpoint, otherwise,
747 parent/child sr breakpoints are considered duplicates,
748 and the child version will not be installed. Remove
749 this when the breakpoints module becomes aware of
750 inferiors and address spaces. */
751 delete_step_resume_breakpoint (tp);
752 tp->control.step_range_start = 0;
753 tp->control.step_range_end = 0;
754 tp->control.step_frame_id = null_frame_id;
755 delete_exception_resume_breakpoint (tp);
756 tp->thread_fsm = NULL;
757 }
758
759 parent = inferior_ptid;
760 child = tp->pending_follow.value.related_pid;
761
762 process_stratum_target *parent_targ = tp->inf->process_target ();
763 /* Set up inferior(s) as specified by the caller, and tell the
764 target to do whatever is necessary to follow either parent
765 or child. */
766 if (follow_fork_inferior (follow_child, detach_fork))
767 {
768 /* Target refused to follow, or there's some other reason
769 we shouldn't resume. */
770 should_resume = 0;
771 }
772 else
773 {
774 /* This pending follow fork event is now handled, one way
775 or another. The previous selected thread may be gone
776 from the lists by now, but if it is still around, need
777 to clear the pending follow request. */
778 tp = find_thread_ptid (parent_targ, parent);
779 if (tp)
780 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
781
782 /* This makes sure we don't try to apply the "Switched
783 over from WAIT_PID" logic above. */
784 nullify_last_target_wait_ptid ();
785
786 /* If we followed the child, switch to it... */
787 if (follow_child)
788 {
789 thread_info *child_thr = find_thread_ptid (parent_targ, child);
790 switch_to_thread (child_thr);
791
792 /* ... and preserve the stepping state, in case the
793 user was stepping over the fork call. */
794 if (should_resume)
795 {
796 tp = inferior_thread ();
797 tp->control.step_resume_breakpoint
798 = step_resume_breakpoint;
799 tp->control.step_range_start = step_range_start;
800 tp->control.step_range_end = step_range_end;
801 tp->current_line = current_line;
802 tp->current_symtab = current_symtab;
803 tp->control.step_frame_id = step_frame_id;
804 tp->control.exception_resume_breakpoint
805 = exception_resume_breakpoint;
806 tp->thread_fsm = thread_fsm;
807 }
808 else
809 {
810 /* If we get here, it was because we're trying to
811 resume from a fork catchpoint, but, the user
812 has switched threads away from the thread that
813 forked. In that case, the resume command
814 issued is most likely not applicable to the
815 child, so just warn, and refuse to resume. */
816 warning (_("Not resuming: switched threads "
817 "before following fork child."));
818 }
819
820 /* Reset breakpoints in the child as appropriate. */
821 follow_inferior_reset_breakpoints ();
822 }
823 }
824 }
825 break;
826 case TARGET_WAITKIND_SPURIOUS:
827 /* Nothing to follow. */
828 break;
829 default:
830 internal_error (__FILE__, __LINE__,
831 "Unexpected pending_follow.kind %d\n",
832 tp->pending_follow.kind);
833 break;
834 }
835
836 return should_resume;
837 }
838
/* Re-bind run-control breakpoints to the newly followed inferior.
   Called from follow_fork after switching to the fork child, so that
   the cloned step-resume/exception-resume breakpoints fire for the
   right thread, and so the breakpoint table is (re)inserted in the
   child.  */

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
878
879 /* The child has exited or execed: resume threads of the parent the
880 user wanted to be executing. */
881
882 static int
883 proceed_after_vfork_done (struct thread_info *thread,
884 void *arg)
885 {
886 int pid = * (int *) arg;
887
888 if (thread->ptid.pid () == pid
889 && thread->state == THREAD_RUNNING
890 && !thread->executing
891 && !thread->stop_requested
892 && thread->suspend.stop_signal == GDB_SIGNAL_0)
893 {
894 if (debug_infrun)
895 fprintf_unfiltered (gdb_stdlog,
896 "infrun: resuming vfork parent thread %s\n",
897 target_pid_to_str (thread->ptid).c_str ());
898
899 switch_to_thread (thread);
900 clear_proceed_status (0);
901 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
902 }
903
904 return 0;
905 }
906
/* Save/restore inferior_ptid, current program space and current
   inferior.  Only use this if the current context points at an exited
   inferior (and therefore there's no current thread to save).  */
class scoped_restore_exited_inferior
{
public:
  scoped_restore_exited_inferior ()
    : m_saved_ptid (&inferior_ptid)
  {}

private:
  /* Saves/restores the value of the global inferior_ptid.  */
  scoped_restore_tmpl<ptid_t> m_saved_ptid;
  /* Save/restore the current program space and current inferior.  */
  scoped_restore_current_program_space m_pspace;
  scoped_restore_current_inferior m_inferior;
};
922
923 /* Called whenever we notice an exec or exit event, to handle
924 detaching or resuming a vfork parent. */
925
static void
handle_vfork_child_exec_or_exit (int exec)
{
  /* EXEC is non-zero if the child exec'ed, zero if it exited.  On
     entry, the current inferior is the vfork child reporting the
     event.  */
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* PID of the vfork parent to resume once we're done, or -1 if
	 the parent should not be resumed (e.g., it was detached).  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct thread_info *tp;
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = 0;

	  /* Exactly one of these two restorers is engaged below,
	     depending on whether we are handling an exec or an
	     exit.  */
	  gdb::optional<scoped_restore_exited_inferior>
	    maybe_restore_inferior;
	  gdb::optional<scoped_restore_current_pspace_and_thread>
	    maybe_restore_thread;

	  /* If we're handling a child exit, then inferior_ptid points
	     at the inferior's pid, not to a thread.  */
	  if (!exec)
	    maybe_restore_inferior.emplace ();
	  else
	    maybe_restore_thread.emplace ();

	  /* We're letting loose of the parent.  */
	  tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent->pid;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to null_ptid while running clone_program_space, so
	     that clone_program_space doesn't want to read the
	     selected frame of a dead process.  */
	  scoped_restore restore_ptid
	    = make_scoped_restore (&inferior_ptid, null_ptid);

	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent->pid;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resuming vfork parent process %d\n",
				resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);
	}
    }
}
1068
1069 /* Enum strings for "set|show follow-exec-mode". */
1070
1071 static const char follow_exec_mode_new[] = "new";
1072 static const char follow_exec_mode_same[] = "same";
1073 static const char *const follow_exec_mode_names[] =
1074 {
1075 follow_exec_mode_new,
1076 follow_exec_mode_same,
1077 NULL,
1078 };
1079
1080 static const char *follow_exec_mode_string = follow_exec_mode_same;
/* "show follow-exec-mode" command handler: print the current
   follow-exec mode (VALUE is the already-formatted setting).  */

static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}
1087
1088 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1089
static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  /* PTID is the thread that reported the exec event;
     EXEC_FILE_TARGET is the new executable's path as seen by the
     target, assumed non-NULL.  */
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (process_ptid).c_str (),
		     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  /* Map the target path to a host path; may be NULL if the mapping
     failed (e.g., sysroot not set for remote debugging).  */
  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      inferior *org_inferior = current_inferior ();
      switch_to_inferior_no_thread (inf);
      push_target (org_inferior->process_target ());
      thread_info *thr = add_thread (inf->process_target (), ptid);
      switch_to_thread (thr);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1262
1263 /* The queue of threads that need to do a step-over operation to get
1264 past e.g., a breakpoint. What technique is used to step over the
1265 breakpoint/watchpoint does not matter -- all threads end up in the
1266 same queue, to maintain rough temporal order of execution, in order
1267 to avoid starvation, otherwise, we could e.g., find ourselves
1268 constantly stepping the same couple threads past their breakpoints
1269 over and over, if the single-step finish fast enough. */
1270 struct thread_info *step_over_queue_head;
1271
1272 /* Bit flags indicating what the thread needs to step over. */
1273
enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
/* Flag-set type combining the step_over_what_flag bits above.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1285
1286 /* Info about an instruction that is being stepped over. */
1287
struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The thread's global number.  -1 means no thread is currently
     stepping over a breakpoint (see clear_step_over_info).  */
  int thread;
};
1304
1305 /* The step-over info of the location that is being stepped over.
1306
1307 Note that with async/breakpoint always-inserted mode, a user might
1308 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1309 being stepped over. As setting a new breakpoint inserts all
1310 breakpoints, we need to make sure the breakpoint being stepped over
1311 isn't inserted then. We do that by only clearing the step-over
1312 info when the step-over is actually finished (or aborted).
1313
1314 Presently GDB can only step over one breakpoint at any given time.
1315 Given threads that can't run code in the same address space as the
1316 breakpoint's can't really miss the breakpoint, GDB could be taught
1317 to step-over at most one breakpoint per address space (so this info
1318 could move to the address space object if/when GDB is extended).
1319 The set of breakpoints being stepped over will normally be much
1320 smaller than the set of all breakpoints, so a flag in the
1321 breakpoint location structure would be wasteful. A separate list
1322 also saves complexity and run-time, as otherwise we'd have to go
1323 through all breakpoint locations clearing their flag whenever we
1324 start a new sequence. Similar considerations weigh against storing
1325 this info in the thread object. Plus, not all step overs actually
1326 have breakpoint locations -- e.g., stepping past a single-step
1327 breakpoint, or stepping to complete a non-continuable
1328 watchpoint. */
1329 static struct step_over_info step_over_info;
1330
1331 /* Record the address of the breakpoint/instruction we're currently
1332 stepping over.
1333 N.B. We record the aspace and address now, instead of say just the thread,
1334 because when we need the info later the thread may be running. */
1335
1336 static void
1337 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1338 int nonsteppable_watchpoint_p,
1339 int thread)
1340 {
1341 step_over_info.aspace = aspace;
1342 step_over_info.address = address;
1343 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1344 step_over_info.thread = thread;
1345 }
1346
1347 /* Called when we're not longer stepping over a breakpoint / an
1348 instruction, so all breakpoints are free to be (re)inserted. */
1349
1350 static void
1351 clear_step_over_info (void)
1352 {
1353 if (debug_infrun)
1354 fprintf_unfiltered (gdb_stdlog,
1355 "infrun: clear_step_over_info\n");
1356 step_over_info.aspace = NULL;
1357 step_over_info.address = 0;
1358 step_over_info.nonsteppable_watchpoint_p = 0;
1359 step_over_info.thread = -1;
1360 }
1361
1362 /* See infrun.h. */
1363
1364 int
1365 stepping_past_instruction_at (struct address_space *aspace,
1366 CORE_ADDR address)
1367 {
1368 return (step_over_info.aspace != NULL
1369 && breakpoint_address_match (aspace, address,
1370 step_over_info.aspace,
1371 step_over_info.address));
1372 }
1373
1374 /* See infrun.h. */
1375
1376 int
1377 thread_is_stepping_over_breakpoint (int thread)
1378 {
1379 return (step_over_info.thread != -1
1380 && thread == step_over_info.thread);
1381 }
1382
1383 /* See infrun.h. */
1384
/* See infrun.h.  Returns non-zero iff the current step-over is for a
   nonsteppable watchpoint, in which case watchpoint insertion is
   skipped.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}
1390
1391 /* Returns true if step-over info is valid. */
1392
1393 static int
1394 step_over_info_valid_p (void)
1395 {
1396 return (step_over_info.aspace != NULL
1397 || stepping_past_nonsteppable_watchpoint ());
1398 }
1399
1400 \f
1401 /* Displaced stepping. */
1402
1403 /* In non-stop debugging mode, we must take special care to manage
1404 breakpoints properly; in particular, the traditional strategy for
1405 stepping a thread past a breakpoint it has hit is unsuitable.
1406 'Displaced stepping' is a tactic for stepping one thread past a
1407 breakpoint it has hit while ensuring that other threads running
1408 concurrently will hit the breakpoint as they should.
1409
1410 The traditional way to step a thread T off a breakpoint in a
1411 multi-threaded program in all-stop mode is as follows:
1412
1413 a0) Initially, all threads are stopped, and breakpoints are not
1414 inserted.
1415 a1) We single-step T, leaving breakpoints uninserted.
1416 a2) We insert breakpoints, and resume all threads.
1417
1418 In non-stop debugging, however, this strategy is unsuitable: we
1419 don't want to have to stop all threads in the system in order to
1420 continue or step T past a breakpoint. Instead, we use displaced
1421 stepping:
1422
1423 n0) Initially, T is stopped, other threads are running, and
1424 breakpoints are inserted.
1425 n1) We copy the instruction "under" the breakpoint to a separate
1426 location, outside the main code stream, making any adjustments
1427 to the instruction, register, and memory state as directed by
1428 T's architecture.
1429 n2) We single-step T over the instruction at its new location.
1430 n3) We adjust the resulting register and memory state as directed
1431 by T's architecture. This includes resetting T's PC to point
1432 back into the main instruction stream.
1433 n4) We resume T.
1434
1435 This approach depends on the following gdbarch methods:
1436
1437 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1438 indicate where to copy the instruction, and how much space must
1439 be reserved there. We use these in step n1.
1440
1441 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1442 address, and makes any necessary adjustments to the instruction,
1443 register contents, and memory. We use this in step n1.
1444
1445 - gdbarch_displaced_step_fixup adjusts registers and memory after
1446 we have successfully single-stepped the instruction, to yield the
1447 same effect the instruction would have had if we had executed it
1448 at its original address. We use this in step n3.
1449
1450 The gdbarch_displaced_step_copy_insn and
1451 gdbarch_displaced_step_fixup functions must be written so that
1452 copying an instruction with gdbarch_displaced_step_copy_insn,
1453 single-stepping across the copied instruction, and then applying
1454 gdbarch_displaced_insn_fixup should have the same effects on the
1455 thread's memory and registers as stepping the instruction in place
1456 would have. Exactly which responsibilities fall to the copy and
1457 which fall to the fixup is up to the author of those functions.
1458
1459 See the comments in gdbarch.sh for details.
1460
1461 Note that displaced stepping and software single-step cannot
1462 currently be used in combination, although with some care I think
1463 they could be made to. Software single-step works by placing
1464 breakpoints on all possible subsequent instructions; if the
1465 displaced instruction is a PC-relative jump, those breakpoints
1466 could fall in very strange places --- on pages that aren't
1467 executable, or at addresses that are not proper instruction
1468 boundaries. (We do generally let other threads run while we wait
1469 to hit the software single-step breakpoint, and they might
1470 encounter such a corrupted instruction.) One way to work around
1471 this would be to have gdbarch_displaced_step_copy_insn fully
1472 simulate the effect of PC-relative instructions (and return NULL)
1473 on architectures that use software single-stepping.
1474
1475 In non-stop mode, we can have independent and simultaneous step
1476 requests, so more than one thread may need to simultaneously step
1477 over a breakpoint. The current implementation assumes there is
1478 only one scratch space per process. In this case, we have to
1479 serialize access to the scratch space. If thread A wants to step
1480 over a breakpoint, but we are currently waiting for some other
1481 thread to complete a displaced step, we leave thread A stopped and
1482 place it in the displaced_step_request_queue. Whenever a displaced
1483 step finishes, we pick the next thread in the queue and start a new
1484 displaced step operation on it. See displaced_step_prepare and
1485 displaced_step_fixup for details. */
1486
/* Default destructor for displaced_step_closure.  Defined
   out-of-line so the class's vtable/key function lives in this
   translation unit.  */

displaced_step_closure::~displaced_step_closure () = default;
1490
1491 /* Get the displaced stepping state of process PID. */
1492
/* Get the displaced stepping state of inferior INF.  The state is
   stored directly in the inferior object, so this never returns
   NULL.  */

static displaced_step_inferior_state *
get_displaced_stepping_state (inferior *inf)
{
  return &inf->displaced_step_state;
}
1498
1499 /* Returns true if any inferior has a thread doing a displaced
1500 step. */
1501
1502 static bool
1503 displaced_step_in_progress_any_inferior ()
1504 {
1505 for (inferior *i : all_inferiors ())
1506 {
1507 if (i->displaced_step_state.step_thread != nullptr)
1508 return true;
1509 }
1510
1511 return false;
1512 }
1513
1514 /* Return true if thread represented by PTID is doing a displaced
1515 step. */
1516
1517 static int
1518 displaced_step_in_progress_thread (thread_info *thread)
1519 {
1520 gdb_assert (thread != NULL);
1521
1522 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
1523 }
1524
1525 /* Return true if process PID has a thread doing a displaced step. */
1526
1527 static int
1528 displaced_step_in_progress (inferior *inf)
1529 {
1530 return get_displaced_stepping_state (inf)->step_thread != nullptr;
1531 }
1532
1533 /* If inferior is in displaced stepping, and ADDR equals to starting address
1534 of copy area, return corresponding displaced_step_closure. Otherwise,
1535 return NULL. */
1536
1537 struct displaced_step_closure*
1538 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1539 {
1540 displaced_step_inferior_state *displaced
1541 = get_displaced_stepping_state (current_inferior ());
1542
1543 /* If checking the mode of displaced instruction in copy area. */
1544 if (displaced->step_thread != nullptr
1545 && displaced->step_copy == addr)
1546 return displaced->step_closure.get ();
1547
1548 return NULL;
1549 }
1550
/* Observer callback invoked when inferior INF exits: discard any
   pending displaced-stepping state, since it refers to a process
   that no longer exists.  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
}
1556
1557 /* If ON, and the architecture supports it, GDB will use displaced
1558 stepping to step over breakpoints. If OFF, or if the architecture
1559 doesn't support it, GDB will instead use the traditional
1560 hold-and-step approach. If AUTO (which is the default), GDB will
1561 decide which technique to use to step over breakpoints depending on
1562 whether the target works in a non-stop way (see use_displaced_stepping). */
1563
1564 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1565
1566 static void
1567 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1568 struct cmd_list_element *c,
1569 const char *value)
1570 {
1571 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1572 fprintf_filtered (file,
1573 _("Debugger's willingness to use displaced stepping "
1574 "to step over breakpoints is %s (currently %s).\n"),
1575 value, target_is_non_stop_p () ? "on" : "off");
1576 else
1577 fprintf_filtered (file,
1578 _("Debugger's willingness to use displaced stepping "
1579 "to step over breakpoints is %s.\n"), value);
1580 }
1581
1582 /* Return true if the gdbarch implements the required methods to use
1583 displaced stepping. */
1584
/* Return true if the gdbarch ARCH implements the required methods to
   use displaced stepping.  */

static bool
gdbarch_supports_displaced_stepping (gdbarch *arch)
{
  /* Only check for the presence of step_copy_insn.  Other required methods
     are checked by the gdbarch validation.  */
  return gdbarch_displaced_step_copy_insn_p (arch);
}
1592
1593 /* Return non-zero if displaced stepping can/should be used to step
1594 over breakpoints of thread TP. */
1595
1596 static bool
1597 use_displaced_stepping (thread_info *tp)
1598 {
1599 /* If the user disabled it explicitly, don't use displaced stepping. */
1600 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1601 return false;
1602
1603 /* If "auto", only use displaced stepping if the target operates in a non-stop
1604 way. */
1605 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1606 && !target_is_non_stop_p ())
1607 return false;
1608
1609 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1610
1611 /* If the architecture doesn't implement displaced stepping, don't use
1612 it. */
1613 if (!gdbarch_supports_displaced_stepping (gdbarch))
1614 return false;
1615
1616 /* If recording, don't use displaced stepping. */
1617 if (find_record_target () != nullptr)
1618 return false;
1619
1620 displaced_step_inferior_state *displaced_state
1621 = get_displaced_stepping_state (tp->inf);
1622
1623 /* If displaced stepping failed before for this inferior, don't bother trying
1624 again. */
1625 if (displaced_state->failed_before)
1626 return false;
1627
1628 return true;
1629 }
1630
1631 /* Simple function wrapper around displaced_step_inferior_state::reset. */
1632
/* Simple function wrapper around displaced_step_inferior_state::reset,
   so it can be used as the target of FORWARD_SCOPE_EXIT below.  */

static void
displaced_step_reset (displaced_step_inferior_state *displaced)
{
  displaced->reset ();
}
1638
1639 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1640 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1641
1642 using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1643
1644 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1645 void
1646 displaced_step_dump_bytes (struct ui_file *file,
1647 const gdb_byte *buf,
1648 size_t len)
1649 {
1650 int i;
1651
1652 for (i = 0; i < len; i++)
1653 fprintf_unfiltered (file, "%02x ", buf[i]);
1654 fputs_unfiltered ("\n", file);
1655 }
1656
1657 /* Prepare to single-step, using displaced stepping.
1658
1659 Note that we cannot use displaced stepping when we have a signal to
1660 deliver. If we have a signal to deliver and an instruction to step
1661 over, then after the step, there will be no indication from the
1662 target whether the thread entered a signal handler or ignored the
1663 signal and stepped over the instruction successfully --- both cases
1664 result in a simple SIGTRAP. In the first case we mustn't do a
1665 fixup, and in the second case we must --- but we can't tell which.
1666 Comments in the code for 'random signals' in handle_inferior_event
1667 explain how we handle this case instead.
1668
1669 Returns 1 if preparing was successful -- this thread is going to be
1670 stepped now; 0 if displaced stepping this thread got queued; or -1
1671 if this instruction can't be displaced stepped. */
1672
1673 static int
1674 displaced_step_prepare_throw (thread_info *tp)
1675 {
1676 regcache *regcache = get_thread_regcache (tp);
1677 struct gdbarch *gdbarch = regcache->arch ();
1678 const address_space *aspace = regcache->aspace ();
1679 CORE_ADDR original, copy;
1680 ULONGEST len;
1681 int status;
1682
1683 /* We should never reach this function if the architecture does not
1684 support displaced stepping. */
1685 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
1686
1687 /* Nor if the thread isn't meant to step over a breakpoint. */
1688 gdb_assert (tp->control.trap_expected);
1689
1690 /* Disable range stepping while executing in the scratch pad. We
1691 want a single-step even if executing the displaced instruction in
1692 the scratch buffer lands within the stepping range (e.g., a
1693 jump/branch). */
1694 tp->control.may_range_step = 0;
1695
1696 /* We have to displaced step one thread at a time, as we only have
1697 access to a single scratch space per inferior. */
1698
1699 displaced_step_inferior_state *displaced
1700 = get_displaced_stepping_state (tp->inf);
1701
1702 if (displaced->step_thread != nullptr)
1703 {
1704 /* Already waiting for a displaced step to finish. Defer this
1705 request and place in queue. */
1706
1707 if (debug_displaced)
1708 fprintf_unfiltered (gdb_stdlog,
1709 "displaced: deferring step of %s\n",
1710 target_pid_to_str (tp->ptid).c_str ());
1711
1712 thread_step_over_chain_enqueue (tp);
1713 return 0;
1714 }
1715 else
1716 {
1717 if (debug_displaced)
1718 fprintf_unfiltered (gdb_stdlog,
1719 "displaced: stepping %s now\n",
1720 target_pid_to_str (tp->ptid).c_str ());
1721 }
1722
1723 displaced_step_reset (displaced);
1724
1725 scoped_restore_current_thread restore_thread;
1726
1727 switch_to_thread (tp);
1728
1729 original = regcache_read_pc (regcache);
1730
1731 copy = gdbarch_displaced_step_location (gdbarch);
1732 len = gdbarch_max_insn_length (gdbarch);
1733
1734 if (breakpoint_in_range_p (aspace, copy, len))
1735 {
1736 /* There's a breakpoint set in the scratch pad location range
1737 (which is usually around the entry point). We'd either
1738 install it before resuming, which would overwrite/corrupt the
1739 scratch pad, or if it was already inserted, this displaced
1740 step would overwrite it. The latter is OK in the sense that
1741 we already assume that no thread is going to execute the code
1742 in the scratch pad range (after initial startup) anyway, but
1743 the former is unacceptable. Simply punt and fallback to
1744 stepping over this breakpoint in-line. */
1745 if (debug_displaced)
1746 {
1747 fprintf_unfiltered (gdb_stdlog,
1748 "displaced: breakpoint set in scratch pad. "
1749 "Stepping over breakpoint in-line instead.\n");
1750 }
1751
1752 return -1;
1753 }
1754
1755 /* Save the original contents of the copy area. */
1756 displaced->step_saved_copy.resize (len);
1757 status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
1758 if (status != 0)
1759 throw_error (MEMORY_ERROR,
1760 _("Error accessing memory address %s (%s) for "
1761 "displaced-stepping scratch space."),
1762 paddress (gdbarch, copy), safe_strerror (status));
1763 if (debug_displaced)
1764 {
1765 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1766 paddress (gdbarch, copy));
1767 displaced_step_dump_bytes (gdb_stdlog,
1768 displaced->step_saved_copy.data (),
1769 len);
1770 };
1771
1772 displaced->step_closure
1773 = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);
1774 if (displaced->step_closure == NULL)
1775 {
1776 /* The architecture doesn't know how or want to displaced step
1777 this instruction or instruction sequence. Fallback to
1778 stepping over the breakpoint in-line. */
1779 return -1;
1780 }
1781
1782 /* Save the information we need to fix things up if the step
1783 succeeds. */
1784 displaced->step_thread = tp;
1785 displaced->step_gdbarch = gdbarch;
1786 displaced->step_original = original;
1787 displaced->step_copy = copy;
1788
1789 {
1790 displaced_step_reset_cleanup cleanup (displaced);
1791
1792 /* Resume execution at the copy. */
1793 regcache_write_pc (regcache, copy);
1794
1795 cleanup.release ();
1796 }
1797
1798 if (debug_displaced)
1799 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1800 paddress (gdbarch, copy));
1801
1802 return 1;
1803 }
1804
1805 /* Wrapper for displaced_step_prepare_throw that disabled further
1806 attempts at displaced stepping if we get a memory error. */
1807
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping for THREAD's inferior if we get a
   memory error (or the operation is unsupported).  Returns the same
   1/0/-1 as displaced_step_prepare_throw, with -1 also covering the
   caught-error case.  */

static int
displaced_step_prepare (thread_info *thread)
{
  int prepared = -1;

  try
    {
      prepared = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      struct displaced_step_inferior_state *displaced_state;

      /* Only MEMORY_ERROR / NOT_SUPPORTED_ERROR are handled here;
	 anything else propagates to the caller.  */
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      if (debug_infrun)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: disabling displaced stepping: %s\n",
			      ex.what ());
	}

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts.  */
      displaced_state
	= get_displaced_stepping_state (thread->inf);
      displaced_state->failed_before = 1;
    }

  return prepared;
}
1848
1849 static void
1850 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1851 const gdb_byte *myaddr, int len)
1852 {
1853 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
1854
1855 inferior_ptid = ptid;
1856 write_memory (memaddr, myaddr, len);
1857 }
1858
1859 /* Restore the contents of the copy area for thread PTID. */
1860
1861 static void
1862 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1863 ptid_t ptid)
1864 {
1865 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1866
1867 write_memory_ptid (ptid, displaced->step_copy,
1868 displaced->step_saved_copy.data (), len);
1869 if (debug_displaced)
1870 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1871 target_pid_to_str (ptid).c_str (),
1872 paddress (displaced->step_gdbarch,
1873 displaced->step_copy));
1874 }
1875
/* If we displaced stepped an instruction successfully, adjust
   registers and memory to yield the same effect the instruction would
   have had if we had executed it at its original address, and return
   1.  If the instruction didn't complete, relocate the PC and return
   -1.  If the thread wasn't displaced stepping, return 0.  */

static int
displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
{
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (event_thread->inf);
  int ret;

  /* Was this event for the thread we displaced?  */
  if (displaced->step_thread != event_thread)
    return 0;

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread, and displaced_step_restore performs ptid-dependent
     memory accesses using current_inferior() and current_top_target().  */
  switch_to_thread (event_thread);

  /* On scope exit, clear the inferior's displaced-stepping state so a
     new step-over can be started, even if we return via an error
     path.  */
  displaced_step_reset_cleanup cleanup (displaced);

  /* Put the original instruction bytes back in the scratch pad.  */
  displaced_step_restore (displaced, displaced->step_thread->ptid);

  /* Did the instruction complete successfully?  A plain SIGTRAP that
     isn't a non-steppable watchpoint hit means the single-step over
     the copied instruction finished.  */
  if (signal == GDB_SIGNAL_TRAP
      && !(target_stopped_by_watchpoint ()
	   && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
	       || target_have_steppable_watchpoint)))
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
				    displaced->step_closure.get (),
				    displaced->step_original,
				    displaced->step_copy,
				    get_thread_regcache (displaced->step_thread));
      ret = 1;
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
	 relocate the PC.  Map it from the scratch pad back to the
	 corresponding address in the original instruction's range.  */
      struct regcache *regcache = get_thread_regcache (event_thread);
      CORE_ADDR pc = regcache_read_pc (regcache);

      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
      ret = -1;
    }

  return ret;
}
1931
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* The target that reported the event (presumably the target PTID
     belongs to -- TODO confirm against handle_inferior_event).  */
  process_stratum_target *target;
  /* The ptid the event applies to.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status reported by the target.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in for
     this event.  */
  int stop_func_filled_in;
  /* Start/end addresses and name of the function the inferior stopped
     in (valid when stop_func_filled_in is set).  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if event handling decided to keep waiting for more
     events rather than presenting a stop to the user.  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
1955
1956 /* Clear ECS and set it to point at TP. */
1957
1958 static void
1959 reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1960 {
1961 memset (ecs, 0, sizeof (*ecs));
1962 ecs->event_thread = tp;
1963 ecs->ptid = tp->ptid;
1964 }
1965
1966 static void keep_going_pass_signal (struct execution_control_state *ecs);
1967 static void prepare_to_wait (struct execution_control_state *ecs);
1968 static int keep_going_stepped_thread (struct thread_info *tp);
1969 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
1970
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static int
start_step_over (void)
{
  struct thread_info *tp, *next;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return 0;

  for (tp = step_over_queue_head; tp != NULL; tp = next)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      /* Grab the successor now, since we may unlink TP from the queue
	 below.  */
      next = thread_step_over_chain_next (tp);

      /* If this inferior already has a displaced step in process,
	 don't start a new one.  */
      if (displaced_step_in_progress (tp->inf))
	continue;

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_inferior ())
	return 0;

      thread_step_over_chain_remove (tp);

      if (step_over_queue_head == NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over queue now empty\n");
	}

      /* A thread in the queue should be stopped and not yet resumed;
	 anything else means our bookkeeping went wrong.  */
      if (tp->control.trap_expected
	  || tp->resumed
	  || tp->executing)
	{
	  internal_error (__FILE__, __LINE__,
			  "[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  target_pid_to_str (tp->ptid).c_str (),
			  tp->control.trap_expected,
			  tp->resumed,
			  tp->executing);
	}

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resuming [%s] for step-over\n",
			    target_pid_to_str (tp->ptid).c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      reset_ecs (ecs, tp);
      keep_going_pass_signal (ecs);

      if (!ecs->wait_some_more)
	error (_("Command aborted."));

      gdb_assert (tp->resumed);

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  return 1;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  return 1;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process. */
    }

  return 0;
}
2084
2085 /* Update global variables holding ptids to hold NEW_PTID if they were
2086 holding OLD_PTID. */
2087 static void
2088 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2089 {
2090 if (inferior_ptid == old_ptid)
2091 inferior_ptid = new_ptid;
2092 }
2093
2094 \f
2095
/* Possible values of the "scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
/* Enumeration handed to add_setshow_enum_cmd for "set
   scheduler-locking"; must be NULL-terminated.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  NULL
};
/* Current "scheduler-locking" mode; defaults to "replay" (lock only
   while replaying a recording).  */
static const char *scheduler_mode = schedlock_replay;
/* "show" callback for the "scheduler-locking" setting: print the
   current mode (VALUE) to FILE.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for locking scheduler "
		      "during execution is \"%s\".\n"),
		    value);
}
2117
2118 static void
2119 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2120 {
2121 if (!target_can_lock_scheduler)
2122 {
2123 scheduler_mode = schedlock_off;
2124 error (_("Target '%s' cannot support this command."), target_shortname);
2125 }
2126 }
2127
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Backs the "set schedule-multiple" user setting --
   NOTE(review): presumed; confirm where the setshow command is
   registered.  */
bool sched_multi = false;
2132
2133 /* Try to setup for software single stepping over the specified location.
2134 Return 1 if target_resume() should use hardware single step.
2135
2136 GDBARCH the current gdbarch.
2137 PC the location to step over. */
2138
2139 static int
2140 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2141 {
2142 int hw_step = 1;
2143
2144 if (execution_direction == EXEC_FORWARD
2145 && gdbarch_software_single_step_p (gdbarch))
2146 hw_step = !insert_single_step_breakpoints (gdbarch);
2147
2148 return hw_step;
2149 }
2150
2151 /* See infrun.h. */
2152
2153 ptid_t
2154 user_visible_resume_ptid (int step)
2155 {
2156 ptid_t resume_ptid;
2157
2158 if (non_stop)
2159 {
2160 /* With non-stop mode on, threads are always handled
2161 individually. */
2162 resume_ptid = inferior_ptid;
2163 }
2164 else if ((scheduler_mode == schedlock_on)
2165 || (scheduler_mode == schedlock_step && step))
2166 {
2167 /* User-settable 'scheduler' mode requires solo thread
2168 resume. */
2169 resume_ptid = inferior_ptid;
2170 }
2171 else if ((scheduler_mode == schedlock_replay)
2172 && target_record_will_replay (minus_one_ptid, execution_direction))
2173 {
2174 /* User-settable 'scheduler' mode requires solo thread resume in replay
2175 mode. */
2176 resume_ptid = inferior_ptid;
2177 }
2178 else if (!sched_multi && target_supports_multi_process ())
2179 {
2180 /* Resume all threads of the current process (and none of other
2181 processes). */
2182 resume_ptid = ptid_t (inferior_ptid.pid ());
2183 }
2184 else
2185 {
2186 /* Resume all threads of all processes. */
2187 resume_ptid = RESUME_ALL;
2188 }
2189
2190 return resume_ptid;
2191 }
2192
2193 /* See infrun.h. */
2194
2195 process_stratum_target *
2196 user_visible_resume_target (ptid_t resume_ptid)
2197 {
2198 return (resume_ptid == minus_one_ptid && sched_multi
2199 ? NULL
2200 : current_inferior ()->process_target ());
2201 }
2202
2203 /* Return a ptid representing the set of threads that we will resume,
2204 in the perspective of the target, assuming run control handling
2205 does not require leaving some threads stopped (e.g., stepping past
2206 breakpoint). USER_STEP indicates whether we're about to start the
2207 target for a stepping command. */
2208
2209 static ptid_t
2210 internal_resume_ptid (int user_step)
2211 {
2212 /* In non-stop, we always control threads individually. Note that
2213 the target may always work in non-stop mode even with "set
2214 non-stop off", in which case user_visible_resume_ptid could
2215 return a wildcard ptid. */
2216 if (target_is_non_stop_p ())
2217 return inferior_ptid;
2218 else
2219 return user_visible_resume_ptid (user_step);
2220 }
2221
2222 /* Wrapper for target_resume, that handles infrun-specific
2223 bookkeeping. */
2224
2225 static void
2226 do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2227 {
2228 struct thread_info *tp = inferior_thread ();
2229
2230 gdb_assert (!tp->stop_requested);
2231
2232 /* Install inferior's terminal modes. */
2233 target_terminal::inferior ();
2234
2235 /* Avoid confusing the next resume, if the next stop/resume
2236 happens to apply to another thread. */
2237 tp->suspend.stop_signal = GDB_SIGNAL_0;
2238
2239 /* Advise target which signals may be handled silently.
2240
2241 If we have removed breakpoints because we are stepping over one
2242 in-line (in any thread), we need to receive all signals to avoid
2243 accidentally skipping a breakpoint during execution of a signal
2244 handler.
2245
2246 Likewise if we're displaced stepping, otherwise a trap for a
2247 breakpoint in a signal handler might be confused with the
2248 displaced step finishing. We don't make the displaced_step_fixup
2249 step distinguish the cases instead, because:
2250
2251 - a backtrace while stopped in the signal handler would show the
2252 scratch pad as frame older than the signal handler, instead of
2253 the real mainline code.
2254
2255 - when the thread is later resumed, the signal handler would
2256 return to the scratch pad area, which would no longer be
2257 valid. */
2258 if (step_over_info_valid_p ()
2259 || displaced_step_in_progress (tp->inf))
2260 target_pass_signals ({});
2261 else
2262 target_pass_signals (signal_pass);
2263
2264 target_resume (resume_ptid, step, sig);
2265
2266 target_commit_resume ();
2267
2268 if (target_can_async_p ())
2269 target_async (1);
2270 }
2271
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();
  struct thread_info *tp = inferior_thread ();
  CORE_ADDR pc = regcache_read_pc (regcache);
  const address_space *aspace = regcache->aspace ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  int step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  /* If the thread already has an event we haven't shown the user yet,
     don't touch the target at all; just mark the thread resumed and
     arrange for the pending event to be processed.  */
  if (tp->suspend.waitstatus_pending_p)
    {
      if (debug_infrun)
	{
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: resume: thread %s has pending wait "
			      "status %s (currently_stepping=%d).\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      statstr.c_str (),
			      currently_stepping (tp));
	}

      tp->inf->process_target ()->threads_executing = true;
      tp->resumed = true;

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   target_pid_to_str (tp->ptid).c_str ());
	}

      tp->suspend.stop_signal = GDB_SIGNAL_0;

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have an event to process.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume : clear step\n");
      step = 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resume (step=%d, signal=%s), "
			"trap_expected=%d, current thread [%s] at %s\n",
			step, gdb_signal_to_symbol_string (sig),
			tp->control.trap_expected,
			target_pid_to_str (inferior_ptid).c_str (),
			paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint, "
				"deliver signal first\n");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint\n");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
	      tp->resumed = true;
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      int prepared = displaced_step_prepare (tp);

      /* prepared == 0: another thread holds the scratch pad; we were
	 queued and must not resume yet.  */
      if (prepared == 0)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"Got placed in step-over queue\n");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepared < 0)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ();

	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), 0, tp->global_num);

	  step = maybe_software_singlestep (gdbarch, pc);

	  insert_breakpoints ();
	}
      else if (prepared > 0)
	{
	  struct displaced_step_inferior_state *displaced;

	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  displaced = get_displaced_stepping_state (tp->inf);
	  step = gdbarch_displaced_step_hw_singlestep
	    (gdbarch, displaced->step_closure.get ());
	}
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would take us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume: [%s] stepped breakpoint\n",
			    target_pid_to_str (tp->ptid).c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = 0;
    }

  if (debug_displaced
      && tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp);
      struct gdbarch *resume_gdbarch = resume_regcache->arch ();
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
			  paddress (resume_gdbarch, actual_pc));
      read_memory (actual_pc, buf, sizeof (buf));
      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  tp->resumed = true;
}
2656
2657 /* Resume the inferior. SIG is the signal to give the inferior
2658 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2659 rolls back state on error. */
2660
2661 static void
2662 resume (gdb_signal sig)
2663 {
2664 try
2665 {
2666 resume_1 (sig);
2667 }
2668 catch (const gdb_exception &ex)
2669 {
2670 /* If resuming is being aborted for any reason, delete any
2671 single-step breakpoint resume_1 may have created, to avoid
2672 confusing the following resumption, and to avoid leaving
2673 single-step breakpoints perturbing other threads, in case
2674 we're running in non-stop mode. */
2675 if (inferior_ptid != null_ptid)
2676 delete_single_step_breakpoints (inferior_thread ());
2677 throw;
2678 }
2679 }
2680
2681 \f
2682 /* Proceeding. */
2683
/* See infrun.h.  */

/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  Incremented by new_stop_id, read via
   get_stop_id.  */
static ULONGEST current_stop_id;
2694
/* See infrun.h.  Read accessor for current_stop_id.  */

ULONGEST
get_stop_id (void)
{
  return current_stop_id;
}
2702
2703 /* Called when we report a user visible stop. */
2704
2705 static void
2706 new_stop_id (void)
2707 {
2708 current_stop_id++;
2709 }
2710
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: clear_proceed_status_thread (%s)\n",
			target_pid_to_str (tp->ptid).c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->suspend.waitstatus_pending_p)
    {
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: clear_proceed_status: pending "
				"event of %s was a finished step. "
				"Discarding.\n",
				target_pid_to_str (tp->ptid).c_str ());

	  tp->suspend.waitstatus_pending_p = 0;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else if (debug_infrun)
	{
	  /* Any other kind of pending event is kept; just log it.  */
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: clear_proceed_status_thread: thread %s "
			      "has pending wait status %s "
			      "(currently_stepping=%d).\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      statstr.c_str (),
			      currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->suspend.stop_signal))
    tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Discard the state machine (e.g. "finish"/"until") that was
     driving this thread, if any.  */
  delete tp->thread_fsm;
  tp->thread_fsm = NULL;

  /* Reset all per-thread run-control state to "plain continue".  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2780
/* See infrun.h.  Clears the proceed status of all threads about to be
   resumed (all-stop) or of the current thread only (non-stop), and
   resets the inferior's stop_soon state.  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      /* Any quiet-stop request from a previous command is done with.  */
      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  gdb::observers::about_to_proceed.notify ();
}
2825
2826 /* Returns true if TP is still stopped at a breakpoint that needs
2827 stepping-over in order to make progress. If the breakpoint is gone
2828 meanwhile, we can skip the whole step-over dance. */
2829
2830 static int
2831 thread_still_needs_step_over_bp (struct thread_info *tp)
2832 {
2833 if (tp->stepping_over_breakpoint)
2834 {
2835 struct regcache *regcache = get_thread_regcache (tp);
2836
2837 if (breakpoint_here_p (regcache->aspace (),
2838 regcache_read_pc (regcache))
2839 == ordinary_breakpoint_here)
2840 return 1;
2841
2842 tp->stepping_over_breakpoint = 0;
2843 }
2844
2845 return 0;
2846 }
2847
2848 /* Check whether thread TP still needs to start a step-over in order
2849 to make progress when resumed. Returns an bitwise or of enum
2850 step_over_what bits, indicating what needs to be stepped over. */
2851
2852 static step_over_what
2853 thread_still_needs_step_over (struct thread_info *tp)
2854 {
2855 step_over_what what = 0;
2856
2857 if (thread_still_needs_step_over_bp (tp))
2858 what |= STEP_OVER_BREAKPOINT;
2859
2860 if (tp->stepping_over_watchpoint
2861 && !target_have_steppable_watchpoint)
2862 what |= STEP_OVER_WATCHPOINT;
2863
2864 return what;
2865 }
2866
2867 /* Returns true if scheduler locking applies. STEP indicates whether
2868 we're about to do a step/next-like command to a thread. */
2869
2870 static int
2871 schedlock_applies (struct thread_info *tp)
2872 {
2873 return (scheduler_mode == schedlock_on
2874 || (scheduler_mode == schedlock_step
2875 && tp->control.stepping_command)
2876 || (scheduler_mode == schedlock_replay
2877 && target_record_will_replay (minus_one_ptid,
2878 execution_direction)));
2879 }
2880
2881 /* Calls target_commit_resume on all targets. */
2882
2883 static void
2884 commit_resume_all_targets ()
2885 {
2886 scoped_restore_current_thread restore_thread;
2887
2888 /* Map between process_target and a representative inferior. This
2889 is to avoid committing a resume in the same target more than
2890 once. Resumptions must be idempotent, so this is an
2891 optimization. */
2892 std::unordered_map<process_stratum_target *, inferior *> conn_inf;
2893
2894 for (inferior *inf : all_non_exited_inferiors ())
2895 if (inf->has_execution ())
2896 conn_inf[inf->process_target ()] = inf;
2897
2898 for (const auto &ci : conn_inf)
2899 {
2900 inferior *inf = ci.second;
2901 switch_to_inferior_no_thread (inf);
2902 target_commit_resume ();
2903 }
2904 }
2905
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.  Errors out (via error ()) if an unsupported combination
   is found; only does its checks when resuming everything in
   all-stop ("continue" with a wildcard resume).  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors (resume_target))
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution)
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* We're spanning more than one connection and at least
		 one of them can't run in non-stop mode: report the
		 offending connection.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
2960
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int started;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.

     NOTE(review): following the fork may switch the current
     thread/inferior (e.g. with follow-fork-mode child), so this must
     run before we sample the regcache / current thread below.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE, NULL);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();

  pc = regcache_read_pc (regcache);
  thread_info *cur_thr = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == cur_thr->suspend.stop_pc
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->suspend.stop_signal = siggnal;

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: proceed (addr=%s, signal=%s)\n",
			paddress (gdbarch, addr),
			gdb_signal_to_symbol_string (siggnal));

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: need to step-over [%s] first\n",
				target_pid_to_str (tp->ptid).c_str ());

	  thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc (regcache);

  {
    /* Defer per-target commit_resume until all resumes are queued;
       committed once, below, via commit_resume_all_targets.  */
    scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();

    started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);

	    if (!tp->inf->has_execution ())
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] target has "
				      "no execution\n",
				      target_pid_to_str (tp->ptid).c_str ());
		continue;
	      }

	    if (tp->resumed)
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] resumed\n",
				      target_pid_to_str (tp->ptid).c_str ());
		gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
		continue;
	      }

	    if (thread_is_in_step_over_chain (tp))
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] needs step-over\n",
				      target_pid_to_str (tp->ptid).c_str ());
		continue;
	      }

	    if (debug_infrun)
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: proceed: resuming %s\n",
				  target_pid_to_str (tp->ptid).c_str ());

	    reset_ecs (ecs, tp);
	    switch_to_thread (tp);
	    keep_going_pass_signal (ecs);
	    if (!ecs->wait_some_more)
	      error (_("Command aborted."));
	  }
      }
    else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
      {
	/* The thread wasn't started, and isn't queued, run it now.  */
	reset_ecs (ecs, cur_thr);
	switch_to_thread (cur_thr);
	keep_going_pass_signal (ecs);
	if (!ecs->wait_some_more)
	  error (_("Command aborted."));
      }
  }

  commit_resume_all_targets ();

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3219 \f
3220
3221 /* Start remote-debugging of a machine over a serial link. */
3222
3223 void
3224 start_remote (int from_tty)
3225 {
3226 inferior *inf = current_inferior ();
3227 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
3228
3229 /* Always go on waiting for the target, regardless of the mode. */
3230 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3231 indicate to wait_for_inferior that a target should timeout if
3232 nothing is returned (instead of just blocking). Because of this,
3233 targets expecting an immediate response need to, internally, set
3234 things up so that the target_wait() is forced to eventually
3235 timeout. */
3236 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3237 differentiate to its caller what the state of the target is after
3238 the initial open has been performed. Here we're assuming that
3239 the target has stopped. It should be possible to eventually have
3240 target_open() return to the caller an indication that the target
3241 is currently running and GDB state should be set to the same as
3242 for an async run. */
3243 wait_for_inferior (inf);
3244
3245 /* Now that the inferior has stopped, do any bookkeeping like
3246 loading shared libraries. We want to do this before normal_stop,
3247 so that the displayed frame is up to date. */
3248 post_create_inferior (current_top_target (), from_tty);
3249
3250 normal_stop ();
3251 }
3252
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  /* Reset breakpoint bookkeeping for a freshly-started inferior.  */
  breakpoint_init_inferior (inf_starting);

  /* Neutral proceed state; callers set what they need afterwards.  */
  clear_proceed_status (0);

  /* Forget the last reported wait event; it belonged to the previous
     inferior.  */
  nullify_last_target_wait_ptid ();

  previous_inferior_ptid = inferior_ptid;
}
3268
3269 \f
3270
3271 static void handle_inferior_event (struct execution_control_state *ecs);
3272
3273 static void handle_step_into_function (struct gdbarch *gdbarch,
3274 struct execution_control_state *ecs);
3275 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3276 struct execution_control_state *ecs);
3277 static void handle_signal_stop (struct execution_control_state *ecs);
3278 static void check_exception_resume (struct execution_control_state *,
3279 struct frame_info *);
3280
3281 static void end_stepping_range (struct execution_control_state *ecs);
3282 static void stop_waiting (struct execution_control_state *ecs);
3283 static void keep_going (struct execution_control_state *ecs);
3284 static void process_event_stop_test (struct execution_control_state *ecs);
3285 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
3286
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Only threads the frontend believes are running are
	 interesting here.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      /* A thread that is really executing will stop on its own and
	 report an event; nothing to synthesize.  */
      if (tp->executing)
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->suspend.waitstatus_pending_p)
	{
	  tp->suspend.waitstatus_pending_p = 1;
	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
	  tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->resumed = true;
    }
}
3341
3342 static void
3343 infrun_thread_thread_exit (struct thread_info *tp, int silent)
3344 {
3345 if (target_last_proc_target == tp->inf->process_target ()
3346 && target_last_wait_ptid == tp->ptid)
3347 nullify_last_target_wait_ptid ();
3348 }
3349
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  Each delete_* call is a no-op if TP has no such
   breakpoint.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3360
3361 /* If the target still has execution, call FUNC for each thread that
3362 just stopped. In all-stop, that's all the non-exited threads; in
3363 non-stop, that's the current thread, only. */
3364
3365 typedef void (*for_each_just_stopped_thread_callback_func)
3366 (struct thread_info *tp);
3367
3368 static void
3369 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3370 {
3371 if (!target_has_execution || inferior_ptid == null_ptid)
3372 return;
3373
3374 if (target_is_non_stop_p ())
3375 {
3376 /* If in non-stop mode, only the current thread stopped. */
3377 func (inferior_thread ());
3378 }
3379 else
3380 {
3381 /* In all-stop mode, all threads have stopped. */
3382 for (thread_info *tp : all_non_exited_threads ())
3383 func (tp);
3384 }
3385 }
3386
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3395
/* Delete the single-step breakpoints of the threads that just
   stopped.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3404
/* See infrun.h.  Logs a target_wait call's arguments and result to
   gdb_stdlog for "set debug infrun" purposes.  */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus *ws)
{
  std::string status_string = target_waitstatus_to_string (ws);
  string_file stb;

  /* The text is split over several lines because it was getting too long.
     Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
     output as a unit; we want only one timestamp printed if debug_timestamp
     is set.  */

  stb.printf ("infrun: target_wait (%d.%ld.%ld",
	      waiton_ptid.pid (),
	      waiton_ptid.lwp (),
	      waiton_ptid.tid ());
  if (waiton_ptid.pid () != -1)
    stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
  stb.printf (", status) =\n");
  stb.printf ("infrun: %d.%ld.%ld [%s],\n",
	      result_ptid.pid (),
	      result_ptid.lwp (),
	      result_ptid.tid (),
	      target_pid_to_str (result_ptid).c_str ());
  stb.printf ("infrun: %s\n", status_string.c_str ());

  /* This uses %s in part to handle %'s in the text, but also to avoid
     a gcc error: the format attribute requires a string literal.  */
  fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
}
3437
/* Select a thread at random, out of those which are resumed and have
   had events.  Returns NULL if no thread of INF matching WAITON_PTID
   has a pending event.  Randomization avoids starving any one thread
   when several have events ready.  */

static struct thread_info *
random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
{
  int num_events = 0;

  auto has_event = [&] (thread_info *tp)
    {
      return (tp->ptid.matches (waiton_ptid)
	      && tp->resumed
	      && tp->suspend.waitstatus_pending_p);
    };

  /* First see how many events we have.  Count only resumed threads
     that have an event pending.  */
  for (thread_info *tp : inf->non_exited_threads ())
    if (has_event (tp))
      num_events++;

  if (num_events == 0)
    return NULL;

  /* Now randomly pick a thread out of those that have had events.  */
  int random_selector = (int) ((num_events * (double) rand ())
			       / (RAND_MAX + 1.0));

  if (debug_infrun && num_events > 1)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: Found %d events, selecting #%d\n",
			num_events, random_selector);

  /* Select the Nth thread that has had an event.  */
  for (thread_info *tp : inf->non_exited_threads ())
    if (has_event (tp))
      if (random_selector-- == 0)
	return tp;

  /* The second pass must find the thread counted by the first.  */
  gdb_assert_not_reached ("event thread not found");
}
3479
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  Returns the ptid of the thread whose event is reported in
   *STATUS.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, int options)
{
  ptid_t event_ptid;
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake.  */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: Waiting for specific thread %s.\n",
			    target_pid_to_str (ptid).c_str ());

      /* We have a specific thread to check.  */
      tp = find_thread_ptid (inf, ptid);
      gdb_assert (tp != NULL);
      if (!tp->suspend.waitstatus_pending_p)
	tp = NULL;
    }

  /* A pending breakpoint event may have been invalidated in the
     meantime (PC moved, or the breakpoint was removed); if so, turn
     it into a spurious stop instead of reporting a stale hit.  */
  if (tp != NULL
      && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->suspend.stop_pc)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: PC of %s changed. was=%s, now=%s\n",
				target_pid_to_str (tp->ptid).c_str (),
				paddress (gdbarch, tp->suspend.stop_pc),
				paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: previous breakpoint of %s, at %s gone\n",
				target_pid_to_str (tp->ptid).c_str (),
				paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: pending event of %s cancelled.\n",
				target_pid_to_str (tp->ptid).c_str ());

	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  if (tp != NULL)
    {
      if (debug_infrun)
	{
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: Using pending wait status %s for %s.\n",
			      statstr.c_str (),
			      target_pid_to_str (tp->ptid).c_str ());
	}

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself).  */
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      /* The pending event is being consumed now; clear it.  */
      tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
      *status = tp->suspend.waitstatus;
      tp->suspend.waitstatus_pending_p = 0;

      /* Wake up the event loop again, until all pending events are
	 processed.  */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  if (deprecated_target_wait_hook)
    event_ptid = deprecated_target_wait_hook (ptid, status, options);
  else
    event_ptid = target_wait (ptid, status, options);

  return event_ptid;
}
3618
3619 /* Returns true if INF has any resumed thread with a status
3620 pending. */
3621
3622 static bool
3623 threads_are_resumed_pending_p (inferior *inf)
3624 {
3625 for (thread_info *tp : inf->non_exited_threads ())
3626 if (tp->resumed
3627 && tp->suspend.waitstatus_pending_p)
3628 return true;
3629
3630 return false;
3631 }
3632
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.  Returns
   true and fills ECS if some target reported an event; false (with
   ECS->ws.kind set to TARGET_WAITKIND_IGNORE) otherwise.  */

static bool
do_target_wait (ptid_t wait_ptid, execution_control_state *ecs, int options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at
     random, and then continue polling the rest of the inferior list
     starting from that one in a circular fashion until the whole list
     is polled once.  */

  auto inferior_matches = [&wait_ptid] (inferior *inf)
    {
      return (inf->process_target () != NULL
	      && (threads_are_executing (inf->process_target ())
		  || threads_are_resumed_pending_p (inf))
	      && ptid_t (inf->pid).matches (wait_ptid));
    };

  /* First see how many resumed inferiors we have.  */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.kind = TARGET_WAITKIND_IGNORE;
      return false;
    }

  /* Now randomly pick an inferior out of those that were resumed.  */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (debug_infrun && num_inferiors > 1)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: Found %d inferiors, starting at #%d\n",
			num_inferiors, random_selector);

  /* Select the Nth inferior that was resumed.  */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the resumed inferior's
     targets, starting from the selected one.  */

  auto do_wait = [&] (inferior *inf)
  {
    ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
    ecs->target = inf->process_target ();
    return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
  };

  /* Needed in all-stop+target-non-stop mode, because we end up here
     spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events.  */
  scoped_restore_current_thread restore_thread;

  /* Poll from SELECTED to the end of the list, then wrap around to
     the beginning and poll up to (but not including) SELECTED.  */
  int inf_num = selected->num;
  for (inferior *inf = selected; inf != NULL; inf = inf->next)
    if (inferior_matches (inf))
      if (do_wait (inf))
	return true;

  for (inferior *inf = inferior_list;
       inf != NULL && inf->num < inf_num;
       inf = inf->next)
    if (inferior_matches (inf))
      if (do_wait (inf))
	return true;

  ecs->ws.kind = TARGET_WAITKIND_IGNORE;
  return false;
}
3719
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  /* Matches any thread belonging to this inferior's process.  */
  ptid_t pid_ptid = ptid_t (inf->pid);

  displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced->step_thread == nullptr)
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Mark the inferior as detaching while we pump events; the
     scoped_restore puts the flag back even if we leave via an
     exception.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Process events until the in-progress displaced step finishes;
     handle_inferior_event is responsible for completing the step and
     clearing step_thread.  */
  while (displaced->step_thread != nullptr)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      do_target_wait (pid_ptid, ecs, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      scoped_finish_thread_state finish_state (inf->process_target (),
					       minus_one_ptid);

      /* Now figure out what to do with the result of the wait.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      finish_state.release ();

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  restore_detaching.release ();
	  error (_("Program exited while detaching"));
	}
    }

  restore_detaching.release ();
}
3788
3789 /* Wait for control to return from inferior to debugger.
3790
3791 If inferior gets a signal, we may decide to start it up again
3792 instead of returning. That is why there is a loop in this function.
3793 When this function actually returns it means the inferior
3794 should be left stopped and GDB should read more commands. */
3795
3796 static void
3797 wait_for_inferior (inferior *inf)
3798 {
3799 if (debug_infrun)
3800 fprintf_unfiltered
3801 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3802
3803 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
3804
3805 /* If an error happens while handling the event, propagate GDB's
3806 knowledge of the executing state to the frontend/user running
3807 state. */
3808 scoped_finish_thread_state finish_state
3809 (inf->process_target (), minus_one_ptid);
3810
3811 while (1)
3812 {
3813 struct execution_control_state ecss;
3814 struct execution_control_state *ecs = &ecss;
3815
3816 memset (ecs, 0, sizeof (*ecs));
3817
3818 overlay_cache_invalid = 1;
3819
3820 /* Flush target cache before starting to handle each event.
3821 Target was running and cache could be stale. This is just a
3822 heuristic. Running threads may modify target memory, but we
3823 don't get any event. */
3824 target_dcache_invalidate ();
3825
3826 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3827 ecs->target = inf->process_target ();
3828
3829 if (debug_infrun)
3830 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
3831
3832 /* Now figure out what to do with the result of the result. */
3833 handle_inferior_event (ecs);
3834
3835 if (!ecs->wait_some_more)
3836 break;
3837 }
3838
3839 /* No error, don't finish the state yet. */
3840 finish_state.release ();
3841 }
3842
3843 /* Cleanup that reinstalls the readline callback handler, if the
3844 target is running in the background. If while handling the target
3845 event something triggered a secondary prompt, like e.g., a
3846 pagination prompt, we'll have removed the callback handler (see
3847 gdb_readline_wrapper_line). Need to do this as we go back to the
3848 event loop, ready to process further input. Note this has no
3849 effect if the handler hasn't actually been removed, because calling
3850 rl_callback_handler_install resets the line buffer, thus losing
3851 input. */
3852
3853 static void
3854 reinstall_readline_callback_handler_cleanup ()
3855 {
3856 struct ui *ui = current_ui;
3857
3858 if (!ui->async)
3859 {
3860 /* We're not going back to the top level event loop yet. Don't
3861 install the readline callback, as it'd prep the terminal,
3862 readline-style (raw, noecho) (e.g., --batch). We'll install
3863 it the next time the prompt is displayed, when we're ready
3864 for input. */
3865 return;
3866 }
3867
3868 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
3869 gdb_rl_callback_handler_reinstall ();
3870 }
3871
3872 /* Clean up the FSMs of threads that are now stopped. In non-stop,
3873 that's just the event thread. In all-stop, that's all threads. */
3874
3875 static void
3876 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3877 {
3878 if (ecs->event_thread != NULL
3879 && ecs->event_thread->thread_fsm != NULL)
3880 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
3881
3882 if (!non_stop)
3883 {
3884 for (thread_info *thr : all_non_exited_threads ())
3885 {
3886 if (thr->thread_fsm == NULL)
3887 continue;
3888 if (thr == ecs->event_thread)
3889 continue;
3890
3891 switch_to_thread (thr);
3892 thr->thread_fsm->clean_up (thr);
3893 }
3894
3895 if (ecs->event_thread != NULL)
3896 switch_to_thread (ecs->event_thread);
3897 }
3898 }
3899
3900 /* Helper for all_uis_check_sync_execution_done that works on the
3901 current UI. */
3902
3903 static void
3904 check_curr_ui_sync_execution_done (void)
3905 {
3906 struct ui *ui = current_ui;
3907
3908 if (ui->prompt_state == PROMPT_NEEDED
3909 && ui->async
3910 && !gdb_in_secondary_prompt_p (ui))
3911 {
3912 target_terminal::ours ();
3913 gdb::observers::sync_execution_done.notify ();
3914 ui_register_input_event_handler (ui);
3915 }
3916 }
3917
3918 /* See infrun.h. */
3919
3920 void
3921 all_uis_check_sync_execution_done (void)
3922 {
3923 SWITCH_THRU_ALL_UIS ()
3924 {
3925 check_curr_ui_sync_execution_done ();
3926 }
3927 }
3928
3929 /* See infrun.h. */
3930
3931 void
3932 all_uis_on_sync_execution_starting (void)
3933 {
3934 SWITCH_THRU_ALL_UIS ()
3935 {
3936 if (current_ui->prompt_state == PROMPT_NEEDED)
3937 async_disable_stdin ();
3938 }
3939 }
3940
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  CLIENT_DATA is not used.  */

void
fetch_inferior_event (void *client_data)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int cmd_done = 0;

  memset (ecs, 0, sizeof (*ecs));

  /* Events are always processed with the main UI as current UI.  This
     way, warnings, debug output, etc. are always consistently sent to
     the main console.  */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* End up with readline processing input, if necessary.  */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging.  If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event.  */
    gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events.  Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands.  */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event.  Target
       was running and cache could be stale.  This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event.  */
    target_dcache_invalidate ();

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Non-blocking poll (TARGET_WNOHANG).  If no event is ready yet,
       just return to the event loop; it will call us back when one
       is.  */
    if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
      return;

    gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);

    /* Switch to the target that generated the event, so we can do
       target calls.  Any inferior bound to the target will do, so we
       just switch to the first we find.  */
    for (inferior *inf : all_inferiors (ecs->target))
      {
	switch_to_inferior_no_thread (inf);
	break;
      }

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state.  */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
    scoped_finish_thread_state finish_state (ecs->target, finish_ptid);

    /* Get executed before scoped_restore_current_thread above to apply
       still for the thread which has thrown the exception.  */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Now figure out what to do with the result of the wait.  */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      {
	/* No more events are expected for this command; decide
	   whether to report a stop to the user.  */
	struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
	int should_stop = 1;
	struct thread_info *thr = ecs->event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	/* Let the event thread's FSM, if any, decide whether this
	   stop should be reported or execution should continue.  */
	if (thr != NULL)
	  {
	    struct thread_fsm *thread_fsm = thr->thread_fsm;

	    if (thread_fsm != NULL)
	      should_stop = thread_fsm->should_stop (thr);
	  }

	if (!should_stop)
	  {
	    keep_going (ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    int proceeded = 0;

	    clean_up_just_stopped_threads_fsms (ecs);

	    if (thr != NULL && thr->thread_fsm != NULL)
	      should_notify_stop = thr->thread_fsm->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit.  */
		if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    if (!proceeded)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE, NULL);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone.  We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited).  We chose the
	       later, just because that's what GDB used to do.  After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.".  */
	    if (!non_stop
		&& cmd_done
		&& ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet.  */
    finish_state.release ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here.  */
  }

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input).  */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    printf_unfiltered (_("completed.\n"));
}
4108
4109 /* See infrun.h. */
4110
4111 void
4112 set_step_info (thread_info *tp, struct frame_info *frame,
4113 struct symtab_and_line sal)
4114 {
4115 /* This can be removed once this function no longer implicitly relies on the
4116 inferior_ptid value. */
4117 gdb_assert (inferior_ptid == tp->ptid);
4118
4119 tp->control.step_frame_id = get_frame_id (frame);
4120 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4121
4122 tp->current_symtab = sal.symtab;
4123 tp->current_line = sal.line;
4124 }
4125
4126 /* Clear context switchable stepping state. */
4127
4128 void
4129 init_thread_stepping_state (struct thread_info *tss)
4130 {
4131 tss->stepped_breakpoint = 0;
4132 tss->stepping_over_breakpoint = 0;
4133 tss->stepping_over_watchpoint = 0;
4134 tss->step_after_step_resume_breakpoint = 0;
4135 }
4136
4137 /* See infrun.h. */
4138
4139 void
4140 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4141 target_waitstatus status)
4142 {
4143 target_last_proc_target = target;
4144 target_last_wait_ptid = ptid;
4145 target_last_waitstatus = status;
4146 }
4147
4148 /* See infrun.h. */
4149
4150 void
4151 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4152 target_waitstatus *status)
4153 {
4154 if (target != nullptr)
4155 *target = target_last_proc_target;
4156 if (ptid != nullptr)
4157 *ptid = target_last_wait_ptid;
4158 if (status != nullptr)
4159 *status = target_last_waitstatus;
4160 }
4161
4162 /* See infrun.h. */
4163
4164 void
4165 nullify_last_target_wait_ptid (void)
4166 {
4167 target_last_proc_target = nullptr;
4168 target_last_wait_ptid = minus_one_ptid;
4169 target_last_waitstatus = {};
4170 }
4171
4172 /* Switch thread contexts. */
4173
4174 static void
4175 context_switch (execution_control_state *ecs)
4176 {
4177 if (debug_infrun
4178 && ecs->ptid != inferior_ptid
4179 && (inferior_ptid == null_ptid
4180 || ecs->event_thread != inferior_thread ()))
4181 {
4182 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
4183 target_pid_to_str (inferior_ptid).c_str ());
4184 fprintf_unfiltered (gdb_stdlog, "to %s\n",
4185 target_pid_to_str (ecs->ptid).c_str ());
4186 }
4187
4188 switch_to_thread (ecs->event_thread);
4189 }
4190
/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint.  If so,
   adjust the PC, per gdbarch_decr_pc_after_break.  THREAD is the
   thread that reported the stop described by WS.  */

static void
adjust_pc_after_break (struct thread_info *thread,
		       struct target_waitstatus *ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws->kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ws->value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
	    PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      /* NOTE(review): presumably this keeps the PC fix-up below from
	 being treated as inferior execution by the full record
	 target; see record_full_gdb_operation_disable_set.  */
      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
4337
4338 static int
4339 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4340 {
4341 for (frame = get_prev_frame (frame);
4342 frame != NULL;
4343 frame = get_prev_frame (frame))
4344 {
4345 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4346 return 1;
4347 if (get_frame_type (frame) != INLINE_FRAME)
4348 break;
4349 }
4350
4351 return 0;
4352 }
4353
4354 /* Look for an inline frame that is marked for skip.
4355 If PREV_FRAME is TRUE start at the previous frame,
4356 otherwise start at the current frame. Stop at the
4357 first non-inline frame, or at the frame where the
4358 step started. */
4359
4360 static bool
4361 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4362 {
4363 struct frame_info *frame = get_current_frame ();
4364
4365 if (prev_frame)
4366 frame = get_prev_frame (frame);
4367
4368 for (; frame != NULL; frame = get_prev_frame (frame))
4369 {
4370 const char *fn = NULL;
4371 symtab_and_line sal;
4372 struct symbol *sym;
4373
4374 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4375 break;
4376 if (get_frame_type (frame) != INLINE_FRAME)
4377 break;
4378
4379 sal = find_frame_sal (frame);
4380 sym = get_frame_function (frame);
4381
4382 if (sym != NULL)
4383 fn = sym->print_name ();
4384
4385 if (sal.line != 0
4386 && function_name_is_marked_for_skip (fn, sal))
4387 return true;
4388 }
4389
4390 return false;
4391 }
4392
4393 /* If the event thread has the stop requested flag set, pretend it
4394 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4395 target_stop). */
4396
4397 static bool
4398 handle_stop_requested (struct execution_control_state *ecs)
4399 {
4400 if (ecs->event_thread->stop_requested)
4401 {
4402 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4403 ecs->ws.value.sig = GDB_SIGNAL_0;
4404 handle_signal_stop (ecs);
4405 return true;
4406 }
4407 return false;
4408 }
4409
4410 /* Auxiliary function that handles syscall entry/return events.
4411 It returns 1 if the inferior should keep going (and GDB
4412 should ignore the event), or 0 if the event deserves to be
4413 processed. */
4414
4415 static int
4416 handle_syscall_event (struct execution_control_state *ecs)
4417 {
4418 struct regcache *regcache;
4419 int syscall_number;
4420
4421 context_switch (ecs);
4422
4423 regcache = get_thread_regcache (ecs->event_thread);
4424 syscall_number = ecs->ws.value.syscall_number;
4425 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
4426
4427 if (catch_syscall_enabled () > 0
4428 && catching_syscall_number (syscall_number) > 0)
4429 {
4430 if (debug_infrun)
4431 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4432 syscall_number);
4433
4434 ecs->event_thread->control.stop_bpstat
4435 = bpstat_stop_status (regcache->aspace (),
4436 ecs->event_thread->suspend.stop_pc,
4437 ecs->event_thread, &ecs->ws);
4438
4439 if (handle_stop_requested (ecs))
4440 return 0;
4441
4442 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4443 {
4444 /* Catchpoint hit. */
4445 return 0;
4446 }
4447 }
4448
4449 if (handle_stop_requested (ecs))
4450 return 0;
4451
4452 /* If no catchpoint triggered for this, then keep going. */
4453 keep_going (ecs);
4454 return 1;
4455 }
4456
4457 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4458
4459 static void
4460 fill_in_stop_func (struct gdbarch *gdbarch,
4461 struct execution_control_state *ecs)
4462 {
4463 if (!ecs->stop_func_filled_in)
4464 {
4465 const block *block;
4466
4467 /* Don't care about return value; stop_func_start and stop_func_name
4468 will both be 0 if it doesn't work. */
4469 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4470 &ecs->stop_func_name,
4471 &ecs->stop_func_start,
4472 &ecs->stop_func_end,
4473 &block);
4474
4475 /* The call to find_pc_partial_function, above, will set
4476 stop_func_start and stop_func_end to the start and end
4477 of the range containing the stop pc. If this range
4478 contains the entry pc for the block (which is always the
4479 case for contiguous blocks), advance stop_func_start past
4480 the function's start offset and entrypoint. Note that
4481 stop_func_start is NOT advanced when in a range of a
4482 non-contiguous block that does not contain the entry pc. */
4483 if (block != nullptr
4484 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4485 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4486 {
4487 ecs->stop_func_start
4488 += gdbarch_deprecated_function_start_offset (gdbarch);
4489
4490 if (gdbarch_skip_entrypoint_p (gdbarch))
4491 ecs->stop_func_start
4492 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4493 }
4494
4495 ecs->stop_func_filled_in = 1;
4496 }
4497 }
4498
4499
4500 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4501
4502 static enum stop_kind
4503 get_inferior_stop_soon (execution_control_state *ecs)
4504 {
4505 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4506
4507 gdb_assert (inf != NULL);
4508 return inf->control.stop_soon;
4509 }
4510
4511 /* Poll for one event out of the current target. Store the resulting
4512 waitstatus in WS, and return the event ptid. Does not block. */
4513
4514 static ptid_t
4515 poll_one_curr_target (struct target_waitstatus *ws)
4516 {
4517 ptid_t event_ptid;
4518
4519 overlay_cache_invalid = 1;
4520
4521 /* Flush target cache before starting to handle each event.
4522 Target was running and cache could be stale. This is just a
4523 heuristic. Running threads may modify target memory, but we
4524 don't get any event. */
4525 target_dcache_invalidate ();
4526
4527 if (deprecated_target_wait_hook)
4528 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
4529 else
4530 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
4531
4532 if (debug_infrun)
4533 print_target_wait_results (minus_one_ptid, event_ptid, ws);
4534
4535 return event_ptid;
4536 }
4537
/* An event reported by wait_one.  Bundles the waitstatus together
   with the target and ptid it was reported for.  */

struct wait_one_event
{
  /* The target the event came out of.  */
  process_stratum_target *target;

  /* The PTID the event was for.  */
  ptid_t ptid;

  /* The waitstatus.  */
  target_waitstatus ws;
};
4551
/* Wait for one event out of any target.  Polls each waitable target
   without blocking; if none has an event ready, blocks in select on
   the targets' event file descriptors, then polls again.  Returns a
   TARGET_WAITKIND_NO_RESUMED event when no waitable target is
   left.  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  /* Skip targets that can't be polled asynchronously, or
	     that have no executing threads.  */
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  /* Poll with this inferior's target as the current target.  */
	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (0);
	    }
	  else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Build the set of event file descriptors of all waitable
	 targets.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
	}

      QUIT;

      /* Wait, interruptibly, for at least one target fd to become
	 readable, then loop back and poll again.  */
      int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
      if (numfds < 0)
	{
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
4622
/* Generate a wrapper for target_stopped_by_REASON that works on PTID
   instead of the current thread.  The wrapper works by temporarily
   switching inferior_ptid to PTID; the scoped_restore puts the
   original value back when the wrapper returns.  */
#define THREAD_STOPPED_BY(REASON)		\
static int					\
thread_stopped_by_ ## REASON (ptid_t ptid)	\
{						\
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
  inferior_ptid = ptid;				\
						\
  return target_stopped_by_ ## REASON ();	\
}

/* Generate thread_stopped_by_watchpoint.  */
THREAD_STOPPED_BY (watchpoint)
/* Generate thread_stopped_by_sw_breakpoint.  */
THREAD_STOPPED_BY (sw_breakpoint)
/* Generate thread_stopped_by_hw_breakpoint.  */
THREAD_STOPPED_BY (hw_breakpoint)
4641
/* Save the thread's event and stop reason to process it later.
   TP is the thread the event WS was reported for.  */

static void
save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
{
  if (debug_infrun)
    {
      std::string statstr = target_waitstatus_to_string (ws);

      fprintf_unfiltered (gdb_stdlog,
			  "infrun: saving status %s for %d.%ld.%ld\n",
			  statstr.c_str (),
			  tp->ptid.pid (),
			  tp->ptid.lwp (),
			  tp->ptid.tid ());
    }

  /* Record for later.  */
  tp->suspend.waitstatus = *ws;
  tp->suspend.waitstatus_pending_p = 1;

  struct regcache *regcache = get_thread_regcache (tp);
  const address_space *aspace = regcache->aspace ();

  if (ws->kind == TARGET_WAITKIND_STOPPED
      && ws->value.sig == GDB_SIGNAL_TRAP)
    {
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, &tp->suspend.waitstatus);

      /* Classify why the thread got the SIGTRAP: ask the target when
	 it can report the stop reason, and otherwise fall back to
	 inspecting the breakpoints GDB knows are inserted at PC.  */
      if (thread_stopped_by_watchpoint (tp->ptid))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_WATCHPOINT;
	}
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && thread_stopped_by_sw_breakpoint (tp->ptid))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && thread_stopped_by_hw_breakpoint (tp->ptid))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  /* The target can't report HW breakpoint stops; infer one
	     from a HW breakpoint known to be inserted at PC.  */
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  /* Likewise for SW breakpoints.  */
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	{
	  /* A stepping thread with no single-step breakpoints set
	     must have completed a (hardware) single step.  */
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SINGLE_STEP;
	}
    }
}
4712
/* See infrun.h.  */

void
stop_all_threads (void)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");

  scoped_restore_current_thread restore_thread;

  /* Enable thread create/exit events so new threads spawned while we
     stop things are reported; disabled again on scope exit.  */
  target_thread_events (1);
  SCOPE_EXIT { target_thread_events (0); };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stop_all_threads, pass=%d, "
			    "iterations=%d\n", pass, iterations);
      while (1)
	{
	  int need_wait = 0;

	  update_thread_list ();

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing)
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      if (debug_infrun)
			fprintf_unfiltered (gdb_stdlog,
					    "infrun: %s executing, "
					    "need stop\n",
					    target_pid_to_str (t->ptid).c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      if (debug_infrun)
			fprintf_unfiltered (gdb_stdlog,
					    "infrun: %s executing, "
					    "already stopping\n",
					    target_pid_to_str (t->ptid).c_str ());
		    }

		  if (t->stop_requested)
		    need_wait = 1;
		}
	      else
		{
		  if (debug_infrun)
		    fprintf_unfiltered (gdb_stdlog,
					"infrun: %s not executing\n",
					target_pid_to_str (t->ptid).c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->resumed = false;
		}
	    }

	  if (!need_wait)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  /* Drain one stop event and fold it into our bookkeeping.  */
	  wait_one_event event = wait_one ();

	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: stop_all_threads %s %s\n",
				  target_waitstatus_to_string (&event.ws).c_str (),
				  target_pid_to_str (event.ptid).c_str ());
	    }

	  if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED
	      || event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
	      || event.ws.kind == TARGET_WAITKIND_EXITED
	      || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
	    {
	      /* All resumed threads exited
		 or one thread/process exited/signalled.  */
	    }
	  else
	    {
	      thread_info *t = find_thread_ptid (event.target, event.ptid);
	      if (t == NULL)
		t = add_thread (event.target, event.ptid);

	      t->stop_requested = 0;
	      t->executing = 0;
	      t->resumed = false;
	      t->control.may_range_step = 0;

	      /* This may be the first time we see the inferior report
		 a stop.  */
	      inferior *inf = find_inferior_ptid (event.target, event.ptid);
	      if (inf->needs_setup)
		{
		  switch_to_thread_no_regs (t);
		  setup_inferior (0);
		}

	      if (event.ws.kind == TARGET_WAITKIND_STOPPED
		  && event.ws.value.sig == GDB_SIGNAL_0)
		{
		  /* We caught the event that we intended to catch, so
		     there's no event pending.  */
		  t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
		  t->suspend.waitstatus_pending_p = 0;

		  if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
		    {
		      /* Add it back to the step-over queue.  */
		      if (debug_infrun)
			{
			  fprintf_unfiltered (gdb_stdlog,
					      "infrun: displaced-step of %s "
					      "canceled: adding back to the "
					      "step-over queue\n",
					      target_pid_to_str (t->ptid).c_str ());
			}
		      t->control.trap_expected = 0;
		      thread_step_over_chain_enqueue (t);
		    }
		}
	      else
		{
		  /* Some other interesting event; stash it so the
		     normal event loop can process it once threads are
		     restarted.  */
		  enum gdb_signal sig;
		  struct regcache *regcache;

		  if (debug_infrun)
		    {
		      std::string statstr = target_waitstatus_to_string (&event.ws);

		      fprintf_unfiltered (gdb_stdlog,
					  "infrun: target_wait %s, saving "
					  "status for %d.%ld.%ld\n",
					  statstr.c_str (),
					  t->ptid.pid (),
					  t->ptid.lwp (),
					  t->ptid.tid ());
		    }

		  /* Record for later.  */
		  save_waitstatus (t, &event.ws);

		  sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
			 ? event.ws.value.sig : GDB_SIGNAL_0);

		  if (displaced_step_fixup (t, sig) < 0)
		    {
		      /* Add it back to the step-over queue.  */
		      t->control.trap_expected = 0;
		      thread_step_over_chain_enqueue (t);
		    }

		  regcache = get_thread_regcache (t);
		  t->suspend.stop_pc = regcache_read_pc (regcache);

		  if (debug_infrun)
		    {
		      fprintf_unfiltered (gdb_stdlog,
					  "infrun: saved stop_pc=%s for %s "
					  "(currently_stepping=%d)\n",
					  paddress (target_gdbarch (),
						    t->suspend.stop_pc),
					  target_pid_to_str (t->ptid).c_str (),
					  currently_stepping (t));
		    }
		}
	    }
	}
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
}
4925
/* Handle a TARGET_WAITKIND_NO_RESUMED event.

   Returns non-zero if the event was consumed here (the caller should
   go back to waiting), zero if the caller should go ahead and report
   the event to the user.  */

static int
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      struct ui *ui;
      int any_sync = 0;

      /* See whether any UI is synchronously waiting for an execution
	 command to finish.  */
      ALL_UIS (ui)
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = 1;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(ignoring: bg)\n");
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	       -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	       -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for children left"
     to the user.  */
  update_thread_list ();

  for (thread_info *thread : all_non_exited_threads (ecs->target))
    {
      if (thread->executing
	  || thread->suspend.waitstatus_pending_p)
	{
	  /* There were no unwaited-for children left in the target at
	     some point, but there are now.  Just ignore.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(ignoring: found resumed)\n");
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Note however that we may find no resumed thread because the whole
     process exited meanwhile (thus updating the thread list results
     in an empty thread list).  In this case we know we'll be getting
     a process exit event shortly.  */
  for (inferior *inf : all_non_exited_inferiors (ecs->target))
    {
      thread_info *thread = any_live_thread_of_inferior (inf);
      if (thread == NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(expect process exit)\n");
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Go ahead and report the event.  */
  return 0;
}
5030
/* Given an execution control state that has been freshly filled in by
   an event from the inferior, figure out what it means and take
   appropriate action.

   The alternatives are:

   1) stop_waiting and return; to really stop and return to the
   debugger.

   2) keep_going and return; to wait for the next event (set
   ecs->event_thread->stepping_over_breakpoint to 1 to single step
   once).  */

static void
handle_inferior_event (struct execution_control_state *ecs)
{
  /* Make sure that all temporary struct value objects that were
     created during the handling of the event get deleted at the
     end.  */
  scoped_value_mark free_values;

  enum stop_kind stop_soon;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
			target_waitstatus_to_string (&ecs->ws).c_str ());

  if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
	 handling it at this level.  The lower layers have already
	 done what needs to be done, if anything.

	 One of the possible circumstances for this is when the
	 inferior produces output for the console.  The inferior has
	 not stopped, and we are ignoring the event.  Another possible
	 circumstance is any event which the lower level knows will be
	 reported multiple times without an intervening resume.  */
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* A thread exited; nothing to report at this level, keep
	 waiting.  */
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
      && handle_no_resumed (ecs))
    return;

  /* Cache the last target/ptid/waitstatus.  */
  set_last_target_status (ecs->target, ecs->ptid, ecs->ws);

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      /* No unwaited-for children left.  IOW, all resumed children
	 have exited.  */
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;
    }

  if (ecs->ws.kind != TARGET_WAITKIND_EXITED
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
    {
      ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
      /* If it's a new thread, add it to the thread database.  */
      if (ecs->event_thread == NULL)
	ecs->event_thread = add_thread (ecs->target, ecs->ptid);

      /* Disable range stepping.  If the next step request could use a
	 range, this will be end up re-enabled then.  */
      ecs->event_thread->control.may_range_step = 0;
    }

  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs->event_thread, &ecs->ws);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
      && (ecs->ws.value.sig == GDB_SIGNAL_ILL
	  || ecs->ws.value.sig == GDB_SIGNAL_SEGV
	  || ecs->ws.value.sig == GDB_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);

      if (breakpoint_inserted_here_p (regcache->aspace (),
				      regcache_read_pc (regcache)))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: Treating signal as SIGTRAP\n");
	  ecs->ws.value.sig = GDB_SIGNAL_TRAP;
	}
    }

  /* Mark the non-executing threads accordingly.  In all-stop, all
     threads of all processes are stopped when we get any event
     reported.  In non-stop mode, only the event thread stops.  */
  {
    ptid_t mark_ptid;

    if (!target_is_non_stop_p ())
      mark_ptid = minus_one_ptid;
    else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
	     || ecs->ws.kind == TARGET_WAITKIND_EXITED)
      {
	/* If we're handling a process exit in non-stop mode, even
	   though threads haven't been deleted yet, one would think
	   that there is nothing to do, as threads of the dead process
	   will be soon deleted, and threads of any other process were
	   left running.  However, on some targets, threads survive a
	   process exit event.  E.g., for the "checkpoint" command,
	   when the current checkpoint/fork exits, linux-fork.c
	   automatically switches to another fork from within
	   target_mourn_inferior, by associating the same
	   inferior/thread to another fork.  We haven't mourned yet at
	   this point, but we must mark any threads left in the
	   process as not-executing so that finish_thread_state marks
	   them stopped (in the user's perspective) if/when we present
	   the stop to the user.  */
	mark_ptid = ptid_t (ecs->ptid.pid ());
      }
    else
      mark_ptid = ecs->ptid;

    set_executing (ecs->target, mark_ptid, false);

    /* Likewise the resumed flag.  */
    set_resumed (ecs->target, mark_ptid, false);
  }

  switch (ecs->ws.kind)
    {
    case TARGET_WAITKIND_LOADED:
      context_switch (ecs);
      /* Ignore gracefully during startup of the inferior, as it might
	 be the shell which has just loaded some objects, otherwise
	 add the symbols for the newly loaded objects.  Also ignore at
	 the beginning of an attach or remote session; we will query
	 the full list of libraries once the connection is
	 established.  */

      stop_soon = get_inferior_stop_soon (ecs);
      if (stop_soon == NO_STOP_QUIETLY)
	{
	  struct regcache *regcache;

	  regcache = get_thread_regcache (ecs->event_thread);

	  handle_solib_event ();

	  ecs->event_thread->control.stop_bpstat
	    = bpstat_stop_status (regcache->aspace (),
				  ecs->event_thread->suspend.stop_pc,
				  ecs->event_thread, &ecs->ws);

	  if (handle_stop_requested (ecs))
	    return;

	  if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	    {
	      /* A catchpoint triggered.  */
	      process_event_stop_test (ecs);
	      return;
	    }

	  /* If requested, stop when the dynamic linker notifies
	     gdb of events.  This allows the user to get control
	     and place breakpoints in initializer routines for
	     dynamically loaded objects (among other things).  */
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  if (stop_on_solib_events)
	    {
	      /* Make sure we print "Stopped due to solib-event" in
		 normal_stop.  */
	      stop_print_frame = 1;

	      stop_waiting (ecs);
	      return;
	    }
	}

      /* If we are skipping through a shell, or through shared library
	 loading that we aren't interested in, resume the program.  If
	 we're running the program normally, also resume.  */
      if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
	{
	  /* Loading of shared libraries might have changed breakpoint
	     addresses.  Make sure new breakpoints are inserted.  */
	  if (stop_soon == NO_STOP_QUIETLY)
	    insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	  prepare_to_wait (ecs);
	  return;
	}

      /* But stop if we're attaching or setting up a remote
	 connection.  */
      if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
	  || stop_soon == STOP_QUIETLY_REMOTE)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
	  stop_waiting (ecs);
	  return;
	}

      internal_error (__FILE__, __LINE__,
		      _("unhandled stop_soon: %d"), (int) stop_soon);

    case TARGET_WAITKIND_SPURIOUS:
      if (handle_stop_requested (ecs))
	return;
      context_switch (ecs);
      resume (GDB_SIGNAL_0);
      prepare_to_wait (ecs);
      return;

    case TARGET_WAITKIND_THREAD_CREATED:
      if (handle_stop_requested (ecs))
	return;
      context_switch (ecs);
      if (!switch_back_to_stepped_thread (ecs))
	keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      inferior_ptid = ecs->ptid;
      set_current_inferior (find_inferior_ptid (ecs->target, ecs->ptid));
      set_current_program_space (current_inferior ()->pspace);
      handle_vfork_child_exec_or_exit (0);
      target_terminal::ours ();	/* Must do this before mourn anyway.  */

      /* Clearing any previous state of convenience variables.  */
      clear_exit_convenience_vars ();

      if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	{
	  /* Record the exit code in the convenience variable $_exitcode, so
	     that the user can inspect this again later.  */
	  set_internalvar_integer (lookup_internalvar ("_exitcode"),
				   (LONGEST) ecs->ws.value.integer);

	  /* Also record this in the inferior itself.  */
	  current_inferior ()->has_exit_code = 1;
	  current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;

	  /* Support the --return-child-result option.  */
	  return_child_result_value = ecs->ws.value.integer;

	  gdb::observers::exited.notify (ecs->ws.value.integer);
	}
      else
	{
	  struct gdbarch *gdbarch = current_inferior ()->gdbarch;

	  if (gdbarch_gdb_signal_to_target_p (gdbarch))
	    {
	      /* Set the value of the internal variable $_exitsignal,
		 which holds the signal uncaught by the inferior.  */
	      set_internalvar_integer (lookup_internalvar ("_exitsignal"),
				       gdbarch_gdb_signal_to_target (gdbarch,
							  ecs->ws.value.sig));
	    }
	  else
	    {
	      /* We don't have access to the target's method used for
		 converting between signal numbers (GDB's internal
		 representation <-> target's representation).
		 Therefore, we cannot do a good job at displaying this
		 information to the user.  It's better to just warn
		 her about it (if infrun debugging is enabled), and
		 give up.  */
	      if (debug_infrun)
		fprintf_filtered (gdb_stdlog, _("\
Cannot fill $_exitsignal with the correct signal number.\n"));
	    }

	  gdb::observers::signal_exited.notify (ecs->ws.value.sig);
	}

      gdb_flush (gdb_stdout);
      target_mourn_inferior (inferior_ptid);
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;

    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      /* Check whether the inferior is displaced stepping.  */
      {
	struct regcache *regcache = get_thread_regcache (ecs->event_thread);
	struct gdbarch *gdbarch = regcache->arch ();

	/* If checking displaced stepping is supported, and thread
	   ecs->ptid is displaced stepping.  */
	if (displaced_step_in_progress_thread (ecs->event_thread))
	  {
	    struct inferior *parent_inf
	      = find_inferior_ptid (ecs->target, ecs->ptid);
	    struct regcache *child_regcache;
	    CORE_ADDR parent_pc;

	    if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	      {
		struct displaced_step_inferior_state *displaced
		  = get_displaced_stepping_state (parent_inf);

		/* Restore scratch pad for child process.  */
		displaced_step_restore (displaced, ecs->ws.value.related_pid);
	      }

	    /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
	       indicating that the displaced stepping of syscall instruction
	       has been done.  Perform cleanup for parent process here.  Note
	       that this operation also cleans up the child process for vfork,
	       because their pages are shared.  */
	    displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
	    /* Start a new step-over in another thread if there's one
	       that needs it.  */
	    start_step_over ();

	    /* Since the vfork/fork syscall instruction was executed in the scratchpad,
	       the child's PC is also within the scratchpad.  Set the child's PC
	       to the parent's PC value, which has already been fixed up.
	       FIXME: we use the parent's aspace here, although we're touching
	       the child, because the child hasn't been added to the inferior
	       list yet at this point.  */

	    child_regcache
	      = get_thread_arch_aspace_regcache (parent_inf->process_target (),
						 ecs->ws.value.related_pid,
						 gdbarch,
						 parent_inf->aspace);
	    /* Read PC value of parent process.  */
	    parent_pc = regcache_read_pc (regcache);

	    if (debug_displaced)
	      fprintf_unfiltered (gdb_stdlog,
				  "displaced: write child pc from %s to %s\n",
				  paddress (gdbarch,
					    regcache_read_pc (child_regcache)),
				  paddress (gdbarch, parent_pc));

	    regcache_write_pc (child_regcache, parent_pc);
	  }
      }

      context_switch (ecs);

      /* Immediately detach breakpoints from the child before there's
	 any chance of letting the user delete breakpoints from the
	 breakpoint lists.  If we don't do this early, it's easy to
	 leave left over traps in the child, vis: "break foo; catch
	 fork; c; <fork>; del; c; <child calls foo>".  We only follow
	 the fork on the last `continue', and by that time the
	 breakpoint at "foo" is long gone from the breakpoint table.
	 If we vforked, then we don't need to unpatch here, since both
	 parent and child are sharing the same memory pages; we'll
	 need to unpatch at follow/detach time instead to be certain
	 that new breakpoints added between catchpoint hit time and
	 vfork follow are detached.  */
      if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
	{
	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ecs->ws.value.related_pid);
	}

      delete_just_stopped_threads_single_step_breakpoints ();

      /* In case the event is caught by a catchpoint, remember that
	 the event is to be followed at the next resume of the thread,
	 and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      ecs->event_thread->suspend.stop_pc
	= regcache_read_pc (get_thread_regcache (ecs->event_thread));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_current_regcache ()->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      if (handle_stop_requested (ecs))
	return;

      /* If no catchpoint triggered for this, then keep going.  Note
	 that we're interested in knowing the bpstat actually causes a
	 stop, not just if it may explain the signal.  Software
	 watchpoints, for example, always appear in the bpstat.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  bool follow_child
	    = (follow_fork_mode_string == follow_fork_mode_child);

	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

	  process_stratum_target *targ
	    = ecs->event_thread->inf->process_target ();

	  bool should_resume = follow_fork ();

	  /* Note that one of these may be an invalid pointer,
	     depending on detach_fork.  */
	  thread_info *parent = ecs->event_thread;
	  thread_info *child
	    = find_thread_ptid (targ, ecs->ws.value.related_pid);

	  /* At this point, the parent is marked running, and the
	     child is marked stopped.  */

	  /* If not resuming the parent, mark it stopped.  */
	  if (follow_child && !detach_fork && !non_stop && !sched_multi)
	    parent->set_running (false);

	  /* If resuming the child, mark it running.  */
	  if (follow_child || (!detach_fork && (non_stop || sched_multi)))
	    child->set_running (true);

	  /* In non-stop mode, also resume the other branch.  */
	  if (!detach_fork && (non_stop
			       || (sched_multi && target_is_non_stop_p ())))
	    {
	      if (follow_child)
		switch_to_thread (parent);
	      else
		switch_to_thread (child);

	      ecs->event_thread = inferior_thread ();
	      ecs->ptid = inferior_ptid;
	      keep_going (ecs);
	    }

	  if (follow_child)
	    switch_to_thread (child);
	  else
	    switch_to_thread (parent);

	  ecs->event_thread = inferior_thread ();
	  ecs->ptid = inferior_ptid;

	  if (should_resume)
	    keep_going (ecs);
	  else
	    stop_waiting (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
	 the parent, and keep going.  */

      context_switch (ecs);

      current_inferior ()->waiting_for_vfork_done = 0;
      current_inferior ()->pspace->breakpoints_not_allowed = 0;

      if (handle_stop_requested (ecs))
	return;

      /* This also takes care of reinserting breakpoints in the
	 previously locked inferior.  */
      keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXECD:

      /* Note we can't read registers yet (the stop_pc), because we
	 don't yet know the inferior's post-exec architecture.
	 'stop_pc' is explicitly read below instead.  */
      switch_to_thread_no_regs (ecs->event_thread);

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
	 Must do this now, before trying to determine whether to
	 stop.  */
      follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);

      /* In follow_exec we may have deleted the original thread and
	 created a new one.  Make sure that the event thread is the
	 execd thread for that case (this is a nop otherwise).  */
      ecs->event_thread = inferior_thread ();

      ecs->event_thread->suspend.stop_pc
	= regcache_read_pc (get_thread_regcache (ecs->event_thread));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_current_regcache ()->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      /* Note that this may be referenced from inside
	 bpstat_stop_status above, through inferior_has_execd.  */
      xfree (ecs->ws.value.execd_pathname);
      ecs->ws.value.execd_pathname = NULL;

      if (handle_stop_requested (ecs))
	return;

      /* If no catchpoint triggered for this, then keep going.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  keep_going (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

      /* Be careful not to try to gather much state about a thread
	 that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      /* Getting the current syscall number.  */
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

      /* Before examining the threads further, step this thread to
	 get it entirely out of the syscall.  (We get notice of the
	 event when the thread is just on the verge of exiting a
	 syscall.  Stepping one instruction seems to get it back
	 into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_STOPPED:
      handle_signal_stop (ecs);
      return;

    case TARGET_WAITKIND_NO_HISTORY:
      /* Reverse execution: target ran out of history info.  */

      /* Switch to the stopped thread.  */
      context_switch (ecs);
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");

      delete_just_stopped_threads_single_step_breakpoints ();
      ecs->event_thread->suspend.stop_pc
	= regcache_read_pc (get_thread_regcache (inferior_thread ()));

      if (handle_stop_requested (ecs))
	return;

      gdb::observers::no_history.notify ();
      stop_waiting (ecs);
      return;
    }
}
5608
/* Restart threads back to what they were trying to do back when we
   paused them for an in-line step-over.  The EVENT_THREAD thread is
   ignored.

   Each thread is examined in turn; threads that should not run again
   (not meant to be running, already resumed, queued for a step-over,
   or holding a pending status) are skipped, and the rest are resumed
   either by continuing their step (keep_going_stepped_thread) or by a
   plain keep_going_pass_signal.  */

static void
restart_threads (struct thread_info *event_thread)
{
  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      switch_to_thread_no_regs (tp);

      if (tp == event_thread)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] is event thread\n",
				target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] not meant to be running\n",
				target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      if (tp->resumed)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] resumed\n",
				target_pid_to_str (tp->ptid).c_str ());
	  /* A resumed thread must either be executing or have an
	     event stashed away for later processing.  */
	  gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
	  continue;
	}

      if (thread_is_in_step_over_chain (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] needs step-over\n",
				target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (!tp->resumed);
	  continue;
	}


      if (tp->suspend.waitstatus_pending_p)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] has pending status\n",
				target_pid_to_str (tp->ptid).c_str ());
	  /* Don't re-resume; just mark it resumed so the pending
	     event is picked up later.  */
	  tp->resumed = true;
	  continue;
	}

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error (__FILE__, __LINE__,
			  "thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  target_pid_to_str (tp->ptid).c_str ());
	}

      if (currently_stepping (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] was stepping\n",
				target_pid_to_str (tp->ptid).c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  struct execution_control_state ecss;
	  struct execution_control_state *ecs = &ecss;

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] continuing\n",
				target_pid_to_str (tp->ptid).c_str ());
	  reset_ecs (ecs, tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (ecs);
	}
    }
}
5712
5713 /* Callback for iterate_over_threads. Find a resumed thread that has
5714 a pending waitstatus. */
5715
5716 static int
5717 resumed_thread_with_pending_status (struct thread_info *tp,
5718 void *arg)
5719 {
5720 return (tp->resumed
5721 && tp->suspend.waitstatus_pending_p);
5722 }
5723
5724 /* Called when we get an event that may finish an in-line or
5725 out-of-line (displaced stepping) step-over started previously.
5726 Return true if the event is processed and we should go back to the
5727 event loop; false if the caller should continue processing the
5728 event. */
5729
static int
finish_step_over (struct execution_control_state *ecs)
{
  int had_step_over_info;

  /* If the event thread just completed a displaced (out-of-line)
     step, restore its registers/PC first -- our caller reads stop_pc
     right after we return.  */
  displaced_step_fixup (ecs->event_thread,
			ecs->event_thread->suspend.stop_signal);

  /* Remember whether an in-line step-over was in progress before we
     clear it; the restart logic below only applies in that case.  */
  had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      clear_step_over_info ();
    }

  /* In all-stop there are no other running threads to restart, so
     we're done.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      NULL);
      if (pending != NULL)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: found resumed threads with "
				  "pending events, saving status\n");
	    }

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, &ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->resumed = true;

	  gdb_assert (!tp->executing);

	  regcache = get_thread_regcache (tp);
	  tp->suspend.stop_pc = regcache_read_pc (regcache);

	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: saved stop_pc=%s for %s "
				  "(currently_stepping=%d)\n",
				  paddress (target_gdbarch (),
					    tp->suspend.stop_pc),
				  target_pid_to_str (tp->ptid).c_str (),
				  currently_stepping (tp));
	    }

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  return 0;
}
5845
5846 /* Come here when the program has stopped with a signal. */
5847
static void
handle_signal_stop (struct execution_control_state *ecs)
{
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  int stopped_by_watchpoint;
  enum stop_kind stop_soon;
  /* True when nothing GDB-related (breakpoint, stepping, watchpoint,
     ...) explains the stop; computed progressively below.  */
  int random_signal;

  gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);

  ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;

  /* Do we need to clean up the state of a thread that has
     completed a displaced single-step?  (Doing so usually affects
     the PC, so do it here, before we set stop_pc.)  */
  if (finish_step_over (ecs))
    return;

  /* If we either finished a single-step or hit a breakpoint, but
     the user wanted this thread to be stopped, pretend we got a
     SIG0 (generic unsignaled stop).  */
  if (ecs->event_thread->stop_requested
      && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
    ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

  ecs->event_thread->suspend.stop_pc
    = regcache_read_pc (get_thread_regcache (ecs->event_thread));

  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);
      struct gdbarch *reg_gdbarch = regcache->arch ();

      switch_to_thread (ecs->event_thread);

      fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
			  paddress (reg_gdbarch,
				    ecs->event_thread->suspend.stop_pc));
      if (target_stopped_by_watchpoint ())
	{
	  CORE_ADDR addr;

	  fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");

	  if (target_stopped_data_address (current_top_target (), &addr))
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: stopped data address = %s\n",
				paddress (reg_gdbarch, addr));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: (no data address available)\n");
	}
    }

  /* This is originated from start_remote(), start_inferior() and
     shared libraries hook functions.  */
  stop_soon = get_inferior_stop_soon (ecs);
  if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
    {
      context_switch (ecs);
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
      stop_print_frame = 1;
      stop_waiting (ecs);
      return;
    }

  /* This originates from attach_command().  We need to overwrite
     the stop_signal here, because some kernels don't ignore a
     SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
     See more comments in inferior.h.  On the other hand, if we
     get a non-SIGSTOP, report it to the user - assume the backend
     will handle the SIGSTOP if it should show up later.

     Also consider that the attach is complete when we see a
     SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
     target extended-remote report it instead of a SIGSTOP
     (e.g. gdbserver).  We already rely on SIGTRAP being our
     signal, so this is no exception.

     Also consider that the attach is complete when we see a
     GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
     the target to stop all threads of the inferior, in case the
     low level attach operation doesn't stop them implicitly.  If
     they weren't stopped implicitly, then the stub will report a
     GDB_SIGNAL_0, meaning: stopped for no particular reason
     other than GDB's request.  */
  if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
      && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
    {
      stop_print_frame = 1;
      stop_waiting (ecs);
      ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
      return;
    }

  /* See if something interesting happened to the non-current thread.  If
     so, then switch to that thread.  */
  if (ecs->ptid != inferior_ptid)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");

      context_switch (ecs);

      if (deprecated_context_hook)
	deprecated_context_hook (ecs->event_thread->global_num);
    }

  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Pull the single step breakpoints out of the target.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache;
      CORE_ADDR pc;

      regcache = get_thread_regcache (ecs->event_thread);
      const address_space *aspace = regcache->aspace ();

      pc = regcache_read_pc (regcache);

      /* However, before doing so, if this single-step breakpoint was
	 actually for another thread, set this thread up for moving
	 past it.  */
      if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
						   aspace, pc))
	{
	  if (single_step_breakpoint_inserted_here_p (aspace, pc))
	    {
	      if (debug_infrun)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: [%s] hit another thread's "
				      "single-step breakpoint\n",
				      target_pid_to_str (ecs->ptid).c_str ());
		}
	      ecs->hit_singlestep_breakpoint = 1;
	    }
	}
      else
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: [%s] hit its "
				  "single-step breakpoint\n",
				  target_pid_to_str (ecs->ptid).c_str ());
	    }
	}
    }
  delete_just_stopped_threads_single_step_breakpoints ();

  /* A trap while stepping over a watchpoint was necessarily caused by
     the single-step, not by the watchpoint itself.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && ecs->event_thread->stepping_over_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint
	  || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
	 attempted to write to a piece of memory under control of
	 a watchpoint.  The instruction hasn't actually executed
	 yet.  If we were to evaluate the watchpoint expression
	 now, we would get the old value, and therefore no change
	 would seem to have occurred.

	 In order to make watchpoints work `right', we really need
	 to complete the memory write, and then evaluate the
	 watchpoint expression.  We do this by single-stepping the
	 target.

	 It may not be necessary to disable the watchpoint to step over
	 it.  For example, the PA can (with some kernel cooperation)
	 single step over a watchpoint without disabling the watchpoint.

	 It is far more common to need to disable a watchpoint to step
	 the inferior over it.  If we have non-steppable watchpoints,
	 we must disable the current watchpoint; it's simplest to
	 disable all watchpoints.

	 Any breakpoint at PC must also be stepped over -- if there's
	 one, it will have already triggered before the watchpoint
	 triggered, and we either already reported it to the user, or
	 it didn't cause a stop and we called keep_going.  In either
	 case, if there was a breakpoint at PC, we must be trying to
	 step past it.  */
      ecs->event_thread->stepping_over_watchpoint = 1;
      keep_going (ecs);
      return;
    }

  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;
  bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  ecs->event_thread->control.stop_step = 0;
  stop_print_frame = 1;
  stopped_by_random_signal = 0;
  /* Breakpoint chain built while deciding whether to skip inline
     frames; reused by bpstat_stop_status below so the chain isn't
     computed twice.  */
  bpstat stop_chain = NULL;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->control.step_range_end != 1)
    {
      const address_space *aspace
	= get_thread_regcache (ecs->event_thread)->aspace ();

      /* skip_inline_frames is expensive, so we avoid it if we can
	 determine that the address is one where functions cannot have
	 been inlined.  This improves performance with inferiors that
	 load a lot of shared libraries, because the solib event
	 breakpoint is defined as the address of a function (i.e. not
	 inline).  Note that we have to check the previous PC as well
	 as the current one to catch cases when we have just
	 single-stepped off a breakpoint prior to reinstating it.
	 Note that we're assuming that the code we single-step to is
	 not inline, but that's not definitive: there's nothing
	 preventing the event breakpoint function from containing
	 inlined code, and the single-step ending up there.  If the
	 user had set a breakpoint on that inlined code, the missing
	 skip_inline_frames call would break things.  Fortunately
	 that's an extremely unlikely scenario.  */
      if (!pc_at_non_inline_function (aspace,
				      ecs->event_thread->suspend.stop_pc,
				      &ecs->ws)
	  && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	       && ecs->event_thread->control.trap_expected
	       && pc_at_non_inline_function (aspace,
					     ecs->event_thread->prev_pc,
					     &ecs->ws)))
	{
	  stop_chain = build_bpstat_chain (aspace,
					   ecs->event_thread->suspend.stop_pc,
					   &ecs->ws);
	  skip_inline_frames (ecs->event_thread, stop_chain);

	  /* Re-fetch current thread's frame in case that invalidated
	     the frame cache.  */
	  frame = get_current_frame ();
	  gdbarch = get_frame_arch (frame);
	}
    }

  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
	 also on an instruction that needs to be stepped multiple
	 times before it's been fully executing.  E.g., architectures
	 with a delay slot.  It needs to be stepped twice, once for
	 the instruction and once for the delay slot.  */
      int step_through_delay
	= gdbarch_single_step_through_delay (gdbarch, frame);

      if (debug_infrun && step_through_delay)
	fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
      if (ecs->event_thread->control.step_range_end == 0
	  && step_through_delay)
	{
	  /* The user issued a continue when stopped at a breakpoint.
	     Set up for another trap and get out of here.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      else if (step_through_delay)
	{
	  /* The user issued a step when stopped at a breakpoint.
	     Maybe we should stop, maybe we should not - the delay
	     slot *might* correspond to a line of source.  In any
	     case, don't decide that here, just set
	     ecs->stepping_over_breakpoint, making sure we
	     single-step again before breakpoints are re-inserted.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	}
    }

  /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
     handles this event.  */
  ecs->event_thread->control.stop_bpstat
    = bpstat_stop_status (get_current_regcache ()->aspace (),
			  ecs->event_thread->suspend.stop_pc,
			  ecs->event_thread, &ecs->ws, stop_chain);

  /* Following in case break condition called a
     function.  */
  stop_print_frame = 1;

  /* This is where we handle "moribund" watchpoints.  Unlike
     software breakpoints traps, hardware watchpoint traps are
     always distinguishable from random traps.  If no high-level
     watchpoint is associated with the reported stop data address
     anymore, then the bpstat does not explain the signal ---
     simply make sure to ignore it if `stopped_by_watchpoint' is
     set.  */

  if (debug_infrun
      && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
				  GDB_SIGNAL_TRAP)
      && stopped_by_watchpoint)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: no user watchpoint explains "
			"watchpoint SIGTRAP, ignoring\n");

  /* NOTE: cagney/2003-03-29: These checks for a random signal
     at one stage in the past included checks for an inferior
     function call's call dummy's return breakpoint.  The original
     comment, that went with the test, read:

     ``End of a stack dummy.  Some systems (e.g. Sony news) give
     another signal besides SIGTRAP, so check here as well as
     above.''

     If someone ever tries to get call dummys on a
     non-executable stack to work (where the target would stop
     with something like a SIGSEGV), then those tests might need
     to be re-instated.  Given, however, that the tests were only
     enabled when momentary breakpoints were not being used, I
     suspect that it won't be the case.

     NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
     be necessary for call dummies on a non-executable stack on
     SPARC.  */

  /* See if the breakpoints module can explain the signal.  */
  random_signal
    = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
			       ecs->event_thread->suspend.stop_signal);

  /* Maybe this was a trap for a software breakpoint that has since
     been removed.  */
  if (random_signal && target_stopped_by_sw_breakpoint ())
    {
      if (gdbarch_program_breakpoint_here_p (gdbarch,
					     ecs->event_thread->suspend.stop_pc))
	{
	  struct regcache *regcache;
	  int decr_pc;

	  /* Re-adjust PC to what the program would see if GDB was not
	     debugging it.  */
	  regcache = get_thread_regcache (ecs->event_thread);
	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      gdb::optional<scoped_restore_tmpl<int>>
		restore_operation_disable;

	      if (record_full_is_used ())
		restore_operation_disable.emplace
		  (record_full_gdb_operation_disable_set ());

	      regcache_write_pc (regcache,
				 ecs->event_thread->suspend.stop_pc + decr_pc);
	    }
	}
      else
	{
	  /* A delayed software breakpoint event.  Ignore the trap.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: delayed software breakpoint "
				"trap, ignoring\n");
	  random_signal = 0;
	}
    }

  /* Maybe this was a trap for a hardware breakpoint/watchpoint that
     has since been removed.  */
  if (random_signal && target_stopped_by_hw_breakpoint ())
    {
      /* A delayed hardware breakpoint event.  Ignore the trap.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: delayed hardware breakpoint/watchpoint "
			    "trap, ignoring\n");
      random_signal = 0;
    }

  /* If not, perhaps stepping/nexting can.  */
  if (random_signal)
    random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
		      && currently_stepping (ecs->event_thread));

  /* Perhaps the thread hit a single-step breakpoint of _another_
     thread.  Single-step breakpoints are transparent to the
     breakpoints module.  */
  if (random_signal)
    random_signal = !ecs->hit_singlestep_breakpoint;

  /* No?  Perhaps we got a moribund watchpoint.  */
  if (random_signal)
    random_signal = !stopped_by_watchpoint;

  /* Always stop if the user explicitly requested this thread to
     remain stopped.  */
  if (ecs->event_thread->stop_requested)
    {
      random_signal = 1;
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
    }

  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (random_signal)
    {
      /* Signal not for debugging purposes.  */
      struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
      enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
			    gdb_signal_to_symbol_string (stop_signal));

      stopped_by_random_signal = 1;

      /* Always stop on signals if we're either just gaining control
	 of the program, or the user explicitly requested this thread
	 to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
	  || ecs->event_thread->stop_requested
	  || (!inf->detaching
	      && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
	{
	  stop_waiting (ecs);
	  return;
	}

      /* Notify observers the signal has "handle print" set.  Note we
	 returned early above if stopping; normal_stop handles the
	 printing in that case.  */
      if (signal_print[ecs->event_thread->suspend.stop_signal])
	{
	  /* The signal table tells us to print about this signal.  */
	  target_terminal::ours_for_output ();
	  gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
	  target_terminal::inferior ();
	}

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
	  && ecs->event_thread->control.trap_expected
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* We were just starting a new sequence, attempting to
	     single-step off of a breakpoint and expecting a SIGTRAP.
	     Instead this signal arrives.  This signal will take us out
	     of the stepping range so GDB needs to remember to, when
	     the signal handler returns, resume stepping off that
	     breakpoint.  */
	  /* To simplify things, "continue" is forced to use the same
	     code paths as single-step - set a breakpoint at the
	     signal return address and then, once hit, step off that
	     breakpoint.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: signal arrived while stepping over "
				"breakpoint\n");

	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;

	  /* If we were nexting/stepping some other thread, switch to
	     it, so that we don't continue it, losing control.  */
	  if (!switch_back_to_stepped_thread (ecs))
	    keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
	  && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
				       ecs->event_thread)
	      || ecs->event_thread->control.step_range_end == 1)
	  && frame_id_eq (get_stack_frame_id (frame),
			  ecs->event_thread->control.step_stack_frame_id)
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* The inferior is about to take a signal that will take it
	     out of the single step range.  Set a breakpoint at the
	     current PC (which is presumably where the signal handler
	     will eventually return) and then allow the inferior to
	     run free.

	     Note that this is only needed for a signal delivered
	     while in the single-step range.  Nested signals aren't a
	     problem as they eventually all return.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: signal may take us out of "
				"single-step range\n");

	  clear_step_over_info ();
	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;
	  keep_going (ecs);
	  return;
	}

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
	 when either there's a nested signal, or when there's a
	 pending signal enabled just as the signal handler returns
	 (leaving the inferior at the step-resume-breakpoint without
	 actually executing it).  Either way continue until the
	 breakpoint is really hit.  */

      if (!switch_back_to_stepped_thread (ecs))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: random signal, keep going\n");

	  keep_going (ecs);
	}
      return;
    }

  /* The stop is explained by a breakpoint/watchpoint/stepping event;
     decide whether to actually stop or transparently resume.  */
  process_event_stop_test (ecs);
}
6390
6391 /* Come here when we've got some debug event / signal we can explain
6392 (IOW, not a random signal), and test whether it should cause a
6393 stop, or whether we should resume the inferior (transparently).
6394 E.g., could be a breakpoint whose condition evaluates false; we
6395 could be still stepping within the line; etc. */
6396
6397 static void
6398 process_event_stop_test (struct execution_control_state *ecs)
6399 {
6400 struct symtab_and_line stop_pc_sal;
6401 struct frame_info *frame;
6402 struct gdbarch *gdbarch;
6403 CORE_ADDR jmp_buf_pc;
6404 struct bpstat_what what;
6405
6406 /* Handle cases caused by hitting a breakpoint. */
6407
6408 frame = get_current_frame ();
6409 gdbarch = get_frame_arch (frame);
6410
6411 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
6412
6413 if (what.call_dummy)
6414 {
6415 stop_stack_dummy = what.call_dummy;
6416 }
6417
6418 /* A few breakpoint types have callbacks associated (e.g.,
6419 bp_jit_event). Run them now. */
6420 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6421
6422 /* If we hit an internal event that triggers symbol changes, the
6423 current frame will be invalidated within bpstat_what (e.g., if we
6424 hit an internal solib event). Re-fetch it. */
6425 frame = get_current_frame ();
6426 gdbarch = get_frame_arch (frame);
6427
6428 switch (what.main_action)
6429 {
6430 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6431 /* If we hit the breakpoint at longjmp while stepping, we
6432 install a momentary breakpoint at the target of the
6433 jmp_buf. */
6434
6435 if (debug_infrun)
6436 fprintf_unfiltered (gdb_stdlog,
6437 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
6438
6439 ecs->event_thread->stepping_over_breakpoint = 1;
6440
6441 if (what.is_longjmp)
6442 {
6443 struct value *arg_value;
6444
6445 /* If we set the longjmp breakpoint via a SystemTap probe,
6446 then use it to extract the arguments. The destination PC
6447 is the third argument to the probe. */
6448 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6449 if (arg_value)
6450 {
6451 jmp_buf_pc = value_as_address (arg_value);
6452 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6453 }
6454 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6455 || !gdbarch_get_longjmp_target (gdbarch,
6456 frame, &jmp_buf_pc))
6457 {
6458 if (debug_infrun)
6459 fprintf_unfiltered (gdb_stdlog,
6460 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6461 "(!gdbarch_get_longjmp_target)\n");
6462 keep_going (ecs);
6463 return;
6464 }
6465
6466 /* Insert a breakpoint at resume address. */
6467 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6468 }
6469 else
6470 check_exception_resume (ecs, frame);
6471 keep_going (ecs);
6472 return;
6473
6474 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6475 {
6476 struct frame_info *init_frame;
6477
6478 /* There are several cases to consider.
6479
6480 1. The initiating frame no longer exists. In this case we
6481 must stop, because the exception or longjmp has gone too
6482 far.
6483
6484 2. The initiating frame exists, and is the same as the
6485 current frame. We stop, because the exception or longjmp
6486 has been caught.
6487
6488 3. The initiating frame exists and is different from the
6489 current frame. This means the exception or longjmp has
6490 been caught beneath the initiating frame, so keep going.
6491
6492 4. longjmp breakpoint has been placed just to protect
6493 against stale dummy frames and user is not interested in
6494 stopping around longjmps. */
6495
6496 if (debug_infrun)
6497 fprintf_unfiltered (gdb_stdlog,
6498 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
6499
6500 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6501 != NULL);
6502 delete_exception_resume_breakpoint (ecs->event_thread);
6503
6504 if (what.is_longjmp)
6505 {
6506 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
6507
6508 if (!frame_id_p (ecs->event_thread->initiating_frame))
6509 {
6510 /* Case 4. */
6511 keep_going (ecs);
6512 return;
6513 }
6514 }
6515
6516 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
6517
6518 if (init_frame)
6519 {
6520 struct frame_id current_id
6521 = get_frame_id (get_current_frame ());
6522 if (frame_id_eq (current_id,
6523 ecs->event_thread->initiating_frame))
6524 {
6525 /* Case 2. Fall through. */
6526 }
6527 else
6528 {
6529 /* Case 3. */
6530 keep_going (ecs);
6531 return;
6532 }
6533 }
6534
6535 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6536 exists. */
6537 delete_step_resume_breakpoint (ecs->event_thread);
6538
6539 end_stepping_range (ecs);
6540 }
6541 return;
6542
6543 case BPSTAT_WHAT_SINGLE:
6544 if (debug_infrun)
6545 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6546 ecs->event_thread->stepping_over_breakpoint = 1;
6547 /* Still need to check other stuff, at least the case where we
6548 are stepping and step out of the right range. */
6549 break;
6550
6551 case BPSTAT_WHAT_STEP_RESUME:
6552 if (debug_infrun)
6553 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
6554
6555 delete_step_resume_breakpoint (ecs->event_thread);
6556 if (ecs->event_thread->control.proceed_to_finish
6557 && execution_direction == EXEC_REVERSE)
6558 {
6559 struct thread_info *tp = ecs->event_thread;
6560
6561 /* We are finishing a function in reverse, and just hit the
6562 step-resume breakpoint at the start address of the
6563 function, and we're almost there -- just need to back up
6564 by one more single-step, which should take us back to the
6565 function call. */
6566 tp->control.step_range_start = tp->control.step_range_end = 1;
6567 keep_going (ecs);
6568 return;
6569 }
6570 fill_in_stop_func (gdbarch, ecs);
6571 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
6572 && execution_direction == EXEC_REVERSE)
6573 {
6574 /* We are stepping over a function call in reverse, and just
6575 hit the step-resume breakpoint at the start address of
6576 the function. Go back to single-stepping, which should
6577 take us back to the function call. */
6578 ecs->event_thread->stepping_over_breakpoint = 1;
6579 keep_going (ecs);
6580 return;
6581 }
6582 break;
6583
6584 case BPSTAT_WHAT_STOP_NOISY:
6585 if (debug_infrun)
6586 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6587 stop_print_frame = 1;
6588
6589 /* Assume the thread stopped for a breapoint. We'll still check
6590 whether a/the breakpoint is there when the thread is next
6591 resumed. */
6592 ecs->event_thread->stepping_over_breakpoint = 1;
6593
6594 stop_waiting (ecs);
6595 return;
6596
6597 case BPSTAT_WHAT_STOP_SILENT:
6598 if (debug_infrun)
6599 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6600 stop_print_frame = 0;
6601
      /* Assume the thread stopped for a breakpoint.  We'll still check
6603 whether a/the breakpoint is there when the thread is next
6604 resumed. */
6605 ecs->event_thread->stepping_over_breakpoint = 1;
6606 stop_waiting (ecs);
6607 return;
6608
6609 case BPSTAT_WHAT_HP_STEP_RESUME:
6610 if (debug_infrun)
6611 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6612
6613 delete_step_resume_breakpoint (ecs->event_thread);
6614 if (ecs->event_thread->step_after_step_resume_breakpoint)
6615 {
6616 /* Back when the step-resume breakpoint was inserted, we
6617 were trying to single-step off a breakpoint. Go back to
6618 doing that. */
6619 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6620 ecs->event_thread->stepping_over_breakpoint = 1;
6621 keep_going (ecs);
6622 return;
6623 }
6624 break;
6625
6626 case BPSTAT_WHAT_KEEP_CHECKING:
6627 break;
6628 }
6629
6630 /* If we stepped a permanent breakpoint and we had a high priority
6631 step-resume breakpoint for the address we stepped, but we didn't
6632 hit it, then we must have stepped into the signal handler. The
6633 step-resume was only necessary to catch the case of _not_
6634 stepping into the handler, so delete it, and fall through to
6635 checking whether the step finished. */
6636 if (ecs->event_thread->stepped_breakpoint)
6637 {
6638 struct breakpoint *sr_bp
6639 = ecs->event_thread->control.step_resume_breakpoint;
6640
6641 if (sr_bp != NULL
6642 && sr_bp->loc->permanent
6643 && sr_bp->type == bp_hp_step_resume
6644 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6645 {
6646 if (debug_infrun)
6647 fprintf_unfiltered (gdb_stdlog,
6648 "infrun: stepped permanent breakpoint, stopped in "
6649 "handler\n");
6650 delete_step_resume_breakpoint (ecs->event_thread);
6651 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6652 }
6653 }
6654
6655 /* We come here if we hit a breakpoint but should not stop for it.
6656 Possibly we also were stepping and should stop for that. So fall
6657 through and test for stepping. But, if not stepping, do not
6658 stop. */
6659
6660 /* In all-stop mode, if we're currently stepping but have stopped in
6661 some other thread, we need to switch back to the stepped thread. */
6662 if (switch_back_to_stepped_thread (ecs))
6663 return;
6664
6665 if (ecs->event_thread->control.step_resume_breakpoint)
6666 {
6667 if (debug_infrun)
6668 fprintf_unfiltered (gdb_stdlog,
6669 "infrun: step-resume breakpoint is inserted\n");
6670
6671 /* Having a step-resume breakpoint overrides anything
6672 else having to do with stepping commands until
6673 that breakpoint is reached. */
6674 keep_going (ecs);
6675 return;
6676 }
6677
6678 if (ecs->event_thread->control.step_range_end == 0)
6679 {
6680 if (debug_infrun)
6681 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
6682 /* Likewise if we aren't even stepping. */
6683 keep_going (ecs);
6684 return;
6685 }
6686
6687 /* Re-fetch current thread's frame in case the code above caused
6688 the frame cache to be re-initialized, making our FRAME variable
6689 a dangling pointer. */
6690 frame = get_current_frame ();
6691 gdbarch = get_frame_arch (frame);
6692 fill_in_stop_func (gdbarch, ecs);
6693
6694 /* If stepping through a line, keep going if still within it.
6695
6696 Note that step_range_end is the address of the first instruction
6697 beyond the step range, and NOT the address of the last instruction
6698 within it!
6699
6700 Note also that during reverse execution, we may be stepping
6701 through a function epilogue and therefore must detect when
6702 the current-frame changes in the middle of a line. */
6703
6704 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6705 ecs->event_thread)
6706 && (execution_direction != EXEC_REVERSE
6707 || frame_id_eq (get_frame_id (frame),
6708 ecs->event_thread->control.step_frame_id)))
6709 {
6710 if (debug_infrun)
6711 fprintf_unfiltered
6712 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
6713 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6714 paddress (gdbarch, ecs->event_thread->control.step_range_end));
6715
6716 /* Tentatively re-enable range stepping; `resume' disables it if
6717 necessary (e.g., if we're stepping over a breakpoint or we
6718 have software watchpoints). */
6719 ecs->event_thread->control.may_range_step = 1;
6720
6721 /* When stepping backward, stop at beginning of line range
6722 (unless it's the function entry point, in which case
6723 keep going back to the call point). */
6724 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6725 if (stop_pc == ecs->event_thread->control.step_range_start
6726 && stop_pc != ecs->stop_func_start
6727 && execution_direction == EXEC_REVERSE)
6728 end_stepping_range (ecs);
6729 else
6730 keep_going (ecs);
6731
6732 return;
6733 }
6734
6735 /* We stepped out of the stepping range. */
6736
6737 /* If we are stepping at the source level and entered the runtime
6738 loader dynamic symbol resolution code...
6739
6740 EXEC_FORWARD: we keep on single stepping until we exit the run
6741 time loader code and reach the callee's address.
6742
6743 EXEC_REVERSE: we've already executed the callee (backward), and
6744 the runtime loader code is handled just like any other
6745 undebuggable function call. Now we need only keep stepping
6746 backward through the trampoline code, and that's handled further
6747 down, so there is nothing for us to do here. */
6748
6749 if (execution_direction != EXEC_REVERSE
6750 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6751 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
6752 {
6753 CORE_ADDR pc_after_resolver =
6754 gdbarch_skip_solib_resolver (gdbarch,
6755 ecs->event_thread->suspend.stop_pc);
6756
6757 if (debug_infrun)
6758 fprintf_unfiltered (gdb_stdlog,
6759 "infrun: stepped into dynsym resolve code\n");
6760
6761 if (pc_after_resolver)
6762 {
6763 /* Set up a step-resume breakpoint at the address
6764 indicated by SKIP_SOLIB_RESOLVER. */
6765 symtab_and_line sr_sal;
6766 sr_sal.pc = pc_after_resolver;
6767 sr_sal.pspace = get_frame_program_space (frame);
6768
6769 insert_step_resume_breakpoint_at_sal (gdbarch,
6770 sr_sal, null_frame_id);
6771 }
6772
6773 keep_going (ecs);
6774 return;
6775 }
6776
6777 /* Step through an indirect branch thunk. */
6778 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
6779 && gdbarch_in_indirect_branch_thunk (gdbarch,
6780 ecs->event_thread->suspend.stop_pc))
6781 {
6782 if (debug_infrun)
6783 fprintf_unfiltered (gdb_stdlog,
6784 "infrun: stepped into indirect branch thunk\n");
6785 keep_going (ecs);
6786 return;
6787 }
6788
6789 if (ecs->event_thread->control.step_range_end != 1
6790 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6791 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
6792 && get_frame_type (frame) == SIGTRAMP_FRAME)
6793 {
6794 if (debug_infrun)
6795 fprintf_unfiltered (gdb_stdlog,
6796 "infrun: stepped into signal trampoline\n");
6797 /* The inferior, while doing a "step" or "next", has ended up in
6798 a signal trampoline (either by a signal being delivered or by
6799 the signal handler returning). Just single-step until the
6800 inferior leaves the trampoline (either by calling the handler
6801 or returning). */
6802 keep_going (ecs);
6803 return;
6804 }
6805
6806 /* If we're in the return path from a shared library trampoline,
6807 we want to proceed through the trampoline when stepping. */
6808 /* macro/2012-04-25: This needs to come before the subroutine
6809 call check below as on some targets return trampolines look
6810 like subroutine calls (MIPS16 return thunks). */
6811 if (gdbarch_in_solib_return_trampoline (gdbarch,
6812 ecs->event_thread->suspend.stop_pc,
6813 ecs->stop_func_name)
6814 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6815 {
6816 /* Determine where this trampoline returns. */
6817 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6818 CORE_ADDR real_stop_pc
6819 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
6820
6821 if (debug_infrun)
6822 fprintf_unfiltered (gdb_stdlog,
6823 "infrun: stepped into solib return tramp\n");
6824
6825 /* Only proceed through if we know where it's going. */
6826 if (real_stop_pc)
6827 {
6828 /* And put the step-breakpoint there and go until there. */
6829 symtab_and_line sr_sal;
6830 sr_sal.pc = real_stop_pc;
6831 sr_sal.section = find_pc_overlay (sr_sal.pc);
6832 sr_sal.pspace = get_frame_program_space (frame);
6833
6834 /* Do not specify what the fp should be when we stop since
6835 on some machines the prologue is where the new fp value
6836 is established. */
6837 insert_step_resume_breakpoint_at_sal (gdbarch,
6838 sr_sal, null_frame_id);
6839
6840 /* Restart without fiddling with the step ranges or
6841 other state. */
6842 keep_going (ecs);
6843 return;
6844 }
6845 }
6846
6847 /* Check for subroutine calls. The check for the current frame
6848 equalling the step ID is not necessary - the check of the
6849 previous frame's ID is sufficient - but it is a common case and
6850 cheaper than checking the previous frame's ID.
6851
6852 NOTE: frame_id_eq will never report two invalid frame IDs as
6853 being equal, so to get into this block, both the current and
6854 previous frame must have valid frame IDs. */
6855 /* The outer_frame_id check is a heuristic to detect stepping
6856 through startup code. If we step over an instruction which
6857 sets the stack pointer from an invalid value to a valid value,
6858 we may detect that as a subroutine call from the mythical
6859 "outermost" function. This could be fixed by marking
6860 outermost frames as !stack_p,code_p,special_p. Then the
6861 initial outermost frame, before sp was valid, would
6862 have code_addr == &_start. See the comment in frame_id_eq
6863 for more. */
6864 if (!frame_id_eq (get_stack_frame_id (frame),
6865 ecs->event_thread->control.step_stack_frame_id)
6866 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
6867 ecs->event_thread->control.step_stack_frame_id)
6868 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
6869 outer_frame_id)
6870 || (ecs->event_thread->control.step_start_function
6871 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
6872 {
6873 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6874 CORE_ADDR real_stop_pc;
6875
6876 if (debug_infrun)
6877 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
6878
6879 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
6880 {
6881 /* I presume that step_over_calls is only 0 when we're
6882 supposed to be stepping at the assembly language level
6883 ("stepi"). Just stop. */
6884 /* And this works the same backward as frontward. MVS */
6885 end_stepping_range (ecs);
6886 return;
6887 }
6888
6889 /* Reverse stepping through solib trampolines. */
6890
6891 if (execution_direction == EXEC_REVERSE
6892 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
6893 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6894 || (ecs->stop_func_start == 0
6895 && in_solib_dynsym_resolve_code (stop_pc))))
6896 {
6897 /* Any solib trampoline code can be handled in reverse
6898 by simply continuing to single-step. We have already
6899 executed the solib function (backwards), and a few
6900 steps will take us back through the trampoline to the
6901 caller. */
6902 keep_going (ecs);
6903 return;
6904 }
6905
6906 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
6907 {
6908 /* We're doing a "next".
6909
6910 Normal (forward) execution: set a breakpoint at the
6911 callee's return address (the address at which the caller
6912 will resume).
6913
6914 Reverse (backward) execution. set the step-resume
6915 breakpoint at the start of the function that we just
6916 stepped into (backwards), and continue to there. When we
6917 get there, we'll need to single-step back to the caller. */
6918
6919 if (execution_direction == EXEC_REVERSE)
6920 {
6921 /* If we're already at the start of the function, we've either
6922 just stepped backward into a single instruction function,
6923 or stepped back out of a signal handler to the first instruction
6924 of the function. Just keep going, which will single-step back
6925 to the caller. */
6926 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
6927 {
6928 /* Normal function call return (static or dynamic). */
6929 symtab_and_line sr_sal;
6930 sr_sal.pc = ecs->stop_func_start;
6931 sr_sal.pspace = get_frame_program_space (frame);
6932 insert_step_resume_breakpoint_at_sal (gdbarch,
6933 sr_sal, null_frame_id);
6934 }
6935 }
6936 else
6937 insert_step_resume_breakpoint_at_caller (frame);
6938
6939 keep_going (ecs);
6940 return;
6941 }
6942
6943 /* If we are in a function call trampoline (a stub between the
6944 calling routine and the real function), locate the real
6945 function. That's what tells us (a) whether we want to step
6946 into it at all, and (b) what prologue we want to run to the
6947 end of, if we do step into it. */
6948 real_stop_pc = skip_language_trampoline (frame, stop_pc);
6949 if (real_stop_pc == 0)
6950 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
6951 if (real_stop_pc != 0)
6952 ecs->stop_func_start = real_stop_pc;
6953
6954 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
6955 {
6956 symtab_and_line sr_sal;
6957 sr_sal.pc = ecs->stop_func_start;
6958 sr_sal.pspace = get_frame_program_space (frame);
6959
6960 insert_step_resume_breakpoint_at_sal (gdbarch,
6961 sr_sal, null_frame_id);
6962 keep_going (ecs);
6963 return;
6964 }
6965
6966 /* If we have line number information for the function we are
6967 thinking of stepping into and the function isn't on the skip
6968 list, step into it.
6969
6970 If there are several symtabs at that PC (e.g. with include
6971 files), just want to know whether *any* of them have line
6972 numbers. find_pc_line handles this. */
6973 {
6974 struct symtab_and_line tmp_sal;
6975
6976 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
6977 if (tmp_sal.line != 0
6978 && !function_name_is_marked_for_skip (ecs->stop_func_name,
6979 tmp_sal)
6980 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
6981 {
6982 if (execution_direction == EXEC_REVERSE)
6983 handle_step_into_function_backward (gdbarch, ecs);
6984 else
6985 handle_step_into_function (gdbarch, ecs);
6986 return;
6987 }
6988 }
6989
6990 /* If we have no line number and the step-stop-if-no-debug is
6991 set, we stop the step so that the user has a chance to switch
6992 in assembly mode. */
6993 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6994 && step_stop_if_no_debug)
6995 {
6996 end_stepping_range (ecs);
6997 return;
6998 }
6999
7000 if (execution_direction == EXEC_REVERSE)
7001 {
7002 /* If we're already at the start of the function, we've either just
7003 stepped backward into a single instruction function without line
7004 number info, or stepped back out of a signal handler to the first
7005 instruction of the function without line number info. Just keep
7006 going, which will single-step back to the caller. */
7007 if (ecs->stop_func_start != stop_pc)
7008 {
7009 /* Set a breakpoint at callee's start address.
7010 From there we can step once and be back in the caller. */
7011 symtab_and_line sr_sal;
7012 sr_sal.pc = ecs->stop_func_start;
7013 sr_sal.pspace = get_frame_program_space (frame);
7014 insert_step_resume_breakpoint_at_sal (gdbarch,
7015 sr_sal, null_frame_id);
7016 }
7017 }
7018 else
7019 /* Set a breakpoint at callee's return address (the address
7020 at which the caller will resume). */
7021 insert_step_resume_breakpoint_at_caller (frame);
7022
7023 keep_going (ecs);
7024 return;
7025 }
7026
7027 /* Reverse stepping through solib trampolines. */
7028
7029 if (execution_direction == EXEC_REVERSE
7030 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7031 {
7032 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7033
7034 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7035 || (ecs->stop_func_start == 0
7036 && in_solib_dynsym_resolve_code (stop_pc)))
7037 {
7038 /* Any solib trampoline code can be handled in reverse
7039 by simply continuing to single-step. We have already
7040 executed the solib function (backwards), and a few
7041 steps will take us back through the trampoline to the
7042 caller. */
7043 keep_going (ecs);
7044 return;
7045 }
7046 else if (in_solib_dynsym_resolve_code (stop_pc))
7047 {
7048 /* Stepped backward into the solib dynsym resolver.
7049 Set a breakpoint at its start and continue, then
7050 one more step will take us out. */
7051 symtab_and_line sr_sal;
7052 sr_sal.pc = ecs->stop_func_start;
7053 sr_sal.pspace = get_frame_program_space (frame);
7054 insert_step_resume_breakpoint_at_sal (gdbarch,
7055 sr_sal, null_frame_id);
7056 keep_going (ecs);
7057 return;
7058 }
7059 }
7060
7061 /* This always returns the sal for the inner-most frame when we are in a
7062 stack of inlined frames, even if GDB actually believes that it is in a
7063 more outer frame. This is checked for below by calls to
7064 inline_skipped_frames. */
7065 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7066
7067 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7068 the trampoline processing logic, however, there are some trampolines
7069 that have no names, so we should do trampoline handling first. */
7070 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7071 && ecs->stop_func_name == NULL
7072 && stop_pc_sal.line == 0)
7073 {
7074 if (debug_infrun)
7075 fprintf_unfiltered (gdb_stdlog,
7076 "infrun: stepped into undebuggable function\n");
7077
7078 /* The inferior just stepped into, or returned to, an
7079 undebuggable function (where there is no debugging information
7080 and no line number corresponding to the address where the
7081 inferior stopped). Since we want to skip this kind of code,
7082 we keep going until the inferior returns from this
7083 function - unless the user has asked us not to (via
7084 set step-mode) or we no longer know how to get back
7085 to the call site. */
7086 if (step_stop_if_no_debug
7087 || !frame_id_p (frame_unwind_caller_id (frame)))
7088 {
7089 /* If we have no line number and the step-stop-if-no-debug
7090 is set, we stop the step so that the user has a chance to
7091 switch in assembly mode. */
7092 end_stepping_range (ecs);
7093 return;
7094 }
7095 else
7096 {
7097 /* Set a breakpoint at callee's return address (the address
7098 at which the caller will resume). */
7099 insert_step_resume_breakpoint_at_caller (frame);
7100 keep_going (ecs);
7101 return;
7102 }
7103 }
7104
7105 if (ecs->event_thread->control.step_range_end == 1)
7106 {
7107 /* It is stepi or nexti. We always want to stop stepping after
7108 one instruction. */
7109 if (debug_infrun)
7110 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
7111 end_stepping_range (ecs);
7112 return;
7113 }
7114
7115 if (stop_pc_sal.line == 0)
7116 {
7117 /* We have no line number information. That means to stop
7118 stepping (does this always happen right after one instruction,
7119 when we do "s" in a function with no line numbers,
7120 or can this happen as a result of a return or longjmp?). */
7121 if (debug_infrun)
7122 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
7123 end_stepping_range (ecs);
7124 return;
7125 }
7126
7127 /* Look for "calls" to inlined functions, part one. If the inline
7128 frame machinery detected some skipped call sites, we have entered
7129 a new inline function. */
7130
7131 if (frame_id_eq (get_frame_id (get_current_frame ()),
7132 ecs->event_thread->control.step_frame_id)
7133 && inline_skipped_frames (ecs->event_thread))
7134 {
7135 if (debug_infrun)
7136 fprintf_unfiltered (gdb_stdlog,
7137 "infrun: stepped into inlined function\n");
7138
7139 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
7140
7141 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
7142 {
7143 /* For "step", we're going to stop. But if the call site
7144 for this inlined function is on the same source line as
7145 we were previously stepping, go down into the function
7146 first. Otherwise stop at the call site. */
7147
7148 if (call_sal.line == ecs->event_thread->current_line
7149 && call_sal.symtab == ecs->event_thread->current_symtab)
7150 {
7151 step_into_inline_frame (ecs->event_thread);
7152 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7153 {
7154 keep_going (ecs);
7155 return;
7156 }
7157 }
7158
7159 end_stepping_range (ecs);
7160 return;
7161 }
7162 else
7163 {
7164 /* For "next", we should stop at the call site if it is on a
7165 different source line. Otherwise continue through the
7166 inlined function. */
7167 if (call_sal.line == ecs->event_thread->current_line
7168 && call_sal.symtab == ecs->event_thread->current_symtab)
7169 keep_going (ecs);
7170 else
7171 end_stepping_range (ecs);
7172 return;
7173 }
7174 }
7175
7176 /* Look for "calls" to inlined functions, part two. If we are still
7177 in the same real function we were stepping through, but we have
7178 to go further up to find the exact frame ID, we are stepping
7179 through a more inlined call beyond its call site. */
7180
7181 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7182 && !frame_id_eq (get_frame_id (get_current_frame ()),
7183 ecs->event_thread->control.step_frame_id)
7184 && stepped_in_from (get_current_frame (),
7185 ecs->event_thread->control.step_frame_id))
7186 {
7187 if (debug_infrun)
7188 fprintf_unfiltered (gdb_stdlog,
7189 "infrun: stepping through inlined function\n");
7190
7191 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7192 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
7193 keep_going (ecs);
7194 else
7195 end_stepping_range (ecs);
7196 return;
7197 }
7198
7199 bool refresh_step_info = true;
7200 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
7201 && (ecs->event_thread->current_line != stop_pc_sal.line
7202 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
7203 {
7204 if (stop_pc_sal.is_stmt)
7205 {
7206 /* We are at the start of a different line. So stop. Note that
7207 we don't stop if we step into the middle of a different line.
7208 That is said to make things like for (;;) statements work
7209 better. */
7210 if (debug_infrun)
7211 fprintf_unfiltered (gdb_stdlog,
7212 "infrun: stepped to a different line\n");
7213 end_stepping_range (ecs);
7214 return;
7215 }
7216 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7217 ecs->event_thread->control.step_frame_id))
7218 {
7219 /* We are at the start of a different line, however, this line is
7220 not marked as a statement, and we have not changed frame. We
7221 ignore this line table entry, and continue stepping forward,
7222 looking for a better place to stop. */
7223 refresh_step_info = false;
7224 if (debug_infrun)
7225 fprintf_unfiltered (gdb_stdlog,
7226 "infrun: stepped to a different line, but "
7227 "it's not the start of a statement\n");
7228 }
7229 }
7230
7231 /* We aren't done stepping.
7232
7233 Optimize by setting the stepping range to the line.
7234 (We might not be in the original line, but if we entered a
7235 new line in mid-statement, we continue stepping. This makes
7236 things like for(;;) statements work better.)
7237
7238 If we entered a SAL that indicates a non-statement line table entry,
7239 then we update the stepping range, but we don't update the step info,
7240 which includes things like the line number we are stepping away from.
7241 This means we will stop when we find a line table entry that is marked
7242 as is-statement, even if it matches the non-statement one we just
7243 stepped into. */
7244
7245 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7246 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
7247 ecs->event_thread->control.may_range_step = 1;
7248 if (refresh_step_info)
7249 set_step_info (ecs->event_thread, frame, stop_pc_sal);
7250
7251 if (debug_infrun)
7252 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
7253 keep_going (ecs);
7254 }
7255
7256 /* In all-stop mode, if we're currently stepping but have stopped in
7257 some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we left
7259 it stopped (and the event needs further processing). */
7260
static int
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  /* In non-stop mode each thread is managed independently, so there
     is never a "stepped thread" to switch back to; fall through to
     the final "return 0" below.  */
  if (!target_is_non_stop_p ())
    {
      struct thread_info *stepping_thread;

      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return 0;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to finish step-over of [%s]\n",
				  target_pid_to_str (ecs->event_thread->ptid).c_str ());
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
       {
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to step [%s] over single-step "
				  "breakpoint\n",
				  target_pid_to_str (ecs->ptid).c_str ());
	    }
	  keep_going (ecs);
	  return 1;
       }

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: thread [%s] still needs step-over\n",
				  target_pid_to_str (ecs->event_thread->ptid).c_str ());
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return 0;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      /* Do all pending step-overs before actually proceeding with
	 step/next/etc.  */
      if (start_step_over ())
	{
	  prepare_to_wait (ecs);
	  return 1;
	}

      /* Look for the stepping/nexting thread.  */
      stepping_thread = NULL;

      for (thread_info *tp : all_non_exited_threads ())
	{
	  /* Switch the thread context (without fetching registers) so
	     that the per-thread checks below observe TP.  */
	  switch_to_thread_no_regs (tp);

	  /* Ignore threads of processes the caller is not
	     resuming.  */
	  if (!sched_multi
	      && (tp->inf->process_target () != ecs->target
		  || tp->inf->pid != ecs->ptid.pid ()))
	    continue;

	  /* When stepping over a breakpoint, we lock all threads
	     except the one that needs to move past the breakpoint.
	     If a non-event thread has this set, the "incomplete
	     step-over" check above should have caught it earlier.  */
	  if (tp->control.trap_expected)
	    {
	      internal_error (__FILE__, __LINE__,
			      "[%s] has inconsistent state: "
			      "trap_expected=%d\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      tp->control.trap_expected);
	    }

	  /* Did we find the stepping thread?  */
	  if (tp->control.step_range_end)
	    {
	      /* Yep.  There should be only one, though.  */
	      gdb_assert (stepping_thread == NULL);

	      /* The event thread is handled at the top, before we
		 enter this loop.  */
	      gdb_assert (tp != ecs->event_thread);

	      /* If some thread other than the event thread is
		 stepping, then scheduler locking can't be in effect,
		 otherwise we wouldn't have resumed the current event
		 thread in the first place.  */
	      gdb_assert (!schedlock_applies (tp));

	      stepping_thread = tp;
	    }
	}

      if (stepping_thread != NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: switching back to stepped thread\n");

	  /* If the stepped thread can't be resumed (e.g., it
	     vanished), fall through and keep reporting the event for
	     the thread that did stop.  */
	  if (keep_going_stepped_thread (stepping_thread))
	    {
	      prepare_to_wait (ecs);
	      return 1;
	    }
	}

      /* Restore the event thread as the current thread before
	 returning to the caller for further processing.  */
      switch_to_thread (ecs->event_thread);
    }

  return 0;
}
7414
7415 /* Set a previously stepped thread back to stepping. Returns true on
7416 success, false if the resume is not possible (e.g., the thread
7417 vanished). */
7418
7419 static int
7420 keep_going_stepped_thread (struct thread_info *tp)
7421 {
7422 struct frame_info *frame;
7423 struct execution_control_state ecss;
7424 struct execution_control_state *ecs = &ecss;
7425
7426 /* If the stepping thread exited, then don't try to switch back and
7427 resume it, which could fail in several different ways depending
7428 on the target. Instead, just keep going.
7429
7430 We can find a stepping dead thread in the thread list in two
7431 cases:
7432
7433 - The target supports thread exit events, and when the target
7434 tries to delete the thread from the thread list, inferior_ptid
7435 pointed at the exiting thread. In such case, calling
7436 delete_thread does not really remove the thread from the list;
7437 instead, the thread is left listed, with 'exited' state.
7438
7439 - The target's debug interface does not support thread exit
7440 events, and so we have no idea whatsoever if the previously
7441 stepping thread is still alive. For that reason, we need to
7442 synchronously query the target now. */
7443
7444 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
7445 {
7446 if (debug_infrun)
7447 fprintf_unfiltered (gdb_stdlog,
7448 "infrun: not resuming previously "
7449 "stepped thread, it has vanished\n");
7450
7451 delete_thread (tp);
7452 return 0;
7453 }
7454
7455 if (debug_infrun)
7456 fprintf_unfiltered (gdb_stdlog,
7457 "infrun: resuming previously stepped thread\n");
7458
7459 reset_ecs (ecs, tp);
7460 switch_to_thread (tp);
7461
7462 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
7463 frame = get_current_frame ();
7464
7465 /* If the PC of the thread we were trying to single-step has
7466 changed, then that thread has trapped or been signaled, but the
7467 event has not been reported to GDB yet. Re-poll the target
7468 looking for this particular thread's event (i.e. temporarily
7469 enable schedlock) by:
7470
7471 - setting a break at the current PC
7472 - resuming that particular thread, only (by setting trap
7473 expected)
7474
7475 This prevents us continuously moving the single-step breakpoint
7476 forward, one instruction at a time, overstepping. */
7477
7478 if (tp->suspend.stop_pc != tp->prev_pc)
7479 {
7480 ptid_t resume_ptid;
7481
7482 if (debug_infrun)
7483 fprintf_unfiltered (gdb_stdlog,
7484 "infrun: expected thread advanced also (%s -> %s)\n",
7485 paddress (target_gdbarch (), tp->prev_pc),
7486 paddress (target_gdbarch (), tp->suspend.stop_pc));
7487
7488 /* Clear the info of the previous step-over, as it's no longer
7489 valid (if the thread was trying to step over a breakpoint, it
7490 has already succeeded). It's what keep_going would do too,
7491 if we called it. Do this before trying to insert the sss
7492 breakpoint, otherwise if we were previously trying to step
7493 over this exact address in another thread, the breakpoint is
7494 skipped. */
7495 clear_step_over_info ();
7496 tp->control.trap_expected = 0;
7497
7498 insert_single_step_breakpoint (get_frame_arch (frame),
7499 get_frame_address_space (frame),
7500 tp->suspend.stop_pc);
7501
7502 tp->resumed = true;
7503 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
7504 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7505 }
7506 else
7507 {
7508 if (debug_infrun)
7509 fprintf_unfiltered (gdb_stdlog,
7510 "infrun: expected thread still hasn't advanced\n");
7511
7512 keep_going_pass_signal (ecs);
7513 }
7514 return 1;
7515 }
7516
7517 /* Is thread TP in the middle of (software or hardware)
7518 single-stepping? (Note the result of this function must never be
7519 passed directly as target_resume's STEP parameter.) */
7520
7521 static int
7522 currently_stepping (struct thread_info *tp)
7523 {
7524 return ((tp->control.step_range_end
7525 && tp->control.step_resume_breakpoint == NULL)
7526 || tp->control.trap_expected
7527 || tp->stepped_breakpoint
7528 || bpstat_should_step ());
7529 }
7530
/* Inferior has stepped into a subroutine call with source code that
   we should not step over.  Do step to the first line of code in
   it.  */

static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);

  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
  /* Skip the prologue only for real source languages; for assembly
     the "prologue" concept doesn't apply.  */
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19: On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
	 some machines the prologue is where the new fp value is
	 established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
7607
7608 /* Inferior has stepped backward into a subroutine call with source
7609 code that we should not step over. Do step to the beginning of the
7610 last line of code in it. */
7611
7612 static void
7613 handle_step_into_function_backward (struct gdbarch *gdbarch,
7614 struct execution_control_state *ecs)
7615 {
7616 struct compunit_symtab *cust;
7617 struct symtab_and_line stop_func_sal;
7618
7619 fill_in_stop_func (gdbarch, ecs);
7620
7621 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
7622 if (cust != NULL && compunit_language (cust) != language_asm)
7623 ecs->stop_func_start
7624 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
7625
7626 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7627
7628 /* OK, we're just going to keep stepping here. */
7629 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
7630 {
7631 /* We're there already. Just stop stepping now. */
7632 end_stepping_range (ecs);
7633 }
7634 else
7635 {
7636 /* Else just reset the step range and keep going.
7637 No step-resume breakpoint, they don't work for
7638 epilogues, which can have multiple entry paths. */
7639 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7640 ecs->event_thread->control.step_range_end = stop_func_sal.end;
7641 keep_going (ecs);
7642 }
7643 return;
7644 }
7645
7646 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
7647 This is used to both functions and to skip over code. */
7648
7649 static void
7650 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7651 struct symtab_and_line sr_sal,
7652 struct frame_id sr_id,
7653 enum bptype sr_type)
7654 {
7655 /* There should never be more than one step-resume or longjmp-resume
7656 breakpoint per thread, so we should never be setting a new
7657 step_resume_breakpoint when one is already active. */
7658 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
7659 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
7660
7661 if (debug_infrun)
7662 fprintf_unfiltered (gdb_stdlog,
7663 "infrun: inserting step-resume breakpoint at %s\n",
7664 paddress (gdbarch, sr_sal.pc));
7665
7666 inferior_thread ()->control.step_resume_breakpoint
7667 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
7668 }
7669
7670 void
7671 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7672 struct symtab_and_line sr_sal,
7673 struct frame_id sr_id)
7674 {
7675 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7676 sr_sal, sr_id,
7677 bp_step_resume);
7678 }
7679
7680 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7681 This is used to skip a potential signal handler.
7682
7683 This is called with the interrupted function's frame. The signal
7684 handler, when it returns, will resume the interrupted function at
7685 RETURN_FRAME.pc. */
7686
7687 static void
7688 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
7689 {
7690 gdb_assert (return_frame != NULL);
7691
7692 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7693
7694 symtab_and_line sr_sal;
7695 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
7696 sr_sal.section = find_pc_overlay (sr_sal.pc);
7697 sr_sal.pspace = get_frame_program_space (return_frame);
7698
7699 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7700 get_stack_frame_id (return_frame),
7701 bp_hp_step_resume);
7702 }
7703
7704 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7705 is used to skip a function after stepping into it (for "next" or if
7706 the called function has no debugging information).
7707
7708 The current function has almost always been reached by single
7709 stepping a call or return instruction. NEXT_FRAME belongs to the
7710 current function, and the breakpoint will be set at the caller's
7711 resume address.
7712
7713 This is a separate function rather than reusing
7714 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7715 get_prev_frame, which may stop prematurely (see the implementation
7716 of frame_unwind_caller_id for an example). */
7717
7718 static void
7719 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7720 {
7721 /* We shouldn't have gotten here if we don't know where the call site
7722 is. */
7723 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
7724
7725 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
7726
7727 symtab_and_line sr_sal;
7728 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7729 frame_unwind_caller_pc (next_frame));
7730 sr_sal.section = find_pc_overlay (sr_sal.pc);
7731 sr_sal.pspace = frame_unwind_program_space (next_frame);
7732
7733 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
7734 frame_unwind_caller_id (next_frame));
7735 }
7736
7737 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7738 new breakpoint at the target of a jmp_buf. The handling of
7739 longjmp-resume uses the same mechanisms used for handling
7740 "step-resume" breakpoints. */
7741
7742 static void
7743 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
7744 {
7745 /* There should never be more than one longjmp-resume breakpoint per
7746 thread, so we should never be setting a new
7747 longjmp_resume_breakpoint when one is already active. */
7748 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
7749
7750 if (debug_infrun)
7751 fprintf_unfiltered (gdb_stdlog,
7752 "infrun: inserting longjmp-resume breakpoint at %s\n",
7753 paddress (gdbarch, pc));
7754
7755 inferior_thread ()->control.exception_resume_breakpoint =
7756 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
7757 }
7758
7759 /* Insert an exception resume breakpoint. TP is the thread throwing
7760 the exception. The block B is the block of the unwinder debug hook
7761 function. FRAME is the frame corresponding to the call to this
7762 function. SYM is the symbol of the function argument holding the
7763 target PC of the exception. */
7764
7765 static void
7766 insert_exception_resume_breakpoint (struct thread_info *tp,
7767 const struct block *b,
7768 struct frame_info *frame,
7769 struct symbol *sym)
7770 {
7771 try
7772 {
7773 struct block_symbol vsym;
7774 struct value *value;
7775 CORE_ADDR handler;
7776 struct breakpoint *bp;
7777
7778 vsym = lookup_symbol_search_name (sym->search_name (),
7779 b, VAR_DOMAIN);
7780 value = read_var_value (vsym.symbol, vsym.block, frame);
7781 /* If the value was optimized out, revert to the old behavior. */
7782 if (! value_optimized_out (value))
7783 {
7784 handler = value_as_address (value);
7785
7786 if (debug_infrun)
7787 fprintf_unfiltered (gdb_stdlog,
7788 "infrun: exception resume at %lx\n",
7789 (unsigned long) handler);
7790
7791 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7792 handler,
7793 bp_exception_resume).release ();
7794
7795 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7796 frame = NULL;
7797
7798 bp->thread = tp->global_num;
7799 inferior_thread ()->control.exception_resume_breakpoint = bp;
7800 }
7801 }
7802 catch (const gdb_exception_error &e)
7803 {
7804 /* We want to ignore errors here. */
7805 }
7806 }
7807
7808 /* A helper for check_exception_resume that sets an
7809 exception-breakpoint based on a SystemTap probe. */
7810
7811 static void
7812 insert_exception_resume_from_probe (struct thread_info *tp,
7813 const struct bound_probe *probe,
7814 struct frame_info *frame)
7815 {
7816 struct value *arg_value;
7817 CORE_ADDR handler;
7818 struct breakpoint *bp;
7819
7820 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7821 if (!arg_value)
7822 return;
7823
7824 handler = value_as_address (arg_value);
7825
7826 if (debug_infrun)
7827 fprintf_unfiltered (gdb_stdlog,
7828 "infrun: exception resume at %s\n",
7829 paddress (probe->objfile->arch (),
7830 handler));
7831
7832 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7833 handler, bp_exception_resume).release ();
7834 bp->thread = tp->global_num;
7835 inferior_thread ()->control.exception_resume_breakpoint = bp;
7836 }
7837
/* This is called when an exception has been intercepted.  Check to
   see whether the exception's destination is of interest, and if so,
   set an exception resume breakpoint there.  */

static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe: fall back to inspecting the debug hook's arguments,
     which requires debug info for the function.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  /* Skip the first argument (the CFA); the second argument
	     symbol is the handler.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Errors looking up the handler are not fatal; simply don't set
	 the breakpoint.  */
    }
}
7905
7906 static void
7907 stop_waiting (struct execution_control_state *ecs)
7908 {
7909 if (debug_infrun)
7910 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
7911
7912 /* Let callers know we don't want to wait for the inferior anymore. */
7913 ecs->wait_some_more = 0;
7914
7915 /* If all-stop, but there exists a non-stop target, stop all
7916 threads now that we're presenting the stop to the user. */
7917 if (!non_stop && exists_non_stop_target ())
7918 stop_all_threads ();
7919 }
7920
/* Like keep_going, but passes the signal to the inferior, even if the
   signal is set to nopass.  */

static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: %s has trap_expected set, "
			    "resuming to collect trap\n",
			    target_pid_to_str (tp->ptid).c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->suspend.stop_signal);
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over already in progress: "
				"step-over for %s deferred\n",
				target_pid_to_str (tp->ptid).c_str ());
	  thread_step_over_chain_enqueue (tp);
	}
      else
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over in progress: "
				"resume of %s deferred\n",
				target_pid_to_str (tp->ptid).c_str ());
	}
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  /* In-line step-over: record which breakpoint/watchpoint is
	     being stepped past, and by which thread.  */
	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ();

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      /* Expect a trap next if we're stepping past a breakpoint or
	 watchpoint.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
8049
8050 /* Called when we should continue running the inferior, because the
8051 current event doesn't cause a user visible stop. This does the
8052 resuming part; waiting for the next event is done elsewhere. */
8053
8054 static void
8055 keep_going (struct execution_control_state *ecs)
8056 {
8057 if (ecs->event_thread->control.trap_expected
8058 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
8059 ecs->event_thread->control.trap_expected = 0;
8060
8061 if (!signal_program[ecs->event_thread->suspend.stop_signal])
8062 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
8063 keep_going_pass_signal (ecs);
8064 }
8065
8066 /* This function normally comes after a resume, before
8067 handle_inferior_event exits. It takes care of any last bits of
8068 housekeeping, and sets the all-important wait_some_more flag. */
8069
8070 static void
8071 prepare_to_wait (struct execution_control_state *ecs)
8072 {
8073 if (debug_infrun)
8074 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
8075
8076 ecs->wait_some_more = 1;
8077
8078 if (!target_is_async_p ())
8079 mark_infrun_async_event_handler ();
8080 }
8081
8082 /* We are done with the step range of a step/next/si/ni command.
8083 Called once for each n of a "step n" operation. */
8084
8085 static void
8086 end_stepping_range (struct execution_control_state *ecs)
8087 {
8088 ecs->event_thread->control.stop_step = 1;
8089 stop_waiting (ecs);
8090 }
8091
8092 /* Several print_*_reason functions to print why the inferior has stopped.
8093 We always print something when the inferior exits, or receives a signal.
8094 The rest of the cases are dealt with later on in normal_stop and
8095 print_it_typical. Ideally there should be a call to one of these
8096 print_*_reason functions functions from handle_inferior_event each time
8097 stop_waiting is called.
8098
8099 Note that we don't call these directly, instead we delegate that to
8100 the interpreters, through observers. Interpreters then call these
8101 with whatever uiout is right. */
8102
8103 void
8104 print_end_stepping_range_reason (struct ui_out *uiout)
8105 {
8106 /* For CLI-like interpreters, print nothing. */
8107
8108 if (uiout->is_mi_like_p ())
8109 {
8110 uiout->field_string ("reason",
8111 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8112 }
8113 }
8114
/* Print why the inferior stopped: it was terminated by signal
   SIGGNAL.  The annotate_* calls interleave annotations for
   annotation-level consumers; MI additionally gets a "reason"
   field.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
8135
/* Print why the inferior stopped: it exited, with status EXITSTATUS.
   A zero status is reported as a normal exit; a non-zero status is
   reported in octal, matching the historical GDB convention.  */

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      /* Exit code is printed in octal ("0%o").  */
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
8162
8163 /* Some targets/architectures can do extra processing/display of
8164 segmentation faults. E.g., Intel MPX boundary faults.
8165 Call the architecture dependent function to handle the fault. */
8166
8167 static void
8168 handle_segmentation_fault (struct ui_out *uiout)
8169 {
8170 struct regcache *regcache = get_current_regcache ();
8171 struct gdbarch *gdbarch = regcache->arch ();
8172
8173 if (gdbarch_handle_segmentation_fault_p (gdbarch))
8174 gdbarch_handle_segmentation_fault (gdbarch, uiout);
8175 }
8176
/* Print why the inferior stopped: it received signal SIGGNAL (or
   simply "stopped", when SIGGNAL is GDB_SIGNAL_0 on a CLI-like
   uiout).  The thread that caused the stop may be named, depending on
   the "print thread-events"-related setting queried via
   show_thread_that_caused_stop.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  annotate_signal ();

  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      const char *name;

      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      /* Prefer the locally-cached thread name; fall back to asking
	 the target.  */
      name = thr->name != NULL ? thr->name : target_thread_name (thr);
      if (name != NULL)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* SIGSEGV may get extra arch-specific display (e.g. MPX bound
	 violations).  */
      if (siggnal == GDB_SIGNAL_SEGV)
	handle_segmentation_fault (uiout);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
8226
/* Print why the inferior stopped: there is no more recorded execution
   history to reverse through.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  uiout->text ("\nNo more reverse-execution history.\n");
}
8232
/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   bpstat_print contains the logic deciding in detail what to print,
   based on the event(s) that just occurred.  */

static void
print_stop_location (struct target_waitstatus *ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the bpstat machinery decide first; it knows about
     breakpoints, watchpoints, etc. that may have caused the stop.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->suspend.stop_pc)))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
}
8292
/* See infrun.h.  */

void
print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    /* Temporarily route all stop-location output through UIOUT; the
       scoped_restore puts the previous uiout back at block exit.  */
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (&last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  /* If the command that just finished drove a thread FSM (e.g.
     "finish"), print its return value, if any.  */
  tp = inferior_thread ();
  if (tp->thread_fsm != NULL
      && tp->thread_fsm->finished_p ())
    {
      struct return_value_info *rv;

      rv = tp->thread_fsm->return_value ();
      if (rv != NULL)
	print_return_value (uiout, rv);
    }
}
8324
8325 /* See infrun.h. */
8326
8327 void
8328 maybe_remove_breakpoints (void)
8329 {
8330 if (!breakpoints_should_be_inserted_now () && target_has_execution)
8331 {
8332 if (remove_breakpoints ())
8333 {
8334 target_terminal::ours_for_output ();
8335 printf_filtered (_("Cannot remove breakpoints because "
8336 "program is no longer writable.\nFurther "
8337 "execution is probably impossible.\n"));
8338 }
8339 }
8340 }
8341
/* The execution context that just caused a normal stop.  */

struct stop_context
{
  stop_context ();
  ~stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current execution context no longer matches
     this saved stop context.  */
  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  struct thread_info *thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
8367
8368 /* Initializes a new stop context. If stopped for a thread event, this
8369 takes a strong reference to the thread. */
8370
8371 stop_context::stop_context ()
8372 {
8373 stop_id = get_stop_id ();
8374 ptid = inferior_ptid;
8375 inf_num = current_inferior ()->num;
8376
8377 if (inferior_ptid != null_ptid)
8378 {
8379 /* Take a strong reference so that the thread can't be deleted
8380 yet. */
8381 thread = inferior_thread ();
8382 thread->incref ();
8383 }
8384 else
8385 thread = NULL;
8386 }
8387
/* Destroy a stop context.  Releases the strong thread reference taken
   by the constructor, if any.  */

stop_context::~stop_context ()
{
  if (thread != NULL)
    thread->decref ();
}
8396
8397 /* Return true if the current context no longer matches the saved stop
8398 context. */
8399
8400 bool
8401 stop_context::changed () const
8402 {
8403 if (ptid != inferior_ptid)
8404 return true;
8405 if (inf_num != current_inferior ()->num)
8406 return true;
8407 if (thread != NULL && thread->state != THREAD_STOPPED)
8408 return true;
8409 if (get_stop_id () != stop_id)
8410 return true;
8411 return false;
8412 }
8413
/* See infrun.h.

   Present a stop to the user: update thread state, print the stop
   notifications, run the stop hook, and notify observers.  Returns
   nonzero if the "stop" hook resumed the target or switched
   thread/inferior (in which case the stop was not presented), zero
   otherwise.  */

int
normal_stop (void)
{
  struct target_waitstatus last;

  /* Fetch the last reported target event; its kind decides most of
     what is printed below.  */
  get_last_target_status (nullptr, nullptr, &last);

  /* Start a new stop epoch; stop_context::changed compares against
     this to detect re-resumption from the stop hook.  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind == TARGET_WAITKIND_SIGNALLED
	   || last.kind == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  /* The scoped_finish_thread_state marks the relevant threads as
     stopped for the frontend when it is reset/destroyed.  */
  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  printf_filtered (_("[Switching to %s]\n"),
			   target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    printf_filtered (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  struct frame_info *frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command != NULL)
    {
      /* Capture the current context so we can tell below whether the
	 hook resumed the target or switched thread/inferior.  */
      stop_context saved_context;

      try
	{
	  execute_cmd_pre_hook (stop_command);
	}
      catch (const gdb_exception &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Error while running hook_stop:\n");
	}

      /* If the stop hook resumes the target, then there's no point in
	 trying to notify about the previous stop; its context is
	 gone.  Likewise if the command switches thread or inferior --
	 the observers would print a stop for the wrong
	 thread/inferior.  */
      if (saved_context.changed ())
	return 1;
    }

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED
	  && last.kind != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  return 0;
}
8608 \f
/* Return nonzero if GDB is configured to stop when SIGNO arrives in
   the inferior ("handle SIGNO stop").  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
8614
/* Return nonzero if GDB is configured to print a message when SIGNO
   arrives in the inferior ("handle SIGNO print").  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
8620
/* Return nonzero if GDB is configured to pass SIGNO through to the
   inferior ("handle SIGNO pass").  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
8626
8627 static void
8628 signal_cache_update (int signo)
8629 {
8630 if (signo == -1)
8631 {
8632 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
8633 signal_cache_update (signo);
8634
8635 return;
8636 }
8637
8638 signal_pass[signo] = (signal_stop[signo] == 0
8639 && signal_print[signo] == 0
8640 && signal_program[signo] == 1
8641 && signal_catch[signo] == 0);
8642 }
8643
8644 int
8645 signal_stop_update (int signo, int state)
8646 {
8647 int ret = signal_stop[signo];
8648
8649 signal_stop[signo] = state;
8650 signal_cache_update (signo);
8651 return ret;
8652 }
8653
8654 int
8655 signal_print_update (int signo, int state)
8656 {
8657 int ret = signal_print[signo];
8658
8659 signal_print[signo] = state;
8660 signal_cache_update (signo);
8661 return ret;
8662 }
8663
8664 int
8665 signal_pass_update (int signo, int state)
8666 {
8667 int ret = signal_program[signo];
8668
8669 signal_program[signo] = state;
8670 signal_cache_update (signo);
8671 return ret;
8672 }
8673
8674 /* Update the global 'signal_catch' from INFO and notify the
8675 target. */
8676
8677 void
8678 signal_catch_update (const unsigned int *info)
8679 {
8680 int i;
8681
8682 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8683 signal_catch[i] = info[i] > 0;
8684 signal_cache_update (-1);
8685 target_pass_signals (signal_pass);
8686 }
8687
/* Print the column headers for the table output by "info signals"
   and "handle".  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal        Stop\tPrint\tPass "
		     "to program\tDescription\n"));
}
8694
8695 static void
8696 sig_print_info (enum gdb_signal oursig)
8697 {
8698 const char *name = gdb_signal_to_name (oursig);
8699 int name_padding = 13 - strlen (name);
8700
8701 if (name_padding <= 0)
8702 name_padding = 0;
8703
8704 printf_filtered ("%s", name);
8705 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
8706 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8707 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8708 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
8709 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
8710 }
8711
/* Specify how various signals in the inferior should be handled.

   Implements the "handle" command.  ARGS is a mix of signal
   designators (symbolic names, numbers, or NUM-NUM ranges, plus the
   keyword "all") and action keywords ("stop", "print", "pass" and
   their negations); actions accumulate and apply to all signals
   named so far.  FROM_TTY nonzero means show the resulting table.  */

static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle. */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      /* Count leading digits to detect numeric arguments and locate a
	 possible '-' range separator.  */
      for (digits = 0; isdigit (arg[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
	{
	  /* "stop" implies "print".  */
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
	{
	  /* "noignore" is a synonym for "pass".  */
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
	{
	  /* "noprint" implies "nostop".  */
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal  number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (arg));
	  if (arg[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi (arg + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      std::swap (sigfirst, siglast);
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (arg);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* These are used by GDB itself, so only change them when
		 explicitly named and confirmed.  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    printf_unfiltered (_("Not confirmed, unchanged.\n"));
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}
    }

  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	/* At least one signal changed; recompute the cache and notify
	   the target once, then optionally show the results.  */
	signal_cache_update (-1);
	target_pass_signals (signal_pass);
	target_program_signals (signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info ((enum gdb_signal) signum);
	  }

	break;
      }
}
8879
8880 /* Complete the "handle" command. */
8881
8882 static void
8883 handle_completer (struct cmd_list_element *ignore,
8884 completion_tracker &tracker,
8885 const char *text, const char *word)
8886 {
8887 static const char * const keywords[] =
8888 {
8889 "all",
8890 "stop",
8891 "ignore",
8892 "print",
8893 "pass",
8894 "nostop",
8895 "noignore",
8896 "noprint",
8897 "nopass",
8898 NULL,
8899 };
8900
8901 signal_completer (ignore, tracker, text, word);
8902 complete_on_enum (tracker, keywords, word, word);
8903 }
8904
8905 enum gdb_signal
8906 gdb_signal_from_command (int num)
8907 {
8908 if (num >= 1 && num <= 15)
8909 return (enum gdb_signal) num;
8910 error (_("Only signals 1-15 are valid as numeric signals.\n\
8911 Use \"info signals\" for a list of symbolic signals."));
8912 }
8913
8914 /* Print current contents of the tables set by the handle command.
8915 It is possible we should just be printing signals actually used
8916 by the current target (but for things to work right when switching
8917 targets, all signals should be in the signal tables). */
8918
8919 static void
8920 info_signals_command (const char *signum_exp, int from_tty)
8921 {
8922 enum gdb_signal oursig;
8923
8924 sig_print_header ();
8925
8926 if (signum_exp)
8927 {
8928 /* First see if this is a symbol name. */
8929 oursig = gdb_signal_from_name (signum_exp);
8930 if (oursig == GDB_SIGNAL_UNKNOWN)
8931 {
8932 /* No, try numeric. */
8933 oursig =
8934 gdb_signal_from_command (parse_and_eval_long (signum_exp));
8935 }
8936 sig_print_info (oursig);
8937 return;
8938 }
8939
8940 printf_filtered ("\n");
8941 /* These ugly casts brought to you by the native VAX compiler. */
8942 for (oursig = GDB_SIGNAL_FIRST;
8943 (int) oursig < (int) GDB_SIGNAL_LAST;
8944 oursig = (enum gdb_signal) ((int) oursig + 1))
8945 {
8946 QUIT;
8947
8948 if (oursig != GDB_SIGNAL_UNKNOWN
8949 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
8950 sig_print_info (oursig);
8951 }
8952
8953 printf_filtered (_("\nUse the \"handle\" command "
8954 "to change these tables.\n"));
8955 }
8956
8957 /* The $_siginfo convenience variable is a bit special. We don't know
8958 for sure the type of the value until we actually have a chance to
8959 fetch the data. The type can change depending on gdbarch, so it is
8960 also dependent on which thread you have selected.
8961
8962 1. making $_siginfo be an internalvar that creates a new value on
8963 access.
8964
8965 2. making the value of $_siginfo be an lval_computed value. */
8966
8967 /* This function implements the lval_computed support for reading a
8968 $_siginfo value. */
8969
8970 static void
8971 siginfo_value_read (struct value *v)
8972 {
8973 LONGEST transferred;
8974
8975 /* If we can access registers, so can we access $_siginfo. Likewise
8976 vice versa. */
8977 validate_registers_access ();
8978
8979 transferred =
8980 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
8981 NULL,
8982 value_contents_all_raw (v),
8983 value_offset (v),
8984 TYPE_LENGTH (value_type (v)));
8985
8986 if (transferred != TYPE_LENGTH (value_type (v)))
8987 error (_("Unable to read siginfo"));
8988 }
8989
8990 /* This function implements the lval_computed support for writing a
8991 $_siginfo value. */
8992
8993 static void
8994 siginfo_value_write (struct value *v, struct value *fromval)
8995 {
8996 LONGEST transferred;
8997
8998 /* If we can access registers, so can we access $_siginfo. Likewise
8999 vice versa. */
9000 validate_registers_access ();
9001
9002 transferred = target_write (current_top_target (),
9003 TARGET_OBJECT_SIGNAL_INFO,
9004 NULL,
9005 value_contents_all_raw (fromval),
9006 value_offset (v),
9007 TYPE_LENGTH (value_type (fromval)));
9008
9009 if (transferred != TYPE_LENGTH (value_type (fromval)))
9010 error (_("Unable to write siginfo"));
9011 }
9012
/* lval_computed callbacks backing the $_siginfo value: reads fetch
   from the target, writes push back to it.  */
static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
9018
9019 /* Return a new value with the correct type for the siginfo object of
9020 the current thread using architecture GDBARCH. Return a void value
9021 if there's no object available. */
9022
9023 static struct value *
9024 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9025 void *ignore)
9026 {
9027 if (target_has_stack
9028 && inferior_ptid != null_ptid
9029 && gdbarch_get_siginfo_type_p (gdbarch))
9030 {
9031 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9032
9033 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
9034 }
9035
9036 return allocate_value (builtin_type (gdbarch)->builtin_void);
9037 }
9038
9039 \f
9040 /* infcall_suspend_state contains state about the program itself like its
9041 registers and any signal it received when it last stopped.
9042 This state must be restored regardless of how the inferior function call
9043 ends (either successfully, or after it hits a breakpoint or signal)
9044 if the program is to properly continue where it left off. */
9045
class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  This snapshots the
     thread's suspend state, a read-only copy of all registers, and
     (when the architecture supports it) the raw siginfo bytes.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_thread_suspend (tp->suspend),
      m_registers (new readonly_detached_regcache (*regcache))
  {
    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = TYPE_LENGTH (type);

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    /* Only record the gdbarch together with the data; a partial read
       above leaves both unset so restore () skips the write-back.  */
    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->suspend = m_thread_suspend;

    /* Only write the siginfo back if it was captured for this same
       architecture; the byte layout is gdbarch-specific.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
		      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution)
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
9128
9129 infcall_suspend_state_up
9130 save_infcall_suspend_state ()
9131 {
9132 struct thread_info *tp = inferior_thread ();
9133 struct regcache *regcache = get_current_regcache ();
9134 struct gdbarch *gdbarch = regcache->arch ();
9135
9136 infcall_suspend_state_up inf_state
9137 (new struct infcall_suspend_state (gdbarch, tp, regcache));
9138
9139 /* Having saved the current state, adjust the thread state, discarding
9140 any stop signal information. The stop signal is not useful when
9141 starting an inferior function call, and run_inferior_call will not use
9142 the signal due to its `proceed' call with GDB_SIGNAL_0. */
9143 tp->suspend.stop_signal = GDB_SIGNAL_0;
9144
9145 return inf_state;
9146 }
9147
9148 /* Restore inferior session state to INF_STATE. */
9149
9150 void
9151 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
9152 {
9153 struct thread_info *tp = inferior_thread ();
9154 struct regcache *regcache = get_current_regcache ();
9155 struct gdbarch *gdbarch = regcache->arch ();
9156
9157 inf_state->restore (gdbarch, tp, regcache);
9158 discard_infcall_suspend_state (inf_state);
9159 }
9160
/* Dispose of INF_STATE without restoring anything.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
9166
9167 readonly_detached_regcache *
9168 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
9169 {
9170 return inf_state->registers ();
9171 }
9172
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread stepping/breakpoint control at the time of the save.  */
  struct thread_control_state thread_control;
  /* Per-inferior control state at the time of the save.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id {};
};
9189
9190 /* Save all of the information associated with the inferior<==>gdb
9191 connection. */
9192
9193 infcall_control_state_up
9194 save_infcall_control_state ()
9195 {
9196 infcall_control_state_up inf_status (new struct infcall_control_state);
9197 struct thread_info *tp = inferior_thread ();
9198 struct inferior *inf = current_inferior ();
9199
9200 inf_status->thread_control = tp->control;
9201 inf_status->inferior_control = inf->control;
9202
9203 tp->control.step_resume_breakpoint = NULL;
9204 tp->control.exception_resume_breakpoint = NULL;
9205
9206 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9207 chain. If caller's caller is walking the chain, they'll be happier if we
9208 hand them back the original chain when restore_infcall_control_state is
9209 called. */
9210 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
9211
9212 /* Other fields: */
9213 inf_status->stop_stack_dummy = stop_stack_dummy;
9214 inf_status->stopped_by_random_signal = stopped_by_random_signal;
9215
9216 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
9217
9218 return inf_status;
9219 }
9220
9221 static void
9222 restore_selected_frame (const frame_id &fid)
9223 {
9224 frame_info *frame = frame_find_by_id (fid);
9225
9226 /* If inf_status->selected_frame_id is NULL, there was no previously
9227 selected frame. */
9228 if (frame == NULL)
9229 {
9230 warning (_("Unable to restore previously selected frame."));
9231 return;
9232 }
9233
9234 select_frame (frame);
9235 }
9236
/* Restore inferior session state to INF_STATUS, then free it.  The
   thread's current step-resume/exception-resume breakpoints and stop
   bpstat are discarded in favor of the saved ones.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Mark the thread's current resume breakpoints for deletion at the
     next stop; the saved state carries the ones to keep.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack)
    {
      /* The point of the try/catch is that if the stack is clobbered,
	 walking the stack might encounter a garbage pointer and
	 error() trying to dereference it.  */
      try
	{
	  restore_selected_frame (inf_status->selected_frame_id);
	}
      catch (const gdb_exception_error &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Unable to restore previously selected frame:\n");
	  /* Error in restoring the selected frame.  Select the
	     innermost frame.  */
	  select_frame (get_current_frame ());
	}
    }

  delete inf_status;
}
9283
9284 void
9285 discard_infcall_control_state (struct infcall_control_state *inf_status)
9286 {
9287 if (inf_status->thread_control.step_resume_breakpoint)
9288 inf_status->thread_control.step_resume_breakpoint->disposition
9289 = disp_del_at_next_stop;
9290
9291 if (inf_status->thread_control.exception_resume_breakpoint)
9292 inf_status->thread_control.exception_resume_breakpoint->disposition
9293 = disp_del_at_next_stop;
9294
9295 /* See save_infcall_control_state for info on stop_bpstat. */
9296 bpstat_clear (&inf_status->thread_control.stop_bpstat);
9297
9298 delete inf_status;
9299 }
9300 \f
9301 /* See infrun.h. */
9302
9303 void
9304 clear_exit_convenience_vars (void)
9305 {
9306 clear_internalvar (lookup_internalvar ("_exitsignal"));
9307 clear_internalvar (lookup_internalvar ("_exitcode"));
9308 }
9309 \f
9310
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current execution direction; forward by default.  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;
/* String values accepted by "set exec-direction".  */
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* Backing storage for the "set exec-direction" enum command.  */
static const char *exec_direction = exec_forward;
/* NULL-terminated list of valid "set exec-direction" values.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
9324
9325 static void
9326 set_exec_direction_func (const char *args, int from_tty,
9327 struct cmd_list_element *cmd)
9328 {
9329 if (target_can_execute_reverse)
9330 {
9331 if (!strcmp (exec_direction, exec_forward))
9332 execution_direction = EXEC_FORWARD;
9333 else if (!strcmp (exec_direction, exec_reverse))
9334 execution_direction = EXEC_REVERSE;
9335 }
9336 else
9337 {
9338 exec_direction = exec_forward;
9339 error (_("Target does not support this operation."));
9340 }
9341 }
9342
9343 static void
9344 show_exec_direction_func (struct ui_file *out, int from_tty,
9345 struct cmd_list_element *cmd, const char *value)
9346 {
9347 switch (execution_direction) {
9348 case EXEC_FORWARD:
9349 fprintf_filtered (out, _("Forward.\n"));
9350 break;
9351 case EXEC_REVERSE:
9352 fprintf_filtered (out, _("Reverse.\n"));
9353 break;
9354 default:
9355 internal_error (__FILE__, __LINE__,
9356 _("bogus execution_direction value: %d"),
9357 (int) execution_direction);
9358 }
9359 }
9360
/* Implementation of "show schedule-multiple".  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
			    "of all processes is %s.\n"), value);
}
9368
/* Implementation of `siginfo' variable.  The value is created lazily
   by siginfo_make_value each time $_siginfo is evaluated.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
9377
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* DATA is unused; just dispatch a regular inferior event.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
}
9386
9387 void _initialize_infrun ();
9388 void
9389 _initialize_infrun ()
9390 {
9391 struct cmd_list_element *c;
9392
9393 /* Register extra event sources in the event loop. */
9394 infrun_async_inferior_event_token
9395 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
9396
9397 add_info ("signals", info_signals_command, _("\
9398 What debugger does when program gets various signals.\n\
9399 Specify a signal as argument to print info on that signal only."));
9400 add_info_alias ("handle", "signals", 0);
9401
9402 c = add_com ("handle", class_run, handle_command, _("\
9403 Specify how to handle signals.\n\
9404 Usage: handle SIGNAL [ACTIONS]\n\
9405 Args are signals and actions to apply to those signals.\n\
9406 If no actions are specified, the current settings for the specified signals\n\
9407 will be displayed instead.\n\
9408 \n\
9409 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9410 from 1-15 are allowed for compatibility with old versions of GDB.\n\
9411 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9412 The special arg \"all\" is recognized to mean all signals except those\n\
9413 used by the debugger, typically SIGTRAP and SIGINT.\n\
9414 \n\
9415 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
9416 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9417 Stop means reenter debugger if this signal happens (implies print).\n\
9418 Print means print a message if this signal happens.\n\
9419 Pass means let program see this signal; otherwise program doesn't know.\n\
9420 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
9421 Pass and Stop may be combined.\n\
9422 \n\
9423 Multiple signals may be specified. Signal numbers and signal names\n\
9424 may be interspersed with actions, with the actions being performed for\n\
9425 all signals cumulatively specified."));
9426 set_cmd_completer (c, handle_completer);
9427
9428 if (!dbx_commands)
9429 stop_command = add_cmd ("stop", class_obscure,
9430 not_just_help_class_command, _("\
9431 There is no `stop' command, but you can set a hook on `stop'.\n\
9432 This allows you to set a list of commands to be run each time execution\n\
9433 of the program stops."), &cmdlist);
9434
9435 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
9436 Set inferior debugging."), _("\
9437 Show inferior debugging."), _("\
9438 When non-zero, inferior specific debugging is enabled."),
9439 NULL,
9440 show_debug_infrun,
9441 &setdebuglist, &showdebuglist);
9442
9443 add_setshow_boolean_cmd ("displaced", class_maintenance,
9444 &debug_displaced, _("\
9445 Set displaced stepping debugging."), _("\
9446 Show displaced stepping debugging."), _("\
9447 When non-zero, displaced stepping specific debugging is enabled."),
9448 NULL,
9449 show_debug_displaced,
9450 &setdebuglist, &showdebuglist);
9451
9452 add_setshow_boolean_cmd ("non-stop", no_class,
9453 &non_stop_1, _("\
9454 Set whether gdb controls the inferior in non-stop mode."), _("\
9455 Show whether gdb controls the inferior in non-stop mode."), _("\
9456 When debugging a multi-threaded program and this setting is\n\
9457 off (the default, also called all-stop mode), when one thread stops\n\
9458 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9459 all other threads in the program while you interact with the thread of\n\
9460 interest. When you continue or step a thread, you can allow the other\n\
9461 threads to run, or have them remain stopped, but while you inspect any\n\
9462 thread's state, all threads stop.\n\
9463 \n\
9464 In non-stop mode, when one thread stops, other threads can continue\n\
9465 to run freely. You'll be able to step each thread independently,\n\
9466 leave it stopped or free to run as needed."),
9467 set_non_stop,
9468 show_non_stop,
9469 &setlist,
9470 &showlist);
9471
9472 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
9473 {
9474 signal_stop[i] = 1;
9475 signal_print[i] = 1;
9476 signal_program[i] = 1;
9477 signal_catch[i] = 0;
9478 }
9479
9480 /* Signals caused by debugger's own actions should not be given to
9481 the program afterwards.
9482
9483 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9484 explicitly specifies that it should be delivered to the target
9485 program. Typically, that would occur when a user is debugging a
9486 target monitor on a simulator: the target monitor sets a
9487 breakpoint; the simulator encounters this breakpoint and halts
9488 the simulation handing control to GDB; GDB, noting that the stop
9489 address doesn't map to any known breakpoint, returns control back
9490 to the simulator; the simulator then delivers the hardware
9491 equivalent of a GDB_SIGNAL_TRAP to the program being
9492 debugged. */
9493 signal_program[GDB_SIGNAL_TRAP] = 0;
9494 signal_program[GDB_SIGNAL_INT] = 0;
9495
9496 /* Signals that are not errors should not normally enter the debugger. */
9497 signal_stop[GDB_SIGNAL_ALRM] = 0;
9498 signal_print[GDB_SIGNAL_ALRM] = 0;
9499 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9500 signal_print[GDB_SIGNAL_VTALRM] = 0;
9501 signal_stop[GDB_SIGNAL_PROF] = 0;
9502 signal_print[GDB_SIGNAL_PROF] = 0;
9503 signal_stop[GDB_SIGNAL_CHLD] = 0;
9504 signal_print[GDB_SIGNAL_CHLD] = 0;
9505 signal_stop[GDB_SIGNAL_IO] = 0;
9506 signal_print[GDB_SIGNAL_IO] = 0;
9507 signal_stop[GDB_SIGNAL_POLL] = 0;
9508 signal_print[GDB_SIGNAL_POLL] = 0;
9509 signal_stop[GDB_SIGNAL_URG] = 0;
9510 signal_print[GDB_SIGNAL_URG] = 0;
9511 signal_stop[GDB_SIGNAL_WINCH] = 0;
9512 signal_print[GDB_SIGNAL_WINCH] = 0;
9513 signal_stop[GDB_SIGNAL_PRIO] = 0;
9514 signal_print[GDB_SIGNAL_PRIO] = 0;
9515
9516 /* These signals are used internally by user-level thread
9517 implementations. (See signal(5) on Solaris.) Like the above
9518 signals, a healthy program receives and handles them as part of
9519 its normal operation. */
9520 signal_stop[GDB_SIGNAL_LWP] = 0;
9521 signal_print[GDB_SIGNAL_LWP] = 0;
9522 signal_stop[GDB_SIGNAL_WAITING] = 0;
9523 signal_print[GDB_SIGNAL_WAITING] = 0;
9524 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9525 signal_print[GDB_SIGNAL_CANCEL] = 0;
9526 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9527 signal_print[GDB_SIGNAL_LIBRT] = 0;
9528
9529 /* Update cached state. */
9530 signal_cache_update (-1);
9531
9532 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9533 &stop_on_solib_events, _("\
9534 Set stopping for shared library events."), _("\
9535 Show stopping for shared library events."), _("\
9536 If nonzero, gdb will give control to the user when the dynamic linker\n\
9537 notifies gdb of shared library events. The most common event of interest\n\
9538 to the user would be loading/unloading of a new library."),
9539 set_stop_on_solib_events,
9540 show_stop_on_solib_events,
9541 &setlist, &showlist);
9542
9543 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9544 follow_fork_mode_kind_names,
9545 &follow_fork_mode_string, _("\
9546 Set debugger response to a program call of fork or vfork."), _("\
9547 Show debugger response to a program call of fork or vfork."), _("\
9548 A fork or vfork creates a new process. follow-fork-mode can be:\n\
9549 parent - the original process is debugged after a fork\n\
9550 child - the new process is debugged after a fork\n\
9551 The unfollowed process will continue to run.\n\
9552 By default, the debugger will follow the parent process."),
9553 NULL,
9554 show_follow_fork_mode_string,
9555 &setlist, &showlist);
9556
9557 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9558 follow_exec_mode_names,
9559 &follow_exec_mode_string, _("\
9560 Set debugger response to a program call of exec."), _("\
9561 Show debugger response to a program call of exec."), _("\
9562 An exec call replaces the program image of a process.\n\
9563 \n\
9564 follow-exec-mode can be:\n\
9565 \n\
9566 new - the debugger creates a new inferior and rebinds the process\n\
9567 to this new inferior. The program the process was running before\n\
9568 the exec call can be restarted afterwards by restarting the original\n\
9569 inferior.\n\
9570 \n\
9571 same - the debugger keeps the process bound to the same inferior.\n\
9572 The new executable image replaces the previous executable loaded in\n\
9573 the inferior. Restarting the inferior after the exec call restarts\n\
9574 the executable the process was running after the exec call.\n\
9575 \n\
9576 By default, the debugger will use the same inferior."),
9577 NULL,
9578 show_follow_exec_mode_string,
9579 &setlist, &showlist);
9580
9581 add_setshow_enum_cmd ("scheduler-locking", class_run,
9582 scheduler_enums, &scheduler_mode, _("\
9583 Set mode for locking scheduler during execution."), _("\
9584 Show mode for locking scheduler during execution."), _("\
9585 off == no locking (threads may preempt at any time)\n\
9586 on == full locking (no thread except the current thread may run)\n\
9587 This applies to both normal execution and replay mode.\n\
9588 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9589 In this mode, other threads may run during other commands.\n\
9590 This applies to both normal execution and replay mode.\n\
9591 replay == scheduler locked in replay mode and unlocked during normal execution."),
9592 set_schedlock_func, /* traps on target vector */
9593 show_scheduler_mode,
9594 &setlist, &showlist);
9595
9596 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9597 Set mode for resuming threads of all processes."), _("\
9598 Show mode for resuming threads of all processes."), _("\
9599 When on, execution commands (such as 'continue' or 'next') resume all\n\
9600 threads of all processes. When off (which is the default), execution\n\
9601 commands only resume the threads of the current process. The set of\n\
9602 threads that are resumed is further refined by the scheduler-locking\n\
9603 mode (see help set scheduler-locking)."),
9604 NULL,
9605 show_schedule_multiple,
9606 &setlist, &showlist);
9607
9608 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9609 Set mode of the step operation."), _("\
9610 Show mode of the step operation."), _("\
9611 When set, doing a step over a function without debug line information\n\
9612 will stop at the first instruction of that function. Otherwise, the\n\
9613 function is skipped and the step command stops at a different source line."),
9614 NULL,
9615 show_step_stop_if_no_debug,
9616 &setlist, &showlist);
9617
9618 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9619 &can_use_displaced_stepping, _("\
9620 Set debugger's willingness to use displaced stepping."), _("\
9621 Show debugger's willingness to use displaced stepping."), _("\
9622 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9623 supported by the target architecture. If off, gdb will not use displaced\n\
9624 stepping to step over breakpoints, even if such is supported by the target\n\
9625 architecture. If auto (which is the default), gdb will use displaced stepping\n\
9626 if the target architecture supports it and non-stop mode is active, but will not\n\
9627 use it in all-stop mode (see help set non-stop)."),
9628 NULL,
9629 show_can_use_displaced_stepping,
9630 &setlist, &showlist);
9631
9632 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9633 &exec_direction, _("Set direction of execution.\n\
9634 Options are 'forward' or 'reverse'."),
9635 _("Show direction of execution (forward/reverse)."),
9636 _("Tells gdb whether to execute forward or backward."),
9637 set_exec_direction_func, show_exec_direction_func,
9638 &setlist, &showlist);
9639
9640 /* Set/show detach-on-fork: user-settable mode. */
9641
9642 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9643 Set whether gdb will detach the child of a fork."), _("\
9644 Show whether gdb will detach the child of a fork."), _("\
9645 Tells gdb whether to detach the child of a fork."),
9646 NULL, NULL, &setlist, &showlist);
9647
9648 /* Set/show disable address space randomization mode. */
9649
9650 add_setshow_boolean_cmd ("disable-randomization", class_support,
9651 &disable_randomization, _("\
9652 Set disabling of debuggee's virtual address space randomization."), _("\
9653 Show disabling of debuggee's virtual address space randomization."), _("\
9654 When this mode is on (which is the default), randomization of the virtual\n\
9655 address space is disabled. Standalone programs run with the randomization\n\
9656 enabled by default on some platforms."),
9657 &set_disable_randomization,
9658 &show_disable_randomization,
9659 &setlist, &showlist);
9660
9661 /* ptid initializations */
9662 inferior_ptid = null_ptid;
9663 target_last_wait_ptid = minus_one_ptid;
9664
9665 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
9666 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
9667 gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
9668 gdb::observers::inferior_exit.attach (infrun_inferior_exit);
9669
9670 /* Explicitly create without lookup, since that tries to create a
9671 value with a void typed value, and when we get here, gdbarch
9672 isn't initialized yet. At this point, we're quite sure there
9673 isn't another convenience variable of the same name. */
9674 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
9675
9676 add_setshow_boolean_cmd ("observer", no_class,
9677 &observer_mode_1, _("\
9678 Set whether gdb controls the inferior in observer mode."), _("\
9679 Show whether gdb controls the inferior in observer mode."), _("\
9680 In observer mode, GDB can get data from the inferior, but not\n\
9681 affect its execution. Registers and memory may not be changed,\n\
9682 breakpoints may not be set, and the program cannot be interrupted\n\
9683 or signalled."),
9684 set_observer_mode,
9685 show_observer_mode,
9686 &setlist,
9687 &showlist);
9688 }