Use all_non_exited_inferiors in infrun.c
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2020 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdbcore.h"
29 #include "gdbcmd.h"
30 #include "target.h"
31 #include "gdbthread.h"
32 #include "annotate.h"
33 #include "symfile.h"
34 #include "top.h"
35 #include "inf-loop.h"
36 #include "regcache.h"
37 #include "value.h"
38 #include "observable.h"
39 #include "language.h"
40 #include "solib.h"
41 #include "main.h"
42 #include "block.h"
43 #include "mi/mi-common.h"
44 #include "event-top.h"
45 #include "record.h"
46 #include "record-full.h"
47 #include "inline-frame.h"
48 #include "jit.h"
49 #include "tracepoint.h"
50 #include "skip.h"
51 #include "probe.h"
52 #include "objfiles.h"
53 #include "completer.h"
54 #include "target-descriptions.h"
55 #include "target-dcache.h"
56 #include "terminal.h"
57 #include "solist.h"
58 #include "event-loop.h"
59 #include "thread-fsm.h"
60 #include "gdbsupport/enum-flags.h"
61 #include "progspace-and-thread.h"
62 #include "gdbsupport/gdb_optional.h"
63 #include "arch-utils.h"
64 #include "gdbsupport/scope-exit.h"
65 #include "gdbsupport/forward-scope-exit.h"
66
/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static int follow_fork (void);

static int follow_fork_inferior (int follow_child, int detach_fork);

static void follow_inferior_reset_breakpoints (void);

static int currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled"; thereafter
   holds 0 or 1.  infrun_async below only acts when this changes.  */
static int infrun_is_async = -1;
98
99 /* See infrun.h. */
100
101 void
102 infrun_async (int enable)
103 {
104 if (infrun_is_async != enable)
105 {
106 infrun_is_async = enable;
107
108 if (debug_infrun)
109 fprintf_unfiltered (gdb_stdlog,
110 "infrun: infrun_async(%d)\n",
111 enable);
112
113 if (enable)
114 mark_async_event_handler (infrun_async_inferior_event_token);
115 else
116 clear_async_event_handler (infrun_async_inferior_event_token);
117 }
118 }
119
120 /* See infrun.h. */
121
void
mark_infrun_async_event_handler (void)
{
  /* Flag infrun's shared event token so the event loop will invoke
     infrun's handler to process pending inferior events.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
127
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  (User-visible setting; the corresponding
   "set"/"show" commands are presumably registered elsewhere in this
   file -- confirm against _initialize_infrun.)  */
bool step_stop_if_no_debug = false;
/* "show" callback for the step_stop_if_no_debug setting above; VALUE
   is the preformatted value string supplied by the cmd machinery.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}
138
/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  Read by follow_fork below.  */

static bool detach_fork = true;
151
152 bool debug_displaced = false;
153 static void
154 show_debug_displaced (struct ui_file *file, int from_tty,
155 struct cmd_list_element *c, const char *value)
156 {
157 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
158 }
159
/* Verbosity level for infrun debug output.  Zero means off.
   (Presumably toggled by a "set debug infrun" command registered
   elsewhere in this file.)  */
unsigned int debug_infrun = 0;

/* "show" callback for the infrun debug setting.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}
167
168
169 /* Support for disabling address space randomization. */
170
171 bool disable_randomization = true;
172
173 static void
174 show_disable_randomization (struct ui_file *file, int from_tty,
175 struct cmd_list_element *c, const char *value)
176 {
177 if (target_supports_disable_randomization ())
178 fprintf_filtered (file,
179 _("Disabling randomization of debuggee's "
180 "virtual address space is %s.\n"),
181 value);
182 else
183 fputs_filtered (_("Disabling randomization of debuggee's "
184 "virtual address space is unsupported on\n"
185 "this platform.\n"), file);
186 }
187
/* "set" callback for "set disable-randomization": reject the setting
   outright on targets that cannot honor it.  */
static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
197
/* User interface for non-stop mode.  NON_STOP is the effective
   setting; NON_STOP_1 is the staging variable written by the "set"
   command machinery and committed by set_non_stop below.  */

bool non_stop = false;
static bool non_stop_1 = false;
202
/* "set non-stop" callback.  The mode cannot change while the target
   is live; in that case roll the staging variable back so the set
   machinery's view stays consistent with the effective value.  */
static void
set_non_stop (const char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Commit the staged value.  */
  non_stop = non_stop_1;
}
215
/* "show non-stop" callback.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}
224
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  OBSERVER_MODE is the
   effective setting; OBSERVER_MODE_1 is the staging variable used by
   the set/show machinery (see set_observer_mode).  */

bool observer_mode = false;
static bool observer_mode_1 = false;
231
232 static void
233 set_observer_mode (const char *args, int from_tty,
234 struct cmd_list_element *c)
235 {
236 if (target_has_execution)
237 {
238 observer_mode_1 = observer_mode;
239 error (_("Cannot change this setting while the inferior is running."));
240 }
241
242 observer_mode = observer_mode_1;
243
244 may_write_registers = !observer_mode;
245 may_write_memory = !observer_mode;
246 may_insert_breakpoints = !observer_mode;
247 may_insert_tracepoints = !observer_mode;
248 /* We can insert fast tracepoints in or out of observer mode,
249 but enable them if we're going into this mode. */
250 if (observer_mode)
251 may_insert_fast_tracepoints = true;
252 may_stop = !observer_mode;
253 update_target_permissions ();
254
255 /* Going *into* observer mode we must force non-stop, then
256 going out we leave it that way. */
257 if (observer_mode)
258 {
259 pagination_enabled = 0;
260 non_stop = non_stop_1 = true;
261 }
262
263 if (from_tty)
264 printf_filtered (_("Observer mode is now %s.\n"),
265 (observer_mode ? "on" : "off"));
266 }
267
/* "show observer" callback.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}
274
275 /* This updates the value of observer mode based on changes in
276 permissions. Note that we are deliberately ignoring the values of
277 may-write-registers and may-write-memory, since the user may have
278 reason to enable these during a session, for instance to turn on a
279 debugging-related global. */
280
281 void
282 update_observer_mode (void)
283 {
284 bool newval = (!may_insert_breakpoints
285 && !may_insert_tracepoints
286 && may_insert_fast_tracepoints
287 && !may_stop
288 && non_stop);
289
290 /* Let the user know if things change. */
291 if (newval != observer_mode)
292 printf_filtered (_("Observer mode is now %s.\n"),
293 (newval ? "on" : "off"));
294
295 observer_mode = observer_mode_1 = newval;
296 }
297
/* Tables of how to react to signals; the user sets them.  Each is
   indexed by gdb_signal number; a nonzero entry enables the
   corresponding behavior for that signal.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];
313
/* For each signal SIGNUM in [0, NSIGS) whose entry in SIGS is
   nonzero, set FLAGS[SIGNUM] to 1.  Wrapped in do/while (0) so the
   macro behaves as a single statement.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* Inverse of SET_SIGS: clear FLAGS[SIGNUM] for each signal marked in
   SIGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
329
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}
338
/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  Exported (non
   static); see also set/show_stop_on_solib_events below.  */
int stop_on_solib_events;
350
/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}
360
/* "show stop-on-solib-events" callback.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}
368
/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the pid/waitstatus of the last event
   returned by target_wait()/deprecated_target_wait_hook().  This
   information is returned by get_last_target_status().  */
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

/* Possible values of "set follow-fork-mode": follow the child or the
   parent (the default -- see follow_fork_mode_string below) after a
   fork.  */
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

/* NULL-terminated list of the values above, presumably for use by an
   enum-valued "set" command registered elsewhere in this file.  */
static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};
389
/* Current "set follow-fork-mode" setting; always one of the entries
   of follow_fork_mode_kind_names above (compared by pointer identity
   in follow_fork).  Defaults to following the parent.  */
static const char *follow_fork_mode_string = follow_fork_mode_parent;

/* "show follow-fork-mode" callback.  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}
400 \f
401
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.

   FOLLOW_CHILD nonzero means stay with the child; zero means stay
   with the parent.  DETACH_FORK nonzero means detach from whichever
   branch we are not following.  Returns nonzero if the inferior
   should NOT be resumed (mirroring target_follow_fork's convention;
   see the caller, follow_fork).  */

static int
follow_fork_inferior (int follow_child, int detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* We're staying with the parent.  Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("[Detaching after %s from child %s]\n"),
				has_vforked ? "vfork" : "fork",
				target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Staying with the parent but also keeping the child: add
	     the child as a second inferior under GDB's control.  */
	  struct inferior *parent_inf, *child_inf;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  /* Restore the parent's pspace/thread selection when this
	     scope exits.  */
	  scoped_restore_current_pspace_and_thread restore_pspace_thread;

	  inferior_ptid = child_ptid;
	  add_thread_silent (inferior_ptid);
	  set_current_inferior (child_inf);
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = new program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (e.g., solib-svr4) learn
		 about this new process, relocate the cloned exec, pull
		 in shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }
	}

      if (has_vforked)
	{
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  fprintf_filtered (gdb_stdlog,
			    _("[Attaching after %s %s to child %s]\n"),
			    parent_pid.c_str (),
			    has_vforked ? "vfork" : "fork",
			    child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	{
	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (parent_ptid.pid ());

	      target_terminal::ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("[Detaching after fork from "
				  "parent %s]\n"),
				target_pid_to_str (process_ptid).c_str ());
	    }

	  target_detach (parent_inf, 0);
	}

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = child_ptid;
      add_thread_silent (inferior_ptid);
      set_current_inferior (child_inf);

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = new program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (e.g., solib-svr4) learn
	     about this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}
    }

  return target_follow_fork (follow_child, detach_fork);
}
643
/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume (e.g., the user switched
   threads before following, or follow_fork_inferior refused).  */

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  int should_resume = 1;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
	 do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
	  && wait_status.kind != TARGET_WAITKIND_VFORKED)
	return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
	 reported.  */
      if (wait_ptid != minus_one_ptid
	  && inferior_ptid != wait_ptid)
	{
	  /* We did.  Switch back to WAIT_PTID thread, to tell the
	     target to follow it (in either direction).  We'll
	     afterwards refuse to resume, and inform the user what
	     happened.  */
	  thread_info *wait_thread
	    = find_thread_ptid (wait_ptid);
	  switch_to_thread (wait_thread);
	  should_resume = 0;
	}
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;

	/* If the user did a next/step, etc, over a fork call,
	   preserve the stepping state in the fork child.  The
	   clones start out disabled; see
	   follow_inferior_reset_breakpoints.  */
	if (follow_child && should_resume)
	  {
	    step_resume_breakpoint = clone_momentary_breakpoint
	      (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    thread_fsm = tp->thread_fsm;

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	    tp->thread_fsm = NULL;
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.value.related_pid;

	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = 0;
	  }
	else
	  {
	    /* This pending follow fork event is now handled, one way
	       or another.  The previous selected thread may be gone
	       from the lists by now, but if it is still around, need
	       to clear the pending follow request.  */
	    tp = find_thread_ptid (parent);
	    if (tp)
	      tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

	    /* This makes sure we don't try to apply the "Switched
	       over from WAIT_PID" logic above.  */
	    nullify_last_target_wait_ptid ();

	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		thread_info *child_thr = find_thread_ptid (child);
		switch_to_thread (child_thr);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp = inferior_thread ();
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->thread_fsm = thread_fsm;
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but, the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child."));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
		      "Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}
809
810 static void
811 follow_inferior_reset_breakpoints (void)
812 {
813 struct thread_info *tp = inferior_thread ();
814
815 /* Was there a step_resume breakpoint? (There was if the user
816 did a "next" at the fork() call.) If so, explicitly reset its
817 thread number. Cloned step_resume breakpoints are disabled on
818 creation, so enable it here now that it is associated with the
819 correct thread.
820
821 step_resumes are a form of bp that are made to be per-thread.
822 Since we created the step_resume bp when the parent process
823 was being debugged, and now are switching to the child process,
824 from the breakpoint package's viewpoint, that's a switch of
825 "threads". We must update the bp's notion of which thread
826 it is for, or it'll be ignored when it triggers. */
827
828 if (tp->control.step_resume_breakpoint)
829 {
830 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
831 tp->control.step_resume_breakpoint->loc->enabled = 1;
832 }
833
834 /* Treat exception_resume breakpoints like step_resume breakpoints. */
835 if (tp->control.exception_resume_breakpoint)
836 {
837 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
838 tp->control.exception_resume_breakpoint->loc->enabled = 1;
839 }
840
841 /* Reinsert all breakpoints in the child. The user may have set
842 breakpoints after catching the fork, in which case those
843 were never set in the child, but only in the parent. This makes
844 sure the inserted breakpoints match the breakpoint list. */
845
846 breakpoint_re_set ();
847 insert_breakpoints ();
848 }
849
850 /* The child has exited or execed: resume threads of the parent the
851 user wanted to be executing. */
852
853 static int
854 proceed_after_vfork_done (struct thread_info *thread,
855 void *arg)
856 {
857 int pid = * (int *) arg;
858
859 if (thread->ptid.pid () == pid
860 && thread->state == THREAD_RUNNING
861 && !thread->executing
862 && !thread->stop_requested
863 && thread->suspend.stop_signal == GDB_SIGNAL_0)
864 {
865 if (debug_infrun)
866 fprintf_unfiltered (gdb_stdlog,
867 "infrun: resuming vfork parent thread %s\n",
868 target_pid_to_str (thread->ptid).c_str ());
869
870 switch_to_thread (thread);
871 clear_proceed_status (0);
872 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
873 }
874
875 return 0;
876 }
877
/* Save/restore inferior_ptid, current program space and current
   inferior.  Only use this if the current context points at an exited
   inferior (and therefore there's no current thread to save).  */
class scoped_restore_exited_inferior
{
public:
  scoped_restore_exited_inferior ()
    : m_saved_ptid (&inferior_ptid)
  {}

private:
  /* NOTE: member declaration order matters here -- members are saved
     in this order on construction and restored in the reverse order
     on destruction.  */
  scoped_restore_tmpl<ptid_t> m_saved_ptid;
  scoped_restore_current_program_space m_pspace;
  scoped_restore_current_inferior m_inferior;
};
893
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is nonzero for an exec
   event, zero for an exit event.  A no-op unless the current
   inferior is a vfork child with a recorded vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Pid of the parent to resume at the end, or -1 if none.  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct thread_info *tp;
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = 0;

	  gdb::optional<scoped_restore_exited_inferior>
	    maybe_restore_inferior;
	  gdb::optional<scoped_restore_current_pspace_and_thread>
	    maybe_restore_thread;

	  /* If we're handling a child exit, then inferior_ptid points
	     at the inferior's pid, not to a thread.  */
	  if (!exec)
	    maybe_restore_inferior.emplace ();
	  else
	    maybe_restore_thread.emplace ();

	  /* We're letting loose of the parent.  */
	  tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent->pid;
	}
      else
	{
	  struct program_space *pspace;

	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to null_ptid while running clone_program_space, so
	     that clone_program_space doesn't want to read the
	     selected frame of a dead process.  */
	  scoped_restore restore_ptid
	    = make_scoped_restore (&inferior_ptid, null_ptid);

	  /* This inferior is dead, so avoid giving the breakpoints
	     module the option to write through to it (cloning a
	     program space resets breakpoints).  */
	  inf->aspace = NULL;
	  inf->pspace = NULL;
	  pspace = new program_space (maybe_new_address_space ());
	  set_current_program_space (pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (pspace, vfork_parent->pspace);
	  inf->pspace = pspace;
	  inf->aspace = pspace->aspace;

	  resume_parent = vfork_parent->pid;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resuming vfork parent process %d\n",
				resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);
	}
    }
}
1047
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

/* The current follow-exec mode; always points at one of the strings
   above.  Defaults to "same": keep using the same inferior across an
   exec.  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Implementation of the "show follow-exec-mode" command.  */

static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}
1066
/* Handle an exec event reported for PTID: switch GDB's state (threads,
   breakpoints, symbols, program spaces) over to the newly exec'd
   program EXEC_FILE_TARGET.  EXEC_FILE_TARGET is assumed to be
   non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (process_ptid).c_str (),
		     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      set_current_inferior (inf);
      set_current_program_space (inf->pspace);
      add_thread (ptid);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1239
/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *step_over_queue_head;
1248
/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
{
  /* Step over a breakpoint.  */
  STEP_OVER_BREAKPOINT = 1,

  /* Step past a non-continuable watchpoint, in order to let the
     instruction execute so we can evaluate the watchpoint
     expression.  */
  STEP_OVER_WATCHPOINT = 2
};
/* step_over_what is a flags type holding a mask of the values
   above.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1262
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The global number of the thread doing the step-over; -1 if no
     step-over is in progress.  */
  int thread;
};
1281
/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc.  exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1307
1308 /* Record the address of the breakpoint/instruction we're currently
1309 stepping over.
1310 N.B. We record the aspace and address now, instead of say just the thread,
1311 because when we need the info later the thread may be running. */
1312
1313 static void
1314 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1315 int nonsteppable_watchpoint_p,
1316 int thread)
1317 {
1318 step_over_info.aspace = aspace;
1319 step_over_info.address = address;
1320 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1321 step_over_info.thread = thread;
1322 }
1323
1324 /* Called when we're not longer stepping over a breakpoint / an
1325 instruction, so all breakpoints are free to be (re)inserted. */
1326
1327 static void
1328 clear_step_over_info (void)
1329 {
1330 if (debug_infrun)
1331 fprintf_unfiltered (gdb_stdlog,
1332 "infrun: clear_step_over_info\n");
1333 step_over_info.aspace = NULL;
1334 step_over_info.address = 0;
1335 step_over_info.nonsteppable_watchpoint_p = 0;
1336 step_over_info.thread = -1;
1337 }
1338
1339 /* See infrun.h. */
1340
1341 int
1342 stepping_past_instruction_at (struct address_space *aspace,
1343 CORE_ADDR address)
1344 {
1345 return (step_over_info.aspace != NULL
1346 && breakpoint_address_match (aspace, address,
1347 step_over_info.aspace,
1348 step_over_info.address));
1349 }
1350
1351 /* See infrun.h. */
1352
1353 int
1354 thread_is_stepping_over_breakpoint (int thread)
1355 {
1356 return (step_over_info.thread != -1
1357 && thread == step_over_info.thread);
1358 }
1359
/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Set while stepping past an instruction that triggered a
     non-continuable watchpoint; watchpoints are not inserted then.  */
  return step_over_info.nonsteppable_watchpoint_p;
}
1367
1368 /* Returns true if step-over info is valid. */
1369
1370 static int
1371 step_over_info_valid_p (void)
1372 {
1373 return (step_over_info.aspace != NULL
1374 || stepping_past_nonsteppable_watchpoint ());
1375 }
1376
1377 \f
1378 /* Displaced stepping. */
1379
1380 /* In non-stop debugging mode, we must take special care to manage
1381 breakpoints properly; in particular, the traditional strategy for
1382 stepping a thread past a breakpoint it has hit is unsuitable.
1383 'Displaced stepping' is a tactic for stepping one thread past a
1384 breakpoint it has hit while ensuring that other threads running
1385 concurrently will hit the breakpoint as they should.
1386
1387 The traditional way to step a thread T off a breakpoint in a
1388 multi-threaded program in all-stop mode is as follows:
1389
1390 a0) Initially, all threads are stopped, and breakpoints are not
1391 inserted.
1392 a1) We single-step T, leaving breakpoints uninserted.
1393 a2) We insert breakpoints, and resume all threads.
1394
1395 In non-stop debugging, however, this strategy is unsuitable: we
1396 don't want to have to stop all threads in the system in order to
1397 continue or step T past a breakpoint. Instead, we use displaced
1398 stepping:
1399
1400 n0) Initially, T is stopped, other threads are running, and
1401 breakpoints are inserted.
1402 n1) We copy the instruction "under" the breakpoint to a separate
1403 location, outside the main code stream, making any adjustments
1404 to the instruction, register, and memory state as directed by
1405 T's architecture.
1406 n2) We single-step T over the instruction at its new location.
1407 n3) We adjust the resulting register and memory state as directed
1408 by T's architecture. This includes resetting T's PC to point
1409 back into the main instruction stream.
1410 n4) We resume T.
1411
1412 This approach depends on the following gdbarch methods:
1413
1414 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1415 indicate where to copy the instruction, and how much space must
1416 be reserved there. We use these in step n1.
1417
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1419 address, and makes any necessary adjustments to the instruction,
1420 register contents, and memory. We use this in step n1.
1421
1422 - gdbarch_displaced_step_fixup adjusts registers and memory after
1423 we have successfully single-stepped the instruction, to yield the
1424 same effect the instruction would have had if we had executed it
1425 at its original address. We use this in step n3.
1426
1427 The gdbarch_displaced_step_copy_insn and
1428 gdbarch_displaced_step_fixup functions must be written so that
1429 copying an instruction with gdbarch_displaced_step_copy_insn,
1430 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1432 thread's memory and registers as stepping the instruction in place
1433 would have. Exactly which responsibilities fall to the copy and
1434 which fall to the fixup is up to the author of those functions.
1435
1436 See the comments in gdbarch.sh for details.
1437
1438 Note that displaced stepping and software single-step cannot
1439 currently be used in combination, although with some care I think
1440 they could be made to. Software single-step works by placing
1441 breakpoints on all possible subsequent instructions; if the
1442 displaced instruction is a PC-relative jump, those breakpoints
1443 could fall in very strange places --- on pages that aren't
1444 executable, or at addresses that are not proper instruction
1445 boundaries. (We do generally let other threads run while we wait
1446 to hit the software single-step breakpoint, and they might
1447 encounter such a corrupted instruction.) One way to work around
1448 this would be to have gdbarch_displaced_step_copy_insn fully
1449 simulate the effect of PC-relative instructions (and return NULL)
1450 on architectures that use software single-stepping.
1451
1452 In non-stop mode, we can have independent and simultaneous step
1453 requests, so more than one thread may need to simultaneously step
1454 over a breakpoint. The current implementation assumes there is
1455 only one scratch space per process. In this case, we have to
1456 serialize access to the scratch space. If thread A wants to step
1457 over a breakpoint, but we are currently waiting for some other
1458 thread to complete a displaced step, we leave thread A stopped and
1459 place it in the displaced_step_request_queue. Whenever a displaced
1460 step finishes, we pick the next thread in the queue and start a new
1461 displaced step operation on it. See displaced_step_prepare and
1462 displaced_step_fixup for details. */
1463
/* Default destructor for displaced_step_closure, defined out of line
   so the class has a single home for it.  */

displaced_step_closure::~displaced_step_closure () = default;
1467
/* Get the displaced stepping state of inferior INF.  */

static displaced_step_inferior_state *
get_displaced_stepping_state (inferior *inf)
{
  return &inf->displaced_step_state;
}
1475
1476 /* Returns true if any inferior has a thread doing a displaced
1477 step. */
1478
1479 static bool
1480 displaced_step_in_progress_any_inferior ()
1481 {
1482 for (inferior *i : all_inferiors ())
1483 {
1484 if (i->displaced_step_state.step_thread != nullptr)
1485 return true;
1486 }
1487
1488 return false;
1489 }
1490
1491 /* Return true if thread represented by PTID is doing a displaced
1492 step. */
1493
1494 static int
1495 displaced_step_in_progress_thread (thread_info *thread)
1496 {
1497 gdb_assert (thread != NULL);
1498
1499 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
1500 }
1501
1502 /* Return true if process PID has a thread doing a displaced step. */
1503
1504 static int
1505 displaced_step_in_progress (inferior *inf)
1506 {
1507 return get_displaced_stepping_state (inf)->step_thread != nullptr;
1508 }
1509
1510 /* If inferior is in displaced stepping, and ADDR equals to starting address
1511 of copy area, return corresponding displaced_step_closure. Otherwise,
1512 return NULL. */
1513
1514 struct displaced_step_closure*
1515 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1516 {
1517 displaced_step_inferior_state *displaced
1518 = get_displaced_stepping_state (current_inferior ());
1519
1520 /* If checking the mode of displaced instruction in copy area. */
1521 if (displaced->step_thread != nullptr
1522 && displaced->step_copy == addr)
1523 return displaced->step_closure;
1524
1525 return NULL;
1526 }
1527
/* Discard INF's displaced-stepping state.  NOTE(review): the name
   suggests this runs when an inferior exits; the registration is not
   visible in this chunk -- confirm against the observer setup.  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
}
1533
/* The "set/show displaced-stepping" setting.  If ON, and the
   architecture supports it, GDB will use displaced stepping to step
   over breakpoints.  If OFF, or if the architecture doesn't support
   it, GDB will instead use the traditional hold-and-step approach.
   If AUTO (which is the default), GDB will decide which technique to
   use to step over breakpoints depending on which of all-stop or
   non-stop mode is active --- displaced stepping in non-stop mode;
   hold-and-step in all-stop mode.  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1543
/* Implementation of the "show displaced-stepping" command.  When the
   setting is "auto", also print the value currently in effect, which
   depends on whether the target is non-stop.  */

static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    fprintf_filtered (file,
		      _("Debugger's willingness to use displaced stepping "
			"to step over breakpoints is %s (currently %s).\n"),
		      value, target_is_non_stop_p () ? "on" : "off");
  else
    fprintf_filtered (file,
		      _("Debugger's willingness to use displaced stepping "
			"to step over breakpoints is %s.\n"), value);
}
1559
1560 /* Return non-zero if displaced stepping can/should be used to step
1561 over breakpoints of thread TP. */
1562
1563 static int
1564 use_displaced_stepping (struct thread_info *tp)
1565 {
1566 struct regcache *regcache = get_thread_regcache (tp);
1567 struct gdbarch *gdbarch = regcache->arch ();
1568 displaced_step_inferior_state *displaced_state
1569 = get_displaced_stepping_state (tp->inf);
1570
1571 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1572 && target_is_non_stop_p ())
1573 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1574 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1575 && find_record_target () == NULL
1576 && !displaced_state->failed_before);
1577 }
1578
1579 /* Clean out any stray displaced stepping state. */
1580 static void
1581 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1582 {
1583 /* Indicate that there is no cleanup pending. */
1584 displaced->step_thread = nullptr;
1585
1586 delete displaced->step_closure;
1587 displaced->step_closure = NULL;
1588 }
1589
/* A scope-exit type that runs displaced_step_clear on destruction,
   unless released.  */
using displaced_step_clear_cleanup
  = FORWARD_SCOPE_EXIT (displaced_step_clear);
1593
1594 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1595 void
1596 displaced_step_dump_bytes (struct ui_file *file,
1597 const gdb_byte *buf,
1598 size_t len)
1599 {
1600 int i;
1601
1602 for (i = 0; i < len; i++)
1603 fprintf_unfiltered (file, "%02x ", buf[i]);
1604 fputs_unfiltered ("\n", file);
1605 }
1606
/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns 1 if preparing was successful -- this thread is going to be
   stepped now; 0 if displaced stepping this thread got queued; or -1
   if this instruction can't be displaced stepped.  */

static int
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();
  CORE_ADDR original, copy;
  ULONGEST len;
  struct displaced_step_closure *closure;
  int status;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We have to displaced step one thread at a time, as we only have
     access to a single scratch space per inferior.  */

  displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (tp->inf);

  if (displaced->step_thread != nullptr)
    {
      /* Already waiting for a displaced step to finish.  Defer this
	 request and place in queue.  */

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: deferring step of %s\n",
			    target_pid_to_str (tp->ptid).c_str ());

      thread_step_over_chain_enqueue (tp);
      return 0;
    }
  else
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: stepping %s now\n",
			    target_pid_to_str (tp->ptid).c_str ());
    }

  displaced_step_clear (displaced);

  /* The work below needs TP to be the current thread.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  if (breakpoint_in_range_p (aspace, copy, len))
    {
      /* There's a breakpoint set in the scratch pad location range
	 (which is usually around the entry point).  We'd either
	 install it before resuming, which would overwrite/corrupt the
	 scratch pad, or if it was already inserted, this displaced
	 step would overwrite it.  The latter is OK in the sense that
	 we already assume that no thread is going to execute the code
	 in the scratch pad range (after initial startup) anyway, but
	 the former is unacceptable.  Simply punt and fallback to
	 stepping over this breakpoint in-line.  */
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "displaced: breakpoint set in scratch pad. "
			      "Stepping over breakpoint in-line instead.\n");
	}

      return -1;
    }

  /* Save the original contents of the copy area.  */
  displaced->step_saved_copy.resize (len);
  status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
  if (status != 0)
    throw_error (MEMORY_ERROR,
		 _("Error accessing memory address %s (%s) for "
		   "displaced-stepping scratch space."),
		 paddress (gdbarch, copy), safe_strerror (status));
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
			  paddress (gdbarch, copy));
      displaced_step_dump_bytes (gdb_stdlog,
				 displaced->step_saved_copy.data (),
				 len);
    };

  closure = gdbarch_displaced_step_copy_insn (gdbarch,
					      original, copy, regcache);
  if (closure == NULL)
    {
      /* The architecture doesn't know how or want to displaced step
	 this instruction or instruction sequence.  Fallback to
	 stepping over the breakpoint in-line.  */
      return -1;
    }

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced->step_thread = tp;
  displaced->step_gdbarch = gdbarch;
  displaced->step_closure = closure;
  displaced->step_original = original;
  displaced->step_copy = copy;

  {
    /* If writing the PC throws, undo the state recorded above.  */
    displaced_step_clear_cleanup cleanup (displaced);

    /* Resume execution at the copy.  */
    regcache_write_pc (regcache, copy);

    cleanup.release ();
  }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
			paddress (gdbarch, copy));

  return 1;
}
1756
1757 /* Wrapper for displaced_step_prepare_throw that disabled further
1758 attempts at displaced stepping if we get a memory error. */
1759
1760 static int
1761 displaced_step_prepare (thread_info *thread)
1762 {
1763 int prepared = -1;
1764
1765 try
1766 {
1767 prepared = displaced_step_prepare_throw (thread);
1768 }
1769 catch (const gdb_exception_error &ex)
1770 {
1771 struct displaced_step_inferior_state *displaced_state;
1772
1773 if (ex.error != MEMORY_ERROR
1774 && ex.error != NOT_SUPPORTED_ERROR)
1775 throw;
1776
1777 if (debug_infrun)
1778 {
1779 fprintf_unfiltered (gdb_stdlog,
1780 "infrun: disabling displaced stepping: %s\n",
1781 ex.what ());
1782 }
1783
1784 /* Be verbose if "set displaced-stepping" is "on", silent if
1785 "auto". */
1786 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1787 {
1788 warning (_("disabling displaced stepping: %s"),
1789 ex.what ());
1790 }
1791
1792 /* Disable further displaced stepping attempts. */
1793 displaced_state
1794 = get_displaced_stepping_state (thread->inf);
1795 displaced_state->failed_before = 1;
1796 }
1797
1798 return prepared;
1799 }
1800
1801 static void
1802 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1803 const gdb_byte *myaddr, int len)
1804 {
1805 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
1806
1807 inferior_ptid = ptid;
1808 write_memory (memaddr, myaddr, len);
1809 }
1810
/* Restore the contents of the copy area for thread PTID.  */

static void
displaced_step_restore (struct displaced_step_inferior_state *displaced,
			ptid_t ptid)
{
  /* The scratch pad is gdbarch_max_insn_length bytes, which is also
     how many bytes were saved into step_saved_copy when the step was
     prepared.  */
  ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);

  write_memory_ptid (ptid, displaced->step_copy,
		     displaced->step_saved_copy.data (), len);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
			target_pid_to_str (ptid).c_str (),
			paddress (displaced->step_gdbarch,
				  displaced->step_copy));
}
1827
/* If we displaced stepped an instruction successfully, adjust
   registers and memory to yield the same effect the instruction would
   have had if we had executed it at its original address, and return
   1.  If the instruction didn't complete, relocate the PC and return
   -1.  If the thread wasn't displaced stepping, return 0.  */

static int
displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
{
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (event_thread->inf);
  int ret;

  /* Was this event for the thread we displaced?  */
  if (displaced->step_thread != event_thread)
    return 0;

  /* From here on, the displaced stepping state is cleared on scope
     exit, whichever path is taken below.  */
  displaced_step_clear_cleanup cleanup (displaced);

  /* Put the original instruction bytes back in the scratch pad.  */
  displaced_step_restore (displaced, displaced->step_thread->ptid);

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread.  */
  switch_to_thread (event_thread);

  /* Did the instruction complete successfully?  A watchpoint hit on
     a nonsteppable-watchpoint (or non-steppable-watchpoint) target
     means it did not.  */
  if (signal == GDB_SIGNAL_TRAP
      && !(target_stopped_by_watchpoint ()
	   && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
	       || target_have_steppable_watchpoint)))
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
				    displaced->step_closure,
				    displaced->step_original,
				    displaced->step_copy,
				    get_thread_regcache (displaced->step_thread));
      ret = 1;
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
	 relocate the PC: map it from the scratch-pad range back into
	 the original instruction stream.  */
      struct regcache *regcache = get_thread_regcache (event_thread);
      CORE_ADDR pc = regcache_read_pc (regcache);

      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
      ret = -1;
    }

  return ret;
}
1882
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* The ptid the event was reported for.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status reported by the target for this event.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in.  */
  int stop_func_filled_in;
  /* Bounds and name of the function the inferior stopped in, when
     known (only valid if stop_func_filled_in is set).  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if event handling should keep waiting for further events
     instead of presenting a stop to the user.  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
1905
1906 /* Clear ECS and set it to point at TP. */
1907
1908 static void
1909 reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1910 {
1911 memset (ecs, 0, sizeof (*ecs));
1912 ecs->event_thread = tp;
1913 ecs->ptid = tp->ptid;
1914 }
1915
1916 static void keep_going_pass_signal (struct execution_control_state *ecs);
1917 static void prepare_to_wait (struct execution_control_state *ecs);
1918 static int keep_going_stepped_thread (struct thread_info *tp);
1919 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
1920
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static int
start_step_over (void)
{
  struct thread_info *tp, *next;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return 0;

  for (tp = step_over_queue_head; tp != NULL; tp = next)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      /* Fetch the successor now, before TP is possibly removed from
	 the queue below.  */
      next = thread_step_over_chain_next (tp);

      /* If this inferior already has a displaced step in process,
	 don't start a new one.  */
      if (displaced_step_in_progress (tp->inf))
	continue;

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_inferior ())
	return 0;

      thread_step_over_chain_remove (tp);

      if (step_over_queue_head == NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over queue now empty\n");
	}

      /* A thread queued for a step-over must be fully stopped; any
	 of these flags set would mean it is (or is about to be)
	 running.  */
      if (tp->control.trap_expected
	  || tp->resumed
	  || tp->executing)
	{
	  internal_error (__FILE__, __LINE__,
			  "[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  target_pid_to_str (tp->ptid).c_str (),
			  tp->control.trap_expected,
			  tp->resumed,
			  tp->executing);
	}

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resuming [%s] for step-over\n",
			    target_pid_to_str (tp->ptid).c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      reset_ecs (ecs, tp);
      keep_going_pass_signal (ecs);

      if (!ecs->wait_some_more)
	error (_("Command aborted."));

      gdb_assert (tp->resumed);

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  return 1;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  return 1;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process.  */
    }

  return 0;
}
2034
2035 /* Update global variables holding ptids to hold NEW_PTID if they were
2036 holding OLD_PTID. */
2037 static void
2038 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2039 {
2040 if (inferior_ptid == old_ptid)
2041 inferior_ptid = new_ptid;
2042 }
2043
2044 \f
2045
/* Valid values of the "scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";

/* NULL-terminated list of the valid values, for the setting's
   enumeration command.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  NULL
};

/* The currently selected scheduler-locking mode; defaults to
   "replay".  */
static const char *scheduler_mode = schedlock_replay;
2058 static void
2059 show_scheduler_mode (struct ui_file *file, int from_tty,
2060 struct cmd_list_element *c, const char *value)
2061 {
2062 fprintf_filtered (file,
2063 _("Mode for locking scheduler "
2064 "during execution is \"%s\".\n"),
2065 value);
2066 }
2067
2068 static void
2069 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2070 {
2071 if (!target_can_lock_scheduler)
2072 {
2073 scheduler_mode = schedlock_off;
2074 error (_("Target '%s' cannot support this command."), target_shortname);
2075 }
2076 }
2077
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Consulted by user_visible_resume_ptid below.  */
bool sched_multi = false;
2082
2083 /* Try to setup for software single stepping over the specified location.
2084 Return 1 if target_resume() should use hardware single step.
2085
2086 GDBARCH the current gdbarch.
2087 PC the location to step over. */
2088
2089 static int
2090 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2091 {
2092 int hw_step = 1;
2093
2094 if (execution_direction == EXEC_FORWARD
2095 && gdbarch_software_single_step_p (gdbarch))
2096 hw_step = !insert_single_step_breakpoints (gdbarch);
2097
2098 return hw_step;
2099 }
2100
2101 /* See infrun.h. */
2102
2103 ptid_t
2104 user_visible_resume_ptid (int step)
2105 {
2106 ptid_t resume_ptid;
2107
2108 if (non_stop)
2109 {
2110 /* With non-stop mode on, threads are always handled
2111 individually. */
2112 resume_ptid = inferior_ptid;
2113 }
2114 else if ((scheduler_mode == schedlock_on)
2115 || (scheduler_mode == schedlock_step && step))
2116 {
2117 /* User-settable 'scheduler' mode requires solo thread
2118 resume. */
2119 resume_ptid = inferior_ptid;
2120 }
2121 else if ((scheduler_mode == schedlock_replay)
2122 && target_record_will_replay (minus_one_ptid, execution_direction))
2123 {
2124 /* User-settable 'scheduler' mode requires solo thread resume in replay
2125 mode. */
2126 resume_ptid = inferior_ptid;
2127 }
2128 else if (!sched_multi && target_supports_multi_process ())
2129 {
2130 /* Resume all threads of the current process (and none of other
2131 processes). */
2132 resume_ptid = ptid_t (inferior_ptid.pid ());
2133 }
2134 else
2135 {
2136 /* Resume all threads of all processes. */
2137 resume_ptid = RESUME_ALL;
2138 }
2139
2140 return resume_ptid;
2141 }
2142
2143 /* Return a ptid representing the set of threads that we will resume,
2144 in the perspective of the target, assuming run control handling
2145 does not require leaving some threads stopped (e.g., stepping past
2146 breakpoint). USER_STEP indicates whether we're about to start the
2147 target for a stepping command. */
2148
2149 static ptid_t
2150 internal_resume_ptid (int user_step)
2151 {
2152 /* In non-stop, we always control threads individually. Note that
2153 the target may always work in non-stop mode even with "set
2154 non-stop off", in which case user_visible_resume_ptid could
2155 return a wildcard ptid. */
2156 if (target_is_non_stop_p ())
2157 return inferior_ptid;
2158 else
2159 return user_visible_resume_ptid (user_step);
2160 }
2161
/* Wrapper for target_resume, that handles infrun-specific
   bookkeeping.  RESUME_PTID is the (possibly wildcard) set of threads
   to resume, STEP requests a hardware single-step, and SIG is the
   signal to deliver to the current thread (GDB_SIGNAL_0 for none).  */

static void
do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make the displaced_step_fixup
     step distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
       scratch pad as frame older than the signal handler, instead of
       the real mainline code.

     - when the thread is later resumed, the signal handler would
       return to the scratch pad area, which would no longer be
       valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  target_resume (resume_ptid, step, sig);

  /* Flush any deferred/batched resumptions out to the target.  */
  target_commit_resume ();
}
2208
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();
  struct thread_info *tp = inferior_thread ();
  CORE_ADDR pc = regcache_read_pc (regcache);
  const address_space *aspace = regcache->aspace ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  int step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  if (tp->suspend.waitstatus_pending_p)
    {
      /* The thread has an event to report already; don't resume it
	 on the target at all, just mark it resumed so the pending
	 event gets processed.  */
      if (debug_infrun)
	{
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: resume: thread %s has pending wait "
			      "status %s (currently_stepping=%d).\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      statstr.c_str (),
			      currently_stepping (tp));
	}

      tp->resumed = 1;

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   target_pid_to_str (tp->ptid).c_str ());
	}

      tp->suspend.stop_signal = GDB_SIGNAL_0;

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have an event to process.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume : clear step\n");
      step = 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resume (step=%d, signal=%s), "
			"trap_expected=%d, current thread [%s] at %s\n",
			step, gdb_signal_to_symbol_string (sig),
			tp->control.trap_expected,
			target_pid_to_str (inferior_ptid).c_str (),
			paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint, "
				"deliver signal first\n");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint\n");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
	      tp->resumed = 1;
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If enabled, step over breakpoints by executing a copy of the
     instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      int prepared = displaced_step_prepare (tp);

      if (prepared == 0)
	{
	  /* The scratch pad is busy; the thread was queued for a
	     later step-over instead of resumed.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"Got placed in step-over queue\n");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepared < 0)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ();

	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), 0, tp->global_num);

	  step = maybe_software_singlestep (gdbarch, pc);

	  insert_breakpoints ();
	}
      else if (prepared > 0)
	{
	  struct displaced_step_inferior_state *displaced;

	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  displaced = get_displaced_stepping_state (tp->inf);
	  step = gdbarch_displaced_step_hw_singlestep (gdbarch,
						       displaced->step_closure);
	}
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, we might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would take us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume: [%s] stepped breakpoint\n",
			    target_pid_to_str (tp->ptid).c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = 0;
    }

  if (debug_displaced
      && tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp);
      struct gdbarch *resume_gdbarch = resume_regcache->arch ();
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      /* Dump the first few bytes at the scratch-pad PC we're about
	 to run, for debugging.  */
      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
			  paddress (resume_gdbarch, actual_pc));
      read_memory (actual_pc, buf, sizeof (buf));
      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  tp->resumed = 1;
}
2592
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
   rolls back state on error.  */

static void
resume (gdb_signal sig)
{
  try
    {
      resume_1 (sig);
    }
  catch (const gdb_exception &ex)
    {
      /* If resuming is being aborted for any reason, delete any
	 single-step breakpoint resume_1 may have created, to avoid
	 confusing the following resumption, and to avoid leaving
	 single-step breakpoints perturbing other threads, in case
	 we're running in non-stop mode.  */
      if (inferior_ptid != null_ptid)
	delete_single_step_breakpoints (inferior_thread ());
      /* Re-throw so the caller still sees the original error.  */
      throw;
    }
}
2616
2617 \f
2618 /* Proceeding. */
2619
2620 /* See infrun.h. */
2621
/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  Incremented by new_stop_id; read via
   get_stop_id.  */
static ULONGEST current_stop_id;
2630
/* See infrun.h.  Returns the current user-visible stop counter.  */

ULONGEST
get_stop_id (void)
{
  return current_stop_id;
}
2638
2639 /* Called when we report a user visible stop. */
2640
2641 static void
2642 new_stop_id (void)
2643 {
2644 current_stop_id++;
2645 }
2646
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: clear_proceed_status_thread (%s)\n",
			target_pid_to_str (tp->ptid).c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->suspend.waitstatus_pending_p)
    {
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: clear_proceed_status: pending "
				"event of %s was a finished step. "
				"Discarding.\n",
				target_pid_to_str (tp->ptid).c_str ());

	  tp->suspend.waitstatus_pending_p = 0;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else if (debug_infrun)
	{
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: clear_proceed_status_thread: thread %s "
			      "has pending wait status %s "
			      "(currently_stepping=%d).\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      statstr.c_str (),
			      currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->suspend.stop_signal))
    tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Discard the previous command's thread state machine, if any.  */
  delete tp->thread_fsm;
  tp->thread_fsm = NULL;

  /* Reset all per-thread run-control state to its defaults.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2716
/* Clear the proceed status of every thread we're about to resume
   (all resumed threads in all-stop; just the current thread in
   non-stop), plus the current inferior's stop_soon state.  STEP
   indicates whether a step or continue-like command is starting.  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  gdb::observers::about_to_proceed.notify ();
}
2759
2760 /* Returns true if TP is still stopped at a breakpoint that needs
2761 stepping-over in order to make progress. If the breakpoint is gone
2762 meanwhile, we can skip the whole step-over dance. */
2763
2764 static int
2765 thread_still_needs_step_over_bp (struct thread_info *tp)
2766 {
2767 if (tp->stepping_over_breakpoint)
2768 {
2769 struct regcache *regcache = get_thread_regcache (tp);
2770
2771 if (breakpoint_here_p (regcache->aspace (),
2772 regcache_read_pc (regcache))
2773 == ordinary_breakpoint_here)
2774 return 1;
2775
2776 tp->stepping_over_breakpoint = 0;
2777 }
2778
2779 return 0;
2780 }
2781
2782 /* Check whether thread TP still needs to start a step-over in order
2783 to make progress when resumed. Returns an bitwise or of enum
2784 step_over_what bits, indicating what needs to be stepped over. */
2785
2786 static step_over_what
2787 thread_still_needs_step_over (struct thread_info *tp)
2788 {
2789 step_over_what what = 0;
2790
2791 if (thread_still_needs_step_over_bp (tp))
2792 what |= STEP_OVER_BREAKPOINT;
2793
2794 if (tp->stepping_over_watchpoint
2795 && !target_have_steppable_watchpoint)
2796 what |= STEP_OVER_WATCHPOINT;
2797
2798 return what;
2799 }
2800
2801 /* Returns true if scheduler locking applies. STEP indicates whether
2802 we're about to do a step/next-like command to a thread. */
2803
2804 static int
2805 schedlock_applies (struct thread_info *tp)
2806 {
2807 return (scheduler_mode == schedlock_on
2808 || (scheduler_mode == schedlock_step
2809 && tp->control.stepping_command)
2810 || (scheduler_mode == schedlock_replay
2811 && target_record_will_replay (minus_one_ptid,
2812 execution_direction)));
2813 }
2814
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  ptid_t resume_ptid;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int started;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE, NULL);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();

  pc = regcache_read_pc (regcache);
  thread_info *cur_thr = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == cur_thr->suspend.stop_pc
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->suspend.stop_signal = siggnal;

  /* Compute the set of threads the user considers resumed by this
     command (depends on scheduler-locking / all-stop settings).  */
  resume_ptid = user_visible_resume_ptid (cur_thr->control.stepping_command);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_ptid, 1);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: proceed (addr=%s, signal=%s)\n",
			paddress (gdbarch, addr),
			gdb_signal_to_symbol_string (siggnal));

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: need to step-over [%s] first\n",
				target_pid_to_str (tp->ptid).c_str ());

	  thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc (regcache);

  {
    /* Defer committing the resumptions to the target until all the
       threads below have been handled.  */
    scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();

    started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);

	    if (tp->resumed)
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] resumed\n",
				      target_pid_to_str (tp->ptid).c_str ());
		gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
		continue;
	      }

	    if (thread_is_in_step_over_chain (tp))
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] needs step-over\n",
				      target_pid_to_str (tp->ptid).c_str ());
		continue;
	      }

	    if (debug_infrun)
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: proceed: resuming %s\n",
				  target_pid_to_str (tp->ptid).c_str ());

	    reset_ecs (ecs, tp);
	    switch_to_thread (tp);
	    keep_going_pass_signal (ecs);
	    if (!ecs->wait_some_more)
	      error (_("Command aborted."));
	  }
      }
    else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
      {
	/* The thread wasn't started, and isn't queued, run it now.  */
	reset_ecs (ecs, cur_thr);
	switch_to_thread (cur_thr);
	keep_going_pass_signal (ecs);
	if (!ecs->wait_some_more)
	  error (_("Command aborted."));
      }
  }

  /* Flush the deferred resumptions to the target.  */
  target_commit_resume ();

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3057 \f
3058
3059 /* Start remote-debugging of a machine over a serial link. */
3060
3061 void
3062 start_remote (int from_tty)
3063 {
3064 struct inferior *inferior;
3065
3066 inferior = current_inferior ();
3067 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
3068
3069 /* Always go on waiting for the target, regardless of the mode. */
3070 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3071 indicate to wait_for_inferior that a target should timeout if
3072 nothing is returned (instead of just blocking). Because of this,
3073 targets expecting an immediate response need to, internally, set
3074 things up so that the target_wait() is forced to eventually
3075 timeout. */
3076 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3077 differentiate to its caller what the state of the target is after
3078 the initial open has been performed. Here we're assuming that
3079 the target has stopped. It should be possible to eventually have
3080 target_open() return to the caller an indication that the target
3081 is currently running and GDB state should be set to the same as
3082 for an async run. */
3083 wait_for_inferior ();
3084
3085 /* Now that the inferior has stopped, do any bookkeeping like
3086 loading shared libraries. We want to do this before normal_stop,
3087 so that the displayed frame is up to date. */
3088 post_create_inferior (current_top_target (), from_tty);
3089
3090 normal_stop ();
3091 }
3092
3093 /* Initialize static vars when a new inferior begins. */
3094
3095 void
3096 init_wait_for_inferior (void)
3097 {
3098 /* These are meaningless until the first time through wait_for_inferior. */
3099
3100 breakpoint_init_inferior (inf_starting);
3101
3102 clear_proceed_status (0);
3103
3104 nullify_last_target_wait_ptid ();
3105
3106 previous_inferior_ptid = inferior_ptid;
3107 }
3108
3109 \f
3110
3111 static void handle_inferior_event (struct execution_control_state *ecs);
3112
3113 static void handle_step_into_function (struct gdbarch *gdbarch,
3114 struct execution_control_state *ecs);
3115 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3116 struct execution_control_state *ecs);
3117 static void handle_signal_stop (struct execution_control_state *ecs);
3118 static void check_exception_resume (struct execution_control_state *,
3119 struct frame_info *);
3120
3121 static void end_stepping_range (struct execution_control_state *ecs);
3122 static void stop_waiting (struct execution_control_state *ecs);
3123 static void keep_going (struct execution_control_state *ecs);
3124 static void process_event_stop_test (struct execution_control_state *ecs);
3125 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
3126
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (ptid))
    {
      /* Threads the frontend already knows as stopped need nothing
	 done here; threads still executing on the target will report
	 their stop through the normal event path.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      if (tp->executing)
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->suspend.waitstatus_pending_p)
	{
	  tp->suspend.waitstatus_pending_p = 1;
	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
	  tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp->ptid);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->resumed = 1;
    }
}
3179
3180 static void
3181 infrun_thread_thread_exit (struct thread_info *tp, int silent)
3182 {
3183 if (target_last_wait_ptid == tp->ptid)
3184 nullify_last_target_wait_ptid ();
3185 }
3186
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are the infrun-internal breakpoints that
   only make sense while TP is being resumed/stepped.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3197
3198 /* If the target still has execution, call FUNC for each thread that
3199 just stopped. In all-stop, that's all the non-exited threads; in
3200 non-stop, that's the current thread, only. */
3201
3202 typedef void (*for_each_just_stopped_thread_callback_func)
3203 (struct thread_info *tp);
3204
3205 static void
3206 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3207 {
3208 if (!target_has_execution || inferior_ptid == null_ptid)
3209 return;
3210
3211 if (target_is_non_stop_p ())
3212 {
3213 /* If in non-stop mode, only the current thread stopped. */
3214 func (inferior_thread ());
3215 }
3216 else
3217 {
3218 /* In all-stop mode, all threads have stopped. */
3219 for (thread_info *tp : all_non_exited_threads ())
3220 func (tp);
3221 }
3222 }
3223
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  See for_each_just_stopped_thread
   for which threads those are.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3232
/* Delete the single-step breakpoints of the threads that just
   stopped.  See for_each_just_stopped_thread for which threads those
   are.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3241
3242 /* See infrun.h. */
3243
3244 void
3245 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3246 const struct target_waitstatus *ws)
3247 {
3248 std::string status_string = target_waitstatus_to_string (ws);
3249 string_file stb;
3250
3251 /* The text is split over several lines because it was getting too long.
3252 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3253 output as a unit; we want only one timestamp printed if debug_timestamp
3254 is set. */
3255
3256 stb.printf ("infrun: target_wait (%d.%ld.%ld",
3257 waiton_ptid.pid (),
3258 waiton_ptid.lwp (),
3259 waiton_ptid.tid ());
3260 if (waiton_ptid.pid () != -1)
3261 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
3262 stb.printf (", status) =\n");
3263 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
3264 result_ptid.pid (),
3265 result_ptid.lwp (),
3266 result_ptid.tid (),
3267 target_pid_to_str (result_ptid).c_str ());
3268 stb.printf ("infrun: %s\n", status_string.c_str ());
3269
3270 /* This uses %s in part to handle %'s in the text, but also to avoid
3271 a gcc error: the format attribute requires a string literal. */
3272 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
3273 }
3274
3275 /* Select a thread at random, out of those which are resumed and have
3276 had events. */
3277
3278 static struct thread_info *
3279 random_pending_event_thread (ptid_t waiton_ptid)
3280 {
3281 int num_events = 0;
3282
3283 auto has_event = [] (thread_info *tp)
3284 {
3285 return (tp->resumed
3286 && tp->suspend.waitstatus_pending_p);
3287 };
3288
3289 /* First see how many events we have. Count only resumed threads
3290 that have an event pending. */
3291 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3292 if (has_event (tp))
3293 num_events++;
3294
3295 if (num_events == 0)
3296 return NULL;
3297
3298 /* Now randomly pick a thread out of those that have had events. */
3299 int random_selector = (int) ((num_events * (double) rand ())
3300 / (RAND_MAX + 1.0));
3301
3302 if (debug_infrun && num_events > 1)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "infrun: Found %d events, selecting #%d\n",
3305 num_events, random_selector);
3306
3307 /* Select the Nth thread that has had an event. */
3308 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3309 if (has_event (tp))
3310 if (random_selector-- == 0)
3311 return tp;
3312
3313 gdb_assert_not_reached ("event thread not found");
3314 }
3315
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  */

static ptid_t
do_target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
{
  ptid_t event_ptid;
  struct thread_info *tp;

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      tp = random_pending_event_thread (ptid);
    }
  else
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: Waiting for specific thread %s.\n",
			    target_pid_to_str (ptid).c_str ());

      /* We have a specific thread to check.  */
      tp = find_thread_ptid (ptid);
      gdb_assert (tp != NULL);
      if (!tp->suspend.waitstatus_pending_p)
	tp = NULL;
    }

  /* If the selected pending stop was a breakpoint hit, re-validate
     it: the thread may have moved, or the breakpoint may have been
     removed, since the event was recorded.  */
  if (tp != NULL
      && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->suspend.stop_pc)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: PC of %s changed. was=%s, now=%s\n",
				target_pid_to_str (tp->ptid).c_str (),
				paddress (gdbarch, tp->suspend.stop_pc),
				paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: previous breakpoint of %s, at %s gone\n",
				target_pid_to_str (tp->ptid).c_str (),
				paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: pending event of %s cancelled.\n",
				target_pid_to_str (tp->ptid).c_str ());

	  /* Downgrade the stale breakpoint stop to a spurious event.  */
	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  if (tp != NULL)
    {
      if (debug_infrun)
	{
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: Using pending wait status %s for %s.\n",
			      statstr.c_str (),
			      target_pid_to_str (tp->ptid).c_str ());
	}

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself).  */
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      /* Consume the pending status: hand it to the caller and clear
	 it on the thread.  */
      tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
      *status = tp->suspend.waitstatus;
      tp->suspend.waitstatus_pending_p = 0;

      /* Wake up the event loop again, until all pending events are
	 processed.  */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  if (deprecated_target_wait_hook)
    event_ptid = deprecated_target_wait_hook (ptid, status, options);
  else
    event_ptid = target_wait (ptid, status, options);

  return event_ptid;
}
3446
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);

  displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced->step_thread == nullptr)
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Mark the inferior as detaching for the duration of the event
     pumping below.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Pump events until the displaced step completes; presumably
     handle_inferior_event clears DISPLACED->step_thread once the
     stepping thread has been moved off the scratch pad — confirm in
     the step-over handling code.  */
  while (displaced->step_thread != nullptr)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      ecs->ptid = do_target_wait (pid_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      scoped_finish_thread_state finish_state (minus_one_ptid);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      finish_state.release ();

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  restore_detaching.release ();
	  error (_("Program exited while detaching"));
	}
    }

  restore_detaching.release ();
}
3514
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands.  */

void
wait_for_inferior (void)
{
  if (debug_infrun)
    fprintf_unfiltered
      (gdb_stdlog, "infrun: wait_for_inferior ()\n");

  /* On exit (normal return or exception), delete the infrun
     breakpoints of the threads that just stopped.  */
  SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  scoped_finish_thread_state finish_state (minus_one_ptid);

  while (1)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      ptid_t waiton_ptid = minus_one_ptid;

      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
	break;
    }

  /* No error, don't finish the state yet.  */
  finish_state.release ();
}
3567
3568 /* Cleanup that reinstalls the readline callback handler, if the
3569 target is running in the background. If while handling the target
3570 event something triggered a secondary prompt, like e.g., a
3571 pagination prompt, we'll have removed the callback handler (see
3572 gdb_readline_wrapper_line). Need to do this as we go back to the
3573 event loop, ready to process further input. Note this has no
3574 effect if the handler hasn't actually been removed, because calling
3575 rl_callback_handler_install resets the line buffer, thus losing
3576 input. */
3577
3578 static void
3579 reinstall_readline_callback_handler_cleanup ()
3580 {
3581 struct ui *ui = current_ui;
3582
3583 if (!ui->async)
3584 {
3585 /* We're not going back to the top level event loop yet. Don't
3586 install the readline callback, as it'd prep the terminal,
3587 readline-style (raw, noecho) (e.g., --batch). We'll install
3588 it the next time the prompt is displayed, when we're ready
3589 for input. */
3590 return;
3591 }
3592
3593 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
3594 gdb_rl_callback_handler_reinstall ();
3595 }
3596
/* Clean up the FSMs of threads that are now stopped.  In non-stop,
   that's just the event thread.  In all-stop, that's all threads.  */

static void
clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
{
  /* The event thread's FSM is cleaned up first, while it is still the
     selected thread.  */
  if (ecs->event_thread != NULL
      && ecs->event_thread->thread_fsm != NULL)
    ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);

  if (!non_stop)
    {
      for (thread_info *thr : all_non_exited_threads ())
	{
	  if (thr->thread_fsm == NULL)
	    continue;
	  if (thr == ecs->event_thread)
	    continue;

	  /* Select THR before cleaning up its FSM; presumably
	     clean_up depends on the current thread — matches the
	     switch-back below.  */
	  switch_to_thread (thr);
	  thr->thread_fsm->clean_up (thr);
	}

      /* Restore the event thread as the selected thread.  */
      if (ecs->event_thread != NULL)
	switch_to_thread (ecs->event_thread);
    }
}
3624
3625 /* Helper for all_uis_check_sync_execution_done that works on the
3626 current UI. */
3627
3628 static void
3629 check_curr_ui_sync_execution_done (void)
3630 {
3631 struct ui *ui = current_ui;
3632
3633 if (ui->prompt_state == PROMPT_NEEDED
3634 && ui->async
3635 && !gdb_in_secondary_prompt_p (ui))
3636 {
3637 target_terminal::ours ();
3638 gdb::observers::sync_execution_done.notify ();
3639 ui_register_input_event_handler (ui);
3640 }
3641 }
3642
3643 /* See infrun.h. */
3644
3645 void
3646 all_uis_check_sync_execution_done (void)
3647 {
3648 SWITCH_THRU_ALL_UIS ()
3649 {
3650 check_curr_ui_sync_execution_done ();
3651 }
3652 }
3653
3654 /* See infrun.h. */
3655
3656 void
3657 all_uis_on_sync_execution_starting (void)
3658 {
3659 SWITCH_THRU_ALL_UIS ()
3660 {
3661 if (current_ui->prompt_state == PROMPT_NEEDED)
3662 async_disable_stdin ();
3663 }
3664 }
3665
3666 /* Asynchronous version of wait_for_inferior. It is called by the
3667 event loop whenever a change of state is detected on the file
3668 descriptor corresponding to the target. It can be called more than
3669 once to complete a single execution command. In such cases we need
3670 to keep the state in a global variable ECSS. If it is the last time
3671 that this function is called for a single execution command, then
3672 report to the user that the inferior has stopped, and do the
3673 necessary cleanups. */
3674
void
fetch_inferior_event (void *client_data)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int cmd_done = 0;
  /* Wait for an event from any thread of any inferior.  CLIENT_DATA
     is unused.  */
  ptid_t waiton_ptid = minus_one_ptid;

  memset (ecs, 0, sizeof (*ecs));

  /* Events are always processed with the main UI as current UI. This
     way, warnings, debug output, etc. are always consistently sent to
     the main console. */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* End up with readline processing input, if necessary. */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging. If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event. */
    gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events. Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands. */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event. Target
       was running and cache could be stale. This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event. */
    target_dcache_invalidate ();

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Pull one event from the target.  When the target can run
       asynchronously, poll without blocking (TARGET_WNOHANG).  */
    ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws,
				target_can_async_p () ? TARGET_WNOHANG : 0);

    if (debug_infrun)
      print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state. */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
    scoped_finish_thread_state finish_state (finish_ptid);

    /* Get executed before scoped_restore_current_thread above to apply
       still for the thread which has thrown the exception. */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Now figure out what to do with the result of the result. */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      {
	struct inferior *inf = find_inferior_ptid (ecs->ptid);
	int should_stop = 1;
	struct thread_info *thr = ecs->event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	/* If the event thread is running a finish-state-machine (e.g.,
	   for a "finish" or "until" command), ask it whether this stop
	   should be presented to the user or whether to keep going.  */
	if (thr != NULL)
	  {
	    struct thread_fsm *thread_fsm = thr->thread_fsm;

	    if (thread_fsm != NULL)
	      should_stop = thread_fsm->should_stop (thr);
	  }

	if (!should_stop)
	  {
	    keep_going (ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    int proceeded = 0;

	    clean_up_just_stopped_threads_fsms (ecs);

	    if (thr != NULL && thr->thread_fsm != NULL)
	      should_notify_stop = thr->thread_fsm->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit. */
		if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    /* normal_stop may have re-resumed the target; only report
	       command completion if it did not.  */
	    if (!proceeded)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE, NULL);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone. We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited). We chose the
	       later, just because that's what GDB used to do. After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.". */
	    if (!non_stop
		&& cmd_done
		&& ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet. */
    finish_state.release ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here. */
  }

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input). */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    printf_unfiltered (_("completed.\n"));
}
3823
3824 /* Record the frame and location we're currently stepping through. */
3825 void
3826 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3827 {
3828 struct thread_info *tp = inferior_thread ();
3829
3830 tp->control.step_frame_id = get_frame_id (frame);
3831 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3832
3833 tp->current_symtab = sal.symtab;
3834 tp->current_line = sal.line;
3835 }
3836
3837 /* Clear context switchable stepping state. */
3838
3839 void
3840 init_thread_stepping_state (struct thread_info *tss)
3841 {
3842 tss->stepped_breakpoint = 0;
3843 tss->stepping_over_breakpoint = 0;
3844 tss->stepping_over_watchpoint = 0;
3845 tss->step_after_step_resume_breakpoint = 0;
3846 }
3847
3848 /* See infrun.h. */
3849
3850 void
3851 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3852 {
3853 target_last_wait_ptid = ptid;
3854 target_last_waitstatus = status;
3855 }
3856
3857 /* See infrun.h. */
3858
3859 void
3860 get_last_target_status (ptid_t *ptid, struct target_waitstatus *status)
3861 {
3862 if (ptid != nullptr)
3863 *ptid = target_last_wait_ptid;
3864 if (status != nullptr)
3865 *status = target_last_waitstatus;
3866 }
3867
3868 /* See infrun.h. */
3869
3870 void
3871 nullify_last_target_wait_ptid (void)
3872 {
3873 target_last_wait_ptid = minus_one_ptid;
3874 target_last_waitstatus = {};
3875 }
3876
3877 /* Switch thread contexts. */
3878
3879 static void
3880 context_switch (execution_control_state *ecs)
3881 {
3882 if (debug_infrun
3883 && ecs->ptid != inferior_ptid
3884 && ecs->event_thread != inferior_thread ())
3885 {
3886 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3887 target_pid_to_str (inferior_ptid).c_str ());
3888 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3889 target_pid_to_str (ecs->ptid).c_str ());
3890 }
3891
3892 switch_to_thread (ecs->event_thread);
3893 }
3894
/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint. If so,
   adjust the PC, per gdbarch_decr_pc_after_break.  THREAD is the
   thread that reported the event described by WS.  */

static void
adjust_pc_after_break (struct thread_info *thread,
		       struct target_waitstatus *ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break. Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards. It may not match
     gdbarch_decr_pc_after_break. I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */

  if (ws->kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ws->value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed. The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong. E.g., consider this case on a decr_pc_after_break == 1
     architecture:

     B1 0x08000000 : INSN1
     B2 0x08000001 : INSN2
     0x08000002 : INSN3
     PC -> 0x08000003 : INSN4

     Say you're stopped at 0x08000003 as above. Reverse continuing
     from that point should hit B2 as below. Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

     B1 0x08000000 : INSN1
     B2 PC -> 0x08000001 : INSN2
     0x08000002 : INSN3
     0x08000003 : INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1. We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet. Doing nothing is the correct
     behaviour. */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it. Targets that can tell also adjust the PC
     themselves. */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail. E.g,. the breakpoint could have been
     removed since. Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address. */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do. */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be. */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued. */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later. To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported. Note
     this is an heuristic and can thus get confused. The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above). */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      /* Don't let the PC write below be recorded by the record
	 target.  */
      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint. Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint. In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address. */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
4041
4042 static int
4043 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4044 {
4045 for (frame = get_prev_frame (frame);
4046 frame != NULL;
4047 frame = get_prev_frame (frame))
4048 {
4049 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4050 return 1;
4051 if (get_frame_type (frame) != INLINE_FRAME)
4052 break;
4053 }
4054
4055 return 0;
4056 }
4057
4058 /* Look for an inline frame that is marked for skip.
4059 If PREV_FRAME is TRUE start at the previous frame,
4060 otherwise start at the current frame. Stop at the
4061 first non-inline frame, or at the frame where the
4062 step started. */
4063
4064 static bool
4065 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4066 {
4067 struct frame_info *frame = get_current_frame ();
4068
4069 if (prev_frame)
4070 frame = get_prev_frame (frame);
4071
4072 for (; frame != NULL; frame = get_prev_frame (frame))
4073 {
4074 const char *fn = NULL;
4075 symtab_and_line sal;
4076 struct symbol *sym;
4077
4078 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4079 break;
4080 if (get_frame_type (frame) != INLINE_FRAME)
4081 break;
4082
4083 sal = find_frame_sal (frame);
4084 sym = get_frame_function (frame);
4085
4086 if (sym != NULL)
4087 fn = sym->print_name ();
4088
4089 if (sal.line != 0
4090 && function_name_is_marked_for_skip (fn, sal))
4091 return true;
4092 }
4093
4094 return false;
4095 }
4096
4097 /* If the event thread has the stop requested flag set, pretend it
4098 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4099 target_stop). */
4100
4101 static bool
4102 handle_stop_requested (struct execution_control_state *ecs)
4103 {
4104 if (ecs->event_thread->stop_requested)
4105 {
4106 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4107 ecs->ws.value.sig = GDB_SIGNAL_0;
4108 handle_signal_stop (ecs);
4109 return true;
4110 }
4111 return false;
4112 }
4113
/* Auxiliary function that handles syscall entry/return events.
   It returns 1 if the inferior should keep going (and GDB
   should ignore the event), or 0 if the event deserves to be
   processed. */

static int
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  /* Make the event thread the current thread.  */
  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.value.syscall_number;
  ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);

  /* Only build a bpstat if some syscall catchpoint could match this
     syscall number.  */
  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
                            syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (regcache->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      /* An explicit stop request takes precedence over the
	 catchpoint check.  */
      if (handle_stop_requested (ecs))
	return 0;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit. */
	  return 0;
	}
    }

  if (handle_stop_requested (ecs))
    return 0;

  /* If no catchpoint triggered for this, then keep going. */
  keep_going (ecs);
  return 1;
}
4160
4161 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4162
4163 static void
4164 fill_in_stop_func (struct gdbarch *gdbarch,
4165 struct execution_control_state *ecs)
4166 {
4167 if (!ecs->stop_func_filled_in)
4168 {
4169 const block *block;
4170
4171 /* Don't care about return value; stop_func_start and stop_func_name
4172 will both be 0 if it doesn't work. */
4173 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4174 &ecs->stop_func_name,
4175 &ecs->stop_func_start,
4176 &ecs->stop_func_end,
4177 &block);
4178
4179 /* The call to find_pc_partial_function, above, will set
4180 stop_func_start and stop_func_end to the start and end
4181 of the range containing the stop pc. If this range
4182 contains the entry pc for the block (which is always the
4183 case for contiguous blocks), advance stop_func_start past
4184 the function's start offset and entrypoint. Note that
4185 stop_func_start is NOT advanced when in a range of a
4186 non-contiguous block that does not contain the entry pc. */
4187 if (block != nullptr
4188 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4189 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4190 {
4191 ecs->stop_func_start
4192 += gdbarch_deprecated_function_start_offset (gdbarch);
4193
4194 if (gdbarch_skip_entrypoint_p (gdbarch))
4195 ecs->stop_func_start
4196 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4197 }
4198
4199 ecs->stop_func_filled_in = 1;
4200 }
4201 }
4202
4203
4204 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4205
4206 static enum stop_kind
4207 get_inferior_stop_soon (execution_control_state *ecs)
4208 {
4209 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4210
4211 gdb_assert (inf != NULL);
4212 return inf->control.stop_soon;
4213 }
4214
4215 /* Wait for one event. Store the resulting waitstatus in WS, and
4216 return the event ptid. */
4217
4218 static ptid_t
4219 wait_one (struct target_waitstatus *ws)
4220 {
4221 ptid_t event_ptid;
4222 ptid_t wait_ptid = minus_one_ptid;
4223
4224 overlay_cache_invalid = 1;
4225
4226 /* Flush target cache before starting to handle each event.
4227 Target was running and cache could be stale. This is just a
4228 heuristic. Running threads may modify target memory, but we
4229 don't get any event. */
4230 target_dcache_invalidate ();
4231
4232 if (deprecated_target_wait_hook)
4233 event_ptid = deprecated_target_wait_hook (wait_ptid, ws, 0);
4234 else
4235 event_ptid = target_wait (wait_ptid, ws, 0);
4236
4237 if (debug_infrun)
4238 print_target_wait_results (wait_ptid, event_ptid, ws);
4239
4240 return event_ptid;
4241 }
4242
/* Generate a wrapper for target_stopped_by_REASON that works on PTID
   instead of the current thread.  The wrapper temporarily switches
   inferior_ptid to PTID (restored on return via scoped_restore)
   before querying the target. */
#define THREAD_STOPPED_BY(REASON) \
static int \
thread_stopped_by_ ## REASON (ptid_t ptid) \
{ \
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
  inferior_ptid = ptid; \
  \
  return target_stopped_by_ ## REASON (); \
}

/* Generate thread_stopped_by_watchpoint. */
THREAD_STOPPED_BY (watchpoint)
/* Generate thread_stopped_by_sw_breakpoint. */
THREAD_STOPPED_BY (sw_breakpoint)
/* Generate thread_stopped_by_hw_breakpoint. */
THREAD_STOPPED_BY (hw_breakpoint)
4261
/* Save the thread's event and stop reason to process it later. */

static void
save_waitstatus (struct thread_info *tp, struct target_waitstatus *ws)
{
  if (debug_infrun)
    {
      std::string statstr = target_waitstatus_to_string (ws);

      fprintf_unfiltered (gdb_stdlog,
			  "infrun: saving status %s for %d.%ld.%ld\n",
			  statstr.c_str (),
			  tp->ptid.pid (),
			  tp->ptid.lwp (),
			  tp->ptid.tid ());
    }

  /* Record for later. */
  tp->suspend.waitstatus = *ws;
  tp->suspend.waitstatus_pending_p = 1;

  struct regcache *regcache = get_thread_regcache (tp);
  const address_space *aspace = regcache->aspace ();

  /* For SIGTRAP stops, classify the stop reason now, while the
     target can still be asked about it.  */
  if (ws->kind == TARGET_WAITKIND_STOPPED
      && ws->value.sig == GDB_SIGNAL_TRAP)
    {
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, &tp->suspend.waitstatus);

      if (thread_stopped_by_watchpoint (tp->ptid))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_WATCHPOINT;
	}
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && thread_stopped_by_sw_breakpoint (tp->ptid))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && thread_stopped_by_hw_breakpoint (tp->ptid))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      /* If the target can't report HW breakpoint stops itself, fall
	 back to checking whether a HW breakpoint is inserted at PC.  */
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      /* Likewise for SW breakpoints.  */
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SINGLE_STEP;
	}
    }
}
4332
/* See infrun.h. */

void
stop_all_threads (void)
{
  /* We may need multiple passes to discover all threads. */
  int pass;
  int iterations = 0;

  gdb_assert (target_is_non_stop_p ());

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");

  scoped_restore_current_thread restore_thread;

  /* Have the target report thread events while we stop everything;
     the SCOPE_EXIT turns them back off when we're done.  */
  target_thread_events (1);
  SCOPE_EXIT { target_thread_events (0); };

  /* Request threads to stop, and then wait for the stops. Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped. */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stop_all_threads, pass=%d, "
			    "iterations=%d\n", pass, iterations);
      while (1)
	{
	  ptid_t event_ptid;
	  struct target_waitstatus ws;
	  int need_wait = 0;

	  update_thread_list ();

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop. */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      if (t->executing)
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet. */
		  if (!t->stop_requested)
		    {
		      if (debug_infrun)
			fprintf_unfiltered (gdb_stdlog,
					    "infrun: %s executing, "
					    "need stop\n",
					    target_pid_to_str (t->ptid).c_str ());
		      switch_to_thread_no_regs (t);
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      if (debug_infrun)
			fprintf_unfiltered (gdb_stdlog,
					    "infrun: %s executing, "
					    "already stopping\n",
					    target_pid_to_str (t->ptid).c_str ());
		    }

		  if (t->stop_requested)
		    need_wait = 1;
		}
	      else
		{
		  if (debug_infrun)
		    fprintf_unfiltered (gdb_stdlog,
					"infrun: %s not executing\n",
					target_pid_to_str (t->ptid).c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process. */
		  t->resumed = 0;
		}
	    }

	  /* All known threads are stopped; move on to the next pass.  */
	  if (!need_wait)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over. We want to see two iterations in a row with all
	     threads stopped. */
	  if (pass > 0)
	    pass = -1;

	  event_ptid = wait_one (&ws);
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: stop_all_threads %s %s\n",
				  target_waitstatus_to_string (&ws).c_str (),
				  target_pid_to_str (event_ptid).c_str ());
	    }

	  if (ws.kind == TARGET_WAITKIND_NO_RESUMED
	      || ws.kind == TARGET_WAITKIND_THREAD_EXITED
	      || ws.kind == TARGET_WAITKIND_EXITED
	      || ws.kind == TARGET_WAITKIND_SIGNALLED)
	    {
	      /* All resumed threads exited
		 or one thread/process exited/signalled. */
	    }
	  else
	    {
	      thread_info *t = find_thread_ptid (event_ptid);
	      if (t == NULL)
		t = add_thread (event_ptid);

	      t->stop_requested = 0;
	      t->executing = 0;
	      t->resumed = 0;
	      t->control.may_range_step = 0;

	      /* This may be the first time we see the inferior report
		 a stop. */
	      inferior *inf = find_inferior_ptid (event_ptid);
	      if (inf->needs_setup)
		{
		  switch_to_thread_no_regs (t);
		  setup_inferior (0);
		}

	      if (ws.kind == TARGET_WAITKIND_STOPPED
		  && ws.value.sig == GDB_SIGNAL_0)
		{
		  /* We caught the event that we intended to catch, so
		     there's no event pending. */
		  t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
		  t->suspend.waitstatus_pending_p = 0;

		  if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
		    {
		      /* Add it back to the step-over queue. */
		      if (debug_infrun)
			{
			  fprintf_unfiltered (gdb_stdlog,
					      "infrun: displaced-step of %s "
					      "canceled: adding back to the "
					      "step-over queue\n",
					      target_pid_to_str (t->ptid).c_str ());
			}
		      t->control.trap_expected = 0;
		      thread_step_over_chain_enqueue (t);
		    }
		}
	      else
		{
		  /* The thread stopped for some other reason (e.g., a
		     signal or a breakpoint); save the event so it can
		     be reported once everything is stopped.  */
		  enum gdb_signal sig;
		  struct regcache *regcache;

		  if (debug_infrun)
		    {
		      std::string statstr = target_waitstatus_to_string (&ws);

		      fprintf_unfiltered (gdb_stdlog,
					  "infrun: target_wait %s, saving "
					  "status for %d.%ld.%ld\n",
					  statstr.c_str (),
					  t->ptid.pid (),
					  t->ptid.lwp (),
					  t->ptid.tid ());
		    }

		  /* Record for later. */
		  save_waitstatus (t, &ws);

		  sig = (ws.kind == TARGET_WAITKIND_STOPPED
			 ? ws.value.sig : GDB_SIGNAL_0);

		  if (displaced_step_fixup (t, sig) < 0)
		    {
		      /* Add it back to the step-over queue. */
		      t->control.trap_expected = 0;
		      thread_step_over_chain_enqueue (t);
		    }

		  regcache = get_thread_regcache (t);
		  t->suspend.stop_pc = regcache_read_pc (regcache);

		  if (debug_infrun)
		    {
		      fprintf_unfiltered (gdb_stdlog,
					  "infrun: saved stop_pc=%s for %s "
					  "(currently_stepping=%d)\n",
					  paddress (target_gdbarch (),
						    t->suspend.stop_pc),
					  target_pid_to_str (t->ptid).c_str (),
					  currently_stepping (t));
		    }
		}
	    }
	}
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
}
4536
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Returns 1 if the event
   was consumed here (caller should return), or 0 if the event should
   be reported to the user.  */

static int
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      struct ui *ui;
      int any_sync = 0;

      /* Check whether any UI is blocked on a synchronous execution
	 command.  */
      ALL_UIS (ui)
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = 1;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either. Just
	     ignore. */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(ignoring: bg)\n");
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	       -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	       -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event. But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_. In the example above, this removes
     thread 3 from the thread list. If thread 2 was re-resumed, we
     ignore this event. If we find no thread resumed, then we cancel
     the synchronous command show "no unwaited-for " to the user. */
  update_thread_list ();

  for (thread_info *thread : all_non_exited_threads ())
    {
      if (thread->executing
	  || thread->suspend.waitstatus_pending_p)
	{
	  /* There were no unwaited-for children left in the target at
	     some point, but there are now. Just ignore. */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(ignoring: found resumed)\n");
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Note however that we may find no resumed thread because the whole
     process exited meanwhile (thus updating the thread list results
     in an empty thread list). In this case we know we'll be getting
     a process exit event shortly. */
  for (inferior *inf : all_non_exited_inferiors ())
    {
      thread_info *thread = any_live_thread_of_inferior (inf);
      if (thread == NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(expect process exit)\n");
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Go ahead and report the event. */
  return 0;
}
4641
4642 /* Given an execution control state that has been freshly filled in by
4643 an event from the inferior, figure out what it means and take
4644 appropriate action.
4645
4646 The alternatives are:
4647
4648 1) stop_waiting and return; to really stop and return to the
4649 debugger.
4650
4651 2) keep_going and return; to wait for the next event (set
4652 ecs->event_thread->stepping_over_breakpoint to 1 to single step
4653 once). */
4654
4655 static void
4656 handle_inferior_event (struct execution_control_state *ecs)
4657 {
4658 /* Make sure that all temporary struct value objects that were
4659 created during the handling of the event get deleted at the
4660 end. */
4661 scoped_value_mark free_values;
4662
4663 enum stop_kind stop_soon;
4664
4665 if (debug_infrun)
4666 fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
4667 target_waitstatus_to_string (&ecs->ws).c_str ());
4668
4669 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
4670 {
4671 /* We had an event in the inferior, but we are not interested in
4672 handling it at this level. The lower layers have already
4673 done what needs to be done, if anything.
4674
4675 One of the possible circumstances for this is when the
4676 inferior produces output for the console. The inferior has
4677 not stopped, and we are ignoring the event. Another possible
4678 circumstance is any event which the lower level knows will be
4679 reported multiple times without an intervening resume. */
4680 prepare_to_wait (ecs);
4681 return;
4682 }
4683
4684 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
4685 {
4686 prepare_to_wait (ecs);
4687 return;
4688 }
4689
4690 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
4691 && handle_no_resumed (ecs))
4692 return;
4693
4694 /* Cache the last pid/waitstatus. */
4695 set_last_target_status (ecs->ptid, ecs->ws);
4696
4697 /* Always clear state belonging to the previous time we stopped. */
4698 stop_stack_dummy = STOP_NONE;
4699
4700 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
4701 {
4702 /* No unwaited-for children left. IOW, all resumed children
4703 have exited. */
4704 stop_print_frame = 0;
4705 stop_waiting (ecs);
4706 return;
4707 }
4708
4709 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
4710 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
4711 {
4712 ecs->event_thread = find_thread_ptid (ecs->ptid);
4713 /* If it's a new thread, add it to the thread database. */
4714 if (ecs->event_thread == NULL)
4715 ecs->event_thread = add_thread (ecs->ptid);
4716
4717 /* Disable range stepping. If the next step request could use a
4718 range, this will be end up re-enabled then. */
4719 ecs->event_thread->control.may_range_step = 0;
4720 }
4721
4722 /* Dependent on valid ECS->EVENT_THREAD. */
4723 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
4724
4725 /* Dependent on the current PC value modified by adjust_pc_after_break. */
4726 reinit_frame_cache ();
4727
4728 breakpoint_retire_moribund ();
4729
4730 /* First, distinguish signals caused by the debugger from signals
4731 that have to do with the program's own actions. Note that
4732 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
4733 on the operating system version. Here we detect when a SIGILL or
4734 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
4735 something similar for SIGSEGV, since a SIGSEGV will be generated
4736 when we're trying to execute a breakpoint instruction on a
4737 non-executable stack. This happens for call dummy breakpoints
4738 for architectures like SPARC that place call dummies on the
4739 stack. */
4740 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
4741 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
4742 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
4743 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
4744 {
4745 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
4746
4747 if (breakpoint_inserted_here_p (regcache->aspace (),
4748 regcache_read_pc (regcache)))
4749 {
4750 if (debug_infrun)
4751 fprintf_unfiltered (gdb_stdlog,
4752 "infrun: Treating signal as SIGTRAP\n");
4753 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
4754 }
4755 }
4756
4757 /* Mark the non-executing threads accordingly. In all-stop, all
4758 threads of all processes are stopped when we get any event
4759 reported. In non-stop mode, only the event thread stops. */
4760 {
4761 ptid_t mark_ptid;
4762
4763 if (!target_is_non_stop_p ())
4764 mark_ptid = minus_one_ptid;
4765 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
4766 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
4767 {
4768 /* If we're handling a process exit in non-stop mode, even
4769 though threads haven't been deleted yet, one would think
4770 that there is nothing to do, as threads of the dead process
4771 will be soon deleted, and threads of any other process were
4772 left running. However, on some targets, threads survive a
4773 process exit event. E.g., for the "checkpoint" command,
4774 when the current checkpoint/fork exits, linux-fork.c
4775 automatically switches to another fork from within
4776 target_mourn_inferior, by associating the same
4777 inferior/thread to another fork. We haven't mourned yet at
4778 this point, but we must mark any threads left in the
4779 process as not-executing so that finish_thread_state marks
4780 them stopped (in the user's perspective) if/when we present
4781 the stop to the user. */
4782 mark_ptid = ptid_t (ecs->ptid.pid ());
4783 }
4784 else
4785 mark_ptid = ecs->ptid;
4786
4787 set_executing (mark_ptid, 0);
4788
4789 /* Likewise the resumed flag. */
4790 set_resumed (mark_ptid, 0);
4791 }
4792
4793 switch (ecs->ws.kind)
4794 {
4795 case TARGET_WAITKIND_LOADED:
4796 context_switch (ecs);
4797 /* Ignore gracefully during startup of the inferior, as it might
4798 be the shell which has just loaded some objects, otherwise
4799 add the symbols for the newly loaded objects. Also ignore at
4800 the beginning of an attach or remote session; we will query
4801 the full list of libraries once the connection is
4802 established. */
4803
4804 stop_soon = get_inferior_stop_soon (ecs);
4805 if (stop_soon == NO_STOP_QUIETLY)
4806 {
4807 struct regcache *regcache;
4808
4809 regcache = get_thread_regcache (ecs->event_thread);
4810
4811 handle_solib_event ();
4812
4813 ecs->event_thread->control.stop_bpstat
4814 = bpstat_stop_status (regcache->aspace (),
4815 ecs->event_thread->suspend.stop_pc,
4816 ecs->event_thread, &ecs->ws);
4817
4818 if (handle_stop_requested (ecs))
4819 return;
4820
4821 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4822 {
4823 /* A catchpoint triggered. */
4824 process_event_stop_test (ecs);
4825 return;
4826 }
4827
4828 /* If requested, stop when the dynamic linker notifies
4829 gdb of events. This allows the user to get control
4830 and place breakpoints in initializer routines for
4831 dynamically loaded objects (among other things). */
4832 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4833 if (stop_on_solib_events)
4834 {
4835 /* Make sure we print "Stopped due to solib-event" in
4836 normal_stop. */
4837 stop_print_frame = 1;
4838
4839 stop_waiting (ecs);
4840 return;
4841 }
4842 }
4843
4844 /* If we are skipping through a shell, or through shared library
4845 loading that we aren't interested in, resume the program. If
4846 we're running the program normally, also resume. */
4847 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
4848 {
4849 /* Loading of shared libraries might have changed breakpoint
4850 addresses. Make sure new breakpoints are inserted. */
4851 if (stop_soon == NO_STOP_QUIETLY)
4852 insert_breakpoints ();
4853 resume (GDB_SIGNAL_0);
4854 prepare_to_wait (ecs);
4855 return;
4856 }
4857
4858 /* But stop if we're attaching or setting up a remote
4859 connection. */
4860 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4861 || stop_soon == STOP_QUIETLY_REMOTE)
4862 {
4863 if (debug_infrun)
4864 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4865 stop_waiting (ecs);
4866 return;
4867 }
4868
4869 internal_error (__FILE__, __LINE__,
4870 _("unhandled stop_soon: %d"), (int) stop_soon);
4871
4872 case TARGET_WAITKIND_SPURIOUS:
4873 if (handle_stop_requested (ecs))
4874 return;
4875 context_switch (ecs);
4876 resume (GDB_SIGNAL_0);
4877 prepare_to_wait (ecs);
4878 return;
4879
4880 case TARGET_WAITKIND_THREAD_CREATED:
4881 if (handle_stop_requested (ecs))
4882 return;
4883 context_switch (ecs);
4884 if (!switch_back_to_stepped_thread (ecs))
4885 keep_going (ecs);
4886 return;
4887
4888 case TARGET_WAITKIND_EXITED:
4889 case TARGET_WAITKIND_SIGNALLED:
4890 inferior_ptid = ecs->ptid;
4891 set_current_inferior (find_inferior_ptid (ecs->ptid));
4892 set_current_program_space (current_inferior ()->pspace);
4893 handle_vfork_child_exec_or_exit (0);
4894 target_terminal::ours (); /* Must do this before mourn anyway. */
4895
4896 /* Clearing any previous state of convenience variables. */
4897 clear_exit_convenience_vars ();
4898
4899 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
4900 {
4901 /* Record the exit code in the convenience variable $_exitcode, so
4902 that the user can inspect this again later. */
4903 set_internalvar_integer (lookup_internalvar ("_exitcode"),
4904 (LONGEST) ecs->ws.value.integer);
4905
4906 /* Also record this in the inferior itself. */
4907 current_inferior ()->has_exit_code = 1;
4908 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
4909
4910 /* Support the --return-child-result option. */
4911 return_child_result_value = ecs->ws.value.integer;
4912
4913 gdb::observers::exited.notify (ecs->ws.value.integer);
4914 }
4915 else
4916 {
4917 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
4918
4919 if (gdbarch_gdb_signal_to_target_p (gdbarch))
4920 {
4921 /* Set the value of the internal variable $_exitsignal,
4922 which holds the signal uncaught by the inferior. */
4923 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
4924 gdbarch_gdb_signal_to_target (gdbarch,
4925 ecs->ws.value.sig));
4926 }
4927 else
4928 {
4929 /* We don't have access to the target's method used for
4930 converting between signal numbers (GDB's internal
4931 representation <-> target's representation).
4932 Therefore, we cannot do a good job at displaying this
4933 information to the user. It's better to just warn
4934 her about it (if infrun debugging is enabled), and
4935 give up. */
4936 if (debug_infrun)
4937 fprintf_filtered (gdb_stdlog, _("\
4938 Cannot fill $_exitsignal with the correct signal number.\n"));
4939 }
4940
4941 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
4942 }
4943
4944 gdb_flush (gdb_stdout);
4945 target_mourn_inferior (inferior_ptid);
4946 stop_print_frame = 0;
4947 stop_waiting (ecs);
4948 return;
4949
4950 /* The following are the only cases in which we keep going;
4951 the above cases end in a continue or goto. */
4952 case TARGET_WAITKIND_FORKED:
4953 case TARGET_WAITKIND_VFORKED:
4954 /* Check whether the inferior is displaced stepping. */
4955 {
4956 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
4957 struct gdbarch *gdbarch = regcache->arch ();
4958
4959 /* If checking displaced stepping is supported, and thread
4960 ecs->ptid is displaced stepping. */
4961 if (displaced_step_in_progress_thread (ecs->event_thread))
4962 {
4963 struct inferior *parent_inf
4964 = find_inferior_ptid (ecs->ptid);
4965 struct regcache *child_regcache;
4966 CORE_ADDR parent_pc;
4967
4968 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
4969 indicating that the displaced stepping of syscall instruction
4970 has been done. Perform cleanup for parent process here. Note
4971 that this operation also cleans up the child process for vfork,
4972 because their pages are shared. */
4973 displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
4974 /* Start a new step-over in another thread if there's one
4975 that needs it. */
4976 start_step_over ();
4977
4978 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
4979 {
4980 struct displaced_step_inferior_state *displaced
4981 = get_displaced_stepping_state (parent_inf);
4982
4983 /* Restore scratch pad for child process. */
4984 displaced_step_restore (displaced, ecs->ws.value.related_pid);
4985 }
4986
4987 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
4988 the child's PC is also within the scratchpad. Set the child's PC
4989 to the parent's PC value, which has already been fixed up.
4990 FIXME: we use the parent's aspace here, although we're touching
4991 the child, because the child hasn't been added to the inferior
4992 list yet at this point. */
4993
4994 child_regcache
4995 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
4996 gdbarch,
4997 parent_inf->aspace);
4998 /* Read PC value of parent process. */
4999 parent_pc = regcache_read_pc (regcache);
5000
5001 if (debug_displaced)
5002 fprintf_unfiltered (gdb_stdlog,
5003 "displaced: write child pc from %s to %s\n",
5004 paddress (gdbarch,
5005 regcache_read_pc (child_regcache)),
5006 paddress (gdbarch, parent_pc));
5007
5008 regcache_write_pc (child_regcache, parent_pc);
5009 }
5010 }
5011
5012 context_switch (ecs);
5013
5014 /* Immediately detach breakpoints from the child before there's
5015 any chance of letting the user delete breakpoints from the
5016 breakpoint lists. If we don't do this early, it's easy to
5017 leave left over traps in the child, vis: "break foo; catch
5018 fork; c; <fork>; del; c; <child calls foo>". We only follow
5019 the fork on the last `continue', and by that time the
5020 breakpoint at "foo" is long gone from the breakpoint table.
5021 If we vforked, then we don't need to unpatch here, since both
5022 parent and child are sharing the same memory pages; we'll
5023 need to unpatch at follow/detach time instead to be certain
5024 that new breakpoints added between catchpoint hit time and
5025 vfork follow are detached. */
5026 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5027 {
5028 /* This won't actually modify the breakpoint list, but will
5029 physically remove the breakpoints from the child. */
5030 detach_breakpoints (ecs->ws.value.related_pid);
5031 }
5032
5033 delete_just_stopped_threads_single_step_breakpoints ();
5034
5035 /* In case the event is caught by a catchpoint, remember that
5036 the event is to be followed at the next resume of the thread,
5037 and not immediately. */
5038 ecs->event_thread->pending_follow = ecs->ws;
5039
5040 ecs->event_thread->suspend.stop_pc
5041 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
5042
5043 ecs->event_thread->control.stop_bpstat
5044 = bpstat_stop_status (get_current_regcache ()->aspace (),
5045 ecs->event_thread->suspend.stop_pc,
5046 ecs->event_thread, &ecs->ws);
5047
5048 if (handle_stop_requested (ecs))
5049 return;
5050
5051 /* If no catchpoint triggered for this, then keep going. Note
5052 that we're interested in knowing the bpstat actually causes a
5053 stop, not just if it may explain the signal. Software
5054 watchpoints, for example, always appear in the bpstat. */
5055 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5056 {
5057 int should_resume;
5058 int follow_child
5059 = (follow_fork_mode_string == follow_fork_mode_child);
5060
5061 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5062
5063 should_resume = follow_fork ();
5064
5065 thread_info *parent = ecs->event_thread;
5066 thread_info *child = find_thread_ptid (ecs->ws.value.related_pid);
5067
5068 /* At this point, the parent is marked running, and the
5069 child is marked stopped. */
5070
5071 /* If not resuming the parent, mark it stopped. */
5072 if (follow_child && !detach_fork && !non_stop && !sched_multi)
5073 parent->set_running (false);
5074
5075 /* If resuming the child, mark it running. */
5076 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
5077 child->set_running (true);
5078
5079 /* In non-stop mode, also resume the other branch. */
5080 if (!detach_fork && (non_stop
5081 || (sched_multi && target_is_non_stop_p ())))
5082 {
5083 if (follow_child)
5084 switch_to_thread (parent);
5085 else
5086 switch_to_thread (child);
5087
5088 ecs->event_thread = inferior_thread ();
5089 ecs->ptid = inferior_ptid;
5090 keep_going (ecs);
5091 }
5092
5093 if (follow_child)
5094 switch_to_thread (child);
5095 else
5096 switch_to_thread (parent);
5097
5098 ecs->event_thread = inferior_thread ();
5099 ecs->ptid = inferior_ptid;
5100
5101 if (should_resume)
5102 keep_going (ecs);
5103 else
5104 stop_waiting (ecs);
5105 return;
5106 }
5107 process_event_stop_test (ecs);
5108 return;
5109
5110 case TARGET_WAITKIND_VFORK_DONE:
5111 /* Done with the shared memory region. Re-insert breakpoints in
5112 the parent, and keep going. */
5113
5114 context_switch (ecs);
5115
5116 current_inferior ()->waiting_for_vfork_done = 0;
5117 current_inferior ()->pspace->breakpoints_not_allowed = 0;
5118
5119 if (handle_stop_requested (ecs))
5120 return;
5121
5122 /* This also takes care of reinserting breakpoints in the
5123 previously locked inferior. */
5124 keep_going (ecs);
5125 return;
5126
5127 case TARGET_WAITKIND_EXECD:
5128
5129 /* Note we can't read registers yet (the stop_pc), because we
5130 don't yet know the inferior's post-exec architecture.
5131 'stop_pc' is explicitly read below instead. */
5132 switch_to_thread_no_regs (ecs->event_thread);
5133
5134 /* Do whatever is necessary to the parent branch of the vfork. */
5135 handle_vfork_child_exec_or_exit (1);
5136
5137 /* This causes the eventpoints and symbol table to be reset.
5138 Must do this now, before trying to determine whether to
5139 stop. */
5140 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
5141
5142 /* In follow_exec we may have deleted the original thread and
5143 created a new one. Make sure that the event thread is the
5144 execd thread for that case (this is a nop otherwise). */
5145 ecs->event_thread = inferior_thread ();
5146
5147 ecs->event_thread->suspend.stop_pc
5148 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
5149
5150 ecs->event_thread->control.stop_bpstat
5151 = bpstat_stop_status (get_current_regcache ()->aspace (),
5152 ecs->event_thread->suspend.stop_pc,
5153 ecs->event_thread, &ecs->ws);
5154
5155 /* Note that this may be referenced from inside
5156 bpstat_stop_status above, through inferior_has_execd. */
5157 xfree (ecs->ws.value.execd_pathname);
5158 ecs->ws.value.execd_pathname = NULL;
5159
5160 if (handle_stop_requested (ecs))
5161 return;
5162
5163 /* If no catchpoint triggered for this, then keep going. */
5164 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5165 {
5166 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5167 keep_going (ecs);
5168 return;
5169 }
5170 process_event_stop_test (ecs);
5171 return;
5172
5173 /* Be careful not to try to gather much state about a thread
5174 that's in a syscall. It's frequently a losing proposition. */
5175 case TARGET_WAITKIND_SYSCALL_ENTRY:
5176 /* Getting the current syscall number. */
5177 if (handle_syscall_event (ecs) == 0)
5178 process_event_stop_test (ecs);
5179 return;
5180
5181 /* Before examining the threads further, step this thread to
5182 get it entirely out of the syscall. (We get notice of the
5183 event when the thread is just on the verge of exiting a
5184 syscall. Stepping one instruction seems to get it back
5185 into user code.) */
5186 case TARGET_WAITKIND_SYSCALL_RETURN:
5187 if (handle_syscall_event (ecs) == 0)
5188 process_event_stop_test (ecs);
5189 return;
5190
5191 case TARGET_WAITKIND_STOPPED:
5192 handle_signal_stop (ecs);
5193 return;
5194
5195 case TARGET_WAITKIND_NO_HISTORY:
5196 /* Reverse execution: target ran out of history info. */
5197
5198 /* Switch to the stopped thread. */
5199 context_switch (ecs);
5200 if (debug_infrun)
5201 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5202
5203 delete_just_stopped_threads_single_step_breakpoints ();
5204 ecs->event_thread->suspend.stop_pc
5205 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
5206
5207 if (handle_stop_requested (ecs))
5208 return;
5209
5210 gdb::observers::no_history.notify ();
5211 stop_waiting (ecs);
5212 return;
5213 }
5214 }
5215
/* Restart threads back to what they were trying to do back when we
   paused them for an in-line step-over.  The EVENT_THREAD thread is
   ignored.  */

static void
restart_threads (struct thread_info *event_thread)
{
  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      /* Make TP the current thread.  The "_no_regs" variant avoids
	 touching TP's register cache, since TP may be left running
	 below.  */
      switch_to_thread_no_regs (tp);

      /* The event thread is deliberately left alone; per the function
	 comment, our caller decides what happens to it.  */
      if (tp == event_thread)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] is event thread\n",
				target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      /* Only restart threads that the user meant to be running; a
	 thread stopped for any other reason stays stopped.  Threads
	 executing an infcall count as "meant to be running" here.  */
      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] not meant to be running\n",
				target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      /* Already resumed: either actually executing on the target, or
	 it has an event stashed that do_target_wait will pick up.
	 Nothing to do.  */
      if (tp->resumed)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] resumed\n",
				target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
	  continue;
	}

      /* A thread queued for its own step-over must stay stopped until
	 its turn comes; start_step_over is responsible for resuming
	 it.  */
      if (thread_is_in_step_over_chain (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] needs step-over\n",
				target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (!tp->resumed);
	  continue;
	}


      /* A not-yet-reported event is pending; don't re-resume on the
	 target — just mark the thread resumed so the pending event is
	 considered for reporting later.  */
      if (tp->suspend.waitstatus_pending_p)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] has pending status\n",
				target_pid_to_str (tp->ptid).c_str ());
	  tp->resumed = 1;
	  continue;
	}

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error (__FILE__, __LINE__,
			  "thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  target_pid_to_str (tp->ptid).c_str ());
	}

      /* A thread that was mid-step resumes its step; anything else is
	 simply continued.  */
      if (currently_stepping (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] was stepping\n",
				target_pid_to_str (tp->ptid).c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  struct execution_control_state ecss;
	  struct execution_control_state *ecs = &ecss;

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] continuing\n",
				target_pid_to_str (tp->ptid).c_str ());
	  /* Build a fresh ECS for TP and resume it, passing through
	     any pending signal.  */
	  reset_ecs (ecs, tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (ecs);
	}
    }
}
5319
5320 /* Callback for iterate_over_threads. Find a resumed thread that has
5321 a pending waitstatus. */
5322
5323 static int
5324 resumed_thread_with_pending_status (struct thread_info *tp,
5325 void *arg)
5326 {
5327 return (tp->resumed
5328 && tp->suspend.waitstatus_pending_p);
5329 }
5330
/* Called when we get an event that may finish an in-line or
   out-of-line (displaced stepping) step-over started previously.
   Return true if the event is processed and we should go back to the
   event loop; false if the caller should continue processing the
   event.  */

static int
finish_step_over (struct execution_control_state *ecs)
{
  int had_step_over_info;

  /* Clean up after a displaced (out-of-line) step, if the event
     thread was doing one; this can adjust the thread's PC, so it must
     happen before stop_pc is read by our caller.  */
  displaced_step_fixup (ecs->event_thread,
			ecs->event_thread->suspend.stop_signal);

  /* Non-zero iff an in-line step-over (all other threads stopped) was
     in progress.  */
  had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      clear_step_over_info ();
    }

  /* The rest of this function only applies to non-stop targets; in
     all-stop there are no other running threads to restart.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      /* Look for some other resumed thread that already has an event
	 to report.  */
      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      NULL);
      if (pending != NULL)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: found resumed threads with "
				  "pending events, saving status\n");
	    }

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, &ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->resumed = 1;

	  gdb_assert (!tp->executing);

	  regcache = get_thread_regcache (tp);
	  tp->suspend.stop_pc = regcache_read_pc (regcache);

	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: saved stop_pc=%s for %s "
				  "(currently_stepping=%d)\n",
				  paddress (target_gdbarch (),
					    tp->suspend.stop_pc),
				  target_pid_to_str (tp->ptid).c_str (),
				  currently_stepping (tp));
	    }

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  return 0;
}
5452
5453 /* Come here when the program has stopped with a signal. */
5454
5455 static void
5456 handle_signal_stop (struct execution_control_state *ecs)
5457 {
5458 struct frame_info *frame;
5459 struct gdbarch *gdbarch;
5460 int stopped_by_watchpoint;
5461 enum stop_kind stop_soon;
5462 int random_signal;
5463
5464 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5465
5466 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5467
5468 /* Do we need to clean up the state of a thread that has
5469 completed a displaced single-step? (Doing so usually affects
5470 the PC, so do it here, before we set stop_pc.) */
5471 if (finish_step_over (ecs))
5472 return;
5473
5474 /* If we either finished a single-step or hit a breakpoint, but
5475 the user wanted this thread to be stopped, pretend we got a
5476 SIG0 (generic unsignaled stop). */
5477 if (ecs->event_thread->stop_requested
5478 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5479 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5480
5481 ecs->event_thread->suspend.stop_pc
5482 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
5483
5484 if (debug_infrun)
5485 {
5486 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
5487 struct gdbarch *reg_gdbarch = regcache->arch ();
5488
5489 switch_to_thread (ecs->event_thread);
5490
5491 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
5492 paddress (reg_gdbarch,
5493 ecs->event_thread->suspend.stop_pc));
5494 if (target_stopped_by_watchpoint ())
5495 {
5496 CORE_ADDR addr;
5497
5498 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5499
5500 if (target_stopped_data_address (current_top_target (), &addr))
5501 fprintf_unfiltered (gdb_stdlog,
5502 "infrun: stopped data address = %s\n",
5503 paddress (reg_gdbarch, addr));
5504 else
5505 fprintf_unfiltered (gdb_stdlog,
5506 "infrun: (no data address available)\n");
5507 }
5508 }
5509
5510 /* This is originated from start_remote(), start_inferior() and
5511 shared libraries hook functions. */
5512 stop_soon = get_inferior_stop_soon (ecs);
5513 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5514 {
5515 context_switch (ecs);
5516 if (debug_infrun)
5517 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5518 stop_print_frame = 1;
5519 stop_waiting (ecs);
5520 return;
5521 }
5522
5523 /* This originates from attach_command(). We need to overwrite
5524 the stop_signal here, because some kernels don't ignore a
5525 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5526 See more comments in inferior.h. On the other hand, if we
5527 get a non-SIGSTOP, report it to the user - assume the backend
5528 will handle the SIGSTOP if it should show up later.
5529
5530 Also consider that the attach is complete when we see a
5531 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5532 target extended-remote report it instead of a SIGSTOP
5533 (e.g. gdbserver). We already rely on SIGTRAP being our
5534 signal, so this is no exception.
5535
5536 Also consider that the attach is complete when we see a
5537 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5538 the target to stop all threads of the inferior, in case the
5539 low level attach operation doesn't stop them implicitly. If
5540 they weren't stopped implicitly, then the stub will report a
5541 GDB_SIGNAL_0, meaning: stopped for no particular reason
5542 other than GDB's request. */
5543 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5544 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5545 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5546 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5547 {
5548 stop_print_frame = 1;
5549 stop_waiting (ecs);
5550 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5551 return;
5552 }
5553
5554 /* See if something interesting happened to the non-current thread. If
5555 so, then switch to that thread. */
5556 if (ecs->ptid != inferior_ptid)
5557 {
5558 if (debug_infrun)
5559 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
5560
5561 context_switch (ecs);
5562
5563 if (deprecated_context_hook)
5564 deprecated_context_hook (ecs->event_thread->global_num);
5565 }
5566
5567 /* At this point, get hold of the now-current thread's frame. */
5568 frame = get_current_frame ();
5569 gdbarch = get_frame_arch (frame);
5570
5571 /* Pull the single step breakpoints out of the target. */
5572 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5573 {
5574 struct regcache *regcache;
5575 CORE_ADDR pc;
5576
5577 regcache = get_thread_regcache (ecs->event_thread);
5578 const address_space *aspace = regcache->aspace ();
5579
5580 pc = regcache_read_pc (regcache);
5581
5582 /* However, before doing so, if this single-step breakpoint was
5583 actually for another thread, set this thread up for moving
5584 past it. */
5585 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5586 aspace, pc))
5587 {
5588 if (single_step_breakpoint_inserted_here_p (aspace, pc))
5589 {
5590 if (debug_infrun)
5591 {
5592 fprintf_unfiltered (gdb_stdlog,
5593 "infrun: [%s] hit another thread's "
5594 "single-step breakpoint\n",
5595 target_pid_to_str (ecs->ptid).c_str ());
5596 }
5597 ecs->hit_singlestep_breakpoint = 1;
5598 }
5599 }
5600 else
5601 {
5602 if (debug_infrun)
5603 {
5604 fprintf_unfiltered (gdb_stdlog,
5605 "infrun: [%s] hit its "
5606 "single-step breakpoint\n",
5607 target_pid_to_str (ecs->ptid).c_str ());
5608 }
5609 }
5610 }
5611 delete_just_stopped_threads_single_step_breakpoints ();
5612
5613 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5614 && ecs->event_thread->control.trap_expected
5615 && ecs->event_thread->stepping_over_watchpoint)
5616 stopped_by_watchpoint = 0;
5617 else
5618 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5619
5620 /* If necessary, step over this watchpoint. We'll be back to display
5621 it in a moment. */
5622 if (stopped_by_watchpoint
5623 && (target_have_steppable_watchpoint
5624 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
5625 {
5626 /* At this point, we are stopped at an instruction which has
5627 attempted to write to a piece of memory under control of
5628 a watchpoint. The instruction hasn't actually executed
5629 yet. If we were to evaluate the watchpoint expression
5630 now, we would get the old value, and therefore no change
5631 would seem to have occurred.
5632
5633 In order to make watchpoints work `right', we really need
5634 to complete the memory write, and then evaluate the
5635 watchpoint expression. We do this by single-stepping the
5636 target.
5637
5638 It may not be necessary to disable the watchpoint to step over
5639 it. For example, the PA can (with some kernel cooperation)
5640 single step over a watchpoint without disabling the watchpoint.
5641
5642 It is far more common to need to disable a watchpoint to step
5643 the inferior over it. If we have non-steppable watchpoints,
5644 we must disable the current watchpoint; it's simplest to
5645 disable all watchpoints.
5646
5647 Any breakpoint at PC must also be stepped over -- if there's
5648 one, it will have already triggered before the watchpoint
5649 triggered, and we either already reported it to the user, or
5650 it didn't cause a stop and we called keep_going. In either
5651 case, if there was a breakpoint at PC, we must be trying to
5652 step past it. */
5653 ecs->event_thread->stepping_over_watchpoint = 1;
5654 keep_going (ecs);
5655 return;
5656 }
5657
5658 ecs->event_thread->stepping_over_breakpoint = 0;
5659 ecs->event_thread->stepping_over_watchpoint = 0;
5660 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5661 ecs->event_thread->control.stop_step = 0;
5662 stop_print_frame = 1;
5663 stopped_by_random_signal = 0;
5664 bpstat stop_chain = NULL;
5665
5666 /* Hide inlined functions starting here, unless we just performed stepi or
5667 nexti. After stepi and nexti, always show the innermost frame (not any
5668 inline function call sites). */
5669 if (ecs->event_thread->control.step_range_end != 1)
5670 {
5671 const address_space *aspace
5672 = get_thread_regcache (ecs->event_thread)->aspace ();
5673
5674 /* skip_inline_frames is expensive, so we avoid it if we can
5675 determine that the address is one where functions cannot have
5676 been inlined. This improves performance with inferiors that
5677 load a lot of shared libraries, because the solib event
5678 breakpoint is defined as the address of a function (i.e. not
5679 inline). Note that we have to check the previous PC as well
5680 as the current one to catch cases when we have just
5681 single-stepped off a breakpoint prior to reinstating it.
5682 Note that we're assuming that the code we single-step to is
5683 not inline, but that's not definitive: there's nothing
5684 preventing the event breakpoint function from containing
5685 inlined code, and the single-step ending up there. If the
5686 user had set a breakpoint on that inlined code, the missing
5687 skip_inline_frames call would break things. Fortunately
5688 that's an extremely unlikely scenario. */
5689 if (!pc_at_non_inline_function (aspace,
5690 ecs->event_thread->suspend.stop_pc,
5691 &ecs->ws)
5692 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5693 && ecs->event_thread->control.trap_expected
5694 && pc_at_non_inline_function (aspace,
5695 ecs->event_thread->prev_pc,
5696 &ecs->ws)))
5697 {
5698 stop_chain = build_bpstat_chain (aspace,
5699 ecs->event_thread->suspend.stop_pc,
5700 &ecs->ws);
5701 skip_inline_frames (ecs->event_thread, stop_chain);
5702
5703 /* Re-fetch current thread's frame in case that invalidated
5704 the frame cache. */
5705 frame = get_current_frame ();
5706 gdbarch = get_frame_arch (frame);
5707 }
5708 }
5709
5710 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5711 && ecs->event_thread->control.trap_expected
5712 && gdbarch_single_step_through_delay_p (gdbarch)
5713 && currently_stepping (ecs->event_thread))
5714 {
5715 /* We're trying to step off a breakpoint. Turns out that we're
5716 also on an instruction that needs to be stepped multiple
5717 times before it's been fully executing. E.g., architectures
5718 with a delay slot. It needs to be stepped twice, once for
5719 the instruction and once for the delay slot. */
5720 int step_through_delay
5721 = gdbarch_single_step_through_delay (gdbarch, frame);
5722
5723 if (debug_infrun && step_through_delay)
5724 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
5725 if (ecs->event_thread->control.step_range_end == 0
5726 && step_through_delay)
5727 {
5728 /* The user issued a continue when stopped at a breakpoint.
5729 Set up for another trap and get out of here. */
5730 ecs->event_thread->stepping_over_breakpoint = 1;
5731 keep_going (ecs);
5732 return;
5733 }
5734 else if (step_through_delay)
5735 {
5736 /* The user issued a step when stopped at a breakpoint.
5737 Maybe we should stop, maybe we should not - the delay
5738 slot *might* correspond to a line of source. In any
5739 case, don't decide that here, just set
5740 ecs->stepping_over_breakpoint, making sure we
5741 single-step again before breakpoints are re-inserted. */
5742 ecs->event_thread->stepping_over_breakpoint = 1;
5743 }
5744 }
5745
5746 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
5747 handles this event. */
5748 ecs->event_thread->control.stop_bpstat
5749 = bpstat_stop_status (get_current_regcache ()->aspace (),
5750 ecs->event_thread->suspend.stop_pc,
5751 ecs->event_thread, &ecs->ws, stop_chain);
5752
5753 /* Following in case break condition called a
5754 function. */
5755 stop_print_frame = 1;
5756
5757 /* This is where we handle "moribund" watchpoints. Unlike
5758 software breakpoints traps, hardware watchpoint traps are
5759 always distinguishable from random traps. If no high-level
5760 watchpoint is associated with the reported stop data address
5761 anymore, then the bpstat does not explain the signal ---
5762 simply make sure to ignore it if `stopped_by_watchpoint' is
5763 set. */
5764
5765 if (debug_infrun
5766 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5767 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
5768 GDB_SIGNAL_TRAP)
5769 && stopped_by_watchpoint)
5770 fprintf_unfiltered (gdb_stdlog,
5771 "infrun: no user watchpoint explains "
5772 "watchpoint SIGTRAP, ignoring\n");
5773
5774 /* NOTE: cagney/2003-03-29: These checks for a random signal
5775 at one stage in the past included checks for an inferior
5776 function call's call dummy's return breakpoint. The original
5777 comment, that went with the test, read:
5778
5779 ``End of a stack dummy. Some systems (e.g. Sony news) give
5780 another signal besides SIGTRAP, so check here as well as
5781 above.''
5782
5783      If someone ever tries to get call dummies on a
5784 non-executable stack to work (where the target would stop
5785 with something like a SIGSEGV), then those tests might need
5786 to be re-instated. Given, however, that the tests were only
5787 enabled when momentary breakpoints were not being used, I
5788 suspect that it won't be the case.
5789
5790 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
5791 be necessary for call dummies on a non-executable stack on
5792 SPARC. */
5793
5794 /* See if the breakpoints module can explain the signal. */
5795 random_signal
5796 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
5797 ecs->event_thread->suspend.stop_signal);
5798
5799 /* Maybe this was a trap for a software breakpoint that has since
5800 been removed. */
5801 if (random_signal && target_stopped_by_sw_breakpoint ())
5802 {
5803 if (program_breakpoint_here_p (gdbarch,
5804 ecs->event_thread->suspend.stop_pc))
5805 {
5806 struct regcache *regcache;
5807 int decr_pc;
5808
5809 /* Re-adjust PC to what the program would see if GDB was not
5810 debugging it. */
5811 regcache = get_thread_regcache (ecs->event_thread);
5812 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
5813 if (decr_pc != 0)
5814 {
5815 gdb::optional<scoped_restore_tmpl<int>>
5816 restore_operation_disable;
5817
5818 if (record_full_is_used ())
5819 restore_operation_disable.emplace
5820 (record_full_gdb_operation_disable_set ());
5821
5822 regcache_write_pc (regcache,
5823 ecs->event_thread->suspend.stop_pc + decr_pc);
5824 }
5825 }
5826 else
5827 {
5828 /* A delayed software breakpoint event. Ignore the trap. */
5829 if (debug_infrun)
5830 fprintf_unfiltered (gdb_stdlog,
5831 "infrun: delayed software breakpoint "
5832 "trap, ignoring\n");
5833 random_signal = 0;
5834 }
5835 }
5836
5837 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
5838 has since been removed. */
5839 if (random_signal && target_stopped_by_hw_breakpoint ())
5840 {
5841 /* A delayed hardware breakpoint event. Ignore the trap. */
5842 if (debug_infrun)
5843 fprintf_unfiltered (gdb_stdlog,
5844 "infrun: delayed hardware breakpoint/watchpoint "
5845 "trap, ignoring\n");
5846 random_signal = 0;
5847 }
5848
5849 /* If not, perhaps stepping/nexting can. */
5850 if (random_signal)
5851 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5852 && currently_stepping (ecs->event_thread));
5853
5854 /* Perhaps the thread hit a single-step breakpoint of _another_
5855 thread. Single-step breakpoints are transparent to the
5856 breakpoints module. */
5857 if (random_signal)
5858 random_signal = !ecs->hit_singlestep_breakpoint;
5859
5860 /* No? Perhaps we got a moribund watchpoint. */
5861 if (random_signal)
5862 random_signal = !stopped_by_watchpoint;
5863
5864 /* Always stop if the user explicitly requested this thread to
5865 remain stopped. */
5866 if (ecs->event_thread->stop_requested)
5867 {
5868 random_signal = 1;
5869 if (debug_infrun)
5870 fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
5871 }
5872
5873 /* For the program's own signals, act according to
5874 the signal handling tables. */
5875
5876 if (random_signal)
5877 {
5878 /* Signal not for debugging purposes. */
5879 struct inferior *inf = find_inferior_ptid (ecs->ptid);
5880 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
5881
5882 if (debug_infrun)
5883 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
5884 gdb_signal_to_symbol_string (stop_signal));
5885
5886 stopped_by_random_signal = 1;
5887
5888 /* Always stop on signals if we're either just gaining control
5889 of the program, or the user explicitly requested this thread
5890 to remain stopped. */
5891 if (stop_soon != NO_STOP_QUIETLY
5892 || ecs->event_thread->stop_requested
5893 || (!inf->detaching
5894 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
5895 {
5896 stop_waiting (ecs);
5897 return;
5898 }
5899
5900 /* Notify observers the signal has "handle print" set. Note we
5901 returned early above if stopping; normal_stop handles the
5902 printing in that case. */
5903 if (signal_print[ecs->event_thread->suspend.stop_signal])
5904 {
5905 /* The signal table tells us to print about this signal. */
5906 target_terminal::ours_for_output ();
5907 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
5908 target_terminal::inferior ();
5909 }
5910
5911 /* Clear the signal if it should not be passed. */
5912 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
5913 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5914
5915 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
5916 && ecs->event_thread->control.trap_expected
5917 && ecs->event_thread->control.step_resume_breakpoint == NULL)
5918 {
5919 /* We were just starting a new sequence, attempting to
5920 single-step off of a breakpoint and expecting a SIGTRAP.
5921 Instead this signal arrives. This signal will take us out
5922 of the stepping range so GDB needs to remember to, when
5923 the signal handler returns, resume stepping off that
5924 breakpoint. */
5925 /* To simplify things, "continue" is forced to use the same
5926 code paths as single-step - set a breakpoint at the
5927 signal return address and then, once hit, step off that
5928 breakpoint. */
5929 if (debug_infrun)
5930 fprintf_unfiltered (gdb_stdlog,
5931 "infrun: signal arrived while stepping over "
5932 "breakpoint\n");
5933
5934 insert_hp_step_resume_breakpoint_at_frame (frame);
5935 ecs->event_thread->step_after_step_resume_breakpoint = 1;
5936 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5937 ecs->event_thread->control.trap_expected = 0;
5938
5939 /* If we were nexting/stepping some other thread, switch to
5940 it, so that we don't continue it, losing control. */
5941 if (!switch_back_to_stepped_thread (ecs))
5942 keep_going (ecs);
5943 return;
5944 }
5945
5946 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
5947 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
5948 ecs->event_thread)
5949 || ecs->event_thread->control.step_range_end == 1)
5950 && frame_id_eq (get_stack_frame_id (frame),
5951 ecs->event_thread->control.step_stack_frame_id)
5952 && ecs->event_thread->control.step_resume_breakpoint == NULL)
5953 {
5954 /* The inferior is about to take a signal that will take it
5955 out of the single step range. Set a breakpoint at the
5956 current PC (which is presumably where the signal handler
5957 will eventually return) and then allow the inferior to
5958 run free.
5959
5960 Note that this is only needed for a signal delivered
5961 while in the single-step range. Nested signals aren't a
5962 problem as they eventually all return. */
5963 if (debug_infrun)
5964 fprintf_unfiltered (gdb_stdlog,
5965 "infrun: signal may take us out of "
5966 "single-step range\n");
5967
5968 clear_step_over_info ();
5969 insert_hp_step_resume_breakpoint_at_frame (frame);
5970 ecs->event_thread->step_after_step_resume_breakpoint = 1;
5971 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5972 ecs->event_thread->control.trap_expected = 0;
5973 keep_going (ecs);
5974 return;
5975 }
5976
5977 /* Note: step_resume_breakpoint may be non-NULL. This occurs
5978 when either there's a nested signal, or when there's a
5979 pending signal enabled just as the signal handler returns
5980 (leaving the inferior at the step-resume-breakpoint without
5981 actually executing it). Either way continue until the
5982 breakpoint is really hit. */
5983
5984 if (!switch_back_to_stepped_thread (ecs))
5985 {
5986 if (debug_infrun)
5987 fprintf_unfiltered (gdb_stdlog,
5988 "infrun: random signal, keep going\n");
5989
5990 keep_going (ecs);
5991 }
5992 return;
5993 }
5994
5995 process_event_stop_test (ecs);
5996 }
5997
5998 /* Come here when we've got some debug event / signal we can explain
5999 (IOW, not a random signal), and test whether it should cause a
6000 stop, or whether we should resume the inferior (transparently).
6001 E.g., could be a breakpoint whose condition evaluates false; we
6002 could be still stepping within the line; etc. */
6003
6004 static void
6005 process_event_stop_test (struct execution_control_state *ecs)
6006 {
6007 struct symtab_and_line stop_pc_sal;
6008 struct frame_info *frame;
6009 struct gdbarch *gdbarch;
6010 CORE_ADDR jmp_buf_pc;
6011 struct bpstat_what what;
6012
6013 /* Handle cases caused by hitting a breakpoint. */
6014
6015 frame = get_current_frame ();
6016 gdbarch = get_frame_arch (frame);
6017
6018 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
6019
6020 if (what.call_dummy)
6021 {
6022 stop_stack_dummy = what.call_dummy;
6023 }
6024
6025 /* A few breakpoint types have callbacks associated (e.g.,
6026 bp_jit_event). Run them now. */
6027 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6028
6029 /* If we hit an internal event that triggers symbol changes, the
6030 current frame will be invalidated within bpstat_what (e.g., if we
6031 hit an internal solib event). Re-fetch it. */
6032 frame = get_current_frame ();
6033 gdbarch = get_frame_arch (frame);
6034
6035 switch (what.main_action)
6036 {
6037 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6038 /* If we hit the breakpoint at longjmp while stepping, we
6039 install a momentary breakpoint at the target of the
6040 jmp_buf. */
6041
6042 if (debug_infrun)
6043 fprintf_unfiltered (gdb_stdlog,
6044 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
6045
6046 ecs->event_thread->stepping_over_breakpoint = 1;
6047
6048 if (what.is_longjmp)
6049 {
6050 struct value *arg_value;
6051
6052 /* If we set the longjmp breakpoint via a SystemTap probe,
6053 then use it to extract the arguments. The destination PC
6054 is the third argument to the probe. */
6055 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6056 if (arg_value)
6057 {
6058 jmp_buf_pc = value_as_address (arg_value);
6059 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6060 }
6061 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6062 || !gdbarch_get_longjmp_target (gdbarch,
6063 frame, &jmp_buf_pc))
6064 {
6065 if (debug_infrun)
6066 fprintf_unfiltered (gdb_stdlog,
6067 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6068 "(!gdbarch_get_longjmp_target)\n");
6069 keep_going (ecs);
6070 return;
6071 }
6072
6073 /* Insert a breakpoint at resume address. */
6074 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6075 }
6076 else
6077 check_exception_resume (ecs, frame);
6078 keep_going (ecs);
6079 return;
6080
6081 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6082 {
6083 struct frame_info *init_frame;
6084
6085 /* There are several cases to consider.
6086
6087 1. The initiating frame no longer exists. In this case we
6088 must stop, because the exception or longjmp has gone too
6089 far.
6090
6091 2. The initiating frame exists, and is the same as the
6092 current frame. We stop, because the exception or longjmp
6093 has been caught.
6094
6095 3. The initiating frame exists and is different from the
6096 current frame. This means the exception or longjmp has
6097 been caught beneath the initiating frame, so keep going.
6098
6099 4. longjmp breakpoint has been placed just to protect
6100 against stale dummy frames and user is not interested in
6101 stopping around longjmps. */
6102
6103 if (debug_infrun)
6104 fprintf_unfiltered (gdb_stdlog,
6105 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
6106
6107 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6108 != NULL);
6109 delete_exception_resume_breakpoint (ecs->event_thread);
6110
6111 if (what.is_longjmp)
6112 {
6113 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
6114
6115 if (!frame_id_p (ecs->event_thread->initiating_frame))
6116 {
6117 /* Case 4. */
6118 keep_going (ecs);
6119 return;
6120 }
6121 }
6122
6123 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
6124
6125 if (init_frame)
6126 {
6127 struct frame_id current_id
6128 = get_frame_id (get_current_frame ());
6129 if (frame_id_eq (current_id,
6130 ecs->event_thread->initiating_frame))
6131 {
6132 /* Case 2. Fall through. */
6133 }
6134 else
6135 {
6136 /* Case 3. */
6137 keep_going (ecs);
6138 return;
6139 }
6140 }
6141
6142 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6143 exists. */
6144 delete_step_resume_breakpoint (ecs->event_thread);
6145
6146 end_stepping_range (ecs);
6147 }
6148 return;
6149
6150 case BPSTAT_WHAT_SINGLE:
6151 if (debug_infrun)
6152 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6153 ecs->event_thread->stepping_over_breakpoint = 1;
6154 /* Still need to check other stuff, at least the case where we
6155 are stepping and step out of the right range. */
6156 break;
6157
6158 case BPSTAT_WHAT_STEP_RESUME:
6159 if (debug_infrun)
6160 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
6161
6162 delete_step_resume_breakpoint (ecs->event_thread);
6163 if (ecs->event_thread->control.proceed_to_finish
6164 && execution_direction == EXEC_REVERSE)
6165 {
6166 struct thread_info *tp = ecs->event_thread;
6167
6168 /* We are finishing a function in reverse, and just hit the
6169 step-resume breakpoint at the start address of the
6170 function, and we're almost there -- just need to back up
6171 by one more single-step, which should take us back to the
6172 function call. */
6173 tp->control.step_range_start = tp->control.step_range_end = 1;
6174 keep_going (ecs);
6175 return;
6176 }
6177 fill_in_stop_func (gdbarch, ecs);
6178 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
6179 && execution_direction == EXEC_REVERSE)
6180 {
6181 /* We are stepping over a function call in reverse, and just
6182 hit the step-resume breakpoint at the start address of
6183 the function. Go back to single-stepping, which should
6184 take us back to the function call. */
6185 ecs->event_thread->stepping_over_breakpoint = 1;
6186 keep_going (ecs);
6187 return;
6188 }
6189 break;
6190
6191 case BPSTAT_WHAT_STOP_NOISY:
6192 if (debug_infrun)
6193 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6194 stop_print_frame = 1;
6195
6196      /* Assume the thread stopped for a breakpoint.  We'll still check
6197 whether a/the breakpoint is there when the thread is next
6198 resumed. */
6199 ecs->event_thread->stepping_over_breakpoint = 1;
6200
6201 stop_waiting (ecs);
6202 return;
6203
6204 case BPSTAT_WHAT_STOP_SILENT:
6205 if (debug_infrun)
6206 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6207 stop_print_frame = 0;
6208
6209      /* Assume the thread stopped for a breakpoint.  We'll still check
6210 whether a/the breakpoint is there when the thread is next
6211 resumed. */
6212 ecs->event_thread->stepping_over_breakpoint = 1;
6213 stop_waiting (ecs);
6214 return;
6215
6216 case BPSTAT_WHAT_HP_STEP_RESUME:
6217 if (debug_infrun)
6218 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6219
6220 delete_step_resume_breakpoint (ecs->event_thread);
6221 if (ecs->event_thread->step_after_step_resume_breakpoint)
6222 {
6223 /* Back when the step-resume breakpoint was inserted, we
6224 were trying to single-step off a breakpoint. Go back to
6225 doing that. */
6226 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6227 ecs->event_thread->stepping_over_breakpoint = 1;
6228 keep_going (ecs);
6229 return;
6230 }
6231 break;
6232
6233 case BPSTAT_WHAT_KEEP_CHECKING:
6234 break;
6235 }
6236
6237 /* If we stepped a permanent breakpoint and we had a high priority
6238 step-resume breakpoint for the address we stepped, but we didn't
6239 hit it, then we must have stepped into the signal handler. The
6240 step-resume was only necessary to catch the case of _not_
6241 stepping into the handler, so delete it, and fall through to
6242 checking whether the step finished. */
6243 if (ecs->event_thread->stepped_breakpoint)
6244 {
6245 struct breakpoint *sr_bp
6246 = ecs->event_thread->control.step_resume_breakpoint;
6247
6248 if (sr_bp != NULL
6249 && sr_bp->loc->permanent
6250 && sr_bp->type == bp_hp_step_resume
6251 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6252 {
6253 if (debug_infrun)
6254 fprintf_unfiltered (gdb_stdlog,
6255 "infrun: stepped permanent breakpoint, stopped in "
6256 "handler\n");
6257 delete_step_resume_breakpoint (ecs->event_thread);
6258 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6259 }
6260 }
6261
6262 /* We come here if we hit a breakpoint but should not stop for it.
6263 Possibly we also were stepping and should stop for that. So fall
6264 through and test for stepping. But, if not stepping, do not
6265 stop. */
6266
6267 /* In all-stop mode, if we're currently stepping but have stopped in
6268 some other thread, we need to switch back to the stepped thread. */
6269 if (switch_back_to_stepped_thread (ecs))
6270 return;
6271
6272 if (ecs->event_thread->control.step_resume_breakpoint)
6273 {
6274 if (debug_infrun)
6275 fprintf_unfiltered (gdb_stdlog,
6276 "infrun: step-resume breakpoint is inserted\n");
6277
6278 /* Having a step-resume breakpoint overrides anything
6279 else having to do with stepping commands until
6280 that breakpoint is reached. */
6281 keep_going (ecs);
6282 return;
6283 }
6284
6285 if (ecs->event_thread->control.step_range_end == 0)
6286 {
6287 if (debug_infrun)
6288 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
6289 /* Likewise if we aren't even stepping. */
6290 keep_going (ecs);
6291 return;
6292 }
6293
6294 /* Re-fetch current thread's frame in case the code above caused
6295 the frame cache to be re-initialized, making our FRAME variable
6296 a dangling pointer. */
6297 frame = get_current_frame ();
6298 gdbarch = get_frame_arch (frame);
6299 fill_in_stop_func (gdbarch, ecs);
6300
6301 /* If stepping through a line, keep going if still within it.
6302
6303 Note that step_range_end is the address of the first instruction
6304 beyond the step range, and NOT the address of the last instruction
6305 within it!
6306
6307 Note also that during reverse execution, we may be stepping
6308 through a function epilogue and therefore must detect when
6309 the current-frame changes in the middle of a line. */
6310
6311 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6312 ecs->event_thread)
6313 && (execution_direction != EXEC_REVERSE
6314 || frame_id_eq (get_frame_id (frame),
6315 ecs->event_thread->control.step_frame_id)))
6316 {
6317 if (debug_infrun)
6318 fprintf_unfiltered
6319 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
6320 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6321 paddress (gdbarch, ecs->event_thread->control.step_range_end));
6322
6323 /* Tentatively re-enable range stepping; `resume' disables it if
6324 necessary (e.g., if we're stepping over a breakpoint or we
6325 have software watchpoints). */
6326 ecs->event_thread->control.may_range_step = 1;
6327
6328 /* When stepping backward, stop at beginning of line range
6329 (unless it's the function entry point, in which case
6330 keep going back to the call point). */
6331 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6332 if (stop_pc == ecs->event_thread->control.step_range_start
6333 && stop_pc != ecs->stop_func_start
6334 && execution_direction == EXEC_REVERSE)
6335 end_stepping_range (ecs);
6336 else
6337 keep_going (ecs);
6338
6339 return;
6340 }
6341
6342 /* We stepped out of the stepping range. */
6343
6344 /* If we are stepping at the source level and entered the runtime
6345 loader dynamic symbol resolution code...
6346
6347 EXEC_FORWARD: we keep on single stepping until we exit the run
6348 time loader code and reach the callee's address.
6349
6350 EXEC_REVERSE: we've already executed the callee (backward), and
6351 the runtime loader code is handled just like any other
6352 undebuggable function call. Now we need only keep stepping
6353 backward through the trampoline code, and that's handled further
6354 down, so there is nothing for us to do here. */
6355
6356 if (execution_direction != EXEC_REVERSE
6357 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6358 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
6359 {
6360 CORE_ADDR pc_after_resolver =
6361 gdbarch_skip_solib_resolver (gdbarch,
6362 ecs->event_thread->suspend.stop_pc);
6363
6364 if (debug_infrun)
6365 fprintf_unfiltered (gdb_stdlog,
6366 "infrun: stepped into dynsym resolve code\n");
6367
6368 if (pc_after_resolver)
6369 {
6370 /* Set up a step-resume breakpoint at the address
6371 indicated by SKIP_SOLIB_RESOLVER. */
6372 symtab_and_line sr_sal;
6373 sr_sal.pc = pc_after_resolver;
6374 sr_sal.pspace = get_frame_program_space (frame);
6375
6376 insert_step_resume_breakpoint_at_sal (gdbarch,
6377 sr_sal, null_frame_id);
6378 }
6379
6380 keep_going (ecs);
6381 return;
6382 }
6383
6384 /* Step through an indirect branch thunk. */
6385 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
6386 && gdbarch_in_indirect_branch_thunk (gdbarch,
6387 ecs->event_thread->suspend.stop_pc))
6388 {
6389 if (debug_infrun)
6390 fprintf_unfiltered (gdb_stdlog,
6391 "infrun: stepped into indirect branch thunk\n");
6392 keep_going (ecs);
6393 return;
6394 }
6395
6396 if (ecs->event_thread->control.step_range_end != 1
6397 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6398 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
6399 && get_frame_type (frame) == SIGTRAMP_FRAME)
6400 {
6401 if (debug_infrun)
6402 fprintf_unfiltered (gdb_stdlog,
6403 "infrun: stepped into signal trampoline\n");
6404 /* The inferior, while doing a "step" or "next", has ended up in
6405 a signal trampoline (either by a signal being delivered or by
6406 the signal handler returning). Just single-step until the
6407 inferior leaves the trampoline (either by calling the handler
6408 or returning). */
6409 keep_going (ecs);
6410 return;
6411 }
6412
6413 /* If we're in the return path from a shared library trampoline,
6414 we want to proceed through the trampoline when stepping. */
6415 /* macro/2012-04-25: This needs to come before the subroutine
6416 call check below as on some targets return trampolines look
6417 like subroutine calls (MIPS16 return thunks). */
6418 if (gdbarch_in_solib_return_trampoline (gdbarch,
6419 ecs->event_thread->suspend.stop_pc,
6420 ecs->stop_func_name)
6421 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6422 {
6423 /* Determine where this trampoline returns. */
6424 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6425 CORE_ADDR real_stop_pc
6426 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
6427
6428 if (debug_infrun)
6429 fprintf_unfiltered (gdb_stdlog,
6430 "infrun: stepped into solib return tramp\n");
6431
6432 /* Only proceed through if we know where it's going. */
6433 if (real_stop_pc)
6434 {
6435 /* And put the step-breakpoint there and go until there. */
6436 symtab_and_line sr_sal;
6437 sr_sal.pc = real_stop_pc;
6438 sr_sal.section = find_pc_overlay (sr_sal.pc);
6439 sr_sal.pspace = get_frame_program_space (frame);
6440
6441 /* Do not specify what the fp should be when we stop since
6442 on some machines the prologue is where the new fp value
6443 is established. */
6444 insert_step_resume_breakpoint_at_sal (gdbarch,
6445 sr_sal, null_frame_id);
6446
6447 /* Restart without fiddling with the step ranges or
6448 other state. */
6449 keep_going (ecs);
6450 return;
6451 }
6452 }
6453
6454 /* Check for subroutine calls. The check for the current frame
6455 equalling the step ID is not necessary - the check of the
6456 previous frame's ID is sufficient - but it is a common case and
6457 cheaper than checking the previous frame's ID.
6458
6459 NOTE: frame_id_eq will never report two invalid frame IDs as
6460 being equal, so to get into this block, both the current and
6461 previous frame must have valid frame IDs. */
6462 /* The outer_frame_id check is a heuristic to detect stepping
6463 through startup code. If we step over an instruction which
6464 sets the stack pointer from an invalid value to a valid value,
6465 we may detect that as a subroutine call from the mythical
6466 "outermost" function. This could be fixed by marking
6467 outermost frames as !stack_p,code_p,special_p. Then the
6468 initial outermost frame, before sp was valid, would
6469 have code_addr == &_start. See the comment in frame_id_eq
6470 for more. */
6471 if (!frame_id_eq (get_stack_frame_id (frame),
6472 ecs->event_thread->control.step_stack_frame_id)
6473 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
6474 ecs->event_thread->control.step_stack_frame_id)
6475 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
6476 outer_frame_id)
6477 || (ecs->event_thread->control.step_start_function
6478 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
6479 {
6480 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6481 CORE_ADDR real_stop_pc;
6482
6483 if (debug_infrun)
6484 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
6485
6486 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
6487 {
6488 /* I presume that step_over_calls is only 0 when we're
6489 supposed to be stepping at the assembly language level
6490 ("stepi"). Just stop. */
6491 /* And this works the same backward as frontward. MVS */
6492 end_stepping_range (ecs);
6493 return;
6494 }
6495
6496 /* Reverse stepping through solib trampolines. */
6497
6498 if (execution_direction == EXEC_REVERSE
6499 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
6500 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6501 || (ecs->stop_func_start == 0
6502 && in_solib_dynsym_resolve_code (stop_pc))))
6503 {
6504 /* Any solib trampoline code can be handled in reverse
6505 by simply continuing to single-step. We have already
6506 executed the solib function (backwards), and a few
6507 steps will take us back through the trampoline to the
6508 caller. */
6509 keep_going (ecs);
6510 return;
6511 }
6512
6513 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
6514 {
6515 /* We're doing a "next".
6516
6517 Normal (forward) execution: set a breakpoint at the
6518 callee's return address (the address at which the caller
6519 will resume).
6520
6521 Reverse (backward) execution. set the step-resume
6522 breakpoint at the start of the function that we just
6523 stepped into (backwards), and continue to there. When we
6524 get there, we'll need to single-step back to the caller. */
6525
6526 if (execution_direction == EXEC_REVERSE)
6527 {
6528 /* If we're already at the start of the function, we've either
6529 just stepped backward into a single instruction function,
6530 or stepped back out of a signal handler to the first instruction
6531 of the function. Just keep going, which will single-step back
6532 to the caller. */
6533 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
6534 {
6535 /* Normal function call return (static or dynamic). */
6536 symtab_and_line sr_sal;
6537 sr_sal.pc = ecs->stop_func_start;
6538 sr_sal.pspace = get_frame_program_space (frame);
6539 insert_step_resume_breakpoint_at_sal (gdbarch,
6540 sr_sal, null_frame_id);
6541 }
6542 }
6543 else
6544 insert_step_resume_breakpoint_at_caller (frame);
6545
6546 keep_going (ecs);
6547 return;
6548 }
6549
6550 /* If we are in a function call trampoline (a stub between the
6551 calling routine and the real function), locate the real
6552 function. That's what tells us (a) whether we want to step
6553 into it at all, and (b) what prologue we want to run to the
6554 end of, if we do step into it. */
6555 real_stop_pc = skip_language_trampoline (frame, stop_pc);
6556 if (real_stop_pc == 0)
6557 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
6558 if (real_stop_pc != 0)
6559 ecs->stop_func_start = real_stop_pc;
6560
6561 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
6562 {
6563 symtab_and_line sr_sal;
6564 sr_sal.pc = ecs->stop_func_start;
6565 sr_sal.pspace = get_frame_program_space (frame);
6566
6567 insert_step_resume_breakpoint_at_sal (gdbarch,
6568 sr_sal, null_frame_id);
6569 keep_going (ecs);
6570 return;
6571 }
6572
6573 /* If we have line number information for the function we are
6574 thinking of stepping into and the function isn't on the skip
6575 list, step into it.
6576
6577 If there are several symtabs at that PC (e.g. with include
6578 files), just want to know whether *any* of them have line
6579 numbers. find_pc_line handles this. */
6580 {
6581 struct symtab_and_line tmp_sal;
6582
6583 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
6584 if (tmp_sal.line != 0
6585 && !function_name_is_marked_for_skip (ecs->stop_func_name,
6586 tmp_sal)
6587 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
6588 {
6589 if (execution_direction == EXEC_REVERSE)
6590 handle_step_into_function_backward (gdbarch, ecs);
6591 else
6592 handle_step_into_function (gdbarch, ecs);
6593 return;
6594 }
6595 }
6596
6597 /* If we have no line number and the step-stop-if-no-debug is
6598 set, we stop the step so that the user has a chance to switch
6599 in assembly mode. */
6600 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6601 && step_stop_if_no_debug)
6602 {
6603 end_stepping_range (ecs);
6604 return;
6605 }
6606
6607 if (execution_direction == EXEC_REVERSE)
6608 {
6609 /* If we're already at the start of the function, we've either just
6610 stepped backward into a single instruction function without line
6611 number info, or stepped back out of a signal handler to the first
6612 instruction of the function without line number info. Just keep
6613 going, which will single-step back to the caller. */
6614 if (ecs->stop_func_start != stop_pc)
6615 {
6616 /* Set a breakpoint at callee's start address.
6617 From there we can step once and be back in the caller. */
6618 symtab_and_line sr_sal;
6619 sr_sal.pc = ecs->stop_func_start;
6620 sr_sal.pspace = get_frame_program_space (frame);
6621 insert_step_resume_breakpoint_at_sal (gdbarch,
6622 sr_sal, null_frame_id);
6623 }
6624 }
6625 else
6626 /* Set a breakpoint at callee's return address (the address
6627 at which the caller will resume). */
6628 insert_step_resume_breakpoint_at_caller (frame);
6629
6630 keep_going (ecs);
6631 return;
6632 }
6633
6634 /* Reverse stepping through solib trampolines. */
6635
6636 if (execution_direction == EXEC_REVERSE
6637 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6638 {
6639 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6640
6641 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6642 || (ecs->stop_func_start == 0
6643 && in_solib_dynsym_resolve_code (stop_pc)))
6644 {
6645 /* Any solib trampoline code can be handled in reverse
6646 by simply continuing to single-step. We have already
6647 executed the solib function (backwards), and a few
6648 steps will take us back through the trampoline to the
6649 caller. */
6650 keep_going (ecs);
6651 return;
6652 }
6653 else if (in_solib_dynsym_resolve_code (stop_pc))
6654 {
6655 /* Stepped backward into the solib dynsym resolver.
6656 Set a breakpoint at its start and continue, then
6657 one more step will take us out. */
6658 symtab_and_line sr_sal;
6659 sr_sal.pc = ecs->stop_func_start;
6660 sr_sal.pspace = get_frame_program_space (frame);
6661 insert_step_resume_breakpoint_at_sal (gdbarch,
6662 sr_sal, null_frame_id);
6663 keep_going (ecs);
6664 return;
6665 }
6666 }
6667
6668 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
6669
6670 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6671 the trampoline processing logic, however, there are some trampolines
6672 that have no names, so we should do trampoline handling first. */
6673 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6674 && ecs->stop_func_name == NULL
6675 && stop_pc_sal.line == 0)
6676 {
6677 if (debug_infrun)
6678 fprintf_unfiltered (gdb_stdlog,
6679 "infrun: stepped into undebuggable function\n");
6680
6681 /* The inferior just stepped into, or returned to, an
6682 undebuggable function (where there is no debugging information
6683 and no line number corresponding to the address where the
6684 inferior stopped). Since we want to skip this kind of code,
6685 we keep going until the inferior returns from this
6686 function - unless the user has asked us not to (via
6687 set step-mode) or we no longer know how to get back
6688 to the call site. */
6689 if (step_stop_if_no_debug
6690 || !frame_id_p (frame_unwind_caller_id (frame)))
6691 {
6692 /* If we have no line number and the step-stop-if-no-debug
6693 is set, we stop the step so that the user has a chance to
6694 switch in assembly mode. */
6695 end_stepping_range (ecs);
6696 return;
6697 }
6698 else
6699 {
6700 /* Set a breakpoint at callee's return address (the address
6701 at which the caller will resume). */
6702 insert_step_resume_breakpoint_at_caller (frame);
6703 keep_going (ecs);
6704 return;
6705 }
6706 }
6707
6708 if (ecs->event_thread->control.step_range_end == 1)
6709 {
6710 /* It is stepi or nexti. We always want to stop stepping after
6711 one instruction. */
6712 if (debug_infrun)
6713 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
6714 end_stepping_range (ecs);
6715 return;
6716 }
6717
6718 if (stop_pc_sal.line == 0)
6719 {
6720 /* We have no line number information. That means to stop
6721 stepping (does this always happen right after one instruction,
6722 when we do "s" in a function with no line numbers,
6723 or can this happen as a result of a return or longjmp?). */
6724 if (debug_infrun)
6725 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
6726 end_stepping_range (ecs);
6727 return;
6728 }
6729
6730 /* Look for "calls" to inlined functions, part one. If the inline
6731 frame machinery detected some skipped call sites, we have entered
6732 a new inline function. */
6733
6734 if (frame_id_eq (get_frame_id (get_current_frame ()),
6735 ecs->event_thread->control.step_frame_id)
6736 && inline_skipped_frames (ecs->event_thread))
6737 {
6738 if (debug_infrun)
6739 fprintf_unfiltered (gdb_stdlog,
6740 "infrun: stepped into inlined function\n");
6741
6742 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
6743
6744 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
6745 {
6746 /* For "step", we're going to stop. But if the call site
6747 for this inlined function is on the same source line as
6748 we were previously stepping, go down into the function
6749 first. Otherwise stop at the call site. */
6750
6751 if (call_sal.line == ecs->event_thread->current_line
6752 && call_sal.symtab == ecs->event_thread->current_symtab)
6753 {
6754 step_into_inline_frame (ecs->event_thread);
6755 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
6756 {
6757 keep_going (ecs);
6758 return;
6759 }
6760 }
6761
6762 end_stepping_range (ecs);
6763 return;
6764 }
6765 else
6766 {
6767 /* For "next", we should stop at the call site if it is on a
6768 different source line. Otherwise continue through the
6769 inlined function. */
6770 if (call_sal.line == ecs->event_thread->current_line
6771 && call_sal.symtab == ecs->event_thread->current_symtab)
6772 keep_going (ecs);
6773 else
6774 end_stepping_range (ecs);
6775 return;
6776 }
6777 }
6778
6779 /* Look for "calls" to inlined functions, part two. If we are still
6780 in the same real function we were stepping through, but we have
6781 to go further up to find the exact frame ID, we are stepping
6782 through a more inlined call beyond its call site. */
6783
6784 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
6785 && !frame_id_eq (get_frame_id (get_current_frame ()),
6786 ecs->event_thread->control.step_frame_id)
6787 && stepped_in_from (get_current_frame (),
6788 ecs->event_thread->control.step_frame_id))
6789 {
6790 if (debug_infrun)
6791 fprintf_unfiltered (gdb_stdlog,
6792 "infrun: stepping through inlined function\n");
6793
6794 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
6795 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
6796 keep_going (ecs);
6797 else
6798 end_stepping_range (ecs);
6799 return;
6800 }
6801
6802 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
6803 && (ecs->event_thread->current_line != stop_pc_sal.line
6804 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
6805 {
6806 /* We are at the start of a different line. So stop. Note that
6807 we don't stop if we step into the middle of a different line.
6808 That is said to make things like for (;;) statements work
6809 better. */
6810 if (debug_infrun)
6811 fprintf_unfiltered (gdb_stdlog,
6812 "infrun: stepped to a different line\n");
6813 end_stepping_range (ecs);
6814 return;
6815 }
6816
6817 /* We aren't done stepping.
6818
6819 Optimize by setting the stepping range to the line.
6820 (We might not be in the original line, but if we entered a
6821 new line in mid-statement, we continue stepping. This makes
6822 things like for(;;) statements work better.) */
6823
6824 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
6825 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
6826 ecs->event_thread->control.may_range_step = 1;
6827 set_step_info (frame, stop_pc_sal);
6828
6829 if (debug_infrun)
6830 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
6831 keep_going (ecs);
6832 }
6833
/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).  */

static int
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  /* In non-stop mode each thread is handled individually, so there is
     never a stepped thread to switch back to.  */
  if (!target_is_non_stop_p ())
    {
      struct thread_info *stepping_thread;

      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return 0;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to finish step-over of [%s]\n",
				  target_pid_to_str (ecs->event_thread->ptid).c_str ());
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to step [%s] over single-step "
				  "breakpoint\n",
				  target_pid_to_str (ecs->ptid).c_str ());
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: thread [%s] still needs step-over\n",
				  target_pid_to_str (ecs->event_thread->ptid).c_str ());
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return 0;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      /* Do all pending step-overs before actually proceeding with
	 step/next/etc.  */
      if (start_step_over ())
	{
	  prepare_to_wait (ecs);
	  return 1;
	}

      /* Look for the stepping/nexting thread.  */
      stepping_thread = NULL;

      for (thread_info *tp : all_non_exited_threads ())
	{
	  /* Make TP the current (no-registers) context so that the
	     checks below see the right per-thread state.  */
	  switch_to_thread_no_regs (tp);

	  /* Ignore threads of processes the caller is not
	     resuming.  */
	  if (!sched_multi
	      && tp->ptid.pid () != ecs->ptid.pid ())
	    continue;

	  /* When stepping over a breakpoint, we lock all threads
	     except the one that needs to move past the breakpoint.
	     If a non-event thread has this set, the "incomplete
	     step-over" check above should have caught it earlier.  */
	  if (tp->control.trap_expected)
	    {
	      internal_error (__FILE__, __LINE__,
			      "[%s] has inconsistent state: "
			      "trap_expected=%d\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      tp->control.trap_expected);
	    }

	  /* Did we find the stepping thread?  */
	  if (tp->control.step_range_end)
	    {
	      /* Yep.  There should only one though.  */
	      gdb_assert (stepping_thread == NULL);

	      /* The event thread is handled at the top, before we
		 enter this loop.  */
	      gdb_assert (tp != ecs->event_thread);

	      /* If some thread other than the event thread is
		 stepping, then scheduler locking can't be in effect,
		 otherwise we wouldn't have resumed the current event
		 thread in the first place.  */
	      gdb_assert (!schedlock_applies (tp));

	      stepping_thread = tp;
	    }
	}

      if (stepping_thread != NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: switching back to stepped thread\n");

	  /* keep_going_stepped_thread returns false if the stepping
	     thread has vanished; in that case fall through and let
	     the caller process the event in the event thread.  */
	  if (keep_going_stepped_thread (stepping_thread))
	    {
	      prepare_to_wait (ecs);
	      return 1;
	    }
	}

      /* Restore the event thread as the current thread, since the
	 loop above switched contexts.  */
      switch_to_thread (ecs->event_thread);
    }

  return 0;
}
6991
/* Set a previously stepped thread TP back to stepping.  Returns true
   on success, false if the resume is not possible (e.g., the thread
   vanished).  On success, TP becomes the current thread.  */

static int
keep_going_stepped_thread (struct thread_info *tp)
{
  struct frame_info *frame;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: not resuming previously "
			    "stepped thread, it has vanished\n");

      delete_thread (tp);
      return 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resuming previously stepped thread\n");

  /* Rebuild an execution-control state for TP and make it the
     current thread before touching its registers.  */
  reset_ecs (ecs, tp);
  switch_to_thread (tp);

  tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
     expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->suspend.stop_pc != tp->prev_pc)
    {
      ptid_t resume_ptid;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: expected thread advanced also (%s -> %s)\n",
			    paddress (target_gdbarch (), tp->prev_pc),
			    paddress (target_gdbarch (), tp->suspend.stop_pc));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->suspend.stop_pc);

      tp->resumed = 1;
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
    }
  else
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: expected thread still hasn't advanced\n");

      /* The thread hasn't moved; just resume its step as if
	 keep_going had been called, passing any pending signal.  */
      keep_going_pass_signal (ecs);
    }
  return 1;
}
7093
7094 /* Is thread TP in the middle of (software or hardware)
7095 single-stepping? (Note the result of this function must never be
7096 passed directly as target_resume's STEP parameter.) */
7097
7098 static int
7099 currently_stepping (struct thread_info *tp)
7100 {
7101 return ((tp->control.step_range_end
7102 && tp->control.step_resume_breakpoint == NULL)
7103 || tp->control.trap_expected
7104 || tp->stepped_breakpoint
7105 || bpstat_should_step ());
7106 }
7107
/* Inferior has stepped into a subroutine call with source code that
   we should not step over.  Do step to the first line of code in
   it.  GDBARCH is the architecture of the stopped thread; ECS holds
   the event being processed.  Resumes or stops the step as
   appropriate.  */

static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);

  /* Skip the prologue, unless the function is written in assembly,
     where "prologue" has no useful meaning.  */
  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
         some machines the prologue is where the new fp value is
         established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
7184
7185 /* Inferior has stepped backward into a subroutine call with source
7186 code that we should not step over. Do step to the beginning of the
7187 last line of code in it. */
7188
7189 static void
7190 handle_step_into_function_backward (struct gdbarch *gdbarch,
7191 struct execution_control_state *ecs)
7192 {
7193 struct compunit_symtab *cust;
7194 struct symtab_and_line stop_func_sal;
7195
7196 fill_in_stop_func (gdbarch, ecs);
7197
7198 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
7199 if (cust != NULL && compunit_language (cust) != language_asm)
7200 ecs->stop_func_start
7201 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
7202
7203 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7204
7205 /* OK, we're just going to keep stepping here. */
7206 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
7207 {
7208 /* We're there already. Just stop stepping now. */
7209 end_stepping_range (ecs);
7210 }
7211 else
7212 {
7213 /* Else just reset the step range and keep going.
7214 No step-resume breakpoint, they don't work for
7215 epilogues, which can have multiple entry paths. */
7216 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7217 ecs->event_thread->control.step_range_end = stop_func_sal.end;
7218 keep_going (ecs);
7219 }
7220 return;
7221 }
7222
/* Insert a "step-resume breakpoint" of type SR_TYPE at SR_SAL with
   frame ID SR_ID.  This is used both for stepping over functions and
   for skipping over code.  SR_TYPE selects between the normal and the
   high-priority (signal-handler skipping) variants.  */

static void
insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
					struct symtab_and_line sr_sal,
					struct frame_id sr_id,
					enum bptype sr_type)
{
  /* There should never be more than one step-resume or longjmp-resume
     breakpoint per thread, so we should never be setting a new
     step_resume_breakpoint when one is already active.  */
  gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
  gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: inserting step-resume breakpoint at %s\n",
			paddress (gdbarch, sr_sal.pc));

  /* The momentary breakpoint's ownership is transferred to the
     current thread's control state; it is deleted when the step
     finishes.  */
  inferior_thread ()->control.step_resume_breakpoint
    = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
}
7246
/* Insert a normal (bp_step_resume) step-resume breakpoint at SR_SAL,
   with frame ID SR_ID.  Convenience wrapper around
   insert_step_resume_breakpoint_at_sal_1.  */

void
insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
				      struct symtab_and_line sr_sal,
				      struct frame_id sr_id)
{
  insert_step_resume_breakpoint_at_sal_1 (gdbarch,
					  sr_sal, sr_id,
					  bp_step_resume);
}
7256
7257 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7258 This is used to skip a potential signal handler.
7259
7260 This is called with the interrupted function's frame. The signal
7261 handler, when it returns, will resume the interrupted function at
7262 RETURN_FRAME.pc. */
7263
7264 static void
7265 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
7266 {
7267 gdb_assert (return_frame != NULL);
7268
7269 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7270
7271 symtab_and_line sr_sal;
7272 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
7273 sr_sal.section = find_pc_overlay (sr_sal.pc);
7274 sr_sal.pspace = get_frame_program_space (return_frame);
7275
7276 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7277 get_stack_frame_id (return_frame),
7278 bp_hp_step_resume);
7279 }
7280
7281 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7282 is used to skip a function after stepping into it (for "next" or if
7283 the called function has no debugging information).
7284
7285 The current function has almost always been reached by single
7286 stepping a call or return instruction. NEXT_FRAME belongs to the
7287 current function, and the breakpoint will be set at the caller's
7288 resume address.
7289
7290 This is a separate function rather than reusing
7291 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7292 get_prev_frame, which may stop prematurely (see the implementation
7293 of frame_unwind_caller_id for an example). */
7294
7295 static void
7296 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7297 {
7298 /* We shouldn't have gotten here if we don't know where the call site
7299 is. */
7300 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
7301
7302 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
7303
7304 symtab_and_line sr_sal;
7305 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7306 frame_unwind_caller_pc (next_frame));
7307 sr_sal.section = find_pc_overlay (sr_sal.pc);
7308 sr_sal.pspace = frame_unwind_program_space (next_frame);
7309
7310 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
7311 frame_unwind_caller_id (next_frame));
7312 }
7313
7314 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7315 new breakpoint at the target of a jmp_buf. The handling of
7316 longjmp-resume uses the same mechanisms used for handling
7317 "step-resume" breakpoints. */
7318
7319 static void
7320 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
7321 {
7322 /* There should never be more than one longjmp-resume breakpoint per
7323 thread, so we should never be setting a new
7324 longjmp_resume_breakpoint when one is already active. */
7325 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
7326
7327 if (debug_infrun)
7328 fprintf_unfiltered (gdb_stdlog,
7329 "infrun: inserting longjmp-resume breakpoint at %s\n",
7330 paddress (gdbarch, pc));
7331
7332 inferior_thread ()->control.exception_resume_breakpoint =
7333 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
7334 }
7335
7336 /* Insert an exception resume breakpoint. TP is the thread throwing
7337 the exception. The block B is the block of the unwinder debug hook
7338 function. FRAME is the frame corresponding to the call to this
7339 function. SYM is the symbol of the function argument holding the
7340 target PC of the exception. */
7341
7342 static void
7343 insert_exception_resume_breakpoint (struct thread_info *tp,
7344 const struct block *b,
7345 struct frame_info *frame,
7346 struct symbol *sym)
7347 {
7348 try
7349 {
7350 struct block_symbol vsym;
7351 struct value *value;
7352 CORE_ADDR handler;
7353 struct breakpoint *bp;
7354
7355 vsym = lookup_symbol_search_name (sym->search_name (),
7356 b, VAR_DOMAIN);
7357 value = read_var_value (vsym.symbol, vsym.block, frame);
7358 /* If the value was optimized out, revert to the old behavior. */
7359 if (! value_optimized_out (value))
7360 {
7361 handler = value_as_address (value);
7362
7363 if (debug_infrun)
7364 fprintf_unfiltered (gdb_stdlog,
7365 "infrun: exception resume at %lx\n",
7366 (unsigned long) handler);
7367
7368 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7369 handler,
7370 bp_exception_resume).release ();
7371
7372 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7373 frame = NULL;
7374
7375 bp->thread = tp->global_num;
7376 inferior_thread ()->control.exception_resume_breakpoint = bp;
7377 }
7378 }
7379 catch (const gdb_exception_error &e)
7380 {
7381 /* We want to ignore errors here. */
7382 }
7383 }
7384
/* A helper for check_exception_resume that sets an
   exception-breakpoint based on a SystemTap probe.  TP is the thread
   throwing the exception, PROBE the matched probe, and FRAME the
   frame at the probe point.  The probe's second argument (index 1)
   holds the handler address.  */

static void
insert_exception_resume_from_probe (struct thread_info *tp,
				    const struct bound_probe *probe,
				    struct frame_info *frame)
{
  struct value *arg_value;
  CORE_ADDR handler;
  struct breakpoint *bp;

  /* Evaluate probe argument 1 (the HANDLER); bail out silently if it
     cannot be evaluated.  */
  arg_value = probe_safe_evaluate_at_pc (frame, 1);
  if (!arg_value)
    return;

  handler = value_as_address (arg_value);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: exception resume at %s\n",
			paddress (get_objfile_arch (probe->objfile),
				  handler));

  /* Set a momentary breakpoint at the handler, specific to TP.  */
  bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
				       handler, bp_exception_resume).release ();
  bp->thread = tp->global_num;
  inferior_thread ()->control.exception_resume_breakpoint = bp;
}
7414
/* This is called when an exception has been intercepted.  Check to
   see whether the exception's destination is of interest, and if so,
   set an exception resume breakpoint there.  ECS is the current
   event; FRAME is the frame where the unwinder debug hook was hit.  */

static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe: fall back to inspecting the debug hook's arguments
     directly, which requires debug info for the hook function.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      /* Walk the hook's arguments; the second one is HANDLER.  */
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Lookup/read errors are deliberately ignored; we simply don't
	 set a resume breakpoint in that case.  */
    }
}
7482
7483 static void
7484 stop_waiting (struct execution_control_state *ecs)
7485 {
7486 if (debug_infrun)
7487 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
7488
7489 /* Let callers know we don't want to wait for the inferior anymore. */
7490 ecs->wait_some_more = 0;
7491
7492 /* If all-stop, but the target is always in non-stop mode, stop all
7493 threads now that we're presenting the stop to the user. */
7494 if (!non_stop && target_is_non_stop_p ())
7495 stop_all_threads ();
7496 }
7497
7498 /* Like keep_going, but passes the signal to the inferior, even if the
7499 signal is set to nopass. */
7500
static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  /* The event thread must be the current thread, and must not have
     been resumed yet -- this function is what resumes it.  */
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: %s has trap_expected set, "
			    "resuming to collect trap\n",
			    target_pid_to_str (tp->ptid).c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->suspend.stop_signal);
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over already in progress: "
				"step-over for %s deferred\n",
				target_pid_to_str (tp->ptid).c_str ());
	  thread_step_over_chain_enqueue (tp);
	}
      else
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over in progress: "
				"resume of %s deferred\n",
				target_pid_to_str (tp->ptid).c_str ());
	}
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ();

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      /* Expect a trap iff we are stepping over something in-line.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
7626
7627 /* Called when we should continue running the inferior, because the
7628 current event doesn't cause a user visible stop. This does the
7629 resuming part; waiting for the next event is done elsewhere. */
7630
7631 static void
7632 keep_going (struct execution_control_state *ecs)
7633 {
7634 if (ecs->event_thread->control.trap_expected
7635 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7636 ecs->event_thread->control.trap_expected = 0;
7637
7638 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7639 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7640 keep_going_pass_signal (ecs);
7641 }
7642
7643 /* This function normally comes after a resume, before
7644 handle_inferior_event exits. It takes care of any last bits of
7645 housekeeping, and sets the all-important wait_some_more flag. */
7646
7647 static void
7648 prepare_to_wait (struct execution_control_state *ecs)
7649 {
7650 if (debug_infrun)
7651 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
7652
7653 ecs->wait_some_more = 1;
7654
7655 if (!target_is_async_p ())
7656 mark_infrun_async_event_handler ();
7657 }
7658
7659 /* We are done with the step range of a step/next/si/ni command.
7660 Called once for each n of a "step n" operation. */
7661
7662 static void
7663 end_stepping_range (struct execution_control_state *ecs)
7664 {
7665 ecs->event_thread->control.stop_step = 1;
7666 stop_waiting (ecs);
7667 }
7668
7669 /* Several print_*_reason functions to print why the inferior has stopped.
7670 We always print something when the inferior exits, or receives a signal.
7671 The rest of the cases are dealt with later on in normal_stop and
7672 print_it_typical. Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
7674 stop_waiting is called.
7675
7676 Note that we don't call these directly, instead we delegate that to
7677 the interpreters, through observers. Interpreters then call these
7678 with whatever uiout is right. */
7679
7680 void
7681 print_end_stepping_range_reason (struct ui_out *uiout)
7682 {
7683 /* For CLI-like interpreters, print nothing. */
7684
7685 if (uiout->is_mi_like_p ())
7686 {
7687 uiout->field_string ("reason",
7688 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7689 }
7690 }
7691
/* Report that the program terminated with signal SIGGNAL.  The
   annotation and field ordering below is part of the UI protocol; do
   not reorder.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
7712
/* Report that the current inferior exited, either with a non-zero
   exit code (printed in octal, matching historical behavior) or
   normally.  */

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      /* Note: exit code is formatted in octal ("0%o").  */
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
7739
7740 /* Some targets/architectures can do extra processing/display of
7741 segmentation faults. E.g., Intel MPX boundary faults.
7742 Call the architecture dependent function to handle the fault. */
7743
7744 static void
7745 handle_segmentation_fault (struct ui_out *uiout)
7746 {
7747 struct regcache *regcache = get_current_regcache ();
7748 struct gdbarch *gdbarch = regcache->arch ();
7749
7750 if (gdbarch_handle_segmentation_fault_p (gdbarch))
7751 gdbarch_handle_segmentation_fault (gdbarch, uiout);
7752 }
7753
/* Report that the current thread received (or stopped with) signal
   SIGGNAL.  GDB_SIGNAL_0 means a plain stop with no signal.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  annotate_signal ();

  if (uiout->is_mi_like_p ())
    /* MI gets the thread id from the stop-reason record; no leading
       text is printed.  */
    ;
  else if (show_thread_that_caused_stop ())
    {
      const char *name;

      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      /* Prefer the user/GDB-assigned name; fall back to the
	 target-reported one.  */
      name = thr->name != NULL ? thr->name : target_thread_name (thr);
      if (name != NULL)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Architectures may append extra detail for SIGSEGV.  */
      if (siggnal == GDB_SIGNAL_SEGV)
	handle_segmentation_fault (uiout);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
7803
/* Report that reverse execution stopped because the recorded history
   was exhausted.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  uiout->text ("\nNo more reverse-execution history.\n");
}
7809
7810 /* Print current location without a level number, if we have changed
7811 functions or hit a breakpoint. Print source line if we have one.
7812 bpstat_print contains the logic deciding in detail what to print,
7813 based on the event(s) that just occurred. */
7814
static void
print_stop_location (struct target_waitstatus *ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  /* Whether to print a stack frame at all; cleared for PRINT_NOTHING.  */
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let bpstat_print report breakpoint-specific text first; its
     return value tells us what location detail is still needed.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->suspend.stop_pc)))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
}
7869
7870 /* See infrun.h. */
7871
7872 void
7873 print_stop_event (struct ui_out *uiout, bool displays)
7874 {
7875 struct target_waitstatus last;
7876 struct thread_info *tp;
7877
7878 get_last_target_status (nullptr, &last);
7879
7880 {
7881 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
7882
7883 print_stop_location (&last);
7884
7885 /* Display the auto-display expressions. */
7886 if (displays)
7887 do_displays ();
7888 }
7889
7890 tp = inferior_thread ();
7891 if (tp->thread_fsm != NULL
7892 && tp->thread_fsm->finished_p ())
7893 {
7894 struct return_value_info *rv;
7895
7896 rv = tp->thread_fsm->return_value ();
7897 if (rv != NULL)
7898 print_return_value (uiout, rv);
7899 }
7900 }
7901
7902 /* See infrun.h. */
7903
7904 void
7905 maybe_remove_breakpoints (void)
7906 {
7907 if (!breakpoints_should_be_inserted_now () && target_has_execution)
7908 {
7909 if (remove_breakpoints ())
7910 {
7911 target_terminal::ours_for_output ();
7912 printf_filtered (_("Cannot remove breakpoints because "
7913 "program is no longer writable.\nFurther "
7914 "execution is probably impossible.\n"));
7915 }
7916 }
7917 }
7918
7919 /* The execution context that just caused a normal stop. */
7920
struct stop_context
{
  stop_context ();
  ~stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current execution context no longer matches
     this saved context.  */
  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  struct thread_info *thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
7944
7945 /* Initializes a new stop context. If stopped for a thread event, this
7946 takes a strong reference to the thread. */
7947
7948 stop_context::stop_context ()
7949 {
7950 stop_id = get_stop_id ();
7951 ptid = inferior_ptid;
7952 inf_num = current_inferior ()->num;
7953
7954 if (inferior_ptid != null_ptid)
7955 {
7956 /* Take a strong reference so that the thread can't be deleted
7957 yet. */
7958 thread = inferior_thread ();
7959 thread->incref ();
7960 }
7961 else
7962 thread = NULL;
7963 }
7964
7965 /* Release a stop context previously created with save_stop_context.
7966 Releases the strong reference to the thread as well. */
7967
stop_context::~stop_context ()
{
  /* Drop the strong reference the constructor took, allowing the
     thread to be deleted again.  */
  if (thread != NULL)
    thread->decref ();
}
7973
7974 /* Return true if the current context no longer matches the saved stop
7975 context. */
7976
7977 bool
7978 stop_context::changed () const
7979 {
7980 if (ptid != inferior_ptid)
7981 return true;
7982 if (inf_num != current_inferior ()->num)
7983 return true;
7984 if (thread != NULL && thread->state != THREAD_STOPPED)
7985 return true;
7986 if (get_stop_id () != stop_id)
7987 return true;
7988 return false;
7989 }
7990
7991 /* See infrun.h. */
7992
int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, &last);

  /* Begin a new stop epoch; stop_context::changed relies on this.  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;

  if (!non_stop)
    maybe_finish_thread_state.emplace (minus_one_ptid);
  else if (last.kind == TARGET_WAITKIND_SIGNALLED
	   || last.kind == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	maybe_finish_thread_state.emplace (ptid_t (inferior_ptid.pid ()));
    }
  else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
    maybe_finish_thread_state.emplace (inferior_ptid);

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  printf_filtered (_("[Switching to %s]\n"),
			   target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    printf_filtered (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  struct frame_info *frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command != NULL)
    {
      stop_context saved_context;

      try
	{
	  execute_cmd_pre_hook (stop_command);
	}
      catch (const gdb_exception &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Error while running hook_stop:\n");
	}

      /* If the stop hook resumes the target, then there's no point in
	 trying to notify about the previous stop; its context is
	 gone.  Likewise if the command switches thread or inferior --
	 the observers would print a stop for the wrong
	 thread/inferior.  */
      if (saved_context.changed ())
	return 1;
    }

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED
	  && last.kind != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  /* Zero means the stop was fully presented to the user.  */
  return 0;
}
8178 \f
8179 int
8180 signal_stop_state (int signo)
8181 {
8182 return signal_stop[signo];
8183 }
8184
8185 int
8186 signal_print_state (int signo)
8187 {
8188 return signal_print[signo];
8189 }
8190
8191 int
8192 signal_pass_state (int signo)
8193 {
8194 return signal_program[signo];
8195 }
8196
8197 static void
8198 signal_cache_update (int signo)
8199 {
8200 if (signo == -1)
8201 {
8202 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
8203 signal_cache_update (signo);
8204
8205 return;
8206 }
8207
8208 signal_pass[signo] = (signal_stop[signo] == 0
8209 && signal_print[signo] == 0
8210 && signal_program[signo] == 1
8211 && signal_catch[signo] == 0);
8212 }
8213
8214 int
8215 signal_stop_update (int signo, int state)
8216 {
8217 int ret = signal_stop[signo];
8218
8219 signal_stop[signo] = state;
8220 signal_cache_update (signo);
8221 return ret;
8222 }
8223
8224 int
8225 signal_print_update (int signo, int state)
8226 {
8227 int ret = signal_print[signo];
8228
8229 signal_print[signo] = state;
8230 signal_cache_update (signo);
8231 return ret;
8232 }
8233
8234 int
8235 signal_pass_update (int signo, int state)
8236 {
8237 int ret = signal_program[signo];
8238
8239 signal_program[signo] = state;
8240 signal_cache_update (signo);
8241 return ret;
8242 }
8243
8244 /* Update the global 'signal_catch' from INFO and notify the
8245 target. */
8246
8247 void
8248 signal_catch_update (const unsigned int *info)
8249 {
8250 int i;
8251
8252 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8253 signal_catch[i] = info[i] > 0;
8254 signal_cache_update (-1);
8255 target_pass_signals (signal_pass);
8256 }
8257
8258 static void
8259 sig_print_header (void)
8260 {
8261 printf_filtered (_("Signal Stop\tPrint\tPass "
8262 "to program\tDescription\n"));
8263 }
8264
8265 static void
8266 sig_print_info (enum gdb_signal oursig)
8267 {
8268 const char *name = gdb_signal_to_name (oursig);
8269 int name_padding = 13 - strlen (name);
8270
8271 if (name_padding <= 0)
8272 name_padding = 0;
8273
8274 printf_filtered ("%s", name);
8275 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
8276 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8277 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8278 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
8279 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
8280 }
8281
8282 /* Specify how various signals in the inferior should be handled. */
8283
static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      /* Count leading digits, used below to recognize numeric signals
	 and numeric ranges.  */
      for (digits = 0; isdigit (arg[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      /* Keyword matching is by unambiguous prefix: the minimum prefix
	 length in each strncmp guard below disambiguates between
	 keywords sharing a prefix (e.g. "print"/"pass",
	 "noprint"/"nopass").  */
      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal  number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (arg));
	  if (arg[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi (arg + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      std::swap (sigfirst, siglast);
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (arg);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* These signals are used internally by GDB; require
		 explicit confirmation before changing them.  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    printf_unfiltered (_("Not confirmed, unchanged.\n"));
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}
    }

  /* If anything was actually changed, push the new dispositions to
     the target and (interactively) show the affected rows.  */
  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals (signal_pass);
	target_program_signals (signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info ((enum gdb_signal) signum);
	  }

	break;
      }
}
8449
8450 /* Complete the "handle" command. */
8451
8452 static void
8453 handle_completer (struct cmd_list_element *ignore,
8454 completion_tracker &tracker,
8455 const char *text, const char *word)
8456 {
8457 static const char * const keywords[] =
8458 {
8459 "all",
8460 "stop",
8461 "ignore",
8462 "print",
8463 "pass",
8464 "nostop",
8465 "noignore",
8466 "noprint",
8467 "nopass",
8468 NULL,
8469 };
8470
8471 signal_completer (ignore, tracker, text, word);
8472 complete_on_enum (tracker, keywords, word, word);
8473 }
8474
8475 enum gdb_signal
8476 gdb_signal_from_command (int num)
8477 {
8478 if (num >= 1 && num <= 15)
8479 return (enum gdb_signal) num;
8480 error (_("Only signals 1-15 are valid as numeric signals.\n\
8481 Use \"info signals\" for a list of symbolic signals."));
8482 }
8483
8484 /* Print current contents of the tables set by the handle command.
8485 It is possible we should just be printing signals actually used
8486 by the current target (but for things to work right when switching
8487 targets, all signals should be in the signal tables). */
8488
8489 static void
8490 info_signals_command (const char *signum_exp, int from_tty)
8491 {
8492 enum gdb_signal oursig;
8493
8494 sig_print_header ();
8495
8496 if (signum_exp)
8497 {
8498 /* First see if this is a symbol name. */
8499 oursig = gdb_signal_from_name (signum_exp);
8500 if (oursig == GDB_SIGNAL_UNKNOWN)
8501 {
8502 /* No, try numeric. */
8503 oursig =
8504 gdb_signal_from_command (parse_and_eval_long (signum_exp));
8505 }
8506 sig_print_info (oursig);
8507 return;
8508 }
8509
8510 printf_filtered ("\n");
8511 /* These ugly casts brought to you by the native VAX compiler. */
8512 for (oursig = GDB_SIGNAL_FIRST;
8513 (int) oursig < (int) GDB_SIGNAL_LAST;
8514 oursig = (enum gdb_signal) ((int) oursig + 1))
8515 {
8516 QUIT;
8517
8518 if (oursig != GDB_SIGNAL_UNKNOWN
8519 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
8520 sig_print_info (oursig);
8521 }
8522
8523 printf_filtered (_("\nUse the \"handle\" command "
8524 "to change these tables.\n"));
8525 }
8526
/* The $_siginfo convenience variable is a bit special.  We don't know
   for sure the type of the value until we actually have a chance to
   fetch the data -- the type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  This is handled
   by:

     1. making $_siginfo be an internalvar that creates a new value on
     access, and

     2. making the value of $_siginfo be an lval_computed value.  */
8536
8537 /* This function implements the lval_computed support for reading a
8538 $_siginfo value. */
8539
8540 static void
8541 siginfo_value_read (struct value *v)
8542 {
8543 LONGEST transferred;
8544
8545 /* If we can access registers, so can we access $_siginfo. Likewise
8546 vice versa. */
8547 validate_registers_access ();
8548
8549 transferred =
8550 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
8551 NULL,
8552 value_contents_all_raw (v),
8553 value_offset (v),
8554 TYPE_LENGTH (value_type (v)));
8555
8556 if (transferred != TYPE_LENGTH (value_type (v)))
8557 error (_("Unable to read siginfo"));
8558 }
8559
8560 /* This function implements the lval_computed support for writing a
8561 $_siginfo value. */
8562
8563 static void
8564 siginfo_value_write (struct value *v, struct value *fromval)
8565 {
8566 LONGEST transferred;
8567
8568 /* If we can access registers, so can we access $_siginfo. Likewise
8569 vice versa. */
8570 validate_registers_access ();
8571
8572 transferred = target_write (current_top_target (),
8573 TARGET_OBJECT_SIGNAL_INFO,
8574 NULL,
8575 value_contents_all_raw (fromval),
8576 value_offset (v),
8577 TYPE_LENGTH (value_type (fromval)));
8578
8579 if (transferred != TYPE_LENGTH (value_type (fromval)))
8580 error (_("Unable to write siginfo"));
8581 }
8582
/* The read/write callbacks used by the $_siginfo lval_computed
   values created in siginfo_make_value below.  */

static const struct lval_funcs siginfo_value_funcs =
{
  siginfo_value_read,
  siginfo_value_write
};
8588
8589 /* Return a new value with the correct type for the siginfo object of
8590 the current thread using architecture GDBARCH. Return a void value
8591 if there's no object available. */
8592
8593 static struct value *
8594 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8595 void *ignore)
8596 {
8597 if (target_has_stack
8598 && inferior_ptid != null_ptid
8599 && gdbarch_get_siginfo_type_p (gdbarch))
8600 {
8601 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8602
8603 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
8604 }
8605
8606 return allocate_value (builtin_type (gdbarch)->builtin_void);
8607 }
8608
8609 \f
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
                         const struct thread_info *tp,
                         struct regcache *regcache)
    : m_thread_suspend (tp->suspend),
      m_registers (new readonly_detached_regcache (*regcache))
  {
    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);
        size_t len = TYPE_LENGTH (type);

        siginfo_data.reset ((gdb_byte *) xmalloc (len));

        /* Read the target's siginfo object into SIGINFO_DATA; a short
           or failed read simply means no siginfo is saved.  */
        if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
                         siginfo_data.get (), 0, len) != len)
          {
            /* Errors ignored.  */
            siginfo_data.reset (nullptr);
          }
      }

    /* Only record the gdbarch if we actually captured siginfo; the
       data is only meaningful for the gdbarch it was read under.  */
    if (siginfo_data)
      {
        m_siginfo_gdbarch = gdbarch;
        m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
                struct thread_info *tp,
                struct regcache *regcache) const
  {
    tp->suspend = m_thread_suspend;

    /* Only write the siginfo back if it was captured under the same
       gdbarch; see m_siginfo_data's comment below.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);

        /* Errors ignored.  */
        target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
                      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution)
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
8698
8699 infcall_suspend_state_up
8700 save_infcall_suspend_state ()
8701 {
8702 struct thread_info *tp = inferior_thread ();
8703 struct regcache *regcache = get_current_regcache ();
8704 struct gdbarch *gdbarch = regcache->arch ();
8705
8706 infcall_suspend_state_up inf_state
8707 (new struct infcall_suspend_state (gdbarch, tp, regcache));
8708
8709 /* Having saved the current state, adjust the thread state, discarding
8710 any stop signal information. The stop signal is not useful when
8711 starting an inferior function call, and run_inferior_call will not use
8712 the signal due to its `proceed' call with GDB_SIGNAL_0. */
8713 tp->suspend.stop_signal = GDB_SIGNAL_0;
8714
8715 return inf_state;
8716 }
8717
8718 /* Restore inferior session state to INF_STATE. */
8719
8720 void
8721 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
8722 {
8723 struct thread_info *tp = inferior_thread ();
8724 struct regcache *regcache = get_current_regcache ();
8725 struct gdbarch *gdbarch = regcache->arch ();
8726
8727 inf_state->restore (gdbarch, tp, regcache);
8728 discard_infcall_suspend_state (inf_state);
8729 }
8730
/* Dispose of INF_STATE without restoring it.  INF_STATE must not be
   used after this call.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
8736
/* Return the read-only register state saved in INF_STATE.  The
   returned regcache remains owned by INF_STATE.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
8742
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread run control state (step ranges, resume breakpoints,
     stop bpstat, ...) saved from the current thread.  */
  struct thread_control_state thread_control;

  /* Per-inferior run control state saved from the current inferior.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id {};
};
8759
/* Save all of the information associated with the inferior<==>gdb
   connection.  Ownership of the returned state passes to the caller;
   restore_infcall_control_state or discard_infcall_control_state must
   eventually be called on it.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* The saved copies above now own these breakpoints; clear TP's
     pointers so the infcall does not reuse them.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));

  return inf_status;
}
8790
8791 static void
8792 restore_selected_frame (const frame_id &fid)
8793 {
8794 frame_info *frame = frame_find_by_id (fid);
8795
8796 /* If inf_status->selected_frame_id is NULL, there was no previously
8797 selected frame. */
8798 if (frame == NULL)
8799 {
8800 warning (_("Unable to restore previously selected frame."));
8801 return;
8802 }
8803
8804 select_frame (frame);
8805 }
8806
/* Restore inferior session state to INF_STATUS, then destroy
   INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Any resume breakpoints created during the infcall are obsolete;
     arrange for them to be deleted at the next stop.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack)
    {
      /* The point of the try/catch is that if the stack is clobbered,
         walking the stack might encounter a garbage pointer and
         error() trying to dereference it.  */
      try
	{
	  restore_selected_frame (inf_status->selected_frame_id);
	}
      catch (const gdb_exception_error &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Unable to restore previously selected frame:\n");
	  /* Error in restoring the selected frame.  Select the
	     innermost frame.  */
	  select_frame (get_current_frame ());
	}
    }

  delete inf_status;
}
8853
8854 void
8855 discard_infcall_control_state (struct infcall_control_state *inf_status)
8856 {
8857 if (inf_status->thread_control.step_resume_breakpoint)
8858 inf_status->thread_control.step_resume_breakpoint->disposition
8859 = disp_del_at_next_stop;
8860
8861 if (inf_status->thread_control.exception_resume_breakpoint)
8862 inf_status->thread_control.exception_resume_breakpoint->disposition
8863 = disp_del_at_next_stop;
8864
8865 /* See save_infcall_control_state for info on stop_bpstat. */
8866 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8867
8868 delete inf_status;
8869 }
8870 \f
8871 /* See infrun.h. */
8872
8873 void
8874 clear_exit_convenience_vars (void)
8875 {
8876 clear_internalvar (lookup_internalvar ("_exitsignal"));
8877 clear_internalvar (lookup_internalvar ("_exitcode"));
8878 }
8879 \f
8880
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current execution direction; EXEC_FORWARD by default.  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* Backing storage for the "set exec-direction" enum command; always
   points at one of the two literals above.  */
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
8894
8895 static void
8896 set_exec_direction_func (const char *args, int from_tty,
8897 struct cmd_list_element *cmd)
8898 {
8899 if (target_can_execute_reverse)
8900 {
8901 if (!strcmp (exec_direction, exec_forward))
8902 execution_direction = EXEC_FORWARD;
8903 else if (!strcmp (exec_direction, exec_reverse))
8904 execution_direction = EXEC_REVERSE;
8905 }
8906 else
8907 {
8908 exec_direction = exec_forward;
8909 error (_("Target does not support this operation."));
8910 }
8911 }
8912
8913 static void
8914 show_exec_direction_func (struct ui_file *out, int from_tty,
8915 struct cmd_list_element *cmd, const char *value)
8916 {
8917 switch (execution_direction) {
8918 case EXEC_FORWARD:
8919 fprintf_filtered (out, _("Forward.\n"));
8920 break;
8921 case EXEC_REVERSE:
8922 fprintf_filtered (out, _("Reverse.\n"));
8923 break;
8924 default:
8925 internal_error (__FILE__, __LINE__,
8926 _("bogus execution_direction value: %d"),
8927 (int) execution_direction);
8928 }
8929 }
8930
/* "show schedule-multiple" command callback; VALUE is the
   pre-rendered on/off string for the setting.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
			    "of all processes is %s.\n"), value);
}
8938
/* Implementation of `siginfo' variable.  Only a make_value callback
   is needed; the resulting lval_computed value carries its own
   read/write functions (siginfo_value_funcs).  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
8947
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
8956
/* Module initialization: registers infrun's event source, its
   commands ("handle", "info signals", the various set/show run
   control settings), default signal dispositions, observers, and the
   $_siginfo convenience variable.  */

void
_initialize_infrun (void)
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, NULL);

  /* Signal handling commands.  */
  add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
			    not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* Debug-output knobs.  */
  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
			     NULL,
			     show_debug_infrun,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("displaced", class_maintenance,
			   &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
			   NULL,
			   show_debug_displaced,
			   &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Default every signal to stop/print/pass; no signals are caught by
     default.  Exceptions follow below.  */
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  /* Remaining run-control set/show settings.  */
  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
	  This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
	  In this mode, other threads may run during other commands.\n\
	  This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Keep infrun's per-thread bookkeeping in sync with thread and
     inferior lifetime events.  */
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
  gdb::observers::inferior_exit.attach (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);
}