Make inferior::detaching a bool, and introduce scoped_restore::release()
[deliverable/binutils-gdb.git] / gdb / infrun.c
CommitLineData
ca557f44
AC
1/* Target-struct-independent code to start (run) and stop an inferior
2 process.
8926118c 3
61baf725 4 Copyright (C) 1986-2017 Free Software Foundation, Inc.
c906108c 5
c5aa993b 6 This file is part of GDB.
c906108c 7
c5aa993b
JM
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
c5aa993b 11 (at your option) any later version.
c906108c 12
c5aa993b
JM
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
c906108c 17
c5aa993b 18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
c906108c
SS
20
21#include "defs.h"
45741a9c 22#include "infrun.h"
c906108c
SS
23#include <ctype.h>
24#include "symtab.h"
25#include "frame.h"
26#include "inferior.h"
27#include "breakpoint.h"
03f2053f 28#include "gdb_wait.h"
c906108c
SS
29#include "gdbcore.h"
30#include "gdbcmd.h"
210661e7 31#include "cli/cli-script.h"
c906108c
SS
32#include "target.h"
33#include "gdbthread.h"
34#include "annotate.h"
1adeb98a 35#include "symfile.h"
7a292a7a 36#include "top.h"
c906108c 37#include <signal.h>
2acceee2 38#include "inf-loop.h"
4e052eda 39#include "regcache.h"
fd0407d6 40#include "value.h"
06600e06 41#include "observer.h"
f636b87d 42#include "language.h"
a77053c2 43#include "solib.h"
f17517ea 44#include "main.h"
186c406b
TT
45#include "dictionary.h"
46#include "block.h"
034dad6f 47#include "mi/mi-common.h"
4f8d22e3 48#include "event-top.h"
96429cc8 49#include "record.h"
d02ed0bb 50#include "record-full.h"
edb3359d 51#include "inline-frame.h"
4efc6507 52#include "jit.h"
06cd862c 53#include "tracepoint.h"
be34f849 54#include "continuations.h"
b4a14fd0 55#include "interps.h"
1bfeeb0f 56#include "skip.h"
28106bc2
SDJ
57#include "probe.h"
58#include "objfiles.h"
de0bea00 59#include "completer.h"
9107fc8d 60#include "target-descriptions.h"
f15cb84a 61#include "target-dcache.h"
d83ad864 62#include "terminal.h"
ff862be4 63#include "solist.h"
372316f1 64#include "event-loop.h"
243a9253 65#include "thread-fsm.h"
8d297bbf 66#include "common/enum-flags.h"
c906108c
SS
67
68/* Prototypes for local functions */
69
96baa820 70static void signals_info (char *, int);
c906108c 71
96baa820 72static void handle_command (char *, int);
c906108c 73
2ea28649 74static void sig_print_info (enum gdb_signal);
c906108c 75
96baa820 76static void sig_print_header (void);
c906108c 77
74b7792f 78static void resume_cleanups (void *);
c906108c 79
96baa820 80static int hook_stop_stub (void *);
c906108c 81
96baa820
JM
82static int restore_selected_frame (void *);
83
4ef3f3be 84static int follow_fork (void);
96baa820 85
d83ad864
DB
86static int follow_fork_inferior (int follow_child, int detach_fork);
87
88static void follow_inferior_reset_breakpoints (void);
89
96baa820 90static void set_schedlock_func (char *args, int from_tty,
488f131b 91 struct cmd_list_element *c);
96baa820 92
a289b8f6
JK
93static int currently_stepping (struct thread_info *tp);
94
96baa820 95void _initialize_infrun (void);
43ff13b4 96
e58b0e63
PA
97void nullify_last_target_wait_ptid (void);
98
2c03e5be 99static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
2484c66b
UW
100
101static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
102
2484c66b
UW
103static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
104
8550d3b3
YQ
105static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
106
372316f1
PA
107/* Asynchronous signal handler registered as event loop source for
108 when we have pending events ready to be passed to the core. */
109static struct async_event_handler *infrun_async_inferior_event_token;
110
111/* Stores whether infrun_async was previously enabled or disabled.
112 Starts off as -1, indicating "never enabled/disabled". */
113static int infrun_is_async = -1;
114
115/* See infrun.h. */
116
117void
118infrun_async (int enable)
119{
120 if (infrun_is_async != enable)
121 {
122 infrun_is_async = enable;
123
124 if (debug_infrun)
125 fprintf_unfiltered (gdb_stdlog,
126 "infrun: infrun_async(%d)\n",
127 enable);
128
129 if (enable)
130 mark_async_event_handler (infrun_async_inferior_event_token);
131 else
132 clear_async_event_handler (infrun_async_inferior_event_token);
133 }
134}
135
0b333c5e
PA
136/* See infrun.h. */
137
138void
139mark_infrun_async_event_handler (void)
140{
141 mark_async_event_handler (infrun_async_inferior_event_token);
142}
143
5fbbeb29
CF
144/* When set, stop the 'step' command if we enter a function which has
145 no line number information. The normal behavior is that we step
146 over such function. */
147int step_stop_if_no_debug = 0;
920d2a44
AC
148static void
149show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
150 struct cmd_list_element *c, const char *value)
151{
152 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
153}
5fbbeb29 154
b9f437de
PA
155/* proceed and normal_stop use this to notify the user when the
156 inferior stopped in a different thread than it had been running
157 in. */
96baa820 158
39f77062 159static ptid_t previous_inferior_ptid;
7a292a7a 160
07107ca6
LM
161/* If set (default for legacy reasons), when following a fork, GDB
162 will detach from one of the fork branches, child or parent.
163 Exactly which branch is detached depends on 'set follow-fork-mode'
164 setting. */
165
166static int detach_fork = 1;
6c95b8df 167
237fc4c9
PA
168int debug_displaced = 0;
169static void
170show_debug_displaced (struct ui_file *file, int from_tty,
171 struct cmd_list_element *c, const char *value)
172{
173 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
174}
175
ccce17b0 176unsigned int debug_infrun = 0;
920d2a44
AC
177static void
178show_debug_infrun (struct ui_file *file, int from_tty,
179 struct cmd_list_element *c, const char *value)
180{
181 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
182}
527159b7 183
03583c20
UW
184
185/* Support for disabling address space randomization. */
186
187int disable_randomization = 1;
188
189static void
190show_disable_randomization (struct ui_file *file, int from_tty,
191 struct cmd_list_element *c, const char *value)
192{
193 if (target_supports_disable_randomization ())
194 fprintf_filtered (file,
195 _("Disabling randomization of debuggee's "
196 "virtual address space is %s.\n"),
197 value);
198 else
199 fputs_filtered (_("Disabling randomization of debuggee's "
200 "virtual address space is unsupported on\n"
201 "this platform.\n"), file);
202}
203
204static void
205set_disable_randomization (char *args, int from_tty,
206 struct cmd_list_element *c)
207{
208 if (!target_supports_disable_randomization ())
209 error (_("Disabling randomization of debuggee's "
210 "virtual address space is unsupported on\n"
211 "this platform."));
212}
213
d32dc48e
PA
214/* User interface for non-stop mode. */
215
216int non_stop = 0;
217static int non_stop_1 = 0;
218
219static void
220set_non_stop (char *args, int from_tty,
221 struct cmd_list_element *c)
222{
223 if (target_has_execution)
224 {
225 non_stop_1 = non_stop;
226 error (_("Cannot change this setting while the inferior is running."));
227 }
228
229 non_stop = non_stop_1;
230}
231
232static void
233show_non_stop (struct ui_file *file, int from_tty,
234 struct cmd_list_element *c, const char *value)
235{
236 fprintf_filtered (file,
237 _("Controlling the inferior in non-stop mode is %s.\n"),
238 value);
239}
240
d914c394
SS
241/* "Observer mode" is somewhat like a more extreme version of
242 non-stop, in which all GDB operations that might affect the
243 target's execution have been disabled. */
244
d914c394
SS
245int observer_mode = 0;
246static int observer_mode_1 = 0;
247
248static void
249set_observer_mode (char *args, int from_tty,
250 struct cmd_list_element *c)
251{
d914c394
SS
252 if (target_has_execution)
253 {
254 observer_mode_1 = observer_mode;
255 error (_("Cannot change this setting while the inferior is running."));
256 }
257
258 observer_mode = observer_mode_1;
259
260 may_write_registers = !observer_mode;
261 may_write_memory = !observer_mode;
262 may_insert_breakpoints = !observer_mode;
263 may_insert_tracepoints = !observer_mode;
264 /* We can insert fast tracepoints in or out of observer mode,
265 but enable them if we're going into this mode. */
266 if (observer_mode)
267 may_insert_fast_tracepoints = 1;
268 may_stop = !observer_mode;
269 update_target_permissions ();
270
271 /* Going *into* observer mode we must force non-stop, then
272 going out we leave it that way. */
273 if (observer_mode)
274 {
d914c394
SS
275 pagination_enabled = 0;
276 non_stop = non_stop_1 = 1;
277 }
278
279 if (from_tty)
280 printf_filtered (_("Observer mode is now %s.\n"),
281 (observer_mode ? "on" : "off"));
282}
283
284static void
285show_observer_mode (struct ui_file *file, int from_tty,
286 struct cmd_list_element *c, const char *value)
287{
288 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
289}
290
291/* This updates the value of observer mode based on changes in
292 permissions. Note that we are deliberately ignoring the values of
293 may-write-registers and may-write-memory, since the user may have
294 reason to enable these during a session, for instance to turn on a
295 debugging-related global. */
296
297void
298update_observer_mode (void)
299{
300 int newval;
301
302 newval = (!may_insert_breakpoints
303 && !may_insert_tracepoints
304 && may_insert_fast_tracepoints
305 && !may_stop
306 && non_stop);
307
308 /* Let the user know if things change. */
309 if (newval != observer_mode)
310 printf_filtered (_("Observer mode is now %s.\n"),
311 (newval ? "on" : "off"));
312
313 observer_mode = observer_mode_1 = newval;
314}
c2c6d25f 315
c906108c
SS
316/* Tables of how to react to signals; the user sets them. */
317
318static unsigned char *signal_stop;
319static unsigned char *signal_print;
320static unsigned char *signal_program;
321
ab04a2af
TT
322/* Table of signals that are registered with "catch signal". A
323 non-zero entry indicates that the signal is caught by some "catch
324 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
325 signals. */
326static unsigned char *signal_catch;
327
2455069d
UW
328/* Table of signals that the target may silently handle.
329 This is automatically determined from the flags above,
330 and simply cached here. */
331static unsigned char *signal_pass;
332
c906108c
SS
333#define SET_SIGS(nsigs,sigs,flags) \
334 do { \
335 int signum = (nsigs); \
336 while (signum-- > 0) \
337 if ((sigs)[signum]) \
338 (flags)[signum] = 1; \
339 } while (0)
340
341#define UNSET_SIGS(nsigs,sigs,flags) \
342 do { \
343 int signum = (nsigs); \
344 while (signum-- > 0) \
345 if ((sigs)[signum]) \
346 (flags)[signum] = 0; \
347 } while (0)
348
9b224c5e
PA
349/* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
350 this function is to avoid exporting `signal_program'. */
351
352void
353update_signals_program_target (void)
354{
a493e3e2 355 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
9b224c5e
PA
356}
357
1777feb0 358/* Value to pass to target_resume() to cause all threads to resume. */
39f77062 359
edb3359d 360#define RESUME_ALL minus_one_ptid
c906108c
SS
361
362/* Command list pointer for the "stop" placeholder. */
363
364static struct cmd_list_element *stop_command;
365
c906108c
SS
366/* Nonzero if we want to give control to the user when we're notified
367 of shared library events by the dynamic linker. */
628fe4e4 368int stop_on_solib_events;
f9e14852
GB
369
370/* Enable or disable optional shared library event breakpoints
371 as appropriate when the above flag is changed. */
372
373static void
374set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
375{
376 update_solib_breakpoints ();
377}
378
920d2a44
AC
379static void
380show_stop_on_solib_events (struct ui_file *file, int from_tty,
381 struct cmd_list_element *c, const char *value)
382{
383 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
384 value);
385}
c906108c 386
c906108c
SS
387/* Nonzero after stop if current stack frame should be printed. */
388
389static int stop_print_frame;
390
e02bc4cc 391/* This is a cached copy of the pid/waitstatus of the last event
9a4105ab
AC
392 returned by target_wait()/deprecated_target_wait_hook(). This
393 information is returned by get_last_target_status(). */
39f77062 394static ptid_t target_last_wait_ptid;
e02bc4cc
DS
395static struct target_waitstatus target_last_waitstatus;
396
0d1e5fa7
PA
397static void context_switch (ptid_t ptid);
398
4e1c45ea 399void init_thread_stepping_state (struct thread_info *tss);
0d1e5fa7 400
53904c9e
AC
401static const char follow_fork_mode_child[] = "child";
402static const char follow_fork_mode_parent[] = "parent";
403
40478521 404static const char *const follow_fork_mode_kind_names[] = {
53904c9e
AC
405 follow_fork_mode_child,
406 follow_fork_mode_parent,
407 NULL
ef346e04 408};
c906108c 409
53904c9e 410static const char *follow_fork_mode_string = follow_fork_mode_parent;
920d2a44
AC
411static void
412show_follow_fork_mode_string (struct ui_file *file, int from_tty,
413 struct cmd_list_element *c, const char *value)
414{
3e43a32a
MS
415 fprintf_filtered (file,
416 _("Debugger response to a program "
417 "call of fork or vfork is \"%s\".\n"),
920d2a44
AC
418 value);
419}
c906108c
SS
420\f
421
d83ad864
DB
422/* Handle changes to the inferior list based on the type of fork,
423 which process is being followed, and whether the other process
424 should be detached. On entry inferior_ptid must be the ptid of
425 the fork parent. At return inferior_ptid is the ptid of the
426 followed inferior. */
427
428static int
429follow_fork_inferior (int follow_child, int detach_fork)
430{
431 int has_vforked;
79639e11 432 ptid_t parent_ptid, child_ptid;
d83ad864
DB
433
434 has_vforked = (inferior_thread ()->pending_follow.kind
435 == TARGET_WAITKIND_VFORKED);
79639e11
PA
436 parent_ptid = inferior_ptid;
437 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
d83ad864
DB
438
439 if (has_vforked
440 && !non_stop /* Non-stop always resumes both branches. */
3b12939d 441 && current_ui->prompt_state == PROMPT_BLOCKED
d83ad864
DB
442 && !(follow_child || detach_fork || sched_multi))
443 {
444 /* The parent stays blocked inside the vfork syscall until the
445 child execs or exits. If we don't let the child run, then
446 the parent stays blocked. If we're telling the parent to run
447 in the foreground, the user will not be able to ctrl-c to get
448 back the terminal, effectively hanging the debug session. */
449 fprintf_filtered (gdb_stderr, _("\
450Can not resume the parent process over vfork in the foreground while\n\
451holding the child stopped. Try \"set detach-on-fork\" or \
452\"set schedule-multiple\".\n"));
453 /* FIXME output string > 80 columns. */
454 return 1;
455 }
456
457 if (!follow_child)
458 {
459 /* Detach new forked process? */
460 if (detach_fork)
461 {
d83ad864
DB
462 /* Before detaching from the child, remove all breakpoints
463 from it. If we forked, then this has already been taken
464 care of by infrun.c. If we vforked however, any
465 breakpoint inserted in the parent is visible in the
466 child, even those added while stopped in a vfork
467 catchpoint. This will remove the breakpoints from the
468 parent also, but they'll be reinserted below. */
469 if (has_vforked)
470 {
471 /* Keep breakpoints list in sync. */
472 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
473 }
474
475 if (info_verbose || debug_infrun)
476 {
8dd06f7a
DB
477 /* Ensure that we have a process ptid. */
478 ptid_t process_ptid = pid_to_ptid (ptid_get_pid (child_ptid));
479
6f259a23 480 target_terminal_ours_for_output ();
d83ad864 481 fprintf_filtered (gdb_stdlog,
79639e11 482 _("Detaching after %s from child %s.\n"),
6f259a23 483 has_vforked ? "vfork" : "fork",
8dd06f7a 484 target_pid_to_str (process_ptid));
d83ad864
DB
485 }
486 }
487 else
488 {
489 struct inferior *parent_inf, *child_inf;
490 struct cleanup *old_chain;
491
492 /* Add process to GDB's tables. */
79639e11 493 child_inf = add_inferior (ptid_get_pid (child_ptid));
d83ad864
DB
494
495 parent_inf = current_inferior ();
496 child_inf->attach_flag = parent_inf->attach_flag;
497 copy_terminal_info (child_inf, parent_inf);
498 child_inf->gdbarch = parent_inf->gdbarch;
499 copy_inferior_target_desc_info (child_inf, parent_inf);
500
2a00d7ce 501 old_chain = save_current_space_and_thread ();
d83ad864 502
79639e11 503 inferior_ptid = child_ptid;
d83ad864 504 add_thread (inferior_ptid);
2a00d7ce 505 set_current_inferior (child_inf);
d83ad864
DB
506 child_inf->symfile_flags = SYMFILE_NO_READ;
507
508 /* If this is a vfork child, then the address-space is
509 shared with the parent. */
510 if (has_vforked)
511 {
512 child_inf->pspace = parent_inf->pspace;
513 child_inf->aspace = parent_inf->aspace;
514
515 /* The parent will be frozen until the child is done
516 with the shared region. Keep track of the
517 parent. */
518 child_inf->vfork_parent = parent_inf;
519 child_inf->pending_detach = 0;
520 parent_inf->vfork_child = child_inf;
521 parent_inf->pending_detach = 0;
522 }
523 else
524 {
525 child_inf->aspace = new_address_space ();
526 child_inf->pspace = add_program_space (child_inf->aspace);
527 child_inf->removable = 1;
528 set_current_program_space (child_inf->pspace);
529 clone_program_space (child_inf->pspace, parent_inf->pspace);
530
531 /* Let the shared library layer (e.g., solib-svr4) learn
532 about this new process, relocate the cloned exec, pull
533 in shared libraries, and install the solib event
534 breakpoint. If a "cloned-VM" event was propagated
535 better throughout the core, this wouldn't be
536 required. */
537 solib_create_inferior_hook (0);
538 }
539
540 do_cleanups (old_chain);
541 }
542
543 if (has_vforked)
544 {
545 struct inferior *parent_inf;
546
547 parent_inf = current_inferior ();
548
549 /* If we detached from the child, then we have to be careful
550 to not insert breakpoints in the parent until the child
551 is done with the shared memory region. However, if we're
552 staying attached to the child, then we can and should
553 insert breakpoints, so that we can debug it. A
554 subsequent child exec or exit is enough to know when does
555 the child stops using the parent's address space. */
556 parent_inf->waiting_for_vfork_done = detach_fork;
557 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
558 }
559 }
560 else
561 {
562 /* Follow the child. */
563 struct inferior *parent_inf, *child_inf;
564 struct program_space *parent_pspace;
565
566 if (info_verbose || debug_infrun)
567 {
6f259a23
DB
568 target_terminal_ours_for_output ();
569 fprintf_filtered (gdb_stdlog,
79639e11
PA
570 _("Attaching after %s %s to child %s.\n"),
571 target_pid_to_str (parent_ptid),
6f259a23 572 has_vforked ? "vfork" : "fork",
79639e11 573 target_pid_to_str (child_ptid));
d83ad864
DB
574 }
575
576 /* Add the new inferior first, so that the target_detach below
577 doesn't unpush the target. */
578
79639e11 579 child_inf = add_inferior (ptid_get_pid (child_ptid));
d83ad864
DB
580
581 parent_inf = current_inferior ();
582 child_inf->attach_flag = parent_inf->attach_flag;
583 copy_terminal_info (child_inf, parent_inf);
584 child_inf->gdbarch = parent_inf->gdbarch;
585 copy_inferior_target_desc_info (child_inf, parent_inf);
586
587 parent_pspace = parent_inf->pspace;
588
589 /* If we're vforking, we want to hold on to the parent until the
590 child exits or execs. At child exec or exit time we can
591 remove the old breakpoints from the parent and detach or
592 resume debugging it. Otherwise, detach the parent now; we'll
593 want to reuse it's program/address spaces, but we can't set
594 them to the child before removing breakpoints from the
595 parent, otherwise, the breakpoints module could decide to
596 remove breakpoints from the wrong process (since they'd be
597 assigned to the same address space). */
598
599 if (has_vforked)
600 {
601 gdb_assert (child_inf->vfork_parent == NULL);
602 gdb_assert (parent_inf->vfork_child == NULL);
603 child_inf->vfork_parent = parent_inf;
604 child_inf->pending_detach = 0;
605 parent_inf->vfork_child = child_inf;
606 parent_inf->pending_detach = detach_fork;
607 parent_inf->waiting_for_vfork_done = 0;
608 }
609 else if (detach_fork)
6f259a23
DB
610 {
611 if (info_verbose || debug_infrun)
612 {
8dd06f7a
DB
613 /* Ensure that we have a process ptid. */
614 ptid_t process_ptid = pid_to_ptid (ptid_get_pid (child_ptid));
615
6f259a23
DB
616 target_terminal_ours_for_output ();
617 fprintf_filtered (gdb_stdlog,
618 _("Detaching after fork from "
79639e11 619 "child %s.\n"),
8dd06f7a 620 target_pid_to_str (process_ptid));
6f259a23
DB
621 }
622
623 target_detach (NULL, 0);
624 }
d83ad864
DB
625
626 /* Note that the detach above makes PARENT_INF dangling. */
627
628 /* Add the child thread to the appropriate lists, and switch to
629 this new thread, before cloning the program space, and
630 informing the solib layer about this new process. */
631
79639e11 632 inferior_ptid = child_ptid;
d83ad864 633 add_thread (inferior_ptid);
2a00d7ce 634 set_current_inferior (child_inf);
d83ad864
DB
635
636 /* If this is a vfork child, then the address-space is shared
637 with the parent. If we detached from the parent, then we can
638 reuse the parent's program/address spaces. */
639 if (has_vforked || detach_fork)
640 {
641 child_inf->pspace = parent_pspace;
642 child_inf->aspace = child_inf->pspace->aspace;
643 }
644 else
645 {
646 child_inf->aspace = new_address_space ();
647 child_inf->pspace = add_program_space (child_inf->aspace);
648 child_inf->removable = 1;
649 child_inf->symfile_flags = SYMFILE_NO_READ;
650 set_current_program_space (child_inf->pspace);
651 clone_program_space (child_inf->pspace, parent_pspace);
652
653 /* Let the shared library layer (e.g., solib-svr4) learn
654 about this new process, relocate the cloned exec, pull in
655 shared libraries, and install the solib event breakpoint.
656 If a "cloned-VM" event was propagated better throughout
657 the core, this wouldn't be required. */
658 solib_create_inferior_hook (0);
659 }
660 }
661
662 return target_follow_fork (follow_child, detach_fork);
663}
664
e58b0e63
PA
665/* Tell the target to follow the fork we're stopped at. Returns true
666 if the inferior should be resumed; false, if the target for some
667 reason decided it's best not to resume. */
668
6604731b 669static int
4ef3f3be 670follow_fork (void)
c906108c 671{
ea1dd7bc 672 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63
PA
673 int should_resume = 1;
674 struct thread_info *tp;
675
676 /* Copy user stepping state to the new inferior thread. FIXME: the
677 followed fork child thread should have a copy of most of the
4e3990f4
DE
678 parent thread structure's run control related fields, not just these.
679 Initialized to avoid "may be used uninitialized" warnings from gcc. */
680 struct breakpoint *step_resume_breakpoint = NULL;
186c406b 681 struct breakpoint *exception_resume_breakpoint = NULL;
4e3990f4
DE
682 CORE_ADDR step_range_start = 0;
683 CORE_ADDR step_range_end = 0;
684 struct frame_id step_frame_id = { 0 };
8980e177 685 struct thread_fsm *thread_fsm = NULL;
e58b0e63
PA
686
687 if (!non_stop)
688 {
689 ptid_t wait_ptid;
690 struct target_waitstatus wait_status;
691
692 /* Get the last target status returned by target_wait(). */
693 get_last_target_status (&wait_ptid, &wait_status);
694
695 /* If not stopped at a fork event, then there's nothing else to
696 do. */
697 if (wait_status.kind != TARGET_WAITKIND_FORKED
698 && wait_status.kind != TARGET_WAITKIND_VFORKED)
699 return 1;
700
701 /* Check if we switched over from WAIT_PTID, since the event was
702 reported. */
703 if (!ptid_equal (wait_ptid, minus_one_ptid)
704 && !ptid_equal (inferior_ptid, wait_ptid))
705 {
706 /* We did. Switch back to WAIT_PTID thread, to tell the
707 target to follow it (in either direction). We'll
708 afterwards refuse to resume, and inform the user what
709 happened. */
710 switch_to_thread (wait_ptid);
711 should_resume = 0;
712 }
713 }
714
715 tp = inferior_thread ();
716
717 /* If there were any forks/vforks that were caught and are now to be
718 followed, then do so now. */
719 switch (tp->pending_follow.kind)
720 {
721 case TARGET_WAITKIND_FORKED:
722 case TARGET_WAITKIND_VFORKED:
723 {
724 ptid_t parent, child;
725
726 /* If the user did a next/step, etc, over a fork call,
727 preserve the stepping state in the fork child. */
728 if (follow_child && should_resume)
729 {
8358c15c
JK
730 step_resume_breakpoint = clone_momentary_breakpoint
731 (tp->control.step_resume_breakpoint);
16c381f0
JK
732 step_range_start = tp->control.step_range_start;
733 step_range_end = tp->control.step_range_end;
734 step_frame_id = tp->control.step_frame_id;
186c406b
TT
735 exception_resume_breakpoint
736 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
8980e177 737 thread_fsm = tp->thread_fsm;
e58b0e63
PA
738
739 /* For now, delete the parent's sr breakpoint, otherwise,
740 parent/child sr breakpoints are considered duplicates,
741 and the child version will not be installed. Remove
742 this when the breakpoints module becomes aware of
743 inferiors and address spaces. */
744 delete_step_resume_breakpoint (tp);
16c381f0
JK
745 tp->control.step_range_start = 0;
746 tp->control.step_range_end = 0;
747 tp->control.step_frame_id = null_frame_id;
186c406b 748 delete_exception_resume_breakpoint (tp);
8980e177 749 tp->thread_fsm = NULL;
e58b0e63
PA
750 }
751
752 parent = inferior_ptid;
753 child = tp->pending_follow.value.related_pid;
754
d83ad864
DB
755 /* Set up inferior(s) as specified by the caller, and tell the
756 target to do whatever is necessary to follow either parent
757 or child. */
758 if (follow_fork_inferior (follow_child, detach_fork))
e58b0e63
PA
759 {
760 /* Target refused to follow, or there's some other reason
761 we shouldn't resume. */
762 should_resume = 0;
763 }
764 else
765 {
766 /* This pending follow fork event is now handled, one way
767 or another. The previous selected thread may be gone
768 from the lists by now, but if it is still around, need
769 to clear the pending follow request. */
e09875d4 770 tp = find_thread_ptid (parent);
e58b0e63
PA
771 if (tp)
772 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
773
774 /* This makes sure we don't try to apply the "Switched
775 over from WAIT_PID" logic above. */
776 nullify_last_target_wait_ptid ();
777
1777feb0 778 /* If we followed the child, switch to it... */
e58b0e63
PA
779 if (follow_child)
780 {
781 switch_to_thread (child);
782
783 /* ... and preserve the stepping state, in case the
784 user was stepping over the fork call. */
785 if (should_resume)
786 {
787 tp = inferior_thread ();
8358c15c
JK
788 tp->control.step_resume_breakpoint
789 = step_resume_breakpoint;
16c381f0
JK
790 tp->control.step_range_start = step_range_start;
791 tp->control.step_range_end = step_range_end;
792 tp->control.step_frame_id = step_frame_id;
186c406b
TT
793 tp->control.exception_resume_breakpoint
794 = exception_resume_breakpoint;
8980e177 795 tp->thread_fsm = thread_fsm;
e58b0e63
PA
796 }
797 else
798 {
799 /* If we get here, it was because we're trying to
800 resume from a fork catchpoint, but, the user
801 has switched threads away from the thread that
802 forked. In that case, the resume command
803 issued is most likely not applicable to the
804 child, so just warn, and refuse to resume. */
3e43a32a 805 warning (_("Not resuming: switched threads "
fd7dcb94 806 "before following fork child."));
e58b0e63
PA
807 }
808
809 /* Reset breakpoints in the child as appropriate. */
810 follow_inferior_reset_breakpoints ();
811 }
812 else
813 switch_to_thread (parent);
814 }
815 }
816 break;
817 case TARGET_WAITKIND_SPURIOUS:
818 /* Nothing to follow. */
819 break;
820 default:
821 internal_error (__FILE__, __LINE__,
822 "Unexpected pending_follow.kind %d\n",
823 tp->pending_follow.kind);
824 break;
825 }
c906108c 826
e58b0e63 827 return should_resume;
c906108c
SS
828}
829
d83ad864 830static void
6604731b 831follow_inferior_reset_breakpoints (void)
c906108c 832{
4e1c45ea
PA
833 struct thread_info *tp = inferior_thread ();
834
6604731b
DJ
835 /* Was there a step_resume breakpoint? (There was if the user
836 did a "next" at the fork() call.) If so, explicitly reset its
a1aa2221
LM
837 thread number. Cloned step_resume breakpoints are disabled on
838 creation, so enable it here now that it is associated with the
839 correct thread.
6604731b
DJ
840
841 step_resumes are a form of bp that are made to be per-thread.
842 Since we created the step_resume bp when the parent process
843 was being debugged, and now are switching to the child process,
844 from the breakpoint package's viewpoint, that's a switch of
845 "threads". We must update the bp's notion of which thread
846 it is for, or it'll be ignored when it triggers. */
847
8358c15c 848 if (tp->control.step_resume_breakpoint)
a1aa2221
LM
849 {
850 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
851 tp->control.step_resume_breakpoint->loc->enabled = 1;
852 }
6604731b 853
a1aa2221 854 /* Treat exception_resume breakpoints like step_resume breakpoints. */
186c406b 855 if (tp->control.exception_resume_breakpoint)
a1aa2221
LM
856 {
857 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
858 tp->control.exception_resume_breakpoint->loc->enabled = 1;
859 }
186c406b 860
6604731b
DJ
861 /* Reinsert all breakpoints in the child. The user may have set
862 breakpoints after catching the fork, in which case those
863 were never set in the child, but only in the parent. This makes
864 sure the inserted breakpoints match the breakpoint list. */
865
866 breakpoint_re_set ();
867 insert_breakpoints ();
c906108c 868}
c906108c 869
6c95b8df
PA
870/* The child has exited or execed: resume threads of the parent the
871 user wanted to be executing. */
872
873static int
874proceed_after_vfork_done (struct thread_info *thread,
875 void *arg)
876{
877 int pid = * (int *) arg;
878
879 if (ptid_get_pid (thread->ptid) == pid
880 && is_running (thread->ptid)
881 && !is_executing (thread->ptid)
882 && !thread->stop_requested
a493e3e2 883 && thread->suspend.stop_signal == GDB_SIGNAL_0)
6c95b8df
PA
884 {
885 if (debug_infrun)
886 fprintf_unfiltered (gdb_stdlog,
887 "infrun: resuming vfork parent thread %s\n",
888 target_pid_to_str (thread->ptid));
889
890 switch_to_thread (thread->ptid);
70509625 891 clear_proceed_status (0);
64ce06e4 892 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
6c95b8df
PA
893 }
894
895 return 0;
896}
897
898/* Called whenever we notice an exec or exit event, to handle
899 detaching or resuming a vfork parent. */
900
901static void
902handle_vfork_child_exec_or_exit (int exec)
903{
904 struct inferior *inf = current_inferior ();
905
906 if (inf->vfork_parent)
907 {
908 int resume_parent = -1;
909
910 /* This exec or exit marks the end of the shared memory region
911 between the parent and the child. If the user wanted to
912 detach from the parent, now is the time. */
913
914 if (inf->vfork_parent->pending_detach)
915 {
916 struct thread_info *tp;
917 struct cleanup *old_chain;
918 struct program_space *pspace;
919 struct address_space *aspace;
920
1777feb0 921 /* follow-fork child, detach-on-fork on. */
6c95b8df 922
68c9da30
PA
923 inf->vfork_parent->pending_detach = 0;
924
f50f4e56
PA
925 if (!exec)
926 {
927 /* If we're handling a child exit, then inferior_ptid
928 points at the inferior's pid, not to a thread. */
929 old_chain = save_inferior_ptid ();
930 save_current_program_space ();
931 save_current_inferior ();
932 }
933 else
934 old_chain = save_current_space_and_thread ();
6c95b8df
PA
935
936 /* We're letting loose of the parent. */
937 tp = any_live_thread_of_process (inf->vfork_parent->pid);
938 switch_to_thread (tp->ptid);
939
940 /* We're about to detach from the parent, which implicitly
941 removes breakpoints from its address space. There's a
942 catch here: we want to reuse the spaces for the child,
943 but, parent/child are still sharing the pspace at this
944 point, although the exec in reality makes the kernel give
945 the child a fresh set of new pages. The problem here is
946 that the breakpoints module being unaware of this, would
947 likely chose the child process to write to the parent
948 address space. Swapping the child temporarily away from
949 the spaces has the desired effect. Yes, this is "sort
950 of" a hack. */
951
952 pspace = inf->pspace;
953 aspace = inf->aspace;
954 inf->aspace = NULL;
955 inf->pspace = NULL;
956
957 if (debug_infrun || info_verbose)
958 {
6f259a23 959 target_terminal_ours_for_output ();
6c95b8df
PA
960
961 if (exec)
6f259a23
DB
962 {
963 fprintf_filtered (gdb_stdlog,
964 _("Detaching vfork parent process "
965 "%d after child exec.\n"),
966 inf->vfork_parent->pid);
967 }
6c95b8df 968 else
6f259a23
DB
969 {
970 fprintf_filtered (gdb_stdlog,
971 _("Detaching vfork parent process "
972 "%d after child exit.\n"),
973 inf->vfork_parent->pid);
974 }
6c95b8df
PA
975 }
976
977 target_detach (NULL, 0);
978
979 /* Put it back. */
980 inf->pspace = pspace;
981 inf->aspace = aspace;
982
983 do_cleanups (old_chain);
984 }
985 else if (exec)
986 {
987 /* We're staying attached to the parent, so, really give the
988 child a new address space. */
989 inf->pspace = add_program_space (maybe_new_address_space ());
990 inf->aspace = inf->pspace->aspace;
991 inf->removable = 1;
992 set_current_program_space (inf->pspace);
993
994 resume_parent = inf->vfork_parent->pid;
995
996 /* Break the bonds. */
997 inf->vfork_parent->vfork_child = NULL;
998 }
999 else
1000 {
1001 struct cleanup *old_chain;
1002 struct program_space *pspace;
1003
1004 /* If this is a vfork child exiting, then the pspace and
1005 aspaces were shared with the parent. Since we're
1006 reporting the process exit, we'll be mourning all that is
1007 found in the address space, and switching to null_ptid,
1008 preparing to start a new inferior. But, since we don't
1009 want to clobber the parent's address/program spaces, we
1010 go ahead and create a new one for this exiting
1011 inferior. */
1012
1013 /* Switch to null_ptid, so that clone_program_space doesn't want
1014 to read the selected frame of a dead process. */
1015 old_chain = save_inferior_ptid ();
1016 inferior_ptid = null_ptid;
1017
1018 /* This inferior is dead, so avoid giving the breakpoints
1019 module the option to write through to it (cloning a
1020 program space resets breakpoints). */
1021 inf->aspace = NULL;
1022 inf->pspace = NULL;
1023 pspace = add_program_space (maybe_new_address_space ());
1024 set_current_program_space (pspace);
1025 inf->removable = 1;
7dcd53a0 1026 inf->symfile_flags = SYMFILE_NO_READ;
6c95b8df
PA
1027 clone_program_space (pspace, inf->vfork_parent->pspace);
1028 inf->pspace = pspace;
1029 inf->aspace = pspace->aspace;
1030
1031 /* Put back inferior_ptid. We'll continue mourning this
1777feb0 1032 inferior. */
6c95b8df
PA
1033 do_cleanups (old_chain);
1034
1035 resume_parent = inf->vfork_parent->pid;
1036 /* Break the bonds. */
1037 inf->vfork_parent->vfork_child = NULL;
1038 }
1039
1040 inf->vfork_parent = NULL;
1041
1042 gdb_assert (current_program_space == inf->pspace);
1043
1044 if (non_stop && resume_parent != -1)
1045 {
1046 /* If the user wanted the parent to be running, let it go
1047 free now. */
1048 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1049
1050 if (debug_infrun)
3e43a32a
MS
1051 fprintf_unfiltered (gdb_stdlog,
1052 "infrun: resuming vfork parent process %d\n",
6c95b8df
PA
1053 resume_parent);
1054
1055 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1056
1057 do_cleanups (old_chain);
1058 }
1059 }
1060}
1061
eb6c553b 1062/* Enum strings for "set|show follow-exec-mode". */
6c95b8df
PA
1063
1064static const char follow_exec_mode_new[] = "new";
1065static const char follow_exec_mode_same[] = "same";
40478521 1066static const char *const follow_exec_mode_names[] =
6c95b8df
PA
1067{
1068 follow_exec_mode_new,
1069 follow_exec_mode_same,
1070 NULL,
1071};
1072
1073static const char *follow_exec_mode_string = follow_exec_mode_same;
1074static void
1075show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1076 struct cmd_list_element *c, const char *value)
1077{
1078 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1079}
1080
ecf45d2c 1081/* EXEC_FILE_TARGET is assumed to be non-NULL. */
1adeb98a 1082
c906108c 1083static void
ecf45d2c 1084follow_exec (ptid_t ptid, char *exec_file_target)
c906108c 1085{
95e50b27 1086 struct thread_info *th, *tmp;
6c95b8df 1087 struct inferior *inf = current_inferior ();
95e50b27 1088 int pid = ptid_get_pid (ptid);
94585166 1089 ptid_t process_ptid;
ecf45d2c
SL
1090 char *exec_file_host;
1091 struct cleanup *old_chain;
7a292a7a 1092
c906108c
SS
1093 /* This is an exec event that we actually wish to pay attention to.
1094 Refresh our symbol table to the newly exec'd program, remove any
1095 momentary bp's, etc.
1096
1097 If there are breakpoints, they aren't really inserted now,
1098 since the exec() transformed our inferior into a fresh set
1099 of instructions.
1100
1101 We want to preserve symbolic breakpoints on the list, since
1102 we have hopes that they can be reset after the new a.out's
1103 symbol table is read.
1104
1105 However, any "raw" breakpoints must be removed from the list
1106 (e.g., the solib bp's), since their address is probably invalid
1107 now.
1108
1109 And, we DON'T want to call delete_breakpoints() here, since
1110 that may write the bp's "shadow contents" (the instruction
1111 value that was overwritten witha TRAP instruction). Since
1777feb0 1112 we now have a new a.out, those shadow contents aren't valid. */
6c95b8df
PA
1113
1114 mark_breakpoints_out ();
1115
95e50b27
PA
1116 /* The target reports the exec event to the main thread, even if
1117 some other thread does the exec, and even if the main thread was
1118 stopped or already gone. We may still have non-leader threads of
1119 the process on our list. E.g., on targets that don't have thread
1120 exit events (like remote); or on native Linux in non-stop mode if
1121 there were only two threads in the inferior and the non-leader
1122 one is the one that execs (and nothing forces an update of the
1123 thread list up to here). When debugging remotely, it's best to
1124 avoid extra traffic, when possible, so avoid syncing the thread
1125 list with the target, and instead go ahead and delete all threads
1126 of the process but one that reported the event. Note this must
1127 be done before calling update_breakpoints_after_exec, as
1128 otherwise clearing the threads' resources would reference stale
1129 thread breakpoints -- it may have been one of these threads that
1130 stepped across the exec. We could just clear their stepping
1131 states, but as long as we're iterating, might as well delete
1132 them. Deleting them now rather than at the next user-visible
1133 stop provides a nicer sequence of events for user and MI
1134 notifications. */
8a06aea7 1135 ALL_THREADS_SAFE (th, tmp)
95e50b27
PA
1136 if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
1137 delete_thread (th->ptid);
1138
1139 /* We also need to clear any left over stale state for the
1140 leader/event thread. E.g., if there was any step-resume
1141 breakpoint or similar, it's gone now. We cannot truly
1142 step-to-next statement through an exec(). */
1143 th = inferior_thread ();
8358c15c 1144 th->control.step_resume_breakpoint = NULL;
186c406b 1145 th->control.exception_resume_breakpoint = NULL;
34b7e8a6 1146 th->control.single_step_breakpoints = NULL;
16c381f0
JK
1147 th->control.step_range_start = 0;
1148 th->control.step_range_end = 0;
c906108c 1149
95e50b27
PA
1150 /* The user may have had the main thread held stopped in the
1151 previous image (e.g., schedlock on, or non-stop). Release
1152 it now. */
a75724bc
PA
1153 th->stop_requested = 0;
1154
95e50b27
PA
1155 update_breakpoints_after_exec ();
1156
1777feb0 1157 /* What is this a.out's name? */
94585166 1158 process_ptid = pid_to_ptid (pid);
6c95b8df 1159 printf_unfiltered (_("%s is executing new program: %s\n"),
94585166 1160 target_pid_to_str (process_ptid),
ecf45d2c 1161 exec_file_target);
c906108c
SS
1162
1163 /* We've followed the inferior through an exec. Therefore, the
1777feb0 1164 inferior has essentially been killed & reborn. */
7a292a7a 1165
c906108c 1166 gdb_flush (gdb_stdout);
6ca15a4b
PA
1167
1168 breakpoint_init_inferior (inf_execd);
e85a822c 1169
ecf45d2c
SL
1170 exec_file_host = exec_file_find (exec_file_target, NULL);
1171 old_chain = make_cleanup (xfree, exec_file_host);
ff862be4 1172
ecf45d2c
SL
1173 /* If we were unable to map the executable target pathname onto a host
1174 pathname, tell the user that. Otherwise GDB's subsequent behavior
1175 is confusing. Maybe it would even be better to stop at this point
1176 so that the user can specify a file manually before continuing. */
1177 if (exec_file_host == NULL)
1178 warning (_("Could not load symbols for executable %s.\n"
1179 "Do you need \"set sysroot\"?"),
1180 exec_file_target);
c906108c 1181
cce9b6bf
PA
1182 /* Reset the shared library package. This ensures that we get a
1183 shlib event when the child reaches "_start", at which point the
1184 dld will have had a chance to initialize the child. */
1185 /* Also, loading a symbol file below may trigger symbol lookups, and
1186 we don't want those to be satisfied by the libraries of the
1187 previous incarnation of this process. */
1188 no_shared_libraries (NULL, 0);
1189
6c95b8df
PA
1190 if (follow_exec_mode_string == follow_exec_mode_new)
1191 {
6c95b8df
PA
1192 /* The user wants to keep the old inferior and program spaces
1193 around. Create a new fresh one, and switch to it. */
1194
17d8546e
DB
1195 /* Do exit processing for the original inferior before adding
1196 the new inferior so we don't have two active inferiors with
1197 the same ptid, which can confuse find_inferior_ptid. */
1198 exit_inferior_num_silent (current_inferior ()->num);
1199
94585166
DB
1200 inf = add_inferior_with_spaces ();
1201 inf->pid = pid;
ecf45d2c 1202 target_follow_exec (inf, exec_file_target);
6c95b8df
PA
1203
1204 set_current_inferior (inf);
94585166
DB
1205 set_current_program_space (inf->pspace);
1206 add_thread (ptid);
6c95b8df 1207 }
9107fc8d
PA
1208 else
1209 {
1210 /* The old description may no longer be fit for the new image.
1211 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1212 old description; we'll read a new one below. No need to do
1213 this on "follow-exec-mode new", as the old inferior stays
1214 around (its description is later cleared/refetched on
1215 restart). */
1216 target_clear_description ();
1217 }
6c95b8df
PA
1218
1219 gdb_assert (current_program_space == inf->pspace);
1220
ecf45d2c
SL
1221 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1222 because the proper displacement for a PIE (Position Independent
1223 Executable) main symbol file will only be computed by
1224 solib_create_inferior_hook below. breakpoint_re_set would fail
1225 to insert the breakpoints with the zero displacement. */
1226 try_open_exec_file (exec_file_host, inf, SYMFILE_DEFER_BP_RESET);
c1e56572 1227
ecf45d2c 1228 do_cleanups (old_chain);
c906108c 1229
9107fc8d
PA
1230 /* If the target can specify a description, read it. Must do this
1231 after flipping to the new executable (because the target supplied
1232 description must be compatible with the executable's
1233 architecture, and the old executable may e.g., be 32-bit, while
1234 the new one 64-bit), and before anything involving memory or
1235 registers. */
1236 target_find_description ();
1237
268a4a75 1238 solib_create_inferior_hook (0);
c906108c 1239
4efc6507
DE
1240 jit_inferior_created_hook ();
1241
c1e56572
JK
1242 breakpoint_re_set ();
1243
c906108c
SS
1244 /* Reinsert all breakpoints. (Those which were symbolic have
1245 been reset to the proper address in the new a.out, thanks
1777feb0 1246 to symbol_file_command...). */
c906108c
SS
1247 insert_breakpoints ();
1248
1249 /* The next resume of this inferior should bring it to the shlib
1250 startup breakpoints. (If the user had also set bp's on
1251 "main" from the old (parent) process, then they'll auto-
1777feb0 1252 matically get reset there in the new process.). */
c906108c
SS
1253}
1254
c2829269
PA
1255/* The queue of threads that need to do a step-over operation to get
1256 past e.g., a breakpoint. What technique is used to step over the
1257 breakpoint/watchpoint does not matter -- all threads end up in the
1258 same queue, to maintain rough temporal order of execution, in order
1259 to avoid starvation, otherwise, we could e.g., find ourselves
1260 constantly stepping the same couple threads past their breakpoints
1261 over and over, if the single-step finish fast enough. */
1262struct thread_info *step_over_queue_head;
1263
6c4cfb24
PA
1264/* Bit flags indicating what the thread needs to step over. */
1265
8d297bbf 1266enum step_over_what_flag
6c4cfb24
PA
1267 {
1268 /* Step over a breakpoint. */
1269 STEP_OVER_BREAKPOINT = 1,
1270
1271 /* Step past a non-continuable watchpoint, in order to let the
1272 instruction execute so we can evaluate the watchpoint
1273 expression. */
1274 STEP_OVER_WATCHPOINT = 2
1275 };
8d297bbf 1276DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
6c4cfb24 1277
963f9c80 1278/* Info about an instruction that is being stepped over. */
31e77af2
PA
1279
1280struct step_over_info
1281{
963f9c80
PA
1282 /* If we're stepping past a breakpoint, this is the address space
1283 and address of the instruction the breakpoint is set at. We'll
1284 skip inserting all breakpoints here. Valid iff ASPACE is
1285 non-NULL. */
31e77af2 1286 struct address_space *aspace;
31e77af2 1287 CORE_ADDR address;
963f9c80
PA
1288
1289 /* The instruction being stepped over triggers a nonsteppable
1290 watchpoint. If true, we'll skip inserting watchpoints. */
1291 int nonsteppable_watchpoint_p;
21edc42f
YQ
1292
1293 /* The thread's global number. */
1294 int thread;
31e77af2
PA
1295};
1296
1297/* The step-over info of the location that is being stepped over.
1298
1299 Note that with async/breakpoint always-inserted mode, a user might
1300 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1301 being stepped over. As setting a new breakpoint inserts all
1302 breakpoints, we need to make sure the breakpoint being stepped over
1303 isn't inserted then. We do that by only clearing the step-over
1304 info when the step-over is actually finished (or aborted).
1305
1306 Presently GDB can only step over one breakpoint at any given time.
1307 Given threads that can't run code in the same address space as the
1308 breakpoint's can't really miss the breakpoint, GDB could be taught
1309 to step-over at most one breakpoint per address space (so this info
1310 could move to the address space object if/when GDB is extended).
1311 The set of breakpoints being stepped over will normally be much
1312 smaller than the set of all breakpoints, so a flag in the
1313 breakpoint location structure would be wasteful. A separate list
1314 also saves complexity and run-time, as otherwise we'd have to go
1315 through all breakpoint locations clearing their flag whenever we
1316 start a new sequence. Similar considerations weigh against storing
1317 this info in the thread object. Plus, not all step overs actually
1318 have breakpoint locations -- e.g., stepping past a single-step
1319 breakpoint, or stepping to complete a non-continuable
1320 watchpoint. */
1321static struct step_over_info step_over_info;
1322
1323/* Record the address of the breakpoint/instruction we're currently
ce0db137
DE
1324 stepping over.
1325 N.B. We record the aspace and address now, instead of say just the thread,
1326 because when we need the info later the thread may be running. */
31e77af2
PA
1327
1328static void
963f9c80 1329set_step_over_info (struct address_space *aspace, CORE_ADDR address,
21edc42f
YQ
1330 int nonsteppable_watchpoint_p,
1331 int thread)
31e77af2
PA
1332{
1333 step_over_info.aspace = aspace;
1334 step_over_info.address = address;
963f9c80 1335 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
21edc42f 1336 step_over_info.thread = thread;
31e77af2
PA
1337}
1338
1339/* Called when we're not longer stepping over a breakpoint / an
1340 instruction, so all breakpoints are free to be (re)inserted. */
1341
1342static void
1343clear_step_over_info (void)
1344{
372316f1
PA
1345 if (debug_infrun)
1346 fprintf_unfiltered (gdb_stdlog,
1347 "infrun: clear_step_over_info\n");
31e77af2
PA
1348 step_over_info.aspace = NULL;
1349 step_over_info.address = 0;
963f9c80 1350 step_over_info.nonsteppable_watchpoint_p = 0;
21edc42f 1351 step_over_info.thread = -1;
31e77af2
PA
1352}
1353
7f89fd65 1354/* See infrun.h. */
31e77af2
PA
1355
1356int
1357stepping_past_instruction_at (struct address_space *aspace,
1358 CORE_ADDR address)
1359{
1360 return (step_over_info.aspace != NULL
1361 && breakpoint_address_match (aspace, address,
1362 step_over_info.aspace,
1363 step_over_info.address));
1364}
1365
963f9c80
PA
1366/* See infrun.h. */
1367
21edc42f
YQ
1368int
1369thread_is_stepping_over_breakpoint (int thread)
1370{
1371 return (step_over_info.thread != -1
1372 && thread == step_over_info.thread);
1373}
1374
1375/* See infrun.h. */
1376
963f9c80
PA
1377int
1378stepping_past_nonsteppable_watchpoint (void)
1379{
1380 return step_over_info.nonsteppable_watchpoint_p;
1381}
1382
6cc83d2a
PA
1383/* Returns true if step-over info is valid. */
1384
1385static int
1386step_over_info_valid_p (void)
1387{
963f9c80
PA
1388 return (step_over_info.aspace != NULL
1389 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1390}
1391
c906108c 1392\f
237fc4c9
PA
1393/* Displaced stepping. */
1394
1395/* In non-stop debugging mode, we must take special care to manage
1396 breakpoints properly; in particular, the traditional strategy for
1397 stepping a thread past a breakpoint it has hit is unsuitable.
1398 'Displaced stepping' is a tactic for stepping one thread past a
1399 breakpoint it has hit while ensuring that other threads running
1400 concurrently will hit the breakpoint as they should.
1401
1402 The traditional way to step a thread T off a breakpoint in a
1403 multi-threaded program in all-stop mode is as follows:
1404
1405 a0) Initially, all threads are stopped, and breakpoints are not
1406 inserted.
1407 a1) We single-step T, leaving breakpoints uninserted.
1408 a2) We insert breakpoints, and resume all threads.
1409
1410 In non-stop debugging, however, this strategy is unsuitable: we
1411 don't want to have to stop all threads in the system in order to
1412 continue or step T past a breakpoint. Instead, we use displaced
1413 stepping:
1414
1415 n0) Initially, T is stopped, other threads are running, and
1416 breakpoints are inserted.
1417 n1) We copy the instruction "under" the breakpoint to a separate
1418 location, outside the main code stream, making any adjustments
1419 to the instruction, register, and memory state as directed by
1420 T's architecture.
1421 n2) We single-step T over the instruction at its new location.
1422 n3) We adjust the resulting register and memory state as directed
1423 by T's architecture. This includes resetting T's PC to point
1424 back into the main instruction stream.
1425 n4) We resume T.
1426
1427 This approach depends on the following gdbarch methods:
1428
1429 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1430 indicate where to copy the instruction, and how much space must
1431 be reserved there. We use these in step n1.
1432
1433 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1434 address, and makes any necessary adjustments to the instruction,
1435 register contents, and memory. We use this in step n1.
1436
1437 - gdbarch_displaced_step_fixup adjusts registers and memory after
1438 we have successfuly single-stepped the instruction, to yield the
1439 same effect the instruction would have had if we had executed it
1440 at its original address. We use this in step n3.
1441
1442 - gdbarch_displaced_step_free_closure provides cleanup.
1443
1444 The gdbarch_displaced_step_copy_insn and
1445 gdbarch_displaced_step_fixup functions must be written so that
1446 copying an instruction with gdbarch_displaced_step_copy_insn,
1447 single-stepping across the copied instruction, and then applying
1448 gdbarch_displaced_insn_fixup should have the same effects on the
1449 thread's memory and registers as stepping the instruction in place
1450 would have. Exactly which responsibilities fall to the copy and
1451 which fall to the fixup is up to the author of those functions.
1452
1453 See the comments in gdbarch.sh for details.
1454
1455 Note that displaced stepping and software single-step cannot
1456 currently be used in combination, although with some care I think
1457 they could be made to. Software single-step works by placing
1458 breakpoints on all possible subsequent instructions; if the
1459 displaced instruction is a PC-relative jump, those breakpoints
1460 could fall in very strange places --- on pages that aren't
1461 executable, or at addresses that are not proper instruction
1462 boundaries. (We do generally let other threads run while we wait
1463 to hit the software single-step breakpoint, and they might
1464 encounter such a corrupted instruction.) One way to work around
1465 this would be to have gdbarch_displaced_step_copy_insn fully
1466 simulate the effect of PC-relative instructions (and return NULL)
1467 on architectures that use software single-stepping.
1468
1469 In non-stop mode, we can have independent and simultaneous step
1470 requests, so more than one thread may need to simultaneously step
1471 over a breakpoint. The current implementation assumes there is
1472 only one scratch space per process. In this case, we have to
1473 serialize access to the scratch space. If thread A wants to step
1474 over a breakpoint, but we are currently waiting for some other
1475 thread to complete a displaced step, we leave thread A stopped and
1476 place it in the displaced_step_request_queue. Whenever a displaced
1477 step finishes, we pick the next thread in the queue and start a new
1478 displaced step operation on it. See displaced_step_prepare and
1479 displaced_step_fixup for details. */
1480
fc1cf338
PA
1481/* Per-inferior displaced stepping state. */
1482struct displaced_step_inferior_state
1483{
1484 /* Pointer to next in linked list. */
1485 struct displaced_step_inferior_state *next;
1486
1487 /* The process this displaced step state refers to. */
1488 int pid;
1489
3fc8eb30
PA
1490 /* True if preparing a displaced step ever failed. If so, we won't
1491 try displaced stepping for this inferior again. */
1492 int failed_before;
1493
fc1cf338
PA
1494 /* If this is not null_ptid, this is the thread carrying out a
1495 displaced single-step in process PID. This thread's state will
1496 require fixing up once it has completed its step. */
1497 ptid_t step_ptid;
1498
1499 /* The architecture the thread had when we stepped it. */
1500 struct gdbarch *step_gdbarch;
1501
1502 /* The closure provided gdbarch_displaced_step_copy_insn, to be used
1503 for post-step cleanup. */
1504 struct displaced_step_closure *step_closure;
1505
1506 /* The address of the original instruction, and the copy we
1507 made. */
1508 CORE_ADDR step_original, step_copy;
1509
1510 /* Saved contents of copy area. */
1511 gdb_byte *step_saved_copy;
1512};
1513
1514/* The list of states of processes involved in displaced stepping
1515 presently. */
1516static struct displaced_step_inferior_state *displaced_step_inferior_states;
1517
1518/* Get the displaced stepping state of process PID. */
1519
1520static struct displaced_step_inferior_state *
1521get_displaced_stepping_state (int pid)
1522{
1523 struct displaced_step_inferior_state *state;
1524
1525 for (state = displaced_step_inferior_states;
1526 state != NULL;
1527 state = state->next)
1528 if (state->pid == pid)
1529 return state;
1530
1531 return NULL;
1532}
1533
372316f1
PA
1534/* Returns true if any inferior has a thread doing a displaced
1535 step. */
1536
1537static int
1538displaced_step_in_progress_any_inferior (void)
1539{
1540 struct displaced_step_inferior_state *state;
1541
1542 for (state = displaced_step_inferior_states;
1543 state != NULL;
1544 state = state->next)
1545 if (!ptid_equal (state->step_ptid, null_ptid))
1546 return 1;
1547
1548 return 0;
1549}
1550
c0987663
YQ
1551/* Return true if thread represented by PTID is doing a displaced
1552 step. */
1553
1554static int
1555displaced_step_in_progress_thread (ptid_t ptid)
1556{
1557 struct displaced_step_inferior_state *displaced;
1558
1559 gdb_assert (!ptid_equal (ptid, null_ptid));
1560
1561 displaced = get_displaced_stepping_state (ptid_get_pid (ptid));
1562
1563 return (displaced != NULL && ptid_equal (displaced->step_ptid, ptid));
1564}
1565
8f572e5c
PA
1566/* Return true if process PID has a thread doing a displaced step. */
1567
1568static int
1569displaced_step_in_progress (int pid)
1570{
1571 struct displaced_step_inferior_state *displaced;
1572
1573 displaced = get_displaced_stepping_state (pid);
1574 if (displaced != NULL && !ptid_equal (displaced->step_ptid, null_ptid))
1575 return 1;
1576
1577 return 0;
1578}
1579
fc1cf338
PA
1580/* Add a new displaced stepping state for process PID to the displaced
1581 stepping state list, or return a pointer to an already existing
1582 entry, if it already exists. Never returns NULL. */
1583
1584static struct displaced_step_inferior_state *
1585add_displaced_stepping_state (int pid)
1586{
1587 struct displaced_step_inferior_state *state;
1588
1589 for (state = displaced_step_inferior_states;
1590 state != NULL;
1591 state = state->next)
1592 if (state->pid == pid)
1593 return state;
237fc4c9 1594
8d749320 1595 state = XCNEW (struct displaced_step_inferior_state);
fc1cf338
PA
1596 state->pid = pid;
1597 state->next = displaced_step_inferior_states;
1598 displaced_step_inferior_states = state;
237fc4c9 1599
fc1cf338
PA
1600 return state;
1601}
1602
a42244db
YQ
1603/* If inferior is in displaced stepping, and ADDR equals to starting address
1604 of copy area, return corresponding displaced_step_closure. Otherwise,
1605 return NULL. */
1606
1607struct displaced_step_closure*
1608get_displaced_step_closure_by_addr (CORE_ADDR addr)
1609{
1610 struct displaced_step_inferior_state *displaced
1611 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1612
1613 /* If checking the mode of displaced instruction in copy area. */
1614 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1615 && (displaced->step_copy == addr))
1616 return displaced->step_closure;
1617
1618 return NULL;
1619}
1620
fc1cf338 1621/* Remove the displaced stepping state of process PID. */
237fc4c9 1622
fc1cf338
PA
1623static void
1624remove_displaced_stepping_state (int pid)
1625{
1626 struct displaced_step_inferior_state *it, **prev_next_p;
237fc4c9 1627
fc1cf338
PA
1628 gdb_assert (pid != 0);
1629
1630 it = displaced_step_inferior_states;
1631 prev_next_p = &displaced_step_inferior_states;
1632 while (it)
1633 {
1634 if (it->pid == pid)
1635 {
1636 *prev_next_p = it->next;
1637 xfree (it);
1638 return;
1639 }
1640
1641 prev_next_p = &it->next;
1642 it = *prev_next_p;
1643 }
1644}
1645
1646static void
1647infrun_inferior_exit (struct inferior *inf)
1648{
1649 remove_displaced_stepping_state (inf->pid);
1650}
237fc4c9 1651
fff08868
HZ
1652/* If ON, and the architecture supports it, GDB will use displaced
1653 stepping to step over breakpoints. If OFF, or if the architecture
1654 doesn't support it, GDB will instead use the traditional
1655 hold-and-step approach. If AUTO (which is the default), GDB will
1656 decide which technique to use to step over breakpoints depending on
1657 which of all-stop or non-stop mode is active --- displaced stepping
1658 in non-stop mode; hold-and-step in all-stop mode. */
1659
72d0e2c5 1660static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1661
237fc4c9
PA
1662static void
1663show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1664 struct cmd_list_element *c,
1665 const char *value)
1666{
72d0e2c5 1667 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1668 fprintf_filtered (file,
1669 _("Debugger's willingness to use displaced stepping "
1670 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1671 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1672 else
3e43a32a
MS
1673 fprintf_filtered (file,
1674 _("Debugger's willingness to use displaced stepping "
1675 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1676}
1677
fff08868 1678/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1679 over breakpoints of thread TP. */
fff08868 1680
237fc4c9 1681static int
3fc8eb30 1682use_displaced_stepping (struct thread_info *tp)
237fc4c9 1683{
3fc8eb30
PA
1684 struct regcache *regcache = get_thread_regcache (tp->ptid);
1685 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1686 struct displaced_step_inferior_state *displaced_state;
1687
1688 displaced_state = get_displaced_stepping_state (ptid_get_pid (tp->ptid));
1689
fbea99ea
PA
1690 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1691 && target_is_non_stop_p ())
72d0e2c5 1692 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
96429cc8 1693 && gdbarch_displaced_step_copy_insn_p (gdbarch)
3fc8eb30
PA
1694 && find_record_target () == NULL
1695 && (displaced_state == NULL
1696 || !displaced_state->failed_before));
237fc4c9
PA
1697}
1698
1699/* Clean out any stray displaced stepping state. */
1700static void
fc1cf338 1701displaced_step_clear (struct displaced_step_inferior_state *displaced)
237fc4c9
PA
1702{
1703 /* Indicate that there is no cleanup pending. */
fc1cf338 1704 displaced->step_ptid = null_ptid;
237fc4c9 1705
fc1cf338 1706 if (displaced->step_closure)
237fc4c9 1707 {
fc1cf338
PA
1708 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1709 displaced->step_closure);
1710 displaced->step_closure = NULL;
237fc4c9
PA
1711 }
1712}
1713
1714static void
fc1cf338 1715displaced_step_clear_cleanup (void *arg)
237fc4c9 1716{
9a3c8263
SM
1717 struct displaced_step_inferior_state *state
1718 = (struct displaced_step_inferior_state *) arg;
fc1cf338
PA
1719
1720 displaced_step_clear (state);
237fc4c9
PA
1721}
1722
1723/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1724void
1725displaced_step_dump_bytes (struct ui_file *file,
1726 const gdb_byte *buf,
1727 size_t len)
1728{
1729 int i;
1730
1731 for (i = 0; i < len; i++)
1732 fprintf_unfiltered (file, "%02x ", buf[i]);
1733 fputs_unfiltered ("\n", file);
1734}
1735
1736/* Prepare to single-step, using displaced stepping.
1737
1738 Note that we cannot use displaced stepping when we have a signal to
1739 deliver. If we have a signal to deliver and an instruction to step
1740 over, then after the step, there will be no indication from the
1741 target whether the thread entered a signal handler or ignored the
1742 signal and stepped over the instruction successfully --- both cases
1743 result in a simple SIGTRAP. In the first case we mustn't do a
1744 fixup, and in the second case we must --- but we can't tell which.
1745 Comments in the code for 'random signals' in handle_inferior_event
1746 explain how we handle this case instead.
1747
1748 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1749 stepped now; 0 if displaced stepping this thread got queued; or -1
1750 if this instruction can't be displaced stepped. */
1751
237fc4c9 1752static int
3fc8eb30 1753displaced_step_prepare_throw (ptid_t ptid)
237fc4c9 1754{
ad53cd71 1755 struct cleanup *old_cleanups, *ignore_cleanups;
c1e36e3e 1756 struct thread_info *tp = find_thread_ptid (ptid);
237fc4c9
PA
1757 struct regcache *regcache = get_thread_regcache (ptid);
1758 struct gdbarch *gdbarch = get_regcache_arch (regcache);
d35ae833 1759 struct address_space *aspace = get_regcache_aspace (regcache);
237fc4c9
PA
1760 CORE_ADDR original, copy;
1761 ULONGEST len;
1762 struct displaced_step_closure *closure;
fc1cf338 1763 struct displaced_step_inferior_state *displaced;
9e529e1d 1764 int status;
237fc4c9
PA
1765
1766 /* We should never reach this function if the architecture does not
1767 support displaced stepping. */
1768 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1769
c2829269
PA
1770 /* Nor if the thread isn't meant to step over a breakpoint. */
1771 gdb_assert (tp->control.trap_expected);
1772
c1e36e3e
PA
1773 /* Disable range stepping while executing in the scratch pad. We
1774 want a single-step even if executing the displaced instruction in
1775 the scratch buffer lands within the stepping range (e.g., a
1776 jump/branch). */
1777 tp->control.may_range_step = 0;
1778
fc1cf338
PA
1779 /* We have to displaced step one thread at a time, as we only have
1780 access to a single scratch space per inferior. */
237fc4c9 1781
fc1cf338
PA
1782 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1783
1784 if (!ptid_equal (displaced->step_ptid, null_ptid))
237fc4c9
PA
1785 {
1786 /* Already waiting for a displaced step to finish. Defer this
1787 request and place in queue. */
237fc4c9
PA
1788
1789 if (debug_displaced)
1790 fprintf_unfiltered (gdb_stdlog,
c2829269 1791 "displaced: deferring step of %s\n",
237fc4c9
PA
1792 target_pid_to_str (ptid));
1793
c2829269 1794 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1795 return 0;
1796 }
1797 else
1798 {
1799 if (debug_displaced)
1800 fprintf_unfiltered (gdb_stdlog,
1801 "displaced: stepping %s now\n",
1802 target_pid_to_str (ptid));
1803 }
1804
fc1cf338 1805 displaced_step_clear (displaced);
237fc4c9 1806
ad53cd71
PA
1807 old_cleanups = save_inferior_ptid ();
1808 inferior_ptid = ptid;
1809
515630c5 1810 original = regcache_read_pc (regcache);
237fc4c9
PA
1811
1812 copy = gdbarch_displaced_step_location (gdbarch);
1813 len = gdbarch_max_insn_length (gdbarch);
1814
d35ae833
PA
1815 if (breakpoint_in_range_p (aspace, copy, len))
1816 {
1817 /* There's a breakpoint set in the scratch pad location range
1818 (which is usually around the entry point). We'd either
1819 install it before resuming, which would overwrite/corrupt the
1820 scratch pad, or if it was already inserted, this displaced
1821 step would overwrite it. The latter is OK in the sense that
1822 we already assume that no thread is going to execute the code
1823 in the scratch pad range (after initial startup) anyway, but
1824 the former is unacceptable. Simply punt and fallback to
1825 stepping over this breakpoint in-line. */
1826 if (debug_displaced)
1827 {
1828 fprintf_unfiltered (gdb_stdlog,
1829 "displaced: breakpoint set in scratch pad. "
1830 "Stepping over breakpoint in-line instead.\n");
1831 }
1832
1833 do_cleanups (old_cleanups);
1834 return -1;
1835 }
1836
237fc4c9 1837 /* Save the original contents of the copy area. */
224c3ddb 1838 displaced->step_saved_copy = (gdb_byte *) xmalloc (len);
ad53cd71 1839 ignore_cleanups = make_cleanup (free_current_contents,
fc1cf338 1840 &displaced->step_saved_copy);
9e529e1d
JK
1841 status = target_read_memory (copy, displaced->step_saved_copy, len);
1842 if (status != 0)
1843 throw_error (MEMORY_ERROR,
1844 _("Error accessing memory address %s (%s) for "
1845 "displaced-stepping scratch space."),
1846 paddress (gdbarch, copy), safe_strerror (status));
237fc4c9
PA
1847 if (debug_displaced)
1848 {
5af949e3
UW
1849 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1850 paddress (gdbarch, copy));
fc1cf338
PA
1851 displaced_step_dump_bytes (gdb_stdlog,
1852 displaced->step_saved_copy,
1853 len);
237fc4c9
PA
1854 };
1855
1856 closure = gdbarch_displaced_step_copy_insn (gdbarch,
ad53cd71 1857 original, copy, regcache);
7f03bd92
PA
1858 if (closure == NULL)
1859 {
1860 /* The architecture doesn't know how or want to displaced step
1861 this instruction or instruction sequence. Fallback to
1862 stepping over the breakpoint in-line. */
1863 do_cleanups (old_cleanups);
1864 return -1;
1865 }
237fc4c9 1866
9f5a595d
UW
1867 /* Save the information we need to fix things up if the step
1868 succeeds. */
fc1cf338
PA
1869 displaced->step_ptid = ptid;
1870 displaced->step_gdbarch = gdbarch;
1871 displaced->step_closure = closure;
1872 displaced->step_original = original;
1873 displaced->step_copy = copy;
9f5a595d 1874
fc1cf338 1875 make_cleanup (displaced_step_clear_cleanup, displaced);
237fc4c9
PA
1876
1877 /* Resume execution at the copy. */
515630c5 1878 regcache_write_pc (regcache, copy);
237fc4c9 1879
ad53cd71
PA
1880 discard_cleanups (ignore_cleanups);
1881
1882 do_cleanups (old_cleanups);
237fc4c9
PA
1883
1884 if (debug_displaced)
5af949e3
UW
1885 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1886 paddress (gdbarch, copy));
237fc4c9 1887
237fc4c9
PA
1888 return 1;
1889}
1890
3fc8eb30
PA
1891/* Wrapper for displaced_step_prepare_throw that disabled further
1892 attempts at displaced stepping if we get a memory error. */
1893
1894static int
1895displaced_step_prepare (ptid_t ptid)
1896{
1897 int prepared = -1;
1898
1899 TRY
1900 {
1901 prepared = displaced_step_prepare_throw (ptid);
1902 }
1903 CATCH (ex, RETURN_MASK_ERROR)
1904 {
1905 struct displaced_step_inferior_state *displaced_state;
1906
16b41842
PA
1907 if (ex.error != MEMORY_ERROR
1908 && ex.error != NOT_SUPPORTED_ERROR)
3fc8eb30
PA
1909 throw_exception (ex);
1910
1911 if (debug_infrun)
1912 {
1913 fprintf_unfiltered (gdb_stdlog,
1914 "infrun: disabling displaced stepping: %s\n",
1915 ex.message);
1916 }
1917
1918 /* Be verbose if "set displaced-stepping" is "on", silent if
1919 "auto". */
1920 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1921 {
fd7dcb94 1922 warning (_("disabling displaced stepping: %s"),
3fc8eb30
PA
1923 ex.message);
1924 }
1925
1926 /* Disable further displaced stepping attempts. */
1927 displaced_state
1928 = get_displaced_stepping_state (ptid_get_pid (ptid));
1929 displaced_state->failed_before = 1;
1930 }
1931 END_CATCH
1932
1933 return prepared;
1934}
1935
237fc4c9 1936static void
3e43a32a
MS
1937write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1938 const gdb_byte *myaddr, int len)
237fc4c9
PA
1939{
1940 struct cleanup *ptid_cleanup = save_inferior_ptid ();
abbb1732 1941
237fc4c9
PA
1942 inferior_ptid = ptid;
1943 write_memory (memaddr, myaddr, len);
1944 do_cleanups (ptid_cleanup);
1945}
1946
e2d96639
YQ
1947/* Restore the contents of the copy area for thread PTID. */
1948
1949static void
1950displaced_step_restore (struct displaced_step_inferior_state *displaced,
1951 ptid_t ptid)
1952{
1953 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1954
1955 write_memory_ptid (ptid, displaced->step_copy,
1956 displaced->step_saved_copy, len);
1957 if (debug_displaced)
1958 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1959 target_pid_to_str (ptid),
1960 paddress (displaced->step_gdbarch,
1961 displaced->step_copy));
1962}
1963
372316f1
PA
1964/* If we displaced stepped an instruction successfully, adjust
1965 registers and memory to yield the same effect the instruction would
1966 have had if we had executed it at its original address, and return
1967 1. If the instruction didn't complete, relocate the PC and return
1968 -1. If the thread wasn't displaced stepping, return 0. */
1969
1970static int
2ea28649 1971displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
237fc4c9
PA
1972{
1973 struct cleanup *old_cleanups;
fc1cf338
PA
1974 struct displaced_step_inferior_state *displaced
1975 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
372316f1 1976 int ret;
fc1cf338
PA
1977
1978 /* Was any thread of this process doing a displaced step? */
1979 if (displaced == NULL)
372316f1 1980 return 0;
237fc4c9
PA
1981
1982 /* Was this event for the pid we displaced? */
fc1cf338
PA
1983 if (ptid_equal (displaced->step_ptid, null_ptid)
1984 || ! ptid_equal (displaced->step_ptid, event_ptid))
372316f1 1985 return 0;
237fc4c9 1986
fc1cf338 1987 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
237fc4c9 1988
e2d96639 1989 displaced_step_restore (displaced, displaced->step_ptid);
237fc4c9 1990
cb71640d
PA
1991 /* Fixup may need to read memory/registers. Switch to the thread
1992 that we're fixing up. Also, target_stopped_by_watchpoint checks
1993 the current thread. */
1994 switch_to_thread (event_ptid);
1995
237fc4c9 1996 /* Did the instruction complete successfully? */
cb71640d
PA
1997 if (signal == GDB_SIGNAL_TRAP
1998 && !(target_stopped_by_watchpoint ()
1999 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
2000 || target_have_steppable_watchpoint)))
237fc4c9
PA
2001 {
2002 /* Fix up the resulting state. */
fc1cf338
PA
2003 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
2004 displaced->step_closure,
2005 displaced->step_original,
2006 displaced->step_copy,
2007 get_thread_regcache (displaced->step_ptid));
372316f1 2008 ret = 1;
237fc4c9
PA
2009 }
2010 else
2011 {
2012 /* Since the instruction didn't complete, all we can do is
2013 relocate the PC. */
515630c5
UW
2014 struct regcache *regcache = get_thread_regcache (event_ptid);
2015 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 2016
fc1cf338 2017 pc = displaced->step_original + (pc - displaced->step_copy);
515630c5 2018 regcache_write_pc (regcache, pc);
372316f1 2019 ret = -1;
237fc4c9
PA
2020 }
2021
2022 do_cleanups (old_cleanups);
2023
fc1cf338 2024 displaced->step_ptid = null_ptid;
372316f1
PA
2025
2026 return ret;
c2829269 2027}
1c5cfe86 2028
4d9d9d04
PA
2029/* Data to be passed around while handling an event. This data is
2030 discarded between events. */
2031struct execution_control_state
2032{
2033 ptid_t ptid;
2034 /* The thread that got the event, if this was a thread event; NULL
2035 otherwise. */
2036 struct thread_info *event_thread;
2037
2038 struct target_waitstatus ws;
2039 int stop_func_filled_in;
2040 CORE_ADDR stop_func_start;
2041 CORE_ADDR stop_func_end;
2042 const char *stop_func_name;
2043 int wait_some_more;
2044
2045 /* True if the event thread hit the single-step breakpoint of
2046 another thread. Thus the event doesn't cause a stop, the thread
2047 needs to be single-stepped past the single-step breakpoint before
2048 we can switch back to the original stepping thread. */
2049 int hit_singlestep_breakpoint;
2050};
2051
2052/* Clear ECS and set it to point at TP. */
c2829269
PA
2053
2054static void
4d9d9d04
PA
2055reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
2056{
2057 memset (ecs, 0, sizeof (*ecs));
2058 ecs->event_thread = tp;
2059 ecs->ptid = tp->ptid;
2060}
2061
2062static void keep_going_pass_signal (struct execution_control_state *ecs);
2063static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 2064static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 2065static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
2066
2067/* Are there any pending step-over requests? If so, run all we can
2068 now and return true. Otherwise, return false. */
2069
2070static int
c2829269
PA
2071start_step_over (void)
2072{
2073 struct thread_info *tp, *next;
2074
372316f1
PA
2075 /* Don't start a new step-over if we already have an in-line
2076 step-over operation ongoing. */
2077 if (step_over_info_valid_p ())
2078 return 0;
2079
c2829269 2080 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 2081 {
4d9d9d04
PA
2082 struct execution_control_state ecss;
2083 struct execution_control_state *ecs = &ecss;
8d297bbf 2084 step_over_what step_what;
372316f1 2085 int must_be_in_line;
c2829269 2086
c65d6b55
PA
2087 gdb_assert (!tp->stop_requested);
2088
c2829269 2089 next = thread_step_over_chain_next (tp);
237fc4c9 2090
c2829269
PA
2091 /* If this inferior already has a displaced step in process,
2092 don't start a new one. */
4d9d9d04 2093 if (displaced_step_in_progress (ptid_get_pid (tp->ptid)))
c2829269
PA
2094 continue;
2095
372316f1
PA
2096 step_what = thread_still_needs_step_over (tp);
2097 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2098 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 2099 && !use_displaced_stepping (tp)));
372316f1
PA
2100
2101 /* We currently stop all threads of all processes to step-over
2102 in-line. If we need to start a new in-line step-over, let
2103 any pending displaced steps finish first. */
2104 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
2105 return 0;
2106
c2829269
PA
2107 thread_step_over_chain_remove (tp);
2108
2109 if (step_over_queue_head == NULL)
2110 {
2111 if (debug_infrun)
2112 fprintf_unfiltered (gdb_stdlog,
2113 "infrun: step-over queue now empty\n");
2114 }
2115
372316f1
PA
2116 if (tp->control.trap_expected
2117 || tp->resumed
2118 || tp->executing)
ad53cd71 2119 {
4d9d9d04
PA
2120 internal_error (__FILE__, __LINE__,
2121 "[%s] has inconsistent state: "
372316f1 2122 "trap_expected=%d, resumed=%d, executing=%d\n",
4d9d9d04
PA
2123 target_pid_to_str (tp->ptid),
2124 tp->control.trap_expected,
372316f1 2125 tp->resumed,
4d9d9d04 2126 tp->executing);
ad53cd71 2127 }
1c5cfe86 2128
4d9d9d04
PA
2129 if (debug_infrun)
2130 fprintf_unfiltered (gdb_stdlog,
2131 "infrun: resuming [%s] for step-over\n",
2132 target_pid_to_str (tp->ptid));
2133
2134 /* keep_going_pass_signal skips the step-over if the breakpoint
2135 is no longer inserted. In all-stop, we want to keep looking
2136 for a thread that needs a step-over instead of resuming TP,
2137 because we wouldn't be able to resume anything else until the
2138 target stops again. In non-stop, the resume always resumes
2139 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2140 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2141 continue;
8550d3b3 2142
4d9d9d04
PA
2143 switch_to_thread (tp->ptid);
2144 reset_ecs (ecs, tp);
2145 keep_going_pass_signal (ecs);
1c5cfe86 2146
4d9d9d04
PA
2147 if (!ecs->wait_some_more)
2148 error (_("Command aborted."));
1c5cfe86 2149
372316f1
PA
2150 gdb_assert (tp->resumed);
2151
2152 /* If we started a new in-line step-over, we're done. */
2153 if (step_over_info_valid_p ())
2154 {
2155 gdb_assert (tp->control.trap_expected);
2156 return 1;
2157 }
2158
fbea99ea 2159 if (!target_is_non_stop_p ())
4d9d9d04
PA
2160 {
2161 /* On all-stop, shouldn't have resumed unless we needed a
2162 step over. */
2163 gdb_assert (tp->control.trap_expected
2164 || tp->step_after_step_resume_breakpoint);
2165
2166 /* With remote targets (at least), in all-stop, we can't
2167 issue any further remote commands until the program stops
2168 again. */
2169 return 1;
1c5cfe86 2170 }
c2829269 2171
4d9d9d04
PA
2172 /* Either the thread no longer needed a step-over, or a new
2173 displaced stepping sequence started. Even in the latter
2174 case, continue looking. Maybe we can also start another
2175 displaced step on a thread of other process. */
237fc4c9 2176 }
4d9d9d04
PA
2177
2178 return 0;
237fc4c9
PA
2179}
2180
5231c1fd
PA
2181/* Update global variables holding ptids to hold NEW_PTID if they were
2182 holding OLD_PTID. */
2183static void
2184infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2185{
fc1cf338 2186 struct displaced_step_inferior_state *displaced;
5231c1fd
PA
2187
2188 if (ptid_equal (inferior_ptid, old_ptid))
2189 inferior_ptid = new_ptid;
2190
fc1cf338
PA
2191 for (displaced = displaced_step_inferior_states;
2192 displaced;
2193 displaced = displaced->next)
2194 {
2195 if (ptid_equal (displaced->step_ptid, old_ptid))
2196 displaced->step_ptid = new_ptid;
fc1cf338 2197 }
5231c1fd
PA
2198}
2199
237fc4c9
PA
2200\f
2201/* Resuming. */
c906108c
SS
2202
2203/* Things to clean up if we QUIT out of resume (). */
c906108c 2204static void
74b7792f 2205resume_cleanups (void *ignore)
c906108c 2206{
34b7e8a6
PA
2207 if (!ptid_equal (inferior_ptid, null_ptid))
2208 delete_single_step_breakpoints (inferior_thread ());
7c16b83e 2209
c906108c
SS
2210 normal_stop ();
2211}
2212
53904c9e
AC
2213static const char schedlock_off[] = "off";
2214static const char schedlock_on[] = "on";
2215static const char schedlock_step[] = "step";
f2665db5 2216static const char schedlock_replay[] = "replay";
40478521 2217static const char *const scheduler_enums[] = {
ef346e04
AC
2218 schedlock_off,
2219 schedlock_on,
2220 schedlock_step,
f2665db5 2221 schedlock_replay,
ef346e04
AC
2222 NULL
2223};
f2665db5 2224static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2225static void
2226show_scheduler_mode (struct ui_file *file, int from_tty,
2227 struct cmd_list_element *c, const char *value)
2228{
3e43a32a
MS
2229 fprintf_filtered (file,
2230 _("Mode for locking scheduler "
2231 "during execution is \"%s\".\n"),
920d2a44
AC
2232 value);
2233}
c906108c
SS
2234
2235static void
96baa820 2236set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
c906108c 2237{
eefe576e
AC
2238 if (!target_can_lock_scheduler)
2239 {
2240 scheduler_mode = schedlock_off;
2241 error (_("Target '%s' cannot support this command."), target_shortname);
2242 }
c906108c
SS
2243}
2244
d4db2f36
PA
2245/* True if execution commands resume all threads of all processes by
2246 default; otherwise, resume only threads of the current inferior
2247 process. */
2248int sched_multi = 0;
2249
2facfe5c
DD
2250/* Try to setup for software single stepping over the specified location.
2251 Return 1 if target_resume() should use hardware single step.
2252
2253 GDBARCH the current gdbarch.
2254 PC the location to step over. */
2255
2256static int
2257maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2258{
2259 int hw_step = 1;
2260
f02253f1 2261 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2262 && gdbarch_software_single_step_p (gdbarch))
2263 hw_step = !insert_single_step_breakpoints (gdbarch);
2264
2facfe5c
DD
2265 return hw_step;
2266}
c906108c 2267
f3263aa4
PA
2268/* See infrun.h. */
2269
09cee04b
PA
2270ptid_t
2271user_visible_resume_ptid (int step)
2272{
f3263aa4 2273 ptid_t resume_ptid;
09cee04b 2274
09cee04b
PA
2275 if (non_stop)
2276 {
2277 /* With non-stop mode on, threads are always handled
2278 individually. */
2279 resume_ptid = inferior_ptid;
2280 }
2281 else if ((scheduler_mode == schedlock_on)
03d46957 2282 || (scheduler_mode == schedlock_step && step))
09cee04b 2283 {
f3263aa4
PA
2284 /* User-settable 'scheduler' mode requires solo thread
2285 resume. */
09cee04b
PA
2286 resume_ptid = inferior_ptid;
2287 }
f2665db5
MM
2288 else if ((scheduler_mode == schedlock_replay)
2289 && target_record_will_replay (minus_one_ptid, execution_direction))
2290 {
2291 /* User-settable 'scheduler' mode requires solo thread resume in replay
2292 mode. */
2293 resume_ptid = inferior_ptid;
2294 }
f3263aa4
PA
2295 else if (!sched_multi && target_supports_multi_process ())
2296 {
2297 /* Resume all threads of the current process (and none of other
2298 processes). */
2299 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2300 }
2301 else
2302 {
2303 /* Resume all threads of all processes. */
2304 resume_ptid = RESUME_ALL;
2305 }
09cee04b
PA
2306
2307 return resume_ptid;
2308}
2309
fbea99ea
PA
2310/* Return a ptid representing the set of threads that we will resume,
2311 in the perspective of the target, assuming run control handling
2312 does not require leaving some threads stopped (e.g., stepping past
2313 breakpoint). USER_STEP indicates whether we're about to start the
2314 target for a stepping command. */
2315
2316static ptid_t
2317internal_resume_ptid (int user_step)
2318{
2319 /* In non-stop, we always control threads individually. Note that
2320 the target may always work in non-stop mode even with "set
2321 non-stop off", in which case user_visible_resume_ptid could
2322 return a wildcard ptid. */
2323 if (target_is_non_stop_p ())
2324 return inferior_ptid;
2325 else
2326 return user_visible_resume_ptid (user_step);
2327}
2328
64ce06e4
PA
2329/* Wrapper for target_resume, that handles infrun-specific
2330 bookkeeping. */
2331
2332static void
2333do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2334{
2335 struct thread_info *tp = inferior_thread ();
2336
c65d6b55
PA
2337 gdb_assert (!tp->stop_requested);
2338
64ce06e4
PA
2339 /* Install inferior's terminal modes. */
2340 target_terminal_inferior ();
2341
2342 /* Avoid confusing the next resume, if the next stop/resume
2343 happens to apply to another thread. */
2344 tp->suspend.stop_signal = GDB_SIGNAL_0;
2345
8f572e5c
PA
2346 /* Advise target which signals may be handled silently.
2347
2348 If we have removed breakpoints because we are stepping over one
2349 in-line (in any thread), we need to receive all signals to avoid
2350 accidentally skipping a breakpoint during execution of a signal
2351 handler.
2352
2353 Likewise if we're displaced stepping, otherwise a trap for a
2354 breakpoint in a signal handler might be confused with the
2355 displaced step finishing. We don't make the displaced_step_fixup
2356 step distinguish the cases instead, because:
2357
2358 - a backtrace while stopped in the signal handler would show the
2359 scratch pad as frame older than the signal handler, instead of
2360 the real mainline code.
2361
2362 - when the thread is later resumed, the signal handler would
2363 return to the scratch pad area, which would no longer be
2364 valid. */
2365 if (step_over_info_valid_p ()
2366 || displaced_step_in_progress (ptid_get_pid (tp->ptid)))
64ce06e4
PA
2367 target_pass_signals (0, NULL);
2368 else
2369 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2370
2371 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2372
2373 target_commit_resume ();
64ce06e4
PA
2374}
2375
c906108c
SS
2376/* Resume the inferior, but allow a QUIT. This is useful if the user
2377 wants to interrupt some lengthy single-stepping operation
2378 (for child processes, the SIGINT goes to the inferior, and so
2379 we get a SIGINT random_signal, but for remote debugging and perhaps
2380 other targets, that's not true).
2381
c906108c
SS
2382 SIG is the signal to give the inferior (zero for none). */
2383void
64ce06e4 2384resume (enum gdb_signal sig)
c906108c 2385{
74b7792f 2386 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
515630c5
UW
2387 struct regcache *regcache = get_current_regcache ();
2388 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4e1c45ea 2389 struct thread_info *tp = inferior_thread ();
515630c5 2390 CORE_ADDR pc = regcache_read_pc (regcache);
6c95b8df 2391 struct address_space *aspace = get_regcache_aspace (regcache);
b0f16a3e 2392 ptid_t resume_ptid;
856e7dd6
PA
2393 /* This represents the user's step vs continue request. When
2394 deciding whether "set scheduler-locking step" applies, it's the
2395 user's intention that counts. */
2396 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2397 /* This represents what we'll actually request the target to do.
2398 This can decay from a step to a continue, if e.g., we need to
2399 implement single-stepping with breakpoints (software
2400 single-step). */
6b403daa 2401 int step;
c7e8a53c 2402
c65d6b55 2403 gdb_assert (!tp->stop_requested);
c2829269
PA
2404 gdb_assert (!thread_is_in_step_over_chain (tp));
2405
c906108c
SS
2406 QUIT;
2407
372316f1
PA
2408 if (tp->suspend.waitstatus_pending_p)
2409 {
2410 if (debug_infrun)
2411 {
2412 char *statstr;
2413
2414 statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
2415 fprintf_unfiltered (gdb_stdlog,
2416 "infrun: resume: thread %s has pending wait status %s "
2417 "(currently_stepping=%d).\n",
2418 target_pid_to_str (tp->ptid), statstr,
2419 currently_stepping (tp));
2420 xfree (statstr);
2421 }
2422
2423 tp->resumed = 1;
2424
2425 /* FIXME: What should we do if we are supposed to resume this
2426 thread with a signal? Maybe we should maintain a queue of
2427 pending signals to deliver. */
2428 if (sig != GDB_SIGNAL_0)
2429 {
fd7dcb94 2430 warning (_("Couldn't deliver signal %s to %s."),
372316f1
PA
2431 gdb_signal_to_name (sig), target_pid_to_str (tp->ptid));
2432 }
2433
2434 tp->suspend.stop_signal = GDB_SIGNAL_0;
2435 discard_cleanups (old_cleanups);
2436
2437 if (target_can_async_p ())
2438 target_async (1);
2439 return;
2440 }
2441
2442 tp->stepped_breakpoint = 0;
2443
6b403daa
PA
2444 /* Depends on stepped_breakpoint. */
2445 step = currently_stepping (tp);
2446
74609e71
YQ
2447 if (current_inferior ()->waiting_for_vfork_done)
2448 {
48f9886d
PA
2449 /* Don't try to single-step a vfork parent that is waiting for
2450 the child to get out of the shared memory region (by exec'ing
2451 or exiting). This is particularly important on software
2452 single-step archs, as the child process would trip on the
2453 software single step breakpoint inserted for the parent
2454 process. Since the parent will not actually execute any
2455 instruction until the child is out of the shared region (such
2456 are vfork's semantics), it is safe to simply continue it.
2457 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2458 the parent, and tell it to `keep_going', which automatically
2459 re-sets it stepping. */
74609e71
YQ
2460 if (debug_infrun)
2461 fprintf_unfiltered (gdb_stdlog,
2462 "infrun: resume : clear step\n");
a09dd441 2463 step = 0;
74609e71
YQ
2464 }
2465
527159b7 2466 if (debug_infrun)
237fc4c9 2467 fprintf_unfiltered (gdb_stdlog,
c9737c08 2468 "infrun: resume (step=%d, signal=%s), "
0d9a9a5f 2469 "trap_expected=%d, current thread [%s] at %s\n",
c9737c08
PA
2470 step, gdb_signal_to_symbol_string (sig),
2471 tp->control.trap_expected,
0d9a9a5f
PA
2472 target_pid_to_str (inferior_ptid),
2473 paddress (gdbarch, pc));
c906108c 2474
c2c6d25f
JM
2475 /* Normally, by the time we reach `resume', the breakpoints are either
2476 removed or inserted, as appropriate. The exception is if we're sitting
2477 at a permanent breakpoint; we need to step over it, but permanent
2478 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2479 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2480 {
af48d08f
PA
2481 if (sig != GDB_SIGNAL_0)
2482 {
2483 /* We have a signal to pass to the inferior. The resume
2484 may, or may not take us to the signal handler. If this
2485 is a step, we'll need to stop in the signal handler, if
2486 there's one, (if the target supports stepping into
2487 handlers), or in the next mainline instruction, if
2488 there's no handler. If this is a continue, we need to be
2489 sure to run the handler with all breakpoints inserted.
2490 In all cases, set a breakpoint at the current address
2491 (where the handler returns to), and once that breakpoint
2492 is hit, resume skipping the permanent breakpoint. If
2493 that breakpoint isn't hit, then we've stepped into the
2494 signal handler (or hit some other event). We'll delete
2495 the step-resume breakpoint then. */
2496
2497 if (debug_infrun)
2498 fprintf_unfiltered (gdb_stdlog,
2499 "infrun: resume: skipping permanent breakpoint, "
2500 "deliver signal first\n");
2501
2502 clear_step_over_info ();
2503 tp->control.trap_expected = 0;
2504
2505 if (tp->control.step_resume_breakpoint == NULL)
2506 {
2507 /* Set a "high-priority" step-resume, as we don't want
2508 user breakpoints at PC to trigger (again) when this
2509 hits. */
2510 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2511 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2512
2513 tp->step_after_step_resume_breakpoint = step;
2514 }
2515
2516 insert_breakpoints ();
2517 }
2518 else
2519 {
2520 /* There's no signal to pass, we can go ahead and skip the
2521 permanent breakpoint manually. */
2522 if (debug_infrun)
2523 fprintf_unfiltered (gdb_stdlog,
2524 "infrun: resume: skipping permanent breakpoint\n");
2525 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2526 /* Update pc to reflect the new address from which we will
2527 execute instructions. */
2528 pc = regcache_read_pc (regcache);
2529
2530 if (step)
2531 {
2532 /* We've already advanced the PC, so the stepping part
2533 is done. Now we need to arrange for a trap to be
2534 reported to handle_inferior_event. Set a breakpoint
2535 at the current PC, and run to it. Don't update
2536 prev_pc, because if we end in
44a1ee51
PA
2537 switch_back_to_stepped_thread, we want the "expected
2538 thread advanced also" branch to be taken. IOW, we
2539 don't want this thread to step further from PC
af48d08f 2540 (overstep). */
1ac806b8 2541 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2542 insert_single_step_breakpoint (gdbarch, aspace, pc);
2543 insert_breakpoints ();
2544
fbea99ea 2545 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2546 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
af48d08f 2547 discard_cleanups (old_cleanups);
372316f1 2548 tp->resumed = 1;
af48d08f
PA
2549 return;
2550 }
2551 }
6d350bb5 2552 }
c2c6d25f 2553
c1e36e3e
PA
2554 /* If we have a breakpoint to step over, make sure to do a single
2555 step only. Same if we have software watchpoints. */
2556 if (tp->control.trap_expected || bpstat_should_step ())
2557 tp->control.may_range_step = 0;
2558
237fc4c9
PA
2559 /* If enabled, step over breakpoints by executing a copy of the
2560 instruction at a different address.
2561
2562 We can't use displaced stepping when we have a signal to deliver;
2563 the comments for displaced_step_prepare explain why. The
2564 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2565 signals' explain what we do instead.
2566
2567 We can't use displaced stepping when we are waiting for vfork_done
2568 event, displaced stepping breaks the vfork child similarly as single
2569 step software breakpoint. */
3fc8eb30
PA
2570 if (tp->control.trap_expected
2571 && use_displaced_stepping (tp)
cb71640d 2572 && !step_over_info_valid_p ()
a493e3e2 2573 && sig == GDB_SIGNAL_0
74609e71 2574 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2575 {
3fc8eb30 2576 int prepared = displaced_step_prepare (inferior_ptid);
fc1cf338 2577
3fc8eb30 2578 if (prepared == 0)
d56b7306 2579 {
4d9d9d04
PA
2580 if (debug_infrun)
2581 fprintf_unfiltered (gdb_stdlog,
2582 "Got placed in step-over queue\n");
2583
2584 tp->control.trap_expected = 0;
d56b7306
VP
2585 discard_cleanups (old_cleanups);
2586 return;
2587 }
3fc8eb30
PA
2588 else if (prepared < 0)
2589 {
2590 /* Fallback to stepping over the breakpoint in-line. */
2591
2592 if (target_is_non_stop_p ())
2593 stop_all_threads ();
2594
2595 set_step_over_info (get_regcache_aspace (regcache),
21edc42f 2596 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2597
2598 step = maybe_software_singlestep (gdbarch, pc);
2599
2600 insert_breakpoints ();
2601 }
2602 else if (prepared > 0)
2603 {
2604 struct displaced_step_inferior_state *displaced;
99e40580 2605
3fc8eb30
PA
2606 /* Update pc to reflect the new address from which we will
2607 execute instructions due to displaced stepping. */
2608 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
ca7781d2 2609
3fc8eb30
PA
2610 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2611 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2612 displaced->step_closure);
2613 }
237fc4c9
PA
2614 }
2615
2facfe5c 2616 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2617 else if (step)
2facfe5c 2618 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2619
30852783
UW
2620 /* Currently, our software single-step implementation leads to different
2621 results than hardware single-stepping in one situation: when stepping
2622 into delivering a signal which has an associated signal handler,
2623 hardware single-step will stop at the first instruction of the handler,
2624 while software single-step will simply skip execution of the handler.
2625
2626 For now, this difference in behavior is accepted since there is no
2627 easy way to actually implement single-stepping into a signal handler
2628 without kernel support.
2629
2630 However, there is one scenario where this difference leads to follow-on
2631 problems: if we're stepping off a breakpoint by removing all breakpoints
2632 and then single-stepping. In this case, the software single-step
2633 behavior means that even if there is a *breakpoint* in the signal
2634 handler, GDB still would not stop.
2635
2636 Fortunately, we can at least fix this particular issue. We detect
2637 here the case where we are about to deliver a signal while software
2638 single-stepping with breakpoints removed. In this situation, we
2639 revert the decisions to remove all breakpoints and insert single-
2640 step breakpoints, and instead we install a step-resume breakpoint
2641 at the current address, deliver the signal without stepping, and
2642 once we arrive back at the step-resume breakpoint, actually step
2643 over the breakpoint we originally wanted to step over. */
34b7e8a6 2644 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2645 && sig != GDB_SIGNAL_0
2646 && step_over_info_valid_p ())
30852783
UW
2647 {
2648 /* If we have nested signals or a pending signal is delivered
2649 immediately after a handler returns, might might already have
2650 a step-resume breakpoint set on the earlier handler. We cannot
2651 set another step-resume breakpoint; just continue on until the
2652 original breakpoint is hit. */
2653 if (tp->control.step_resume_breakpoint == NULL)
2654 {
2c03e5be 2655 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2656 tp->step_after_step_resume_breakpoint = 1;
2657 }
2658
34b7e8a6 2659 delete_single_step_breakpoints (tp);
30852783 2660
31e77af2 2661 clear_step_over_info ();
30852783 2662 tp->control.trap_expected = 0;
31e77af2
PA
2663
2664 insert_breakpoints ();
30852783
UW
2665 }
2666
b0f16a3e
SM
2667 /* If STEP is set, it's a request to use hardware stepping
2668 facilities. But in that case, we should never
2669 use singlestep breakpoint. */
34b7e8a6 2670 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2671
fbea99ea 2672 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2673 if (tp->control.trap_expected)
b0f16a3e
SM
2674 {
2675 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2676 hit, either by single-stepping the thread with the breakpoint
2677 removed, or by displaced stepping, with the breakpoint inserted.
2678 In the former case, we need to single-step only this thread,
2679 and keep others stopped, as they can miss this breakpoint if
2680 allowed to run. That's not really a problem for displaced
2681 stepping, but, we still keep other threads stopped, in case
2682 another thread is also stopped for a breakpoint waiting for
2683 its turn in the displaced stepping queue. */
b0f16a3e
SM
2684 resume_ptid = inferior_ptid;
2685 }
fbea99ea
PA
2686 else
2687 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2688
7f5ef605
PA
2689 if (execution_direction != EXEC_REVERSE
2690 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2691 {
372316f1
PA
2692 /* There are two cases where we currently need to step a
2693 breakpoint instruction when we have a signal to deliver:
2694
2695 - See handle_signal_stop where we handle random signals that
2696 could take out us out of the stepping range. Normally, in
2697 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2698 signal handler with a breakpoint at PC, but there are cases
2699 where we should _always_ single-step, even if we have a
2700 step-resume breakpoint, like when a software watchpoint is
2701 set. Assuming single-stepping and delivering a signal at the
2702 same time would takes us to the signal handler, then we could
2703 have removed the breakpoint at PC to step over it. However,
2704 some hardware step targets (like e.g., Mac OS) can't step
2705 into signal handlers, and for those, we need to leave the
2706 breakpoint at PC inserted, as otherwise if the handler
2707 recurses and executes PC again, it'll miss the breakpoint.
2708 So we leave the breakpoint inserted anyway, but we need to
2709 record that we tried to step a breakpoint instruction, so
372316f1
PA
2710 that adjust_pc_after_break doesn't end up confused.
2711
2712 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2713 in one thread after another thread that was stepping had been
2714 momentarily paused for a step-over. When we re-resume the
2715 stepping thread, it may be resumed from that address with a
2716 breakpoint that hasn't trapped yet. Seen with
2717 gdb.threads/non-stop-fair-events.exp, on targets that don't
2718 do displaced stepping. */
2719
2720 if (debug_infrun)
2721 fprintf_unfiltered (gdb_stdlog,
2722 "infrun: resume: [%s] stepped breakpoint\n",
2723 target_pid_to_str (tp->ptid));
7f5ef605
PA
2724
2725 tp->stepped_breakpoint = 1;
2726
b0f16a3e
SM
2727 /* Most targets can step a breakpoint instruction, thus
2728 executing it normally. But if this one cannot, just
2729 continue and we will hit it anyway. */
7f5ef605 2730 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2731 step = 0;
2732 }
ef5cf84e 2733
b0f16a3e 2734 if (debug_displaced
cb71640d 2735 && tp->control.trap_expected
3fc8eb30 2736 && use_displaced_stepping (tp)
cb71640d 2737 && !step_over_info_valid_p ())
b0f16a3e 2738 {
d9b67d9f 2739 struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
b0f16a3e
SM
2740 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2741 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2742 gdb_byte buf[4];
2743
2744 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2745 paddress (resume_gdbarch, actual_pc));
2746 read_memory (actual_pc, buf, sizeof (buf));
2747 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2748 }
237fc4c9 2749
b0f16a3e
SM
2750 if (tp->control.may_range_step)
2751 {
2752 /* If we're resuming a thread with the PC out of the step
2753 range, then we're doing some nested/finer run control
2754 operation, like stepping the thread out of the dynamic
2755 linker or the displaced stepping scratch pad. We
2756 shouldn't have allowed a range step then. */
2757 gdb_assert (pc_in_thread_step_range (pc, tp));
2758 }
c1e36e3e 2759
64ce06e4 2760 do_target_resume (resume_ptid, step, sig);
372316f1 2761 tp->resumed = 1;
c906108c
SS
2762 discard_cleanups (old_cleanups);
2763}
2764\f
237fc4c9 2765/* Proceeding. */
c906108c 2766
4c2f2a79
PA
2767/* See infrun.h. */
2768
2769/* Counter that tracks number of user visible stops. This can be used
2770 to tell whether a command has proceeded the inferior past the
2771 current location. This allows e.g., inferior function calls in
2772 breakpoint commands to not interrupt the command list. When the
2773 call finishes successfully, the inferior is standing at the same
2774 breakpoint as if nothing happened (and so we don't call
2775 normal_stop). */
2776static ULONGEST current_stop_id;
2777
2778/* See infrun.h. */
2779
2780ULONGEST
2781get_stop_id (void)
2782{
2783 return current_stop_id;
2784}
2785
2786/* Called when we report a user visible stop. */
2787
2788static void
2789new_stop_id (void)
2790{
2791 current_stop_id++;
2792}
2793
c906108c
SS
2794/* Clear out all variables saying what to do when inferior is continued.
2795 First do this, then set the ones you want, then call `proceed'. */
2796
a7212384
UW
2797static void
2798clear_proceed_status_thread (struct thread_info *tp)
c906108c 2799{
a7212384
UW
2800 if (debug_infrun)
2801 fprintf_unfiltered (gdb_stdlog,
2802 "infrun: clear_proceed_status_thread (%s)\n",
2803 target_pid_to_str (tp->ptid));
d6b48e9c 2804
372316f1
PA
2805 /* If we're starting a new sequence, then the previous finished
2806 single-step is no longer relevant. */
2807 if (tp->suspend.waitstatus_pending_p)
2808 {
2809 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2810 {
2811 if (debug_infrun)
2812 fprintf_unfiltered (gdb_stdlog,
2813 "infrun: clear_proceed_status: pending "
2814 "event of %s was a finished step. "
2815 "Discarding.\n",
2816 target_pid_to_str (tp->ptid));
2817
2818 tp->suspend.waitstatus_pending_p = 0;
2819 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2820 }
2821 else if (debug_infrun)
2822 {
2823 char *statstr;
2824
2825 statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
2826 fprintf_unfiltered (gdb_stdlog,
2827 "infrun: clear_proceed_status_thread: thread %s "
2828 "has pending wait status %s "
2829 "(currently_stepping=%d).\n",
2830 target_pid_to_str (tp->ptid), statstr,
2831 currently_stepping (tp));
2832 xfree (statstr);
2833 }
2834 }
2835
70509625
PA
2836 /* If this signal should not be seen by program, give it zero.
2837 Used for debugging signals. */
2838 if (!signal_pass_state (tp->suspend.stop_signal))
2839 tp->suspend.stop_signal = GDB_SIGNAL_0;
2840
243a9253
PA
2841 thread_fsm_delete (tp->thread_fsm);
2842 tp->thread_fsm = NULL;
2843
16c381f0
JK
2844 tp->control.trap_expected = 0;
2845 tp->control.step_range_start = 0;
2846 tp->control.step_range_end = 0;
c1e36e3e 2847 tp->control.may_range_step = 0;
16c381f0
JK
2848 tp->control.step_frame_id = null_frame_id;
2849 tp->control.step_stack_frame_id = null_frame_id;
2850 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2851 tp->control.step_start_function = NULL;
a7212384 2852 tp->stop_requested = 0;
4e1c45ea 2853
16c381f0 2854 tp->control.stop_step = 0;
32400beb 2855
16c381f0 2856 tp->control.proceed_to_finish = 0;
414c69f7 2857
856e7dd6 2858 tp->control.stepping_command = 0;
17b2616c 2859
a7212384 2860 /* Discard any remaining commands or status from previous stop. */
16c381f0 2861 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2862}
32400beb 2863
a7212384 2864void
70509625 2865clear_proceed_status (int step)
a7212384 2866{
f2665db5
MM
2867 /* With scheduler-locking replay, stop replaying other threads if we're
2868 not replaying the user-visible resume ptid.
2869
2870 This is a convenience feature to not require the user to explicitly
2871 stop replaying the other threads. We're assuming that the user's
2872 intent is to resume tracing the recorded process. */
2873 if (!non_stop && scheduler_mode == schedlock_replay
2874 && target_record_is_replaying (minus_one_ptid)
2875 && !target_record_will_replay (user_visible_resume_ptid (step),
2876 execution_direction))
2877 target_record_stop_replaying ();
2878
6c95b8df
PA
2879 if (!non_stop)
2880 {
70509625
PA
2881 struct thread_info *tp;
2882 ptid_t resume_ptid;
2883
2884 resume_ptid = user_visible_resume_ptid (step);
2885
2886 /* In all-stop mode, delete the per-thread status of all threads
2887 we're about to resume, implicitly and explicitly. */
2888 ALL_NON_EXITED_THREADS (tp)
2889 {
2890 if (!ptid_match (tp->ptid, resume_ptid))
2891 continue;
2892 clear_proceed_status_thread (tp);
2893 }
6c95b8df
PA
2894 }
2895
a7212384
UW
2896 if (!ptid_equal (inferior_ptid, null_ptid))
2897 {
2898 struct inferior *inferior;
2899
2900 if (non_stop)
2901 {
6c95b8df
PA
2902 /* If in non-stop mode, only delete the per-thread status of
2903 the current thread. */
a7212384
UW
2904 clear_proceed_status_thread (inferior_thread ());
2905 }
6c95b8df 2906
d6b48e9c 2907 inferior = current_inferior ();
16c381f0 2908 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2909 }
2910
f3b1572e 2911 observer_notify_about_to_proceed ();
c906108c
SS
2912}
2913
99619bea
PA
2914/* Returns true if TP is still stopped at a breakpoint that needs
2915 stepping-over in order to make progress. If the breakpoint is gone
2916 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2917
2918static int
6c4cfb24 2919thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2920{
2921 if (tp->stepping_over_breakpoint)
2922 {
2923 struct regcache *regcache = get_thread_regcache (tp->ptid);
2924
2925 if (breakpoint_here_p (get_regcache_aspace (regcache),
af48d08f
PA
2926 regcache_read_pc (regcache))
2927 == ordinary_breakpoint_here)
99619bea
PA
2928 return 1;
2929
2930 tp->stepping_over_breakpoint = 0;
2931 }
2932
2933 return 0;
2934}
2935
6c4cfb24
PA
2936/* Check whether thread TP still needs to start a step-over in order
2937 to make progress when resumed. Returns an bitwise or of enum
2938 step_over_what bits, indicating what needs to be stepped over. */
2939
8d297bbf 2940static step_over_what
6c4cfb24
PA
2941thread_still_needs_step_over (struct thread_info *tp)
2942{
8d297bbf 2943 step_over_what what = 0;
6c4cfb24
PA
2944
2945 if (thread_still_needs_step_over_bp (tp))
2946 what |= STEP_OVER_BREAKPOINT;
2947
2948 if (tp->stepping_over_watchpoint
2949 && !target_have_steppable_watchpoint)
2950 what |= STEP_OVER_WATCHPOINT;
2951
2952 return what;
2953}
2954
483805cf
PA
2955/* Returns true if scheduler locking applies. STEP indicates whether
2956 we're about to do a step/next-like command to a thread. */
2957
2958static int
856e7dd6 2959schedlock_applies (struct thread_info *tp)
483805cf
PA
2960{
2961 return (scheduler_mode == schedlock_on
2962 || (scheduler_mode == schedlock_step
f2665db5
MM
2963 && tp->control.stepping_command)
2964 || (scheduler_mode == schedlock_replay
2965 && target_record_will_replay (minus_one_ptid,
2966 execution_direction)));
483805cf
PA
2967}
2968
c906108c
SS
2969/* Basic routine for continuing the program in various fashions.
2970
2971 ADDR is the address to resume at, or -1 for resume where stopped.
2972 SIGGNAL is the signal to give it, or 0 for none,
c5aa993b 2973 or -1 for act according to how it stopped.
c906108c 2974 STEP is nonzero if should trap after one instruction.
c5aa993b
JM
2975 -1 means return after that and print nothing.
2976 You should probably set various step_... variables
2977 before calling here, if you are stepping.
c906108c
SS
2978
2979 You should call clear_proceed_status before calling proceed. */
2980
2981void
64ce06e4 2982proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2983{
e58b0e63
PA
2984 struct regcache *regcache;
2985 struct gdbarch *gdbarch;
4e1c45ea 2986 struct thread_info *tp;
e58b0e63 2987 CORE_ADDR pc;
6c95b8df 2988 struct address_space *aspace;
4d9d9d04
PA
2989 ptid_t resume_ptid;
2990 struct execution_control_state ecss;
2991 struct execution_control_state *ecs = &ecss;
2992 struct cleanup *old_chain;
85ad3aaf 2993 struct cleanup *defer_resume_cleanup;
4d9d9d04 2994 int started;
c906108c 2995
e58b0e63
PA
2996 /* If we're stopped at a fork/vfork, follow the branch set by the
2997 "set follow-fork-mode" command; otherwise, we'll just proceed
2998 resuming the current thread. */
2999 if (!follow_fork ())
3000 {
3001 /* The target for some reason decided not to resume. */
3002 normal_stop ();
f148b27e
PA
3003 if (target_can_async_p ())
3004 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
e58b0e63
PA
3005 return;
3006 }
3007
842951eb
PA
3008 /* We'll update this if & when we switch to a new thread. */
3009 previous_inferior_ptid = inferior_ptid;
3010
e58b0e63
PA
3011 regcache = get_current_regcache ();
3012 gdbarch = get_regcache_arch (regcache);
6c95b8df 3013 aspace = get_regcache_aspace (regcache);
e58b0e63 3014 pc = regcache_read_pc (regcache);
2adfaa28 3015 tp = inferior_thread ();
e58b0e63 3016
99619bea
PA
3017 /* Fill in with reasonable starting values. */
3018 init_thread_stepping_state (tp);
3019
c2829269
PA
3020 gdb_assert (!thread_is_in_step_over_chain (tp));
3021
2acceee2 3022 if (addr == (CORE_ADDR) -1)
c906108c 3023 {
af48d08f
PA
3024 if (pc == stop_pc
3025 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 3026 && execution_direction != EXEC_REVERSE)
3352ef37
AC
3027 /* There is a breakpoint at the address we will resume at,
3028 step one instruction before inserting breakpoints so that
3029 we do not stop right away (and report a second hit at this
b2175913
MS
3030 breakpoint).
3031
3032 Note, we don't do this in reverse, because we won't
3033 actually be executing the breakpoint insn anyway.
3034 We'll be (un-)executing the previous instruction. */
99619bea 3035 tp->stepping_over_breakpoint = 1;
515630c5
UW
3036 else if (gdbarch_single_step_through_delay_p (gdbarch)
3037 && gdbarch_single_step_through_delay (gdbarch,
3038 get_current_frame ()))
3352ef37
AC
3039 /* We stepped onto an instruction that needs to be stepped
3040 again before re-inserting the breakpoint, do so. */
99619bea 3041 tp->stepping_over_breakpoint = 1;
c906108c
SS
3042 }
3043 else
3044 {
515630c5 3045 regcache_write_pc (regcache, addr);
c906108c
SS
3046 }
3047
70509625
PA
3048 if (siggnal != GDB_SIGNAL_DEFAULT)
3049 tp->suspend.stop_signal = siggnal;
3050
4d9d9d04
PA
3051 resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
3052
3053 /* If an exception is thrown from this point on, make sure to
3054 propagate GDB's knowledge of the executing state to the
3055 frontend/user running state. */
3056 old_chain = make_cleanup (finish_thread_state_cleanup, &resume_ptid);
3057
3058 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3059 threads (e.g., we might need to set threads stepping over
3060 breakpoints first), from the user/frontend's point of view, all
3061 threads in RESUME_PTID are now running. Unless we're calling an
3062 inferior function, as in that case we pretend the inferior
3063 doesn't run at all. */
3064 if (!tp->control.in_infcall)
3065 set_running (resume_ptid, 1);
17b2616c 3066
527159b7 3067 if (debug_infrun)
8a9de0e4 3068 fprintf_unfiltered (gdb_stdlog,
64ce06e4 3069 "infrun: proceed (addr=%s, signal=%s)\n",
c9737c08 3070 paddress (gdbarch, addr),
64ce06e4 3071 gdb_signal_to_symbol_string (siggnal));
527159b7 3072
4d9d9d04
PA
3073 annotate_starting ();
3074
3075 /* Make sure that output from GDB appears before output from the
3076 inferior. */
3077 gdb_flush (gdb_stdout);
3078
3079 /* In a multi-threaded task we may select another thread and
3080 then continue or step.
3081
3082 But if a thread that we're resuming had stopped at a breakpoint,
3083 it will immediately cause another breakpoint stop without any
3084 execution (i.e. it will report a breakpoint hit incorrectly). So
3085 we must step over it first.
3086
3087 Look for threads other than the current (TP) that reported a
3088 breakpoint hit and haven't been resumed yet since. */
3089
3090 /* If scheduler locking applies, we can avoid iterating over all
3091 threads. */
3092 if (!non_stop && !schedlock_applies (tp))
94cc34af 3093 {
4d9d9d04
PA
3094 struct thread_info *current = tp;
3095
3096 ALL_NON_EXITED_THREADS (tp)
3097 {
3098 /* Ignore the current thread here. It's handled
3099 afterwards. */
3100 if (tp == current)
3101 continue;
99619bea 3102
4d9d9d04
PA
3103 /* Ignore threads of processes we're not resuming. */
3104 if (!ptid_match (tp->ptid, resume_ptid))
3105 continue;
c906108c 3106
4d9d9d04
PA
3107 if (!thread_still_needs_step_over (tp))
3108 continue;
3109
3110 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3111
99619bea
PA
3112 if (debug_infrun)
3113 fprintf_unfiltered (gdb_stdlog,
3114 "infrun: need to step-over [%s] first\n",
4d9d9d04 3115 target_pid_to_str (tp->ptid));
99619bea 3116
4d9d9d04 3117 thread_step_over_chain_enqueue (tp);
2adfaa28 3118 }
31e77af2 3119
4d9d9d04 3120 tp = current;
30852783
UW
3121 }
3122
4d9d9d04
PA
3123 /* Enqueue the current thread last, so that we move all other
3124 threads over their breakpoints first. */
3125 if (tp->stepping_over_breakpoint)
3126 thread_step_over_chain_enqueue (tp);
30852783 3127
4d9d9d04
PA
3128 /* If the thread isn't started, we'll still need to set its prev_pc,
3129 so that switch_back_to_stepped_thread knows the thread hasn't
3130 advanced. Must do this before resuming any thread, as in
3131 all-stop/remote, once we resume we can't send any other packet
3132 until the target stops again. */
3133 tp->prev_pc = regcache_read_pc (regcache);
99619bea 3134
85ad3aaf
PA
3135 defer_resume_cleanup = make_cleanup_defer_target_commit_resume ();
3136
4d9d9d04 3137 started = start_step_over ();
c906108c 3138
4d9d9d04
PA
3139 if (step_over_info_valid_p ())
3140 {
3141 /* Either this thread started a new in-line step over, or some
3142 other thread was already doing one. In either case, don't
3143 resume anything else until the step-over is finished. */
3144 }
fbea99ea 3145 else if (started && !target_is_non_stop_p ())
4d9d9d04
PA
3146 {
3147 /* A new displaced stepping sequence was started. In all-stop,
3148 we can't talk to the target anymore until it next stops. */
3149 }
fbea99ea
PA
3150 else if (!non_stop && target_is_non_stop_p ())
3151 {
3152 /* In all-stop, but the target is always in non-stop mode.
3153 Start all other threads that are implicitly resumed too. */
3154 ALL_NON_EXITED_THREADS (tp)
3155 {
3156 /* Ignore threads of processes we're not resuming. */
3157 if (!ptid_match (tp->ptid, resume_ptid))
3158 continue;
3159
3160 if (tp->resumed)
3161 {
3162 if (debug_infrun)
3163 fprintf_unfiltered (gdb_stdlog,
3164 "infrun: proceed: [%s] resumed\n",
3165 target_pid_to_str (tp->ptid));
3166 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3167 continue;
3168 }
3169
3170 if (thread_is_in_step_over_chain (tp))
3171 {
3172 if (debug_infrun)
3173 fprintf_unfiltered (gdb_stdlog,
3174 "infrun: proceed: [%s] needs step-over\n",
3175 target_pid_to_str (tp->ptid));
3176 continue;
3177 }
3178
3179 if (debug_infrun)
3180 fprintf_unfiltered (gdb_stdlog,
3181 "infrun: proceed: resuming %s\n",
3182 target_pid_to_str (tp->ptid));
3183
3184 reset_ecs (ecs, tp);
3185 switch_to_thread (tp->ptid);
3186 keep_going_pass_signal (ecs);
3187 if (!ecs->wait_some_more)
fd7dcb94 3188 error (_("Command aborted."));
fbea99ea
PA
3189 }
3190 }
372316f1 3191 else if (!tp->resumed && !thread_is_in_step_over_chain (tp))
4d9d9d04
PA
3192 {
3193 /* The thread wasn't started, and isn't queued, run it now. */
3194 reset_ecs (ecs, tp);
3195 switch_to_thread (tp->ptid);
3196 keep_going_pass_signal (ecs);
3197 if (!ecs->wait_some_more)
fd7dcb94 3198 error (_("Command aborted."));
4d9d9d04 3199 }
c906108c 3200
85ad3aaf
PA
3201 do_cleanups (defer_resume_cleanup);
3202 target_commit_resume ();
3203
4d9d9d04 3204 discard_cleanups (old_chain);
c906108c 3205
0b333c5e
PA
3206 /* Tell the event loop to wait for it to stop. If the target
3207 supports asynchronous execution, it'll do this from within
3208 target_resume. */
362646f5 3209 if (!target_can_async_p ())
0b333c5e 3210 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3211}
c906108c
SS
3212\f
3213
3214/* Start remote-debugging of a machine over a serial link. */
96baa820 3215
c906108c 3216void
8621d6a9 3217start_remote (int from_tty)
c906108c 3218{
d6b48e9c 3219 struct inferior *inferior;
d6b48e9c
PA
3220
3221 inferior = current_inferior ();
16c381f0 3222 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3223
1777feb0 3224 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3225 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3226 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3227 nothing is returned (instead of just blocking). Because of this,
3228 targets expecting an immediate response need to, internally, set
3229 things up so that the target_wait() is forced to eventually
1777feb0 3230 timeout. */
6426a772
JM
3231 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3232 differentiate to its caller what the state of the target is after
3233 the initial open has been performed. Here we're assuming that
3234 the target has stopped. It should be possible to eventually have
3235 target_open() return to the caller an indication that the target
3236 is currently running and GDB state should be set to the same as
1777feb0 3237 for an async run. */
e4c8541f 3238 wait_for_inferior ();
8621d6a9
DJ
3239
3240 /* Now that the inferior has stopped, do any bookkeeping like
3241 loading shared libraries. We want to do this before normal_stop,
3242 so that the displayed frame is up to date. */
3243 post_create_inferior (&current_target, from_tty);
3244
6426a772 3245 normal_stop ();
c906108c
SS
3246}
3247
3248/* Initialize static vars when a new inferior begins. */
3249
3250void
96baa820 3251init_wait_for_inferior (void)
c906108c
SS
3252{
3253 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3254
c906108c
SS
3255 breakpoint_init_inferior (inf_starting);
3256
70509625 3257 clear_proceed_status (0);
9f976b41 3258
ca005067 3259 target_last_wait_ptid = minus_one_ptid;
237fc4c9 3260
842951eb 3261 previous_inferior_ptid = inferior_ptid;
0d1e5fa7 3262
edb3359d
DJ
3263 /* Discard any skipped inlined frames. */
3264 clear_inline_frame_state (minus_one_ptid);
c906108c 3265}
237fc4c9 3266
c906108c 3267\f
488f131b 3268
ec9499be 3269static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3270
568d6575
UW
3271static void handle_step_into_function (struct gdbarch *gdbarch,
3272 struct execution_control_state *ecs);
3273static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3274 struct execution_control_state *ecs);
4f5d7f63 3275static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3276static void check_exception_resume (struct execution_control_state *,
28106bc2 3277 struct frame_info *);
611c83ae 3278
bdc36728 3279static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3280static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3281static void keep_going (struct execution_control_state *ecs);
94c57d6a 3282static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3283static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3284
252fbfc8
PA
3285/* This function is attached as a "thread_stop_requested" observer.
3286 Cleanup local state that assumed the PTID was to be resumed, and
3287 report the stop to the frontend. */
3288
2c0b251b 3289static void
252fbfc8
PA
3290infrun_thread_stop_requested (ptid_t ptid)
3291{
c2829269 3292 struct thread_info *tp;
252fbfc8 3293
c65d6b55
PA
3294 /* PTID was requested to stop. If the thread was already stopped,
3295 but the user/frontend doesn't know about that yet (e.g., the
3296 thread had been temporarily paused for some step-over), set up
3297 for reporting the stop now. */
c2829269
PA
3298 ALL_NON_EXITED_THREADS (tp)
3299 if (ptid_match (tp->ptid, ptid))
3300 {
c65d6b55
PA
3301 if (tp->state != THREAD_RUNNING)
3302 continue;
3303 if (tp->executing)
3304 continue;
3305
3306 /* Remove matching threads from the step-over queue, so
3307 start_step_over doesn't try to resume them
3308 automatically. */
c2829269
PA
3309 if (thread_is_in_step_over_chain (tp))
3310 thread_step_over_chain_remove (tp);
252fbfc8 3311
c65d6b55
PA
3312 /* If the thread is stopped, but the user/frontend doesn't
3313 know about that yet, queue a pending event, as if the
3314 thread had just stopped now. Unless the thread already had
3315 a pending event. */
3316 if (!tp->suspend.waitstatus_pending_p)
3317 {
3318 tp->suspend.waitstatus_pending_p = 1;
3319 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3320 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3321 }
3322
3323 /* Clear the inline-frame state, since we're re-processing the
3324 stop. */
3325 clear_inline_frame_state (tp->ptid);
3326
3327 /* If this thread was paused because some other thread was
3328 doing an inline-step over, let that finish first. Once
3329 that happens, we'll restart all threads and consume pending
3330 stop events then. */
3331 if (step_over_info_valid_p ())
3332 continue;
3333
3334 /* Otherwise we can process the (new) pending event now. Set
3335 it so this pending event is considered by
3336 do_target_wait. */
3337 tp->resumed = 1;
3338 }
252fbfc8
PA
3339}
3340
a07daef3
PA
3341static void
3342infrun_thread_thread_exit (struct thread_info *tp, int silent)
3343{
3344 if (ptid_equal (target_last_wait_ptid, tp->ptid))
3345 nullify_last_target_wait_ptid ();
3346}
3347
0cbcdb96
PA
3348/* Delete the step resume, single-step and longjmp/exception resume
3349 breakpoints of TP. */
4e1c45ea 3350
0cbcdb96
PA
3351static void
3352delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3353{
0cbcdb96
PA
3354 delete_step_resume_breakpoint (tp);
3355 delete_exception_resume_breakpoint (tp);
34b7e8a6 3356 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3357}
3358
0cbcdb96
PA
3359/* If the target still has execution, call FUNC for each thread that
3360 just stopped. In all-stop, that's all the non-exited threads; in
3361 non-stop, that's the current thread, only. */
3362
3363typedef void (*for_each_just_stopped_thread_callback_func)
3364 (struct thread_info *tp);
4e1c45ea
PA
3365
3366static void
0cbcdb96 3367for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3368{
0cbcdb96 3369 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
4e1c45ea
PA
3370 return;
3371
fbea99ea 3372 if (target_is_non_stop_p ())
4e1c45ea 3373 {
0cbcdb96
PA
3374 /* If in non-stop mode, only the current thread stopped. */
3375 func (inferior_thread ());
4e1c45ea
PA
3376 }
3377 else
0cbcdb96
PA
3378 {
3379 struct thread_info *tp;
3380
3381 /* In all-stop mode, all threads have stopped. */
3382 ALL_NON_EXITED_THREADS (tp)
3383 {
3384 func (tp);
3385 }
3386 }
3387}
3388
3389/* Delete the step resume and longjmp/exception resume breakpoints of
3390 the threads that just stopped. */
3391
3392static void
3393delete_just_stopped_threads_infrun_breakpoints (void)
3394{
3395 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3396}
3397
3398/* Delete the single-step breakpoints of the threads that just
3399 stopped. */
7c16b83e 3400
34b7e8a6
PA
3401static void
3402delete_just_stopped_threads_single_step_breakpoints (void)
3403{
3404 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3405}
3406
1777feb0 3407/* A cleanup wrapper. */
4e1c45ea
PA
3408
3409static void
0cbcdb96 3410delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
4e1c45ea 3411{
0cbcdb96 3412 delete_just_stopped_threads_infrun_breakpoints ();
4e1c45ea
PA
3413}
3414
221e1a37 3415/* See infrun.h. */
223698f8 3416
221e1a37 3417void
223698f8
DE
3418print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3419 const struct target_waitstatus *ws)
3420{
3421 char *status_string = target_waitstatus_to_string (ws);
d7e74731 3422 string_file stb;
223698f8
DE
3423
3424 /* The text is split over several lines because it was getting too long.
3425 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3426 output as a unit; we want only one timestamp printed if debug_timestamp
3427 is set. */
3428
d7e74731
PA
3429 stb.printf ("infrun: target_wait (%d.%ld.%ld",
3430 ptid_get_pid (waiton_ptid),
3431 ptid_get_lwp (waiton_ptid),
3432 ptid_get_tid (waiton_ptid));
dfd4cc63 3433 if (ptid_get_pid (waiton_ptid) != -1)
d7e74731
PA
3434 stb.printf (" [%s]", target_pid_to_str (waiton_ptid));
3435 stb.printf (", status) =\n");
3436 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
3437 ptid_get_pid (result_ptid),
3438 ptid_get_lwp (result_ptid),
3439 ptid_get_tid (result_ptid),
3440 target_pid_to_str (result_ptid));
3441 stb.printf ("infrun: %s\n", status_string);
223698f8
DE
3442
3443 /* This uses %s in part to handle %'s in the text, but also to avoid
3444 a gcc error: the format attribute requires a string literal. */
d7e74731 3445 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3446
3447 xfree (status_string);
223698f8
DE
3448}
3449
372316f1
PA
3450/* Select a thread at random, out of those which are resumed and have
3451 had events. */
3452
3453static struct thread_info *
3454random_pending_event_thread (ptid_t waiton_ptid)
3455{
3456 struct thread_info *event_tp;
3457 int num_events = 0;
3458 int random_selector;
3459
3460 /* First see how many events we have. Count only resumed threads
3461 that have an event pending. */
3462 ALL_NON_EXITED_THREADS (event_tp)
3463 if (ptid_match (event_tp->ptid, waiton_ptid)
3464 && event_tp->resumed
3465 && event_tp->suspend.waitstatus_pending_p)
3466 num_events++;
3467
3468 if (num_events == 0)
3469 return NULL;
3470
3471 /* Now randomly pick a thread out of those that have had events. */
3472 random_selector = (int)
3473 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3474
3475 if (debug_infrun && num_events > 1)
3476 fprintf_unfiltered (gdb_stdlog,
3477 "infrun: Found %d events, selecting #%d\n",
3478 num_events, random_selector);
3479
3480 /* Select the Nth thread that has had an event. */
3481 ALL_NON_EXITED_THREADS (event_tp)
3482 if (ptid_match (event_tp->ptid, waiton_ptid)
3483 && event_tp->resumed
3484 && event_tp->suspend.waitstatus_pending_p)
3485 if (random_selector-- == 0)
3486 break;
3487
3488 return event_tp;
3489}
3490
3491/* Wrapper for target_wait that first checks whether threads have
3492 pending statuses to report before actually asking the target for
3493 more events. */
3494
3495static ptid_t
3496do_target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
3497{
3498 ptid_t event_ptid;
3499 struct thread_info *tp;
3500
3501 /* First check if there is a resumed thread with a wait status
3502 pending. */
3503 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3504 {
3505 tp = random_pending_event_thread (ptid);
3506 }
3507 else
3508 {
3509 if (debug_infrun)
3510 fprintf_unfiltered (gdb_stdlog,
3511 "infrun: Waiting for specific thread %s.\n",
3512 target_pid_to_str (ptid));
3513
3514 /* We have a specific thread to check. */
3515 tp = find_thread_ptid (ptid);
3516 gdb_assert (tp != NULL);
3517 if (!tp->suspend.waitstatus_pending_p)
3518 tp = NULL;
3519 }
3520
3521 if (tp != NULL
3522 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3523 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3524 {
3525 struct regcache *regcache = get_thread_regcache (tp->ptid);
3526 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3527 CORE_ADDR pc;
3528 int discard = 0;
3529
3530 pc = regcache_read_pc (regcache);
3531
3532 if (pc != tp->suspend.stop_pc)
3533 {
3534 if (debug_infrun)
3535 fprintf_unfiltered (gdb_stdlog,
3536 "infrun: PC of %s changed. was=%s, now=%s\n",
3537 target_pid_to_str (tp->ptid),
3538 paddress (gdbarch, tp->prev_pc),
3539 paddress (gdbarch, pc));
3540 discard = 1;
3541 }
3542 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3543 {
3544 if (debug_infrun)
3545 fprintf_unfiltered (gdb_stdlog,
3546 "infrun: previous breakpoint of %s, at %s gone\n",
3547 target_pid_to_str (tp->ptid),
3548 paddress (gdbarch, pc));
3549
3550 discard = 1;
3551 }
3552
3553 if (discard)
3554 {
3555 if (debug_infrun)
3556 fprintf_unfiltered (gdb_stdlog,
3557 "infrun: pending event of %s cancelled.\n",
3558 target_pid_to_str (tp->ptid));
3559
3560 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3561 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3562 }
3563 }
3564
3565 if (tp != NULL)
3566 {
3567 if (debug_infrun)
3568 {
3569 char *statstr;
3570
3571 statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
3572 fprintf_unfiltered (gdb_stdlog,
3573 "infrun: Using pending wait status %s for %s.\n",
3574 statstr,
3575 target_pid_to_str (tp->ptid));
3576 xfree (statstr);
3577 }
3578
3579 /* Now that we've selected our final event LWP, un-adjust its PC
3580 if it was a software breakpoint (and the target doesn't
3581 always adjust the PC itself). */
3582 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3583 && !target_supports_stopped_by_sw_breakpoint ())
3584 {
3585 struct regcache *regcache;
3586 struct gdbarch *gdbarch;
3587 int decr_pc;
3588
3589 regcache = get_thread_regcache (tp->ptid);
3590 gdbarch = get_regcache_arch (regcache);
3591
3592 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3593 if (decr_pc != 0)
3594 {
3595 CORE_ADDR pc;
3596
3597 pc = regcache_read_pc (regcache);
3598 regcache_write_pc (regcache, pc + decr_pc);
3599 }
3600 }
3601
3602 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3603 *status = tp->suspend.waitstatus;
3604 tp->suspend.waitstatus_pending_p = 0;
3605
3606 /* Wake up the event loop again, until all pending events are
3607 processed. */
3608 if (target_is_async_p ())
3609 mark_async_event_handler (infrun_async_inferior_event_token);
3610 return tp->ptid;
3611 }
3612
3613 /* But if we don't find one, we'll have to wait. */
3614
3615 if (deprecated_target_wait_hook)
3616 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3617 else
3618 event_ptid = target_wait (ptid, status, options);
3619
3620 return event_ptid;
3621}
3622
24291992
PA
3623/* Prepare and stabilize the inferior for detaching it. E.g.,
3624 detaching while a thread is displaced stepping is a recipe for
3625 crashing it, as nothing would readjust the PC out of the scratch
3626 pad. */
3627
3628void
3629prepare_for_detach (void)
3630{
3631 struct inferior *inf = current_inferior ();
3632 ptid_t pid_ptid = pid_to_ptid (inf->pid);
24291992
PA
3633 struct displaced_step_inferior_state *displaced;
3634
3635 displaced = get_displaced_stepping_state (inf->pid);
3636
3637 /* Is any thread of this process displaced stepping? If not,
3638 there's nothing else to do. */
3639 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
3640 return;
3641
3642 if (debug_infrun)
3643 fprintf_unfiltered (gdb_stdlog,
3644 "displaced-stepping in-process while detaching");
3645
9bcb1f16 3646 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992
PA
3647
3648 while (!ptid_equal (displaced->step_ptid, null_ptid))
3649 {
3650 struct cleanup *old_chain_2;
3651 struct execution_control_state ecss;
3652 struct execution_control_state *ecs;
3653
3654 ecs = &ecss;
3655 memset (ecs, 0, sizeof (*ecs));
3656
3657 overlay_cache_invalid = 1;
f15cb84a
YQ
3658 /* Flush target cache before starting to handle each event.
3659 Target was running and cache could be stale. This is just a
3660 heuristic. Running threads may modify target memory, but we
3661 don't get any event. */
3662 target_dcache_invalidate ();
24291992 3663
372316f1 3664 ecs->ptid = do_target_wait (pid_ptid, &ecs->ws, 0);
24291992
PA
3665
3666 if (debug_infrun)
3667 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3668
3669 /* If an error happens while handling the event, propagate GDB's
3670 knowledge of the executing state to the frontend/user running
3671 state. */
3e43a32a
MS
3672 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
3673 &minus_one_ptid);
24291992
PA
3674
3675 /* Now figure out what to do with the result of the result. */
3676 handle_inferior_event (ecs);
3677
3678 /* No error, don't finish the state yet. */
3679 discard_cleanups (old_chain_2);
3680
3681 /* Breakpoints and watchpoints are not installed on the target
3682 at this point, and signals are passed directly to the
3683 inferior, so this must mean the process is gone. */
3684 if (!ecs->wait_some_more)
3685 {
9bcb1f16 3686 restore_detaching.release ();
24291992
PA
3687 error (_("Program exited while detaching"));
3688 }
3689 }
3690
9bcb1f16 3691 restore_detaching.release ();
24291992
PA
3692}
3693
cd0fc7c3 3694/* Wait for control to return from inferior to debugger.
ae123ec6 3695
cd0fc7c3
SS
3696 If inferior gets a signal, we may decide to start it up again
3697 instead of returning. That is why there is a loop in this function.
3698 When this function actually returns it means the inferior
3699 should be left stopped and GDB should read more commands. */
3700
3701void
e4c8541f 3702wait_for_inferior (void)
cd0fc7c3
SS
3703{
3704 struct cleanup *old_cleanups;
e6f5c25b 3705 struct cleanup *thread_state_chain;
c906108c 3706
527159b7 3707 if (debug_infrun)
ae123ec6 3708 fprintf_unfiltered
e4c8541f 3709 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
527159b7 3710
0cbcdb96
PA
3711 old_cleanups
3712 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3713 NULL);
cd0fc7c3 3714
e6f5c25b
PA
3715 /* If an error happens while handling the event, propagate GDB's
3716 knowledge of the executing state to the frontend/user running
3717 state. */
3718 thread_state_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3719
c906108c
SS
3720 while (1)
3721 {
ae25568b
PA
3722 struct execution_control_state ecss;
3723 struct execution_control_state *ecs = &ecss;
963f9c80 3724 ptid_t waiton_ptid = minus_one_ptid;
29f49a6a 3725
ae25568b
PA
3726 memset (ecs, 0, sizeof (*ecs));
3727
ec9499be 3728 overlay_cache_invalid = 1;
ec9499be 3729
f15cb84a
YQ
3730 /* Flush target cache before starting to handle each event.
3731 Target was running and cache could be stale. This is just a
3732 heuristic. Running threads may modify target memory, but we
3733 don't get any event. */
3734 target_dcache_invalidate ();
3735
372316f1 3736 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, 0);
c906108c 3737
f00150c9 3738 if (debug_infrun)
223698f8 3739 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
f00150c9 3740
cd0fc7c3
SS
3741 /* Now figure out what to do with the result of the result. */
3742 handle_inferior_event (ecs);
c906108c 3743
cd0fc7c3
SS
3744 if (!ecs->wait_some_more)
3745 break;
3746 }
4e1c45ea 3747
e6f5c25b
PA
3748 /* No error, don't finish the state yet. */
3749 discard_cleanups (thread_state_chain);
3750
cd0fc7c3
SS
3751 do_cleanups (old_cleanups);
3752}
c906108c 3753
d3d4baed
PA
3754/* Cleanup that reinstalls the readline callback handler, if the
3755 target is running in the background. If while handling the target
3756 event something triggered a secondary prompt, like e.g., a
3757 pagination prompt, we'll have removed the callback handler (see
3758 gdb_readline_wrapper_line). Need to do this as we go back to the
3759 event loop, ready to process further input. Note this has no
3760 effect if the handler hasn't actually been removed, because calling
3761 rl_callback_handler_install resets the line buffer, thus losing
3762 input. */
3763
3764static void
3765reinstall_readline_callback_handler_cleanup (void *arg)
3766{
3b12939d
PA
3767 struct ui *ui = current_ui;
3768
3769 if (!ui->async)
6c400b59
PA
3770 {
3771 /* We're not going back to the top level event loop yet. Don't
3772 install the readline callback, as it'd prep the terminal,
3773 readline-style (raw, noecho) (e.g., --batch). We'll install
3774 it the next time the prompt is displayed, when we're ready
3775 for input. */
3776 return;
3777 }
3778
3b12939d 3779 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3780 gdb_rl_callback_handler_reinstall ();
3781}
3782
243a9253
PA
3783/* Clean up the FSMs of threads that are now stopped. In non-stop,
3784 that's just the event thread. In all-stop, that's all threads. */
3785
3786static void
3787clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3788{
3789 struct thread_info *thr = ecs->event_thread;
3790
3791 if (thr != NULL && thr->thread_fsm != NULL)
8980e177 3792 thread_fsm_clean_up (thr->thread_fsm, thr);
243a9253
PA
3793
3794 if (!non_stop)
3795 {
3796 ALL_NON_EXITED_THREADS (thr)
3797 {
3798 if (thr->thread_fsm == NULL)
3799 continue;
3800 if (thr == ecs->event_thread)
3801 continue;
3802
3803 switch_to_thread (thr->ptid);
8980e177 3804 thread_fsm_clean_up (thr->thread_fsm, thr);
243a9253
PA
3805 }
3806
3807 if (ecs->event_thread != NULL)
3808 switch_to_thread (ecs->event_thread->ptid);
3809 }
3810}
3811
3b12939d
PA
3812/* Helper for all_uis_check_sync_execution_done that works on the
3813 current UI. */
3814
3815static void
3816check_curr_ui_sync_execution_done (void)
3817{
3818 struct ui *ui = current_ui;
3819
3820 if (ui->prompt_state == PROMPT_NEEDED
3821 && ui->async
3822 && !gdb_in_secondary_prompt_p (ui))
3823 {
3824 target_terminal_ours ();
3825 observer_notify_sync_execution_done ();
3eb7562a 3826 ui_register_input_event_handler (ui);
3b12939d
PA
3827 }
3828}
3829
3830/* See infrun.h. */
3831
3832void
3833all_uis_check_sync_execution_done (void)
3834{
0e454242 3835 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3836 {
3837 check_curr_ui_sync_execution_done ();
3838 }
3839}
3840
a8836c93
PA
3841/* See infrun.h. */
3842
3843void
3844all_uis_on_sync_execution_starting (void)
3845{
0e454242 3846 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3847 {
3848 if (current_ui->prompt_state == PROMPT_NEEDED)
3849 async_disable_stdin ();
3850 }
3851}
3852
1777feb0 3853/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3854 event loop whenever a change of state is detected on the file
1777feb0
MS
3855 descriptor corresponding to the target. It can be called more than
3856 once to complete a single execution command. In such cases we need
3857 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3858 that this function is called for a single execution command, then
3859 report to the user that the inferior has stopped, and do the
1777feb0 3860 necessary cleanups. */
43ff13b4
JM
3861
3862void
fba45db2 3863fetch_inferior_event (void *client_data)
43ff13b4 3864{
0d1e5fa7 3865 struct execution_control_state ecss;
a474d7c2 3866 struct execution_control_state *ecs = &ecss;
4f8d22e3 3867 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
29f49a6a 3868 struct cleanup *ts_old_chain;
0f641c01 3869 int cmd_done = 0;
963f9c80 3870 ptid_t waiton_ptid = minus_one_ptid;
43ff13b4 3871
0d1e5fa7
PA
3872 memset (ecs, 0, sizeof (*ecs));
3873
c61db772
PA
3874 /* Events are always processed with the main UI as current UI. This
3875 way, warnings, debug output, etc. are always consistently sent to
3876 the main console. */
4b6749b9 3877 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3878
d3d4baed
PA
3879 /* End up with readline processing input, if necessary. */
3880 make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);
3881
c5187ac6
PA
3882 /* We're handling a live event, so make sure we're doing live
3883 debugging. If we're looking at traceframes while the target is
3884 running, we're going to need to get back to that mode after
3885 handling the event. */
3886 if (non_stop)
3887 {
3888 make_cleanup_restore_current_traceframe ();
e6e4e701 3889 set_current_traceframe (-1);
c5187ac6
PA
3890 }
3891
4f8d22e3
PA
3892 if (non_stop)
3893 /* In non-stop mode, the user/frontend should not notice a thread
3894 switch due to internal events. Make sure we reverse to the
3895 user selected thread and frame after handling the event and
3896 running any breakpoint commands. */
3897 make_cleanup_restore_current_thread ();
3898
ec9499be 3899 overlay_cache_invalid = 1;
f15cb84a
YQ
3900 /* Flush target cache before starting to handle each event. Target
3901 was running and cache could be stale. This is just a heuristic.
3902 Running threads may modify target memory, but we don't get any
3903 event. */
3904 target_dcache_invalidate ();
3dd5b83d 3905
b7b633e9
TT
3906 scoped_restore save_exec_dir
3907 = make_scoped_restore (&execution_direction, target_execution_direction ());
32231432 3908
0b333c5e
PA
3909 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws,
3910 target_can_async_p () ? TARGET_WNOHANG : 0);
43ff13b4 3911
f00150c9 3912 if (debug_infrun)
223698f8 3913 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
f00150c9 3914
29f49a6a
PA
3915 /* If an error happens while handling the event, propagate GDB's
3916 knowledge of the executing state to the frontend/user running
3917 state. */
fbea99ea 3918 if (!target_is_non_stop_p ())
29f49a6a
PA
3919 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3920 else
3921 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3922
353d1d73
JK
3923 /* Get executed before make_cleanup_restore_current_thread above to apply
3924 still for the thread which has thrown the exception. */
3925 make_bpstat_clear_actions_cleanup ();
3926
7c16b83e
PA
3927 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3928
43ff13b4 3929 /* Now figure out what to do with the result of the result. */
a474d7c2 3930 handle_inferior_event (ecs);
43ff13b4 3931
a474d7c2 3932 if (!ecs->wait_some_more)
43ff13b4 3933 {
c9657e70 3934 struct inferior *inf = find_inferior_ptid (ecs->ptid);
243a9253
PA
3935 int should_stop = 1;
3936 struct thread_info *thr = ecs->event_thread;
388a7084 3937 int should_notify_stop = 1;
d6b48e9c 3938
0cbcdb96 3939 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3940
243a9253
PA
3941 if (thr != NULL)
3942 {
3943 struct thread_fsm *thread_fsm = thr->thread_fsm;
3944
3945 if (thread_fsm != NULL)
8980e177 3946 should_stop = thread_fsm_should_stop (thread_fsm, thr);
243a9253
PA
3947 }
3948
3949 if (!should_stop)
3950 {
3951 keep_going (ecs);
3952 }
c2d11a7d 3953 else
0f641c01 3954 {
243a9253
PA
3955 clean_up_just_stopped_threads_fsms (ecs);
3956
388a7084
PA
3957 if (thr != NULL && thr->thread_fsm != NULL)
3958 {
3959 should_notify_stop
3960 = thread_fsm_should_notify_stop (thr->thread_fsm);
3961 }
3962
3963 if (should_notify_stop)
3964 {
4c2f2a79
PA
3965 int proceeded = 0;
3966
388a7084
PA
3967 /* We may not find an inferior if this was a process exit. */
3968 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
4c2f2a79 3969 proceeded = normal_stop ();
243a9253 3970
4c2f2a79
PA
3971 if (!proceeded)
3972 {
3973 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3974 cmd_done = 1;
3975 }
388a7084 3976 }
0f641c01 3977 }
43ff13b4 3978 }
4f8d22e3 3979
29f49a6a
PA
3980 /* No error, don't finish the thread states yet. */
3981 discard_cleanups (ts_old_chain);
3982
4f8d22e3
PA
3983 /* Revert thread and frame. */
3984 do_cleanups (old_chain);
3985
3b12939d
PA
3986 /* If a UI was in sync execution mode, and now isn't, restore its
3987 prompt (a synchronous execution command has finished, and we're
3988 ready for input). */
3989 all_uis_check_sync_execution_done ();
0f641c01
PA
3990
3991 if (cmd_done
0f641c01
PA
3992 && exec_done_display_p
3993 && (ptid_equal (inferior_ptid, null_ptid)
3994 || !is_running (inferior_ptid)))
3995 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
3996}
3997
edb3359d
DJ
3998/* Record the frame and location we're currently stepping through. */
3999void
4000set_step_info (struct frame_info *frame, struct symtab_and_line sal)
4001{
4002 struct thread_info *tp = inferior_thread ();
4003
16c381f0
JK
4004 tp->control.step_frame_id = get_frame_id (frame);
4005 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4006
4007 tp->current_symtab = sal.symtab;
4008 tp->current_line = sal.line;
4009}
4010
0d1e5fa7
PA
4011/* Clear context switchable stepping state. */
4012
4013void
4e1c45ea 4014init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4015{
7f5ef605 4016 tss->stepped_breakpoint = 0;
0d1e5fa7 4017 tss->stepping_over_breakpoint = 0;
963f9c80 4018 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4019 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4020}
4021
c32c64b7
DE
4022/* Set the cached copy of the last ptid/waitstatus. */
4023
6efcd9a8 4024void
c32c64b7
DE
4025set_last_target_status (ptid_t ptid, struct target_waitstatus status)
4026{
4027 target_last_wait_ptid = ptid;
4028 target_last_waitstatus = status;
4029}
4030
e02bc4cc 4031/* Return the cached copy of the last pid/waitstatus returned by
9a4105ab
AC
4032 target_wait()/deprecated_target_wait_hook(). The data is actually
4033 cached by handle_inferior_event(), which gets called immediately
4034 after target_wait()/deprecated_target_wait_hook(). */
e02bc4cc
DS
4035
4036void
488f131b 4037get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
e02bc4cc 4038{
39f77062 4039 *ptidp = target_last_wait_ptid;
e02bc4cc
DS
4040 *status = target_last_waitstatus;
4041}
4042
ac264b3b
MS
4043void
4044nullify_last_target_wait_ptid (void)
4045{
4046 target_last_wait_ptid = minus_one_ptid;
4047}
4048
dcf4fbde 4049/* Switch thread contexts. */
dd80620e
MS
4050
4051static void
0d1e5fa7 4052context_switch (ptid_t ptid)
dd80620e 4053{
4b51d87b 4054 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
fd48f117
DJ
4055 {
4056 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
4057 target_pid_to_str (inferior_ptid));
4058 fprintf_unfiltered (gdb_stdlog, "to %s\n",
0d1e5fa7 4059 target_pid_to_str (ptid));
fd48f117
DJ
4060 }
4061
0d1e5fa7 4062 switch_to_thread (ptid);
dd80620e
MS
4063}
4064
d8dd4d5f
PA
4065/* If the target can't tell whether we've hit breakpoints
4066 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4067 check whether that could have been caused by a breakpoint. If so,
4068 adjust the PC, per gdbarch_decr_pc_after_break. */
4069
4fa8626c 4070static void
d8dd4d5f
PA
4071adjust_pc_after_break (struct thread_info *thread,
4072 struct target_waitstatus *ws)
4fa8626c 4073{
24a73cce
UW
4074 struct regcache *regcache;
4075 struct gdbarch *gdbarch;
6c95b8df 4076 struct address_space *aspace;
118e6252 4077 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4078
4fa8626c
DJ
4079 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4080 we aren't, just return.
9709f61c
DJ
4081
4082 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4083 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4084 implemented by software breakpoints should be handled through the normal
4085 breakpoint layer.
8fb3e588 4086
4fa8626c
DJ
4087 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4088 different signals (SIGILL or SIGEMT for instance), but it is less
4089 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4090 gdbarch_decr_pc_after_break. I don't know any specific target that
4091 generates these signals at breakpoints (the code has been in GDB since at
4092 least 1992) so I can not guess how to handle them here.
8fb3e588 4093
e6cf7916
UW
4094 In earlier versions of GDB, a target with
4095 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4096 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4097 target with both of these set in GDB history, and it seems unlikely to be
4098 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4099
d8dd4d5f 4100 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4101 return;
4102
d8dd4d5f 4103 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4104 return;
4105
4058b839
PA
4106 /* In reverse execution, when a breakpoint is hit, the instruction
4107 under it has already been de-executed. The reported PC always
4108 points at the breakpoint address, so adjusting it further would
4109 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4110 architecture:
4111
4112 B1 0x08000000 : INSN1
4113 B2 0x08000001 : INSN2
4114 0x08000002 : INSN3
4115 PC -> 0x08000003 : INSN4
4116
4117 Say you're stopped at 0x08000003 as above. Reverse continuing
4118 from that point should hit B2 as below. Reading the PC when the
4119 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4120 been de-executed already.
4121
4122 B1 0x08000000 : INSN1
4123 B2 PC -> 0x08000001 : INSN2
4124 0x08000002 : INSN3
4125 0x08000003 : INSN4
4126
4127 We can't apply the same logic as for forward execution, because
4128 we would wrongly adjust the PC to 0x08000000, since there's a
4129 breakpoint at PC - 1. We'd then report a hit on B1, although
4130 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4131 behaviour. */
4132 if (execution_direction == EXEC_REVERSE)
4133 return;
4134
1cf4d951
PA
4135 /* If the target can tell whether the thread hit a SW breakpoint,
4136 trust it. Targets that can tell also adjust the PC
4137 themselves. */
4138 if (target_supports_stopped_by_sw_breakpoint ())
4139 return;
4140
4141 /* Note that relying on whether a breakpoint is planted in memory to
4142 determine this can fail. E.g,. the breakpoint could have been
4143 removed since. Or the thread could have been told to step an
4144 instruction the size of a breakpoint instruction, and only
4145 _after_ was a breakpoint inserted at its address. */
4146
24a73cce
UW
4147 /* If this target does not decrement the PC after breakpoints, then
4148 we have nothing to do. */
d8dd4d5f 4149 regcache = get_thread_regcache (thread->ptid);
24a73cce 4150 gdbarch = get_regcache_arch (regcache);
118e6252 4151
527a273a 4152 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4153 if (decr_pc == 0)
24a73cce
UW
4154 return;
4155
6c95b8df
PA
4156 aspace = get_regcache_aspace (regcache);
4157
8aad930b
AC
4158 /* Find the location where (if we've hit a breakpoint) the
4159 breakpoint would be. */
118e6252 4160 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4161
1cf4d951
PA
4162 /* If the target can't tell whether a software breakpoint triggered,
4163 fallback to figuring it out based on breakpoints we think were
4164 inserted in the target, and on whether the thread was stepped or
4165 continued. */
4166
1c5cfe86
PA
4167 /* Check whether there actually is a software breakpoint inserted at
4168 that location.
4169
4170 If in non-stop mode, a race condition is possible where we've
4171 removed a breakpoint, but stop events for that breakpoint were
4172 already queued and arrive later. To suppress those spurious
4173 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4174 and retire them after a number of stop events are reported. Note
4175 this is an heuristic and can thus get confused. The real fix is
4176 to get the "stopped by SW BP and needs adjustment" info out of
4177 the target/kernel (and thus never reach here; see above). */
6c95b8df 4178 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4179 || (target_is_non_stop_p ()
4180 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4181 {
77f9e713 4182 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
abbb1732 4183
8213266a 4184 if (record_full_is_used ())
77f9e713 4185 record_full_gdb_operation_disable_set ();
96429cc8 4186
1c0fdd0e
UW
4187 /* When using hardware single-step, a SIGTRAP is reported for both
4188 a completed single-step and a software breakpoint. Need to
4189 differentiate between the two, as the latter needs adjusting
4190 but the former does not.
4191
4192 The SIGTRAP can be due to a completed hardware single-step only if
4193 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4194 - this thread is currently being stepped
4195
4196 If any of these events did not occur, we must have stopped due
4197 to hitting a software breakpoint, and have to back up to the
4198 breakpoint address.
4199
4200 As a special case, we could have hardware single-stepped a
4201 software breakpoint. In this case (prev_pc == breakpoint_pc),
4202 we also need to back up to the breakpoint address. */
4203
d8dd4d5f
PA
4204 if (thread_has_single_step_breakpoints_set (thread)
4205 || !currently_stepping (thread)
4206 || (thread->stepped_breakpoint
4207 && thread->prev_pc == breakpoint_pc))
515630c5 4208 regcache_write_pc (regcache, breakpoint_pc);
96429cc8 4209
77f9e713 4210 do_cleanups (old_cleanups);
8aad930b 4211 }
4fa8626c
DJ
4212}
4213
edb3359d
DJ
4214static int
4215stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4216{
4217 for (frame = get_prev_frame (frame);
4218 frame != NULL;
4219 frame = get_prev_frame (frame))
4220 {
4221 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4222 return 1;
4223 if (get_frame_type (frame) != INLINE_FRAME)
4224 break;
4225 }
4226
4227 return 0;
4228}
4229
c65d6b55
PA
4230/* If the event thread has the stop requested flag set, pretend it
4231 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4232 target_stop). */
4233
4234static bool
4235handle_stop_requested (struct execution_control_state *ecs)
4236{
4237 if (ecs->event_thread->stop_requested)
4238 {
4239 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4240 ecs->ws.value.sig = GDB_SIGNAL_0;
4241 handle_signal_stop (ecs);
4242 return true;
4243 }
4244 return false;
4245}
4246
a96d9b2e
SDJ
4247/* Auxiliary function that handles syscall entry/return events.
4248 It returns 1 if the inferior should keep going (and GDB
4249 should ignore the event), or 0 if the event deserves to be
4250 processed. */
ca2163eb 4251
a96d9b2e 4252static int
ca2163eb 4253handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4254{
ca2163eb 4255 struct regcache *regcache;
ca2163eb
PA
4256 int syscall_number;
4257
4258 if (!ptid_equal (ecs->ptid, inferior_ptid))
4259 context_switch (ecs->ptid);
4260
4261 regcache = get_thread_regcache (ecs->ptid);
f90263c1 4262 syscall_number = ecs->ws.value.syscall_number;
ca2163eb
PA
4263 stop_pc = regcache_read_pc (regcache);
4264
a96d9b2e
SDJ
4265 if (catch_syscall_enabled () > 0
4266 && catching_syscall_number (syscall_number) > 0)
4267 {
4268 if (debug_infrun)
4269 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4270 syscall_number);
a96d9b2e 4271
16c381f0 4272 ecs->event_thread->control.stop_bpstat
6c95b8df 4273 = bpstat_stop_status (get_regcache_aspace (regcache),
09ac7c10 4274 stop_pc, ecs->ptid, &ecs->ws);
ab04a2af 4275
c65d6b55
PA
4276 if (handle_stop_requested (ecs))
4277 return 0;
4278
ce12b012 4279 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4280 {
4281 /* Catchpoint hit. */
ca2163eb
PA
4282 return 0;
4283 }
a96d9b2e 4284 }
ca2163eb 4285
c65d6b55
PA
4286 if (handle_stop_requested (ecs))
4287 return 0;
4288
ca2163eb 4289 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4290 keep_going (ecs);
4291 return 1;
a96d9b2e
SDJ
4292}
4293
7e324e48
GB
4294/* Lazily fill in the execution_control_state's stop_func_* fields. */
4295
4296static void
4297fill_in_stop_func (struct gdbarch *gdbarch,
4298 struct execution_control_state *ecs)
4299{
4300 if (!ecs->stop_func_filled_in)
4301 {
4302 /* Don't care about return value; stop_func_start and stop_func_name
4303 will both be 0 if it doesn't work. */
4304 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
4305 &ecs->stop_func_start, &ecs->stop_func_end);
4306 ecs->stop_func_start
4307 += gdbarch_deprecated_function_start_offset (gdbarch);
4308
591a12a1
UW
4309 if (gdbarch_skip_entrypoint_p (gdbarch))
4310 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
4311 ecs->stop_func_start);
4312
7e324e48
GB
4313 ecs->stop_func_filled_in = 1;
4314 }
4315}
4316
4f5d7f63
PA
4317
4318/* Return the STOP_SOON field of the inferior pointed at by PTID. */
4319
4320static enum stop_kind
4321get_inferior_stop_soon (ptid_t ptid)
4322{
c9657e70 4323 struct inferior *inf = find_inferior_ptid (ptid);
4f5d7f63
PA
4324
4325 gdb_assert (inf != NULL);
4326 return inf->control.stop_soon;
4327}
4328
372316f1
PA
4329/* Wait for one event. Store the resulting waitstatus in WS, and
4330 return the event ptid. */
4331
4332static ptid_t
4333wait_one (struct target_waitstatus *ws)
4334{
4335 ptid_t event_ptid;
4336 ptid_t wait_ptid = minus_one_ptid;
4337
4338 overlay_cache_invalid = 1;
4339
4340 /* Flush target cache before starting to handle each event.
4341 Target was running and cache could be stale. This is just a
4342 heuristic. Running threads may modify target memory, but we
4343 don't get any event. */
4344 target_dcache_invalidate ();
4345
4346 if (deprecated_target_wait_hook)
4347 event_ptid = deprecated_target_wait_hook (wait_ptid, ws, 0);
4348 else
4349 event_ptid = target_wait (wait_ptid, ws, 0);
4350
4351 if (debug_infrun)
4352 print_target_wait_results (wait_ptid, event_ptid, ws);
4353
4354 return event_ptid;
4355}
4356
4357/* Generate a wrapper for target_stopped_by_REASON that works on PTID
4358 instead of the current thread. */
4359#define THREAD_STOPPED_BY(REASON) \
4360static int \
4361thread_stopped_by_ ## REASON (ptid_t ptid) \
4362{ \
4363 struct cleanup *old_chain; \
4364 int res; \
4365 \
4366 old_chain = save_inferior_ptid (); \
4367 inferior_ptid = ptid; \
4368 \
4369 res = target_stopped_by_ ## REASON (); \
4370 \
4371 do_cleanups (old_chain); \
4372 \
4373 return res; \
4374}
4375
4376/* Generate thread_stopped_by_watchpoint. */
4377THREAD_STOPPED_BY (watchpoint)
4378/* Generate thread_stopped_by_sw_breakpoint. */
4379THREAD_STOPPED_BY (sw_breakpoint)
4380/* Generate thread_stopped_by_hw_breakpoint. */
4381THREAD_STOPPED_BY (hw_breakpoint)
4382
4383/* Cleanups that switches to the PTID pointed at by PTID_P. */
4384
4385static void
4386switch_to_thread_cleanup (void *ptid_p)
4387{
4388 ptid_t ptid = *(ptid_t *) ptid_p;
4389
4390 switch_to_thread (ptid);
4391}
4392
4393/* Save the thread's event and stop reason to process it later. */
4394
4395static void
4396save_waitstatus (struct thread_info *tp, struct target_waitstatus *ws)
4397{
4398 struct regcache *regcache;
4399 struct address_space *aspace;
4400
4401 if (debug_infrun)
4402 {
4403 char *statstr;
4404
4405 statstr = target_waitstatus_to_string (ws);
4406 fprintf_unfiltered (gdb_stdlog,
4407 "infrun: saving status %s for %d.%ld.%ld\n",
4408 statstr,
4409 ptid_get_pid (tp->ptid),
4410 ptid_get_lwp (tp->ptid),
4411 ptid_get_tid (tp->ptid));
4412 xfree (statstr);
4413 }
4414
4415 /* Record for later. */
4416 tp->suspend.waitstatus = *ws;
4417 tp->suspend.waitstatus_pending_p = 1;
4418
4419 regcache = get_thread_regcache (tp->ptid);
4420 aspace = get_regcache_aspace (regcache);
4421
4422 if (ws->kind == TARGET_WAITKIND_STOPPED
4423 && ws->value.sig == GDB_SIGNAL_TRAP)
4424 {
4425 CORE_ADDR pc = regcache_read_pc (regcache);
4426
4427 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4428
4429 if (thread_stopped_by_watchpoint (tp->ptid))
4430 {
4431 tp->suspend.stop_reason
4432 = TARGET_STOPPED_BY_WATCHPOINT;
4433 }
4434 else if (target_supports_stopped_by_sw_breakpoint ()
4435 && thread_stopped_by_sw_breakpoint (tp->ptid))
4436 {
4437 tp->suspend.stop_reason
4438 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4439 }
4440 else if (target_supports_stopped_by_hw_breakpoint ()
4441 && thread_stopped_by_hw_breakpoint (tp->ptid))
4442 {
4443 tp->suspend.stop_reason
4444 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4445 }
4446 else if (!target_supports_stopped_by_hw_breakpoint ()
4447 && hardware_breakpoint_inserted_here_p (aspace,
4448 pc))
4449 {
4450 tp->suspend.stop_reason
4451 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4452 }
4453 else if (!target_supports_stopped_by_sw_breakpoint ()
4454 && software_breakpoint_inserted_here_p (aspace,
4455 pc))
4456 {
4457 tp->suspend.stop_reason
4458 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4459 }
4460 else if (!thread_has_single_step_breakpoints_set (tp)
4461 && currently_stepping (tp))
4462 {
4463 tp->suspend.stop_reason
4464 = TARGET_STOPPED_BY_SINGLE_STEP;
4465 }
4466 }
4467}
4468
65706a29
PA
4469/* A cleanup that disables thread create/exit events. */
4470
4471static void
4472disable_thread_events (void *arg)
4473{
4474 target_thread_events (0);
4475}
4476
6efcd9a8 4477/* See infrun.h. */
372316f1 4478
6efcd9a8 4479void
372316f1
PA
4480stop_all_threads (void)
4481{
4482 /* We may need multiple passes to discover all threads. */
4483 int pass;
4484 int iterations = 0;
4485 ptid_t entry_ptid;
4486 struct cleanup *old_chain;
4487
fbea99ea 4488 gdb_assert (target_is_non_stop_p ());
372316f1
PA
4489
4490 if (debug_infrun)
4491 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
4492
4493 entry_ptid = inferior_ptid;
4494 old_chain = make_cleanup (switch_to_thread_cleanup, &entry_ptid);
4495
65706a29
PA
4496 target_thread_events (1);
4497 make_cleanup (disable_thread_events, NULL);
4498
372316f1
PA
4499 /* Request threads to stop, and then wait for the stops. Because
4500 threads we already know about can spawn more threads while we're
4501 trying to stop them, and we only learn about new threads when we
4502 update the thread list, do this in a loop, and keep iterating
4503 until two passes find no threads that need to be stopped. */
4504 for (pass = 0; pass < 2; pass++, iterations++)
4505 {
4506 if (debug_infrun)
4507 fprintf_unfiltered (gdb_stdlog,
4508 "infrun: stop_all_threads, pass=%d, "
4509 "iterations=%d\n", pass, iterations);
4510 while (1)
4511 {
4512 ptid_t event_ptid;
4513 struct target_waitstatus ws;
4514 int need_wait = 0;
4515 struct thread_info *t;
4516
4517 update_thread_list ();
4518
4519 /* Go through all threads looking for threads that we need
4520 to tell the target to stop. */
4521 ALL_NON_EXITED_THREADS (t)
4522 {
4523 if (t->executing)
4524 {
4525 /* If already stopping, don't request a stop again.
4526 We just haven't seen the notification yet. */
4527 if (!t->stop_requested)
4528 {
4529 if (debug_infrun)
4530 fprintf_unfiltered (gdb_stdlog,
4531 "infrun: %s executing, "
4532 "need stop\n",
4533 target_pid_to_str (t->ptid));
4534 target_stop (t->ptid);
4535 t->stop_requested = 1;
4536 }
4537 else
4538 {
4539 if (debug_infrun)
4540 fprintf_unfiltered (gdb_stdlog,
4541 "infrun: %s executing, "
4542 "already stopping\n",
4543 target_pid_to_str (t->ptid));
4544 }
4545
4546 if (t->stop_requested)
4547 need_wait = 1;
4548 }
4549 else
4550 {
4551 if (debug_infrun)
4552 fprintf_unfiltered (gdb_stdlog,
4553 "infrun: %s not executing\n",
4554 target_pid_to_str (t->ptid));
4555
4556 /* The thread may be not executing, but still be
4557 resumed with a pending status to process. */
4558 t->resumed = 0;
4559 }
4560 }
4561
4562 if (!need_wait)
4563 break;
4564
4565 /* If we find new threads on the second iteration, restart
4566 over. We want to see two iterations in a row with all
4567 threads stopped. */
4568 if (pass > 0)
4569 pass = -1;
4570
4571 event_ptid = wait_one (&ws);
4572 if (ws.kind == TARGET_WAITKIND_NO_RESUMED)
4573 {
4574 /* All resumed threads exited. */
4575 }
65706a29
PA
4576 else if (ws.kind == TARGET_WAITKIND_THREAD_EXITED
4577 || ws.kind == TARGET_WAITKIND_EXITED
372316f1
PA
4578 || ws.kind == TARGET_WAITKIND_SIGNALLED)
4579 {
4580 if (debug_infrun)
4581 {
4582 ptid_t ptid = pid_to_ptid (ws.value.integer);
4583
4584 fprintf_unfiltered (gdb_stdlog,
4585 "infrun: %s exited while "
4586 "stopping threads\n",
4587 target_pid_to_str (ptid));
4588 }
4589 }
4590 else
4591 {
6efcd9a8
PA
4592 struct inferior *inf;
4593
372316f1
PA
4594 t = find_thread_ptid (event_ptid);
4595 if (t == NULL)
4596 t = add_thread (event_ptid);
4597
4598 t->stop_requested = 0;
4599 t->executing = 0;
4600 t->resumed = 0;
4601 t->control.may_range_step = 0;
4602
6efcd9a8
PA
4603 /* This may be the first time we see the inferior report
4604 a stop. */
4605 inf = find_inferior_ptid (event_ptid);
4606 if (inf->needs_setup)
4607 {
4608 switch_to_thread_no_regs (t);
4609 setup_inferior (0);
4610 }
4611
372316f1
PA
4612 if (ws.kind == TARGET_WAITKIND_STOPPED
4613 && ws.value.sig == GDB_SIGNAL_0)
4614 {
4615 /* We caught the event that we intended to catch, so
4616 there's no event pending. */
4617 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4618 t->suspend.waitstatus_pending_p = 0;
4619
4620 if (displaced_step_fixup (t->ptid, GDB_SIGNAL_0) < 0)
4621 {
4622 /* Add it back to the step-over queue. */
4623 if (debug_infrun)
4624 {
4625 fprintf_unfiltered (gdb_stdlog,
4626 "infrun: displaced-step of %s "
4627 "canceled: adding back to the "
4628 "step-over queue\n",
4629 target_pid_to_str (t->ptid));
4630 }
4631 t->control.trap_expected = 0;
4632 thread_step_over_chain_enqueue (t);
4633 }
4634 }
4635 else
4636 {
4637 enum gdb_signal sig;
4638 struct regcache *regcache;
372316f1
PA
4639
4640 if (debug_infrun)
4641 {
4642 char *statstr;
4643
4644 statstr = target_waitstatus_to_string (&ws);
4645 fprintf_unfiltered (gdb_stdlog,
4646 "infrun: target_wait %s, saving "
4647 "status for %d.%ld.%ld\n",
4648 statstr,
4649 ptid_get_pid (t->ptid),
4650 ptid_get_lwp (t->ptid),
4651 ptid_get_tid (t->ptid));
4652 xfree (statstr);
4653 }
4654
4655 /* Record for later. */
4656 save_waitstatus (t, &ws);
4657
4658 sig = (ws.kind == TARGET_WAITKIND_STOPPED
4659 ? ws.value.sig : GDB_SIGNAL_0);
4660
4661 if (displaced_step_fixup (t->ptid, sig) < 0)
4662 {
4663 /* Add it back to the step-over queue. */
4664 t->control.trap_expected = 0;
4665 thread_step_over_chain_enqueue (t);
4666 }
4667
4668 regcache = get_thread_regcache (t->ptid);
4669 t->suspend.stop_pc = regcache_read_pc (regcache);
4670
4671 if (debug_infrun)
4672 {
4673 fprintf_unfiltered (gdb_stdlog,
4674 "infrun: saved stop_pc=%s for %s "
4675 "(currently_stepping=%d)\n",
4676 paddress (target_gdbarch (),
4677 t->suspend.stop_pc),
4678 target_pid_to_str (t->ptid),
4679 currently_stepping (t));
4680 }
4681 }
4682 }
4683 }
4684 }
4685
4686 do_cleanups (old_chain);
4687
4688 if (debug_infrun)
4689 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
4690}
4691
f4836ba9
PA
4692/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4693
4694static int
4695handle_no_resumed (struct execution_control_state *ecs)
4696{
4697 struct inferior *inf;
4698 struct thread_info *thread;
4699
3b12939d 4700 if (target_can_async_p ())
f4836ba9 4701 {
3b12939d
PA
4702 struct ui *ui;
4703 int any_sync = 0;
f4836ba9 4704
3b12939d
PA
4705 ALL_UIS (ui)
4706 {
4707 if (ui->prompt_state == PROMPT_BLOCKED)
4708 {
4709 any_sync = 1;
4710 break;
4711 }
4712 }
4713 if (!any_sync)
4714 {
4715 /* There were no unwaited-for children left in the target, but,
4716 we're not synchronously waiting for events either. Just
4717 ignore. */
4718
4719 if (debug_infrun)
4720 fprintf_unfiltered (gdb_stdlog,
4721 "infrun: TARGET_WAITKIND_NO_RESUMED "
4722 "(ignoring: bg)\n");
4723 prepare_to_wait (ecs);
4724 return 1;
4725 }
f4836ba9
PA
4726 }
4727
4728 /* Otherwise, if we were running a synchronous execution command, we
4729 may need to cancel it and give the user back the terminal.
4730
4731 In non-stop mode, the target can't tell whether we've already
4732 consumed previous stop events, so it can end up sending us a
4733 no-resumed event like so:
4734
4735 #0 - thread 1 is left stopped
4736
4737 #1 - thread 2 is resumed and hits breakpoint
4738 -> TARGET_WAITKIND_STOPPED
4739
4740 #2 - thread 3 is resumed and exits
4741 this is the last resumed thread, so
4742 -> TARGET_WAITKIND_NO_RESUMED
4743
4744 #3 - gdb processes stop for thread 2 and decides to re-resume
4745 it.
4746
4747 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4748 thread 2 is now resumed, so the event should be ignored.
4749
4750 IOW, if the stop for thread 2 doesn't end a foreground command,
4751 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4752 event. But it could be that the event meant that thread 2 itself
4753 (or whatever other thread was the last resumed thread) exited.
4754
4755 To address this we refresh the thread list and check whether we
4756 have resumed threads _now_. In the example above, this removes
4757 thread 3 from the thread list. If thread 2 was re-resumed, we
4758 ignore this event. If we find no thread resumed, then we cancel
4759 the synchronous command show "no unwaited-for " to the user. */
4760 update_thread_list ();
4761
4762 ALL_NON_EXITED_THREADS (thread)
4763 {
4764 if (thread->executing
4765 || thread->suspend.waitstatus_pending_p)
4766 {
4767 /* There were no unwaited-for children left in the target at
4768 some point, but there are now. Just ignore. */
4769 if (debug_infrun)
4770 fprintf_unfiltered (gdb_stdlog,
4771 "infrun: TARGET_WAITKIND_NO_RESUMED "
4772 "(ignoring: found resumed)\n");
4773 prepare_to_wait (ecs);
4774 return 1;
4775 }
4776 }
4777
4778 /* Note however that we may find no resumed thread because the whole
4779 process exited meanwhile (thus updating the thread list results
4780 in an empty thread list). In this case we know we'll be getting
4781 a process exit event shortly. */
4782 ALL_INFERIORS (inf)
4783 {
4784 if (inf->pid == 0)
4785 continue;
4786
4787 thread = any_live_thread_of_process (inf->pid);
4788 if (thread == NULL)
4789 {
4790 if (debug_infrun)
4791 fprintf_unfiltered (gdb_stdlog,
4792 "infrun: TARGET_WAITKIND_NO_RESUMED "
4793 "(expect process exit)\n");
4794 prepare_to_wait (ecs);
4795 return 1;
4796 }
4797 }
4798
4799 /* Go ahead and report the event. */
4800 return 0;
4801}
4802
05ba8510
PA
4803/* Given an execution control state that has been freshly filled in by
4804 an event from the inferior, figure out what it means and take
4805 appropriate action.
4806
4807 The alternatives are:
4808
22bcd14b 4809 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
4810 debugger.
4811
4812 2) keep_going and return; to wait for the next event (set
4813 ecs->event_thread->stepping_over_breakpoint to 1 to single step
4814 once). */
c906108c 4815
ec9499be 4816static void
0b6e5e10 4817handle_inferior_event_1 (struct execution_control_state *ecs)
cd0fc7c3 4818{
d6b48e9c
PA
4819 enum stop_kind stop_soon;
4820
28736962
PA
4821 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
4822 {
4823 /* We had an event in the inferior, but we are not interested in
4824 handling it at this level. The lower layers have already
4825 done what needs to be done, if anything.
4826
4827 One of the possible circumstances for this is when the
4828 inferior produces output for the console. The inferior has
4829 not stopped, and we are ignoring the event. Another possible
4830 circumstance is any event which the lower level knows will be
4831 reported multiple times without an intervening resume. */
4832 if (debug_infrun)
4833 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
4834 prepare_to_wait (ecs);
4835 return;
4836 }
4837
65706a29
PA
4838 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
4839 {
4840 if (debug_infrun)
4841 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_THREAD_EXITED\n");
4842 prepare_to_wait (ecs);
4843 return;
4844 }
4845
0e5bf2a8 4846 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
4847 && handle_no_resumed (ecs))
4848 return;
0e5bf2a8 4849
1777feb0 4850 /* Cache the last pid/waitstatus. */
c32c64b7 4851 set_last_target_status (ecs->ptid, ecs->ws);
e02bc4cc 4852
ca005067 4853 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 4854 stop_stack_dummy = STOP_NONE;
ca005067 4855
0e5bf2a8
PA
4856 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
4857 {
4858 /* No unwaited-for children left. IOW, all resumed children
4859 have exited. */
4860 if (debug_infrun)
4861 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
4862
4863 stop_print_frame = 0;
22bcd14b 4864 stop_waiting (ecs);
0e5bf2a8
PA
4865 return;
4866 }
4867
8c90c137 4868 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 4869 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6
PA
4870 {
4871 ecs->event_thread = find_thread_ptid (ecs->ptid);
4872 /* If it's a new thread, add it to the thread database. */
4873 if (ecs->event_thread == NULL)
4874 ecs->event_thread = add_thread (ecs->ptid);
c1e36e3e
PA
4875
4876 /* Disable range stepping. If the next step request could use a
4877 range, this will be end up re-enabled then. */
4878 ecs->event_thread->control.may_range_step = 0;
359f5fe6 4879 }
88ed393a
JK
4880
4881 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 4882 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
4883
4884 /* Dependent on the current PC value modified by adjust_pc_after_break. */
4885 reinit_frame_cache ();
4886
28736962
PA
4887 breakpoint_retire_moribund ();
4888
2b009048
DJ
4889 /* First, distinguish signals caused by the debugger from signals
4890 that have to do with the program's own actions. Note that
4891 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
4892 on the operating system version. Here we detect when a SIGILL or
4893 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
4894 something similar for SIGSEGV, since a SIGSEGV will be generated
4895 when we're trying to execute a breakpoint instruction on a
4896 non-executable stack. This happens for call dummy breakpoints
4897 for architectures like SPARC that place call dummies on the
4898 stack. */
2b009048 4899 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
4900 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
4901 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
4902 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 4903 {
de0a0249
UW
4904 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4905
4906 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
4907 regcache_read_pc (regcache)))
4908 {
4909 if (debug_infrun)
4910 fprintf_unfiltered (gdb_stdlog,
4911 "infrun: Treating signal as SIGTRAP\n");
a493e3e2 4912 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 4913 }
2b009048
DJ
4914 }
4915
28736962
PA
4916 /* Mark the non-executing threads accordingly. In all-stop, all
4917 threads of all processes are stopped when we get any event
e1316e60 4918 reported. In non-stop mode, only the event thread stops. */
372316f1
PA
4919 {
4920 ptid_t mark_ptid;
4921
fbea99ea 4922 if (!target_is_non_stop_p ())
372316f1
PA
4923 mark_ptid = minus_one_ptid;
4924 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
4925 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
4926 {
4927 /* If we're handling a process exit in non-stop mode, even
4928 though threads haven't been deleted yet, one would think
4929 that there is nothing to do, as threads of the dead process
4930 will be soon deleted, and threads of any other process were
4931 left running. However, on some targets, threads survive a
4932 process exit event. E.g., for the "checkpoint" command,
4933 when the current checkpoint/fork exits, linux-fork.c
4934 automatically switches to another fork from within
4935 target_mourn_inferior, by associating the same
4936 inferior/thread to another fork. We haven't mourned yet at
4937 this point, but we must mark any threads left in the
4938 process as not-executing so that finish_thread_state marks
4939 them stopped (in the user's perspective) if/when we present
4940 the stop to the user. */
4941 mark_ptid = pid_to_ptid (ptid_get_pid (ecs->ptid));
4942 }
4943 else
4944 mark_ptid = ecs->ptid;
4945
4946 set_executing (mark_ptid, 0);
4947
4948 /* Likewise the resumed flag. */
4949 set_resumed (mark_ptid, 0);
4950 }
8c90c137 4951
488f131b
JB
4952 switch (ecs->ws.kind)
4953 {
4954 case TARGET_WAITKIND_LOADED:
527159b7 4955 if (debug_infrun)
8a9de0e4 4956 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
5c09a2c5
PA
4957 if (!ptid_equal (ecs->ptid, inferior_ptid))
4958 context_switch (ecs->ptid);
b0f4b84b
DJ
4959 /* Ignore gracefully during startup of the inferior, as it might
4960 be the shell which has just loaded some objects, otherwise
4961 add the symbols for the newly loaded objects. Also ignore at
4962 the beginning of an attach or remote session; we will query
4963 the full list of libraries once the connection is
4964 established. */
4f5d7f63
PA
4965
4966 stop_soon = get_inferior_stop_soon (ecs->ptid);
c0236d92 4967 if (stop_soon == NO_STOP_QUIETLY)
488f131b 4968 {
edcc5120
TT
4969 struct regcache *regcache;
4970
edcc5120
TT
4971 regcache = get_thread_regcache (ecs->ptid);
4972
4973 handle_solib_event ();
4974
4975 ecs->event_thread->control.stop_bpstat
4976 = bpstat_stop_status (get_regcache_aspace (regcache),
4977 stop_pc, ecs->ptid, &ecs->ws);
ab04a2af 4978
c65d6b55
PA
4979 if (handle_stop_requested (ecs))
4980 return;
4981
ce12b012 4982 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
4983 {
4984 /* A catchpoint triggered. */
94c57d6a
PA
4985 process_event_stop_test (ecs);
4986 return;
edcc5120 4987 }
488f131b 4988
b0f4b84b
DJ
4989 /* If requested, stop when the dynamic linker notifies
4990 gdb of events. This allows the user to get control
4991 and place breakpoints in initializer routines for
4992 dynamically loaded objects (among other things). */
a493e3e2 4993 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
4994 if (stop_on_solib_events)
4995 {
55409f9d
DJ
4996 /* Make sure we print "Stopped due to solib-event" in
4997 normal_stop. */
4998 stop_print_frame = 1;
4999
22bcd14b 5000 stop_waiting (ecs);
b0f4b84b
DJ
5001 return;
5002 }
488f131b 5003 }
b0f4b84b
DJ
5004
5005 /* If we are skipping through a shell, or through shared library
5006 loading that we aren't interested in, resume the program. If
5c09a2c5 5007 we're running the program normally, also resume. */
b0f4b84b
DJ
5008 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5009 {
74960c60
VP
5010 /* Loading of shared libraries might have changed breakpoint
5011 addresses. Make sure new breakpoints are inserted. */
a25a5a45 5012 if (stop_soon == NO_STOP_QUIETLY)
74960c60 5013 insert_breakpoints ();
64ce06e4 5014 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
5015 prepare_to_wait (ecs);
5016 return;
5017 }
5018
5c09a2c5
PA
5019 /* But stop if we're attaching or setting up a remote
5020 connection. */
5021 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5022 || stop_soon == STOP_QUIETLY_REMOTE)
5023 {
5024 if (debug_infrun)
5025 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
22bcd14b 5026 stop_waiting (ecs);
5c09a2c5
PA
5027 return;
5028 }
5029
5030 internal_error (__FILE__, __LINE__,
5031 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 5032
488f131b 5033 case TARGET_WAITKIND_SPURIOUS:
527159b7 5034 if (debug_infrun)
8a9de0e4 5035 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
c65d6b55
PA
5036 if (handle_stop_requested (ecs))
5037 return;
64776a0b 5038 if (!ptid_equal (ecs->ptid, inferior_ptid))
8b3ee56d 5039 context_switch (ecs->ptid);
64ce06e4 5040 resume (GDB_SIGNAL_0);
488f131b
JB
5041 prepare_to_wait (ecs);
5042 return;
c5aa993b 5043
65706a29
PA
5044 case TARGET_WAITKIND_THREAD_CREATED:
5045 if (debug_infrun)
5046 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_THREAD_CREATED\n");
c65d6b55
PA
5047 if (handle_stop_requested (ecs))
5048 return;
65706a29
PA
5049 if (!ptid_equal (ecs->ptid, inferior_ptid))
5050 context_switch (ecs->ptid);
5051 if (!switch_back_to_stepped_thread (ecs))
5052 keep_going (ecs);
5053 return;
5054
488f131b 5055 case TARGET_WAITKIND_EXITED:
940c3c06 5056 case TARGET_WAITKIND_SIGNALLED:
527159b7 5057 if (debug_infrun)
940c3c06
PA
5058 {
5059 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5060 fprintf_unfiltered (gdb_stdlog,
5061 "infrun: TARGET_WAITKIND_EXITED\n");
5062 else
5063 fprintf_unfiltered (gdb_stdlog,
5064 "infrun: TARGET_WAITKIND_SIGNALLED\n");
5065 }
5066
fb66883a 5067 inferior_ptid = ecs->ptid;
c9657e70 5068 set_current_inferior (find_inferior_ptid (ecs->ptid));
6c95b8df
PA
5069 set_current_program_space (current_inferior ()->pspace);
5070 handle_vfork_child_exec_or_exit (0);
1777feb0 5071 target_terminal_ours (); /* Must do this before mourn anyway. */
488f131b 5072
0c557179
SDJ
5073 /* Clearing any previous state of convenience variables. */
5074 clear_exit_convenience_vars ();
5075
940c3c06
PA
5076 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5077 {
5078 /* Record the exit code in the convenience variable $_exitcode, so
5079 that the user can inspect this again later. */
5080 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5081 (LONGEST) ecs->ws.value.integer);
5082
5083 /* Also record this in the inferior itself. */
5084 current_inferior ()->has_exit_code = 1;
5085 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5086
98eb56a4
PA
5087 /* Support the --return-child-result option. */
5088 return_child_result_value = ecs->ws.value.integer;
5089
fd664c91 5090 observer_notify_exited (ecs->ws.value.integer);
940c3c06
PA
5091 }
5092 else
0c557179
SDJ
5093 {
5094 struct regcache *regcache = get_thread_regcache (ecs->ptid);
5095 struct gdbarch *gdbarch = get_regcache_arch (regcache);
5096
5097 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5098 {
5099 /* Set the value of the internal variable $_exitsignal,
5100 which holds the signal uncaught by the inferior. */
5101 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5102 gdbarch_gdb_signal_to_target (gdbarch,
5103 ecs->ws.value.sig));
5104 }
5105 else
5106 {
5107 /* We don't have access to the target's method used for
5108 converting between signal numbers (GDB's internal
5109 representation <-> target's representation).
5110 Therefore, we cannot do a good job at displaying this
5111 information to the user. It's better to just warn
5112 her about it (if infrun debugging is enabled), and
5113 give up. */
5114 if (debug_infrun)
5115 fprintf_filtered (gdb_stdlog, _("\
5116Cannot fill $_exitsignal with the correct signal number.\n"));
5117 }
5118
fd664c91 5119 observer_notify_signal_exited (ecs->ws.value.sig);
0c557179 5120 }
8cf64490 5121
488f131b 5122 gdb_flush (gdb_stdout);
bc1e6c81 5123 target_mourn_inferior (inferior_ptid);
488f131b 5124 stop_print_frame = 0;
22bcd14b 5125 stop_waiting (ecs);
488f131b 5126 return;
c5aa993b 5127
488f131b 5128 /* The following are the only cases in which we keep going;
1777feb0 5129 the above cases end in a continue or goto. */
488f131b 5130 case TARGET_WAITKIND_FORKED:
deb3b17b 5131 case TARGET_WAITKIND_VFORKED:
527159b7 5132 if (debug_infrun)
fed708ed
PA
5133 {
5134 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
5135 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
5136 else
5137 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
5138 }
c906108c 5139
e2d96639
YQ
5140 /* Check whether the inferior is displaced stepping. */
5141 {
5142 struct regcache *regcache = get_thread_regcache (ecs->ptid);
5143 struct gdbarch *gdbarch = get_regcache_arch (regcache);
e2d96639
YQ
5144
5145 /* If checking displaced stepping is supported, and thread
5146 ecs->ptid is displaced stepping. */
c0987663 5147 if (displaced_step_in_progress_thread (ecs->ptid))
e2d96639
YQ
5148 {
5149 struct inferior *parent_inf
c9657e70 5150 = find_inferior_ptid (ecs->ptid);
e2d96639
YQ
5151 struct regcache *child_regcache;
5152 CORE_ADDR parent_pc;
5153
5154 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5155 indicating that the displaced stepping of syscall instruction
5156 has been done. Perform cleanup for parent process here. Note
5157 that this operation also cleans up the child process for vfork,
5158 because their pages are shared. */
a493e3e2 5159 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
c2829269
PA
5160 /* Start a new step-over in another thread if there's one
5161 that needs it. */
5162 start_step_over ();
e2d96639
YQ
5163
5164 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
5165 {
c0987663
YQ
5166 struct displaced_step_inferior_state *displaced
5167 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
5168
e2d96639
YQ
5169 /* Restore scratch pad for child process. */
5170 displaced_step_restore (displaced, ecs->ws.value.related_pid);
5171 }
5172
5173 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5174 the child's PC is also within the scratchpad. Set the child's PC
5175 to the parent's PC value, which has already been fixed up.
5176 FIXME: we use the parent's aspace here, although we're touching
5177 the child, because the child hasn't been added to the inferior
5178 list yet at this point. */
5179
5180 child_regcache
5181 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
5182 gdbarch,
5183 parent_inf->aspace);
5184 /* Read PC value of parent process. */
5185 parent_pc = regcache_read_pc (regcache);
5186
5187 if (debug_displaced)
5188 fprintf_unfiltered (gdb_stdlog,
5189 "displaced: write child pc from %s to %s\n",
5190 paddress (gdbarch,
5191 regcache_read_pc (child_regcache)),
5192 paddress (gdbarch, parent_pc));
5193
5194 regcache_write_pc (child_regcache, parent_pc);
5195 }
5196 }
5197
5a2901d9 5198 if (!ptid_equal (ecs->ptid, inferior_ptid))
c3a01a22 5199 context_switch (ecs->ptid);
5a2901d9 5200
b242c3c2
PA
5201 /* Immediately detach breakpoints from the child before there's
5202 any chance of letting the user delete breakpoints from the
5203 breakpoint lists. If we don't do this early, it's easy to
5204 leave left over traps in the child, vis: "break foo; catch
5205 fork; c; <fork>; del; c; <child calls foo>". We only follow
5206 the fork on the last `continue', and by that time the
5207 breakpoint at "foo" is long gone from the breakpoint table.
5208 If we vforked, then we don't need to unpatch here, since both
5209 parent and child are sharing the same memory pages; we'll
5210 need to unpatch at follow/detach time instead to be certain
5211 that new breakpoints added between catchpoint hit time and
5212 vfork follow are detached. */
5213 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5214 {
b242c3c2
PA
5215 /* This won't actually modify the breakpoint list, but will
5216 physically remove the breakpoints from the child. */
d80ee84f 5217 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5218 }
5219
34b7e8a6 5220 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5221
e58b0e63
PA
5222 /* In case the event is caught by a catchpoint, remember that
5223 the event is to be followed at the next resume of the thread,
5224 and not immediately. */
5225 ecs->event_thread->pending_follow = ecs->ws;
5226
fb14de7b 5227 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
675bf4cb 5228
16c381f0 5229 ecs->event_thread->control.stop_bpstat
6c95b8df 5230 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
09ac7c10 5231 stop_pc, ecs->ptid, &ecs->ws);
675bf4cb 5232
c65d6b55
PA
5233 if (handle_stop_requested (ecs))
5234 return;
5235
ce12b012
PA
5236 /* If no catchpoint triggered for this, then keep going. Note
5237 that we're interested in knowing the bpstat actually causes a
5238 stop, not just if it may explain the signal. Software
5239 watchpoints, for example, always appear in the bpstat. */
5240 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5241 {
6c95b8df
PA
5242 ptid_t parent;
5243 ptid_t child;
e58b0e63 5244 int should_resume;
3e43a32a
MS
5245 int follow_child
5246 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5247
a493e3e2 5248 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63
PA
5249
5250 should_resume = follow_fork ();
5251
6c95b8df
PA
5252 parent = ecs->ptid;
5253 child = ecs->ws.value.related_pid;
5254
a2077e25
PA
5255 /* At this point, the parent is marked running, and the
5256 child is marked stopped. */
5257
5258 /* If not resuming the parent, mark it stopped. */
5259 if (follow_child && !detach_fork && !non_stop && !sched_multi)
5260 set_running (parent, 0);
5261
5262 /* If resuming the child, mark it running. */
5263 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
5264 set_running (child, 1);
5265
6c95b8df 5266 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5267 if (!detach_fork && (non_stop
5268 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5269 {
5270 if (follow_child)
5271 switch_to_thread (parent);
5272 else
5273 switch_to_thread (child);
5274
5275 ecs->event_thread = inferior_thread ();
5276 ecs->ptid = inferior_ptid;
5277 keep_going (ecs);
5278 }
5279
5280 if (follow_child)
5281 switch_to_thread (child);
5282 else
5283 switch_to_thread (parent);
5284
e58b0e63
PA
5285 ecs->event_thread = inferior_thread ();
5286 ecs->ptid = inferior_ptid;
5287
5288 if (should_resume)
5289 keep_going (ecs);
5290 else
22bcd14b 5291 stop_waiting (ecs);
04e68871
DJ
5292 return;
5293 }
94c57d6a
PA
5294 process_event_stop_test (ecs);
5295 return;
488f131b 5296
6c95b8df
PA
5297 case TARGET_WAITKIND_VFORK_DONE:
5298 /* Done with the shared memory region. Re-insert breakpoints in
5299 the parent, and keep going. */
5300
5301 if (debug_infrun)
3e43a32a
MS
5302 fprintf_unfiltered (gdb_stdlog,
5303 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
6c95b8df
PA
5304
5305 if (!ptid_equal (ecs->ptid, inferior_ptid))
5306 context_switch (ecs->ptid);
5307
5308 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5309 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5310
5311 if (handle_stop_requested (ecs))
5312 return;
5313
6c95b8df
PA
5314 /* This also takes care of reinserting breakpoints in the
5315 previously locked inferior. */
5316 keep_going (ecs);
5317 return;
5318
488f131b 5319 case TARGET_WAITKIND_EXECD:
527159b7 5320 if (debug_infrun)
fc5261f2 5321 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
488f131b 5322
5a2901d9 5323 if (!ptid_equal (ecs->ptid, inferior_ptid))
c3a01a22 5324 context_switch (ecs->ptid);
5a2901d9 5325
fb14de7b 5326 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
795e548f 5327
6c95b8df
PA
5328 /* Do whatever is necessary to the parent branch of the vfork. */
5329 handle_vfork_child_exec_or_exit (1);
5330
795e548f
PA
5331 /* This causes the eventpoints and symbol table to be reset.
5332 Must do this now, before trying to determine whether to
5333 stop. */
71b43ef8 5334 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5335
17d8546e
DB
5336 /* In follow_exec we may have deleted the original thread and
5337 created a new one. Make sure that the event thread is the
5338 execd thread for that case (this is a nop otherwise). */
5339 ecs->event_thread = inferior_thread ();
5340
16c381f0 5341 ecs->event_thread->control.stop_bpstat
6c95b8df 5342 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
09ac7c10 5343 stop_pc, ecs->ptid, &ecs->ws);
795e548f 5344
71b43ef8
PA
5345 /* Note that this may be referenced from inside
5346 bpstat_stop_status above, through inferior_has_execd. */
5347 xfree (ecs->ws.value.execd_pathname);
5348 ecs->ws.value.execd_pathname = NULL;
5349
c65d6b55
PA
5350 if (handle_stop_requested (ecs))
5351 return;
5352
04e68871 5353 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5354 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5355 {
a493e3e2 5356 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5357 keep_going (ecs);
5358 return;
5359 }
94c57d6a
PA
5360 process_event_stop_test (ecs);
5361 return;
488f131b 5362
b4dc5ffa
MK
5363 /* Be careful not to try to gather much state about a thread
5364 that's in a syscall. It's frequently a losing proposition. */
488f131b 5365 case TARGET_WAITKIND_SYSCALL_ENTRY:
527159b7 5366 if (debug_infrun)
3e43a32a
MS
5367 fprintf_unfiltered (gdb_stdlog,
5368 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
1777feb0 5369 /* Getting the current syscall number. */
94c57d6a
PA
5370 if (handle_syscall_event (ecs) == 0)
5371 process_event_stop_test (ecs);
5372 return;
c906108c 5373
488f131b
JB
5374 /* Before examining the threads further, step this thread to
5375 get it entirely out of the syscall. (We get notice of the
5376 event when the thread is just on the verge of exiting a
5377 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5378 into user code.) */
488f131b 5379 case TARGET_WAITKIND_SYSCALL_RETURN:
527159b7 5380 if (debug_infrun)
3e43a32a
MS
5381 fprintf_unfiltered (gdb_stdlog,
5382 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
94c57d6a
PA
5383 if (handle_syscall_event (ecs) == 0)
5384 process_event_stop_test (ecs);
5385 return;
c906108c 5386
488f131b 5387 case TARGET_WAITKIND_STOPPED:
527159b7 5388 if (debug_infrun)
8a9de0e4 5389 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4f5d7f63
PA
5390 handle_signal_stop (ecs);
5391 return;
c906108c 5392
b2175913 5393 case TARGET_WAITKIND_NO_HISTORY:
4b4e080e
PA
5394 if (debug_infrun)
5395 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
b2175913 5396 /* Reverse execution: target ran out of history info. */
eab402df 5397
d1988021
MM
5398 /* Switch to the stopped thread. */
5399 if (!ptid_equal (ecs->ptid, inferior_ptid))
5400 context_switch (ecs->ptid);
5401 if (debug_infrun)
5402 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5403
34b7e8a6 5404 delete_just_stopped_threads_single_step_breakpoints ();
d1988021 5405 stop_pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
c65d6b55
PA
5406
5407 if (handle_stop_requested (ecs))
5408 return;
5409
fd664c91 5410 observer_notify_no_history ();
22bcd14b 5411 stop_waiting (ecs);
b2175913 5412 return;
488f131b 5413 }
4f5d7f63
PA
5414}
5415
0b6e5e10
JB
5416/* A wrapper around handle_inferior_event_1, which also makes sure
5417 that all temporary struct value objects that were created during
5418 the handling of the event get deleted at the end. */
5419
5420static void
5421handle_inferior_event (struct execution_control_state *ecs)
5422{
5423 struct value *mark = value_mark ();
5424
5425 handle_inferior_event_1 (ecs);
5426 /* Purge all temporary values created during the event handling,
5427 as it could be a long time before we return to the command level
5428 where such values would otherwise be purged. */
5429 value_free_to_mark (mark);
5430}
5431
372316f1
PA
5432/* Restart threads back to what they were trying to do back when we
5433 paused them for an in-line step-over. The EVENT_THREAD thread is
5434 ignored. */
4d9d9d04
PA
5435
5436static void
372316f1
PA
5437restart_threads (struct thread_info *event_thread)
5438{
5439 struct thread_info *tp;
372316f1
PA
5440
5441 /* In case the instruction just stepped spawned a new thread. */
5442 update_thread_list ();
5443
5444 ALL_NON_EXITED_THREADS (tp)
5445 {
5446 if (tp == event_thread)
5447 {
5448 if (debug_infrun)
5449 fprintf_unfiltered (gdb_stdlog,
5450 "infrun: restart threads: "
5451 "[%s] is event thread\n",
5452 target_pid_to_str (tp->ptid));
5453 continue;
5454 }
5455
5456 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5457 {
5458 if (debug_infrun)
5459 fprintf_unfiltered (gdb_stdlog,
5460 "infrun: restart threads: "
5461 "[%s] not meant to be running\n",
5462 target_pid_to_str (tp->ptid));
5463 continue;
5464 }
5465
5466 if (tp->resumed)
5467 {
5468 if (debug_infrun)
5469 fprintf_unfiltered (gdb_stdlog,
5470 "infrun: restart threads: [%s] resumed\n",
5471 target_pid_to_str (tp->ptid));
5472 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5473 continue;
5474 }
5475
5476 if (thread_is_in_step_over_chain (tp))
5477 {
5478 if (debug_infrun)
5479 fprintf_unfiltered (gdb_stdlog,
5480 "infrun: restart threads: "
5481 "[%s] needs step-over\n",
5482 target_pid_to_str (tp->ptid));
5483 gdb_assert (!tp->resumed);
5484 continue;
5485 }
5486
5487
5488 if (tp->suspend.waitstatus_pending_p)
5489 {
5490 if (debug_infrun)
5491 fprintf_unfiltered (gdb_stdlog,
5492 "infrun: restart threads: "
5493 "[%s] has pending status\n",
5494 target_pid_to_str (tp->ptid));
5495 tp->resumed = 1;
5496 continue;
5497 }
5498
c65d6b55
PA
5499 gdb_assert (!tp->stop_requested);
5500
372316f1
PA
5501 /* If some thread needs to start a step-over at this point, it
5502 should still be in the step-over queue, and thus skipped
5503 above. */
5504 if (thread_still_needs_step_over (tp))
5505 {
5506 internal_error (__FILE__, __LINE__,
5507 "thread [%s] needs a step-over, but not in "
5508 "step-over queue\n",
5509 target_pid_to_str (tp->ptid));
5510 }
5511
5512 if (currently_stepping (tp))
5513 {
5514 if (debug_infrun)
5515 fprintf_unfiltered (gdb_stdlog,
5516 "infrun: restart threads: [%s] was stepping\n",
5517 target_pid_to_str (tp->ptid));
5518 keep_going_stepped_thread (tp);
5519 }
5520 else
5521 {
5522 struct execution_control_state ecss;
5523 struct execution_control_state *ecs = &ecss;
5524
5525 if (debug_infrun)
5526 fprintf_unfiltered (gdb_stdlog,
5527 "infrun: restart threads: [%s] continuing\n",
5528 target_pid_to_str (tp->ptid));
5529 reset_ecs (ecs, tp);
5530 switch_to_thread (tp->ptid);
5531 keep_going_pass_signal (ecs);
5532 }
5533 }
5534}
5535
5536/* Callback for iterate_over_threads. Find a resumed thread that has
5537 a pending waitstatus. */
5538
5539static int
5540resumed_thread_with_pending_status (struct thread_info *tp,
5541 void *arg)
5542{
5543 return (tp->resumed
5544 && tp->suspend.waitstatus_pending_p);
5545}
5546
5547/* Called when we get an event that may finish an in-line or
5548 out-of-line (displaced stepping) step-over started previously.
5549 Return true if the event is processed and we should go back to the
5550 event loop; false if the caller should continue processing the
5551 event. */
5552
5553static int
4d9d9d04
PA
5554finish_step_over (struct execution_control_state *ecs)
5555{
372316f1
PA
5556 int had_step_over_info;
5557
4d9d9d04
PA
5558 displaced_step_fixup (ecs->ptid,
5559 ecs->event_thread->suspend.stop_signal);
5560
372316f1
PA
5561 had_step_over_info = step_over_info_valid_p ();
5562
5563 if (had_step_over_info)
4d9d9d04
PA
5564 {
5565 /* If we're stepping over a breakpoint with all threads locked,
5566 then only the thread that was stepped should be reporting
5567 back an event. */
5568 gdb_assert (ecs->event_thread->control.trap_expected);
5569
c65d6b55 5570 clear_step_over_info ();
4d9d9d04
PA
5571 }
5572
fbea99ea 5573 if (!target_is_non_stop_p ())
372316f1 5574 return 0;
4d9d9d04
PA
5575
5576 /* Start a new step-over in another thread if there's one that
5577 needs it. */
5578 start_step_over ();
372316f1
PA
5579
5580 /* If we were stepping over a breakpoint before, and haven't started
5581 a new in-line step-over sequence, then restart all other threads
5582 (except the event thread). We can't do this in all-stop, as then
5583 e.g., we wouldn't be able to issue any other remote packet until
5584 these other threads stop. */
5585 if (had_step_over_info && !step_over_info_valid_p ())
5586 {
5587 struct thread_info *pending;
5588
5589 /* If we only have threads with pending statuses, the restart
5590 below won't restart any thread and so nothing re-inserts the
5591 breakpoint we just stepped over. But we need it inserted
5592 when we later process the pending events, otherwise if
5593 another thread has a pending event for this breakpoint too,
5594 we'd discard its event (because the breakpoint that
5595 originally caused the event was no longer inserted). */
5596 context_switch (ecs->ptid);
5597 insert_breakpoints ();
5598
5599 restart_threads (ecs->event_thread);
5600
5601 /* If we have events pending, go through handle_inferior_event
5602 again, picking up a pending event at random. This avoids
5603 thread starvation. */
5604
5605 /* But not if we just stepped over a watchpoint in order to let
5606 the instruction execute so we can evaluate its expression.
5607 The set of watchpoints that triggered is recorded in the
5608 breakpoint objects themselves (see bp->watchpoint_triggered).
5609 If we processed another event first, that other event could
5610 clobber this info. */
5611 if (ecs->event_thread->stepping_over_watchpoint)
5612 return 0;
5613
5614 pending = iterate_over_threads (resumed_thread_with_pending_status,
5615 NULL);
5616 if (pending != NULL)
5617 {
5618 struct thread_info *tp = ecs->event_thread;
5619 struct regcache *regcache;
5620
5621 if (debug_infrun)
5622 {
5623 fprintf_unfiltered (gdb_stdlog,
5624 "infrun: found resumed threads with "
5625 "pending events, saving status\n");
5626 }
5627
5628 gdb_assert (pending != tp);
5629
5630 /* Record the event thread's event for later. */
5631 save_waitstatus (tp, &ecs->ws);
5632 /* This was cleared early, by handle_inferior_event. Set it
5633 so this pending event is considered by
5634 do_target_wait. */
5635 tp->resumed = 1;
5636
5637 gdb_assert (!tp->executing);
5638
5639 regcache = get_thread_regcache (tp->ptid);
5640 tp->suspend.stop_pc = regcache_read_pc (regcache);
5641
5642 if (debug_infrun)
5643 {
5644 fprintf_unfiltered (gdb_stdlog,
5645 "infrun: saved stop_pc=%s for %s "
5646 "(currently_stepping=%d)\n",
5647 paddress (target_gdbarch (),
5648 tp->suspend.stop_pc),
5649 target_pid_to_str (tp->ptid),
5650 currently_stepping (tp));
5651 }
5652
5653 /* This in-line step-over finished; clear this so we won't
5654 start a new one. This is what handle_signal_stop would
5655 do, if we returned false. */
5656 tp->stepping_over_breakpoint = 0;
5657
5658 /* Wake up the event loop again. */
5659 mark_async_event_handler (infrun_async_inferior_event_token);
5660
5661 prepare_to_wait (ecs);
5662 return 1;
5663 }
5664 }
5665
5666 return 0;
4d9d9d04
PA
5667}
5668
4f5d7f63
PA
5669/* Come here when the program has stopped with a signal. */
5670
5671static void
5672handle_signal_stop (struct execution_control_state *ecs)
5673{
5674 struct frame_info *frame;
5675 struct gdbarch *gdbarch;
5676 int stopped_by_watchpoint;
5677 enum stop_kind stop_soon;
5678 int random_signal;
c906108c 5679
f0407826
DE
5680 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5681
c65d6b55
PA
5682 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5683
f0407826
DE
5684 /* Do we need to clean up the state of a thread that has
5685 completed a displaced single-step? (Doing so usually affects
5686 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5687 if (finish_step_over (ecs))
5688 return;
f0407826
DE
5689
5690 /* If we either finished a single-step or hit a breakpoint, but
5691 the user wanted this thread to be stopped, pretend we got a
5692 SIG0 (generic unsignaled stop). */
5693 if (ecs->event_thread->stop_requested
5694 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5695 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5696
515630c5 5697 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
488f131b 5698
527159b7 5699 if (debug_infrun)
237fc4c9 5700 {
5af949e3
UW
5701 struct regcache *regcache = get_thread_regcache (ecs->ptid);
5702 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7f82dfc7
JK
5703 struct cleanup *old_chain = save_inferior_ptid ();
5704
5705 inferior_ptid = ecs->ptid;
5af949e3
UW
5706
5707 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
5708 paddress (gdbarch, stop_pc));
d92524f1 5709 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5710 {
5711 CORE_ADDR addr;
abbb1732 5712
237fc4c9
PA
5713 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5714
5715 if (target_stopped_data_address (&current_target, &addr))
5716 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
5717 "infrun: stopped data address = %s\n",
5718 paddress (gdbarch, addr));
237fc4c9
PA
5719 else
5720 fprintf_unfiltered (gdb_stdlog,
5721 "infrun: (no data address available)\n");
5722 }
7f82dfc7
JK
5723
5724 do_cleanups (old_chain);
237fc4c9 5725 }
527159b7 5726
36fa8042
PA
5727 /* This is originated from start_remote(), start_inferior() and
5728 shared libraries hook functions. */
5729 stop_soon = get_inferior_stop_soon (ecs->ptid);
5730 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5731 {
5732 if (!ptid_equal (ecs->ptid, inferior_ptid))
5733 context_switch (ecs->ptid);
5734 if (debug_infrun)
5735 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5736 stop_print_frame = 1;
22bcd14b 5737 stop_waiting (ecs);
36fa8042
PA
5738 return;
5739 }
5740
36fa8042
PA
5741 /* This originates from attach_command(). We need to overwrite
5742 the stop_signal here, because some kernels don't ignore a
5743 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5744 See more comments in inferior.h. On the other hand, if we
5745 get a non-SIGSTOP, report it to the user - assume the backend
5746 will handle the SIGSTOP if it should show up later.
5747
5748 Also consider that the attach is complete when we see a
5749 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5750 target extended-remote report it instead of a SIGSTOP
5751 (e.g. gdbserver). We already rely on SIGTRAP being our
5752 signal, so this is no exception.
5753
5754 Also consider that the attach is complete when we see a
5755 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5756 the target to stop all threads of the inferior, in case the
5757 low level attach operation doesn't stop them implicitly. If
5758 they weren't stopped implicitly, then the stub will report a
5759 GDB_SIGNAL_0, meaning: stopped for no particular reason
5760 other than GDB's request. */
5761 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5762 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5763 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5764 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5765 {
5766 stop_print_frame = 1;
22bcd14b 5767 stop_waiting (ecs);
36fa8042
PA
5768 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5769 return;
5770 }
5771
488f131b 5772 /* See if something interesting happened to the non-current thread. If
b40c7d58
DJ
5773 so, then switch to that thread. */
5774 if (!ptid_equal (ecs->ptid, inferior_ptid))
488f131b 5775 {
527159b7 5776 if (debug_infrun)
8a9de0e4 5777 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
527159b7 5778
0d1e5fa7 5779 context_switch (ecs->ptid);
c5aa993b 5780
9a4105ab 5781 if (deprecated_context_hook)
5d5658a1 5782 deprecated_context_hook (ptid_to_global_thread_id (ecs->ptid));
488f131b 5783 }
c906108c 5784
568d6575
UW
5785 /* At this point, get hold of the now-current thread's frame. */
5786 frame = get_current_frame ();
5787 gdbarch = get_frame_arch (frame);
5788
2adfaa28 5789 /* Pull the single step breakpoints out of the target. */
af48d08f 5790 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5791 {
af48d08f
PA
5792 struct regcache *regcache;
5793 struct address_space *aspace;
5794 CORE_ADDR pc;
2adfaa28 5795
af48d08f
PA
5796 regcache = get_thread_regcache (ecs->ptid);
5797 aspace = get_regcache_aspace (regcache);
5798 pc = regcache_read_pc (regcache);
34b7e8a6 5799
af48d08f
PA
5800 /* However, before doing so, if this single-step breakpoint was
5801 actually for another thread, set this thread up for moving
5802 past it. */
5803 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5804 aspace, pc))
5805 {
5806 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28
PA
5807 {
5808 if (debug_infrun)
5809 {
5810 fprintf_unfiltered (gdb_stdlog,
af48d08f 5811 "infrun: [%s] hit another thread's "
34b7e8a6
PA
5812 "single-step breakpoint\n",
5813 target_pid_to_str (ecs->ptid));
2adfaa28 5814 }
af48d08f
PA
5815 ecs->hit_singlestep_breakpoint = 1;
5816 }
5817 }
5818 else
5819 {
5820 if (debug_infrun)
5821 {
5822 fprintf_unfiltered (gdb_stdlog,
5823 "infrun: [%s] hit its "
5824 "single-step breakpoint\n",
5825 target_pid_to_str (ecs->ptid));
2adfaa28
PA
5826 }
5827 }
488f131b 5828 }
af48d08f 5829 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5830
963f9c80
PA
5831 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5832 && ecs->event_thread->control.trap_expected
5833 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5834 stopped_by_watchpoint = 0;
5835 else
5836 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5837
5838 /* If necessary, step over this watchpoint. We'll be back to display
5839 it in a moment. */
5840 if (stopped_by_watchpoint
d92524f1 5841 && (target_have_steppable_watchpoint
568d6575 5842 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5843 {
488f131b
JB
5844 /* At this point, we are stopped at an instruction which has
5845 attempted to write to a piece of memory under control of
5846 a watchpoint. The instruction hasn't actually executed
5847 yet. If we were to evaluate the watchpoint expression
5848 now, we would get the old value, and therefore no change
5849 would seem to have occurred.
5850
5851 In order to make watchpoints work `right', we really need
5852 to complete the memory write, and then evaluate the
d983da9c
DJ
5853 watchpoint expression. We do this by single-stepping the
5854 target.
5855
7f89fd65 5856 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5857 it. For example, the PA can (with some kernel cooperation)
5858 single step over a watchpoint without disabling the watchpoint.
5859
5860 It is far more common to need to disable a watchpoint to step
5861 the inferior over it. If we have non-steppable watchpoints,
5862 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5863 disable all watchpoints.
5864
5865 Any breakpoint at PC must also be stepped over -- if there's
5866 one, it will have already triggered before the watchpoint
5867 triggered, and we either already reported it to the user, or
5868 it didn't cause a stop and we called keep_going. In either
5869 case, if there was a breakpoint at PC, we must be trying to
5870 step past it. */
5871 ecs->event_thread->stepping_over_watchpoint = 1;
5872 keep_going (ecs);
488f131b
JB
5873 return;
5874 }
5875
4e1c45ea 5876 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5877 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5878 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5879 ecs->event_thread->control.stop_step = 0;
488f131b 5880 stop_print_frame = 1;
488f131b 5881 stopped_by_random_signal = 0;
488f131b 5882
edb3359d
DJ
5883 /* Hide inlined functions starting here, unless we just performed stepi or
5884 nexti. After stepi and nexti, always show the innermost frame (not any
5885 inline function call sites). */
16c381f0 5886 if (ecs->event_thread->control.step_range_end != 1)
0574c78f
GB
5887 {
5888 struct address_space *aspace =
5889 get_regcache_aspace (get_thread_regcache (ecs->ptid));
5890
5891 /* skip_inline_frames is expensive, so we avoid it if we can
5892 determine that the address is one where functions cannot have
5893 been inlined. This improves performance with inferiors that
5894 load a lot of shared libraries, because the solib event
5895 breakpoint is defined as the address of a function (i.e. not
5896 inline). Note that we have to check the previous PC as well
5897 as the current one to catch cases when we have just
5898 single-stepped off a breakpoint prior to reinstating it.
5899 Note that we're assuming that the code we single-step to is
5900 not inline, but that's not definitive: there's nothing
5901 preventing the event breakpoint function from containing
5902 inlined code, and the single-step ending up there. If the
5903 user had set a breakpoint on that inlined code, the missing
5904 skip_inline_frames call would break things. Fortunately
5905 that's an extremely unlikely scenario. */
09ac7c10 5906 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
a210c238
MR
5907 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5908 && ecs->event_thread->control.trap_expected
5909 && pc_at_non_inline_function (aspace,
5910 ecs->event_thread->prev_pc,
09ac7c10 5911 &ecs->ws)))
1c5a993e
MR
5912 {
5913 skip_inline_frames (ecs->ptid);
5914
5915 /* Re-fetch current thread's frame in case that invalidated
5916 the frame cache. */
5917 frame = get_current_frame ();
5918 gdbarch = get_frame_arch (frame);
5919 }
0574c78f 5920 }
edb3359d 5921
a493e3e2 5922 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 5923 && ecs->event_thread->control.trap_expected
568d6575 5924 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 5925 && currently_stepping (ecs->event_thread))
3352ef37 5926 {
b50d7442 5927 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 5928 also on an instruction that needs to be stepped multiple
1777feb0 5929 times before it's been fully executing. E.g., architectures
3352ef37
AC
5930 with a delay slot. It needs to be stepped twice, once for
5931 the instruction and once for the delay slot. */
5932 int step_through_delay
568d6575 5933 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 5934
527159b7 5935 if (debug_infrun && step_through_delay)
8a9de0e4 5936 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
16c381f0
JK
5937 if (ecs->event_thread->control.step_range_end == 0
5938 && step_through_delay)
3352ef37
AC
5939 {
5940 /* The user issued a continue when stopped at a breakpoint.
5941 Set up for another trap and get out of here. */
4e1c45ea 5942 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5943 keep_going (ecs);
5944 return;
5945 }
5946 else if (step_through_delay)
5947 {
5948 /* The user issued a step when stopped at a breakpoint.
5949 Maybe we should stop, maybe we should not - the delay
5950 slot *might* correspond to a line of source. In any
ca67fcb8
VP
5951 case, don't decide that here, just set
5952 ecs->stepping_over_breakpoint, making sure we
5953 single-step again before breakpoints are re-inserted. */
4e1c45ea 5954 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5955 }
5956 }
5957
ab04a2af
TT
5958 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
5959 handles this event. */
5960 ecs->event_thread->control.stop_bpstat
5961 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
5962 stop_pc, ecs->ptid, &ecs->ws);
db82e815 5963
ab04a2af
TT
5964 /* Following in case break condition called a
5965 function. */
5966 stop_print_frame = 1;
73dd234f 5967
ab04a2af
TT
5968 /* This is where we handle "moribund" watchpoints. Unlike
5969 software breakpoints traps, hardware watchpoint traps are
5970 always distinguishable from random traps. If no high-level
5971 watchpoint is associated with the reported stop data address
5972 anymore, then the bpstat does not explain the signal ---
5973 simply make sure to ignore it if `stopped_by_watchpoint' is
5974 set. */
5975
5976 if (debug_infrun
5977 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 5978 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 5979 GDB_SIGNAL_TRAP)
ab04a2af
TT
5980 && stopped_by_watchpoint)
5981 fprintf_unfiltered (gdb_stdlog,
5982 "infrun: no user watchpoint explains "
5983 "watchpoint SIGTRAP, ignoring\n");
73dd234f 5984
bac7d97b 5985 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
5986 at one stage in the past included checks for an inferior
5987 function call's call dummy's return breakpoint. The original
5988 comment, that went with the test, read:
03cebad2 5989
ab04a2af
TT
5990 ``End of a stack dummy. Some systems (e.g. Sony news) give
5991 another signal besides SIGTRAP, so check here as well as
5992 above.''
73dd234f 5993
ab04a2af
TT
5994 If someone ever tries to get call dummys on a
5995 non-executable stack to work (where the target would stop
5996 with something like a SIGSEGV), then those tests might need
5997 to be re-instated. Given, however, that the tests were only
5998 enabled when momentary breakpoints were not being used, I
5999 suspect that it won't be the case.
488f131b 6000
ab04a2af
TT
6001 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6002 be necessary for call dummies on a non-executable stack on
6003 SPARC. */
488f131b 6004
bac7d97b 6005 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6006 random_signal
6007 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6008 ecs->event_thread->suspend.stop_signal);
bac7d97b 6009
1cf4d951
PA
6010 /* Maybe this was a trap for a software breakpoint that has since
6011 been removed. */
6012 if (random_signal && target_stopped_by_sw_breakpoint ())
6013 {
6014 if (program_breakpoint_here_p (gdbarch, stop_pc))
6015 {
6016 struct regcache *regcache;
6017 int decr_pc;
6018
6019 /* Re-adjust PC to what the program would see if GDB was not
6020 debugging it. */
6021 regcache = get_thread_regcache (ecs->event_thread->ptid);
527a273a 6022 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6023 if (decr_pc != 0)
6024 {
6025 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
6026
6027 if (record_full_is_used ())
6028 record_full_gdb_operation_disable_set ();
6029
6030 regcache_write_pc (regcache, stop_pc + decr_pc);
6031
6032 do_cleanups (old_cleanups);
6033 }
6034 }
6035 else
6036 {
6037 /* A delayed software breakpoint event. Ignore the trap. */
6038 if (debug_infrun)
6039 fprintf_unfiltered (gdb_stdlog,
6040 "infrun: delayed software breakpoint "
6041 "trap, ignoring\n");
6042 random_signal = 0;
6043 }
6044 }
6045
6046 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6047 has since been removed. */
6048 if (random_signal && target_stopped_by_hw_breakpoint ())
6049 {
6050 /* A delayed hardware breakpoint event. Ignore the trap. */
6051 if (debug_infrun)
6052 fprintf_unfiltered (gdb_stdlog,
6053 "infrun: delayed hardware breakpoint/watchpoint "
6054 "trap, ignoring\n");
6055 random_signal = 0;
6056 }
6057
bac7d97b
PA
6058 /* If not, perhaps stepping/nexting can. */
6059 if (random_signal)
6060 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6061 && currently_stepping (ecs->event_thread));
ab04a2af 6062
2adfaa28
PA
6063 /* Perhaps the thread hit a single-step breakpoint of _another_
6064 thread. Single-step breakpoints are transparent to the
6065 breakpoints module. */
6066 if (random_signal)
6067 random_signal = !ecs->hit_singlestep_breakpoint;
6068
bac7d97b
PA
6069 /* No? Perhaps we got a moribund watchpoint. */
6070 if (random_signal)
6071 random_signal = !stopped_by_watchpoint;
ab04a2af 6072
c65d6b55
PA
6073 /* Always stop if the user explicitly requested this thread to
6074 remain stopped. */
6075 if (ecs->event_thread->stop_requested)
6076 {
6077 random_signal = 1;
6078 if (debug_infrun)
6079 fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
6080 }
6081
488f131b
JB
6082 /* For the program's own signals, act according to
6083 the signal handling tables. */
6084
ce12b012 6085 if (random_signal)
488f131b
JB
6086 {
6087 /* Signal not for debugging purposes. */
c9657e70 6088 struct inferior *inf = find_inferior_ptid (ecs->ptid);
c9737c08 6089 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6090
527159b7 6091 if (debug_infrun)
c9737c08
PA
6092 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
6093 gdb_signal_to_symbol_string (stop_signal));
527159b7 6094
488f131b
JB
6095 stopped_by_random_signal = 1;
6096
252fbfc8
PA
6097 /* Always stop on signals if we're either just gaining control
6098 of the program, or the user explicitly requested this thread
6099 to remain stopped. */
d6b48e9c 6100 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6101 || ecs->event_thread->stop_requested
24291992 6102 || (!inf->detaching
16c381f0 6103 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 6104 {
22bcd14b 6105 stop_waiting (ecs);
488f131b
JB
6106 return;
6107 }
b57bacec
PA
6108
6109 /* Notify observers the signal has "handle print" set. Note we
6110 returned early above if stopping; normal_stop handles the
6111 printing in that case. */
6112 if (signal_print[ecs->event_thread->suspend.stop_signal])
6113 {
6114 /* The signal table tells us to print about this signal. */
6115 target_terminal_ours_for_output ();
6116 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
6117 target_terminal_inferior ();
6118 }
488f131b
JB
6119
6120 /* Clear the signal if it should not be passed. */
16c381f0 6121 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6122 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6123
fb14de7b 6124 if (ecs->event_thread->prev_pc == stop_pc
16c381f0 6125 && ecs->event_thread->control.trap_expected
8358c15c 6126 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6127 {
6128 /* We were just starting a new sequence, attempting to
6129 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6130 Instead this signal arrives. This signal will take us out
68f53502
AC
6131 of the stepping range so GDB needs to remember to, when
6132 the signal handler returns, resume stepping off that
6133 breakpoint. */
6134 /* To simplify things, "continue" is forced to use the same
6135 code paths as single-step - set a breakpoint at the
6136 signal return address and then, once hit, step off that
6137 breakpoint. */
237fc4c9
PA
6138 if (debug_infrun)
6139 fprintf_unfiltered (gdb_stdlog,
6140 "infrun: signal arrived while stepping over "
6141 "breakpoint\n");
d3169d93 6142
2c03e5be 6143 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6144 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6145 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6146 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6147
6148 /* If we were nexting/stepping some other thread, switch to
6149 it, so that we don't continue it, losing control. */
6150 if (!switch_back_to_stepped_thread (ecs))
6151 keep_going (ecs);
9d799f85 6152 return;
68f53502 6153 }
9d799f85 6154
e5f8a7cc
PA
6155 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
6156 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
6157 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6158 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6159 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6160 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6161 {
6162 /* The inferior is about to take a signal that will take it
6163 out of the single step range. Set a breakpoint at the
6164 current PC (which is presumably where the signal handler
6165 will eventually return) and then allow the inferior to
6166 run free.
6167
6168 Note that this is only needed for a signal delivered
6169 while in the single-step range. Nested signals aren't a
6170 problem as they eventually all return. */
237fc4c9
PA
6171 if (debug_infrun)
6172 fprintf_unfiltered (gdb_stdlog,
6173 "infrun: signal may take us out of "
6174 "single-step range\n");
6175
372316f1 6176 clear_step_over_info ();
2c03e5be 6177 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6178 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6179 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6180 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6181 keep_going (ecs);
6182 return;
d303a6c7 6183 }
9d799f85
AC
6184
6185 /* Note: step_resume_breakpoint may be non-NULL. This occures
6186 when either there's a nested signal, or when there's a
6187 pending signal enabled just as the signal handler returns
6188 (leaving the inferior at the step-resume-breakpoint without
6189 actually executing it). Either way continue until the
6190 breakpoint is really hit. */
c447ac0b
PA
6191
6192 if (!switch_back_to_stepped_thread (ecs))
6193 {
6194 if (debug_infrun)
6195 fprintf_unfiltered (gdb_stdlog,
6196 "infrun: random signal, keep going\n");
6197
6198 keep_going (ecs);
6199 }
6200 return;
488f131b 6201 }
94c57d6a
PA
6202
6203 process_event_stop_test (ecs);
6204}
6205
6206/* Come here when we've got some debug event / signal we can explain
6207 (IOW, not a random signal), and test whether it should cause a
6208 stop, or whether we should resume the inferior (transparently).
6209 E.g., could be a breakpoint whose condition evaluates false; we
6210 could be still stepping within the line; etc. */
6211
6212static void
6213process_event_stop_test (struct execution_control_state *ecs)
6214{
6215 struct symtab_and_line stop_pc_sal;
6216 struct frame_info *frame;
6217 struct gdbarch *gdbarch;
cdaa5b73
PA
6218 CORE_ADDR jmp_buf_pc;
6219 struct bpstat_what what;
94c57d6a 6220
cdaa5b73 6221 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6222
cdaa5b73
PA
6223 frame = get_current_frame ();
6224 gdbarch = get_frame_arch (frame);
fcf3daef 6225
cdaa5b73 6226 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6227
cdaa5b73
PA
6228 if (what.call_dummy)
6229 {
6230 stop_stack_dummy = what.call_dummy;
6231 }
186c406b 6232
243a9253
PA
6233 /* A few breakpoint types have callbacks associated (e.g.,
6234 bp_jit_event). Run them now. */
6235 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6236
cdaa5b73
PA
6237 /* If we hit an internal event that triggers symbol changes, the
6238 current frame will be invalidated within bpstat_what (e.g., if we
6239 hit an internal solib event). Re-fetch it. */
6240 frame = get_current_frame ();
6241 gdbarch = get_frame_arch (frame);
e2e4d78b 6242
cdaa5b73
PA
6243 switch (what.main_action)
6244 {
6245 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6246 /* If we hit the breakpoint at longjmp while stepping, we
6247 install a momentary breakpoint at the target of the
6248 jmp_buf. */
186c406b 6249
cdaa5b73
PA
6250 if (debug_infrun)
6251 fprintf_unfiltered (gdb_stdlog,
6252 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
186c406b 6253
cdaa5b73 6254 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6255
cdaa5b73
PA
6256 if (what.is_longjmp)
6257 {
6258 struct value *arg_value;
6259
6260 /* If we set the longjmp breakpoint via a SystemTap probe,
6261 then use it to extract the arguments. The destination PC
6262 is the third argument to the probe. */
6263 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6264 if (arg_value)
8fa0c4f8
AA
6265 {
6266 jmp_buf_pc = value_as_address (arg_value);
6267 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6268 }
cdaa5b73
PA
6269 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6270 || !gdbarch_get_longjmp_target (gdbarch,
6271 frame, &jmp_buf_pc))
e2e4d78b 6272 {
cdaa5b73
PA
6273 if (debug_infrun)
6274 fprintf_unfiltered (gdb_stdlog,
6275 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6276 "(!gdbarch_get_longjmp_target)\n");
6277 keep_going (ecs);
6278 return;
e2e4d78b 6279 }
e2e4d78b 6280
cdaa5b73
PA
6281 /* Insert a breakpoint at resume address. */
6282 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6283 }
6284 else
6285 check_exception_resume (ecs, frame);
6286 keep_going (ecs);
6287 return;
e81a37f7 6288
cdaa5b73
PA
6289 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6290 {
6291 struct frame_info *init_frame;
e81a37f7 6292
cdaa5b73 6293 /* There are several cases to consider.
c906108c 6294
cdaa5b73
PA
6295 1. The initiating frame no longer exists. In this case we
6296 must stop, because the exception or longjmp has gone too
6297 far.
2c03e5be 6298
cdaa5b73
PA
6299 2. The initiating frame exists, and is the same as the
6300 current frame. We stop, because the exception or longjmp
6301 has been caught.
2c03e5be 6302
cdaa5b73
PA
6303 3. The initiating frame exists and is different from the
6304 current frame. This means the exception or longjmp has
6305 been caught beneath the initiating frame, so keep going.
c906108c 6306
cdaa5b73
PA
6307 4. longjmp breakpoint has been placed just to protect
6308 against stale dummy frames and user is not interested in
6309 stopping around longjmps. */
c5aa993b 6310
cdaa5b73
PA
6311 if (debug_infrun)
6312 fprintf_unfiltered (gdb_stdlog,
6313 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
c5aa993b 6314
cdaa5b73
PA
6315 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6316 != NULL);
6317 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6318
cdaa5b73
PA
6319 if (what.is_longjmp)
6320 {
b67a2c6f 6321 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6322
cdaa5b73 6323 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6324 {
cdaa5b73
PA
6325 /* Case 4. */
6326 keep_going (ecs);
6327 return;
e5ef252a 6328 }
cdaa5b73 6329 }
c5aa993b 6330
cdaa5b73 6331 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6332
cdaa5b73
PA
6333 if (init_frame)
6334 {
6335 struct frame_id current_id
6336 = get_frame_id (get_current_frame ());
6337 if (frame_id_eq (current_id,
6338 ecs->event_thread->initiating_frame))
6339 {
6340 /* Case 2. Fall through. */
6341 }
6342 else
6343 {
6344 /* Case 3. */
6345 keep_going (ecs);
6346 return;
6347 }
68f53502 6348 }
488f131b 6349
cdaa5b73
PA
6350 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6351 exists. */
6352 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6353
bdc36728 6354 end_stepping_range (ecs);
cdaa5b73
PA
6355 }
6356 return;
e5ef252a 6357
cdaa5b73
PA
6358 case BPSTAT_WHAT_SINGLE:
6359 if (debug_infrun)
6360 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6361 ecs->event_thread->stepping_over_breakpoint = 1;
6362 /* Still need to check other stuff, at least the case where we
6363 are stepping and step out of the right range. */
6364 break;
e5ef252a 6365
cdaa5b73
PA
6366 case BPSTAT_WHAT_STEP_RESUME:
6367 if (debug_infrun)
6368 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
e5ef252a 6369
cdaa5b73
PA
6370 delete_step_resume_breakpoint (ecs->event_thread);
6371 if (ecs->event_thread->control.proceed_to_finish
6372 && execution_direction == EXEC_REVERSE)
6373 {
6374 struct thread_info *tp = ecs->event_thread;
6375
6376 /* We are finishing a function in reverse, and just hit the
6377 step-resume breakpoint at the start address of the
6378 function, and we're almost there -- just need to back up
6379 by one more single-step, which should take us back to the
6380 function call. */
6381 tp->control.step_range_start = tp->control.step_range_end = 1;
6382 keep_going (ecs);
e5ef252a 6383 return;
cdaa5b73
PA
6384 }
6385 fill_in_stop_func (gdbarch, ecs);
6386 if (stop_pc == ecs->stop_func_start
6387 && execution_direction == EXEC_REVERSE)
6388 {
6389 /* We are stepping over a function call in reverse, and just
6390 hit the step-resume breakpoint at the start address of
6391 the function. Go back to single-stepping, which should
6392 take us back to the function call. */
6393 ecs->event_thread->stepping_over_breakpoint = 1;
6394 keep_going (ecs);
6395 return;
6396 }
6397 break;
e5ef252a 6398
cdaa5b73
PA
6399 case BPSTAT_WHAT_STOP_NOISY:
6400 if (debug_infrun)
6401 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6402 stop_print_frame = 1;
e5ef252a 6403
99619bea
PA
6404 /* Assume the thread stopped for a breapoint. We'll still check
6405 whether a/the breakpoint is there when the thread is next
6406 resumed. */
6407 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6408
22bcd14b 6409 stop_waiting (ecs);
cdaa5b73 6410 return;
e5ef252a 6411
cdaa5b73
PA
6412 case BPSTAT_WHAT_STOP_SILENT:
6413 if (debug_infrun)
6414 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6415 stop_print_frame = 0;
e5ef252a 6416
99619bea
PA
6417 /* Assume the thread stopped for a breapoint. We'll still check
6418 whether a/the breakpoint is there when the thread is next
6419 resumed. */
6420 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6421 stop_waiting (ecs);
cdaa5b73
PA
6422 return;
6423
6424 case BPSTAT_WHAT_HP_STEP_RESUME:
6425 if (debug_infrun)
6426 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6427
6428 delete_step_resume_breakpoint (ecs->event_thread);
6429 if (ecs->event_thread->step_after_step_resume_breakpoint)
6430 {
6431 /* Back when the step-resume breakpoint was inserted, we
6432 were trying to single-step off a breakpoint. Go back to
6433 doing that. */
6434 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6435 ecs->event_thread->stepping_over_breakpoint = 1;
6436 keep_going (ecs);
6437 return;
e5ef252a 6438 }
cdaa5b73
PA
6439 break;
6440
6441 case BPSTAT_WHAT_KEEP_CHECKING:
6442 break;
e5ef252a 6443 }
c906108c 6444
af48d08f
PA
6445 /* If we stepped a permanent breakpoint and we had a high priority
6446 step-resume breakpoint for the address we stepped, but we didn't
6447 hit it, then we must have stepped into the signal handler. The
6448 step-resume was only necessary to catch the case of _not_
6449 stepping into the handler, so delete it, and fall through to
6450 checking whether the step finished. */
6451 if (ecs->event_thread->stepped_breakpoint)
6452 {
6453 struct breakpoint *sr_bp
6454 = ecs->event_thread->control.step_resume_breakpoint;
6455
8d707a12
PA
6456 if (sr_bp != NULL
6457 && sr_bp->loc->permanent
af48d08f
PA
6458 && sr_bp->type == bp_hp_step_resume
6459 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6460 {
6461 if (debug_infrun)
6462 fprintf_unfiltered (gdb_stdlog,
6463 "infrun: stepped permanent breakpoint, stopped in "
6464 "handler\n");
6465 delete_step_resume_breakpoint (ecs->event_thread);
6466 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6467 }
6468 }
6469
cdaa5b73
PA
6470 /* We come here if we hit a breakpoint but should not stop for it.
6471 Possibly we also were stepping and should stop for that. So fall
6472 through and test for stepping. But, if not stepping, do not
6473 stop. */
c906108c 6474
a7212384
UW
6475 /* In all-stop mode, if we're currently stepping but have stopped in
6476 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6477 if (switch_back_to_stepped_thread (ecs))
6478 return;
776f04fa 6479
8358c15c 6480 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6481 {
527159b7 6482 if (debug_infrun)
d3169d93
DJ
6483 fprintf_unfiltered (gdb_stdlog,
6484 "infrun: step-resume breakpoint is inserted\n");
527159b7 6485
488f131b
JB
6486 /* Having a step-resume breakpoint overrides anything
6487 else having to do with stepping commands until
6488 that breakpoint is reached. */
488f131b
JB
6489 keep_going (ecs);
6490 return;
6491 }
c5aa993b 6492
16c381f0 6493 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6494 {
527159b7 6495 if (debug_infrun)
8a9de0e4 6496 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
488f131b 6497 /* Likewise if we aren't even stepping. */
488f131b
JB
6498 keep_going (ecs);
6499 return;
6500 }
c5aa993b 6501
4b7703ad
JB
6502 /* Re-fetch current thread's frame in case the code above caused
6503 the frame cache to be re-initialized, making our FRAME variable
6504 a dangling pointer. */
6505 frame = get_current_frame ();
628fe4e4 6506 gdbarch = get_frame_arch (frame);
7e324e48 6507 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6508
488f131b 6509 /* If stepping through a line, keep going if still within it.
c906108c 6510
488f131b
JB
6511 Note that step_range_end is the address of the first instruction
6512 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6513 within it!
6514
6515 Note also that during reverse execution, we may be stepping
6516 through a function epilogue and therefore must detect when
6517 the current-frame changes in the middle of a line. */
6518
ce4c476a 6519 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
31410e84 6520 && (execution_direction != EXEC_REVERSE
388a8562 6521 || frame_id_eq (get_frame_id (frame),
16c381f0 6522 ecs->event_thread->control.step_frame_id)))
488f131b 6523 {
527159b7 6524 if (debug_infrun)
5af949e3
UW
6525 fprintf_unfiltered
6526 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
16c381f0
JK
6527 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6528 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6529
c1e36e3e
PA
6530 /* Tentatively re-enable range stepping; `resume' disables it if
6531 necessary (e.g., if we're stepping over a breakpoint or we
6532 have software watchpoints). */
6533 ecs->event_thread->control.may_range_step = 1;
6534
b2175913
MS
6535 /* When stepping backward, stop at beginning of line range
6536 (unless it's the function entry point, in which case
6537 keep going back to the call point). */
16c381f0 6538 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6539 && stop_pc != ecs->stop_func_start
6540 && execution_direction == EXEC_REVERSE)
bdc36728 6541 end_stepping_range (ecs);
b2175913
MS
6542 else
6543 keep_going (ecs);
6544
488f131b
JB
6545 return;
6546 }
c5aa993b 6547
488f131b 6548 /* We stepped out of the stepping range. */
c906108c 6549
488f131b 6550 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6551 loader dynamic symbol resolution code...
6552
6553 EXEC_FORWARD: we keep on single stepping until we exit the run
6554 time loader code and reach the callee's address.
6555
6556 EXEC_REVERSE: we've already executed the callee (backward), and
6557 the runtime loader code is handled just like any other
6558 undebuggable function call. Now we need only keep stepping
6559 backward through the trampoline code, and that's handled further
6560 down, so there is nothing for us to do here. */
6561
6562 if (execution_direction != EXEC_REVERSE
16c381f0 6563 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
cfd8ab24 6564 && in_solib_dynsym_resolve_code (stop_pc))
488f131b 6565 {
4c8c40e6 6566 CORE_ADDR pc_after_resolver =
568d6575 6567 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
c906108c 6568
527159b7 6569 if (debug_infrun)
3e43a32a
MS
6570 fprintf_unfiltered (gdb_stdlog,
6571 "infrun: stepped into dynsym resolve code\n");
527159b7 6572
488f131b
JB
6573 if (pc_after_resolver)
6574 {
6575 /* Set up a step-resume breakpoint at the address
6576 indicated by SKIP_SOLIB_RESOLVER. */
6577 struct symtab_and_line sr_sal;
abbb1732 6578
fe39c653 6579 init_sal (&sr_sal);
488f131b 6580 sr_sal.pc = pc_after_resolver;
6c95b8df 6581 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6582
a6d9a66e
UW
6583 insert_step_resume_breakpoint_at_sal (gdbarch,
6584 sr_sal, null_frame_id);
c5aa993b 6585 }
c906108c 6586
488f131b
JB
6587 keep_going (ecs);
6588 return;
6589 }
c906108c 6590
16c381f0
JK
6591 if (ecs->event_thread->control.step_range_end != 1
6592 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6593 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6594 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6595 {
527159b7 6596 if (debug_infrun)
3e43a32a
MS
6597 fprintf_unfiltered (gdb_stdlog,
6598 "infrun: stepped into signal trampoline\n");
42edda50 6599 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6600 a signal trampoline (either by a signal being delivered or by
6601 the signal handler returning). Just single-step until the
6602 inferior leaves the trampoline (either by calling the handler
6603 or returning). */
488f131b
JB
6604 keep_going (ecs);
6605 return;
6606 }
c906108c 6607
14132e89
MR
6608 /* If we're in the return path from a shared library trampoline,
6609 we want to proceed through the trampoline when stepping. */
6610 /* macro/2012-04-25: This needs to come before the subroutine
6611 call check below as on some targets return trampolines look
6612 like subroutine calls (MIPS16 return thunks). */
6613 if (gdbarch_in_solib_return_trampoline (gdbarch,
6614 stop_pc, ecs->stop_func_name)
6615 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6616 {
6617 /* Determine where this trampoline returns. */
6618 CORE_ADDR real_stop_pc;
6619
6620 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
6621
6622 if (debug_infrun)
6623 fprintf_unfiltered (gdb_stdlog,
6624 "infrun: stepped into solib return tramp\n");
6625
6626 /* Only proceed through if we know where it's going. */
6627 if (real_stop_pc)
6628 {
6629 /* And put the step-breakpoint there and go until there. */
6630 struct symtab_and_line sr_sal;
6631
6632 init_sal (&sr_sal); /* initialize to zeroes */
6633 sr_sal.pc = real_stop_pc;
6634 sr_sal.section = find_pc_overlay (sr_sal.pc);
6635 sr_sal.pspace = get_frame_program_space (frame);
6636
6637 /* Do not specify what the fp should be when we stop since
6638 on some machines the prologue is where the new fp value
6639 is established. */
6640 insert_step_resume_breakpoint_at_sal (gdbarch,
6641 sr_sal, null_frame_id);
6642
6643 /* Restart without fiddling with the step ranges or
6644 other state. */
6645 keep_going (ecs);
6646 return;
6647 }
6648 }
6649
c17eaafe
DJ
6650 /* Check for subroutine calls. The check for the current frame
6651 equalling the step ID is not necessary - the check of the
6652 previous frame's ID is sufficient - but it is a common case and
6653 cheaper than checking the previous frame's ID.
14e60db5
DJ
6654
6655 NOTE: frame_id_eq will never report two invalid frame IDs as
6656 being equal, so to get into this block, both the current and
6657 previous frame must have valid frame IDs. */
005ca36a
JB
6658 /* The outer_frame_id check is a heuristic to detect stepping
6659 through startup code. If we step over an instruction which
6660 sets the stack pointer from an invalid value to a valid value,
6661 we may detect that as a subroutine call from the mythical
6662 "outermost" function. This could be fixed by marking
6663 outermost frames as !stack_p,code_p,special_p. Then the
6664 initial outermost frame, before sp was valid, would
ce6cca6d 6665 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6666 for more. */
edb3359d 6667 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6668 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6669 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6670 ecs->event_thread->control.step_stack_frame_id)
6671 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6672 outer_frame_id)
885eeb5b
PA
6673 || (ecs->event_thread->control.step_start_function
6674 != find_pc_function (stop_pc)))))
488f131b 6675 {
95918acb 6676 CORE_ADDR real_stop_pc;
8fb3e588 6677
527159b7 6678 if (debug_infrun)
8a9de0e4 6679 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
527159b7 6680
b7a084be 6681 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6682 {
6683 /* I presume that step_over_calls is only 0 when we're
6684 supposed to be stepping at the assembly language level
6685 ("stepi"). Just stop. */
388a8562 6686 /* And this works the same backward as frontward. MVS */
bdc36728 6687 end_stepping_range (ecs);
95918acb
AC
6688 return;
6689 }
8fb3e588 6690
388a8562
MS
6691 /* Reverse stepping through solib trampolines. */
6692
6693 if (execution_direction == EXEC_REVERSE
16c381f0 6694 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6695 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6696 || (ecs->stop_func_start == 0
6697 && in_solib_dynsym_resolve_code (stop_pc))))
6698 {
6699 /* Any solib trampoline code can be handled in reverse
6700 by simply continuing to single-step. We have already
6701 executed the solib function (backwards), and a few
6702 steps will take us back through the trampoline to the
6703 caller. */
6704 keep_going (ecs);
6705 return;
6706 }
6707
16c381f0 6708 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6709 {
b2175913
MS
6710 /* We're doing a "next".
6711
6712 Normal (forward) execution: set a breakpoint at the
6713 callee's return address (the address at which the caller
6714 will resume).
6715
6716 Reverse (backward) execution. set the step-resume
6717 breakpoint at the start of the function that we just
6718 stepped into (backwards), and continue to there. When we
6130d0b7 6719 get there, we'll need to single-step back to the caller. */
b2175913
MS
6720
6721 if (execution_direction == EXEC_REVERSE)
6722 {
acf9414f
JK
6723 /* If we're already at the start of the function, we've either
6724 just stepped backward into a single instruction function,
6725 or stepped back out of a signal handler to the first instruction
6726 of the function. Just keep going, which will single-step back
6727 to the caller. */
58c48e72 6728 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f
JK
6729 {
6730 struct symtab_and_line sr_sal;
6731
6732 /* Normal function call return (static or dynamic). */
6733 init_sal (&sr_sal);
6734 sr_sal.pc = ecs->stop_func_start;
6735 sr_sal.pspace = get_frame_program_space (frame);
6736 insert_step_resume_breakpoint_at_sal (gdbarch,
6737 sr_sal, null_frame_id);
6738 }
b2175913
MS
6739 }
6740 else
568d6575 6741 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6742
8567c30f
AC
6743 keep_going (ecs);
6744 return;
6745 }
a53c66de 6746
95918acb 6747 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6748 calling routine and the real function), locate the real
6749 function. That's what tells us (a) whether we want to step
6750 into it at all, and (b) what prologue we want to run to the
6751 end of, if we do step into it. */
568d6575 6752 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6753 if (real_stop_pc == 0)
568d6575 6754 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6755 if (real_stop_pc != 0)
6756 ecs->stop_func_start = real_stop_pc;
8fb3e588 6757
db5f024e 6758 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9
RC
6759 {
6760 struct symtab_and_line sr_sal;
abbb1732 6761
1b2bfbb9
RC
6762 init_sal (&sr_sal);
6763 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6764 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6765
a6d9a66e
UW
6766 insert_step_resume_breakpoint_at_sal (gdbarch,
6767 sr_sal, null_frame_id);
8fb3e588
AC
6768 keep_going (ecs);
6769 return;
1b2bfbb9
RC
6770 }
6771
95918acb 6772 /* If we have line number information for the function we are
1bfeeb0f
JL
6773 thinking of stepping into and the function isn't on the skip
6774 list, step into it.
95918acb 6775
8fb3e588
AC
6776 If there are several symtabs at that PC (e.g. with include
6777 files), just want to know whether *any* of them have line
6778 numbers. find_pc_line handles this. */
95918acb
AC
6779 {
6780 struct symtab_and_line tmp_sal;
8fb3e588 6781
95918acb 6782 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6783 if (tmp_sal.line != 0
85817405
JK
6784 && !function_name_is_marked_for_skip (ecs->stop_func_name,
6785 &tmp_sal))
95918acb 6786 {
b2175913 6787 if (execution_direction == EXEC_REVERSE)
568d6575 6788 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6789 else
568d6575 6790 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6791 return;
6792 }
6793 }
6794
6795 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6796 set, we stop the step so that the user has a chance to switch
6797 in assembly mode. */
16c381f0 6798 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6799 && step_stop_if_no_debug)
95918acb 6800 {
bdc36728 6801 end_stepping_range (ecs);
95918acb
AC
6802 return;
6803 }
6804
b2175913
MS
6805 if (execution_direction == EXEC_REVERSE)
6806 {
acf9414f
JK
6807 /* If we're already at the start of the function, we've either just
6808 stepped backward into a single instruction function without line
6809 number info, or stepped back out of a signal handler to the first
6810 instruction of the function without line number info. Just keep
6811 going, which will single-step back to the caller. */
6812 if (ecs->stop_func_start != stop_pc)
6813 {
6814 /* Set a breakpoint at callee's start address.
6815 From there we can step once and be back in the caller. */
6816 struct symtab_and_line sr_sal;
abbb1732 6817
acf9414f
JK
6818 init_sal (&sr_sal);
6819 sr_sal.pc = ecs->stop_func_start;
6820 sr_sal.pspace = get_frame_program_space (frame);
6821 insert_step_resume_breakpoint_at_sal (gdbarch,
6822 sr_sal, null_frame_id);
6823 }
b2175913
MS
6824 }
6825 else
6826 /* Set a breakpoint at callee's return address (the address
6827 at which the caller will resume). */
568d6575 6828 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6829
95918acb 6830 keep_going (ecs);
488f131b 6831 return;
488f131b 6832 }
c906108c 6833
fdd654f3
MS
6834 /* Reverse stepping through solib trampolines. */
6835
6836 if (execution_direction == EXEC_REVERSE
16c381f0 6837 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3
MS
6838 {
6839 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6840 || (ecs->stop_func_start == 0
6841 && in_solib_dynsym_resolve_code (stop_pc)))
6842 {
6843 /* Any solib trampoline code can be handled in reverse
6844 by simply continuing to single-step. We have already
6845 executed the solib function (backwards), and a few
6846 steps will take us back through the trampoline to the
6847 caller. */
6848 keep_going (ecs);
6849 return;
6850 }
6851 else if (in_solib_dynsym_resolve_code (stop_pc))
6852 {
6853 /* Stepped backward into the solib dynsym resolver.
6854 Set a breakpoint at its start and continue, then
6855 one more step will take us out. */
6856 struct symtab_and_line sr_sal;
abbb1732 6857
fdd654f3
MS
6858 init_sal (&sr_sal);
6859 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6860 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6861 insert_step_resume_breakpoint_at_sal (gdbarch,
6862 sr_sal, null_frame_id);
6863 keep_going (ecs);
6864 return;
6865 }
6866 }
6867
2afb61aa 6868 stop_pc_sal = find_pc_line (stop_pc, 0);
7ed0fe66 6869
1b2bfbb9
RC
6870 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6871 the trampoline processing logic, however, there are some trampolines
6872 that have no names, so we should do trampoline handling first. */
16c381f0 6873 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6874 && ecs->stop_func_name == NULL
2afb61aa 6875 && stop_pc_sal.line == 0)
1b2bfbb9 6876 {
527159b7 6877 if (debug_infrun)
3e43a32a
MS
6878 fprintf_unfiltered (gdb_stdlog,
6879 "infrun: stepped into undebuggable function\n");
527159b7 6880
1b2bfbb9 6881 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
6882 undebuggable function (where there is no debugging information
6883 and no line number corresponding to the address where the
1b2bfbb9
RC
6884 inferior stopped). Since we want to skip this kind of code,
6885 we keep going until the inferior returns from this
14e60db5
DJ
6886 function - unless the user has asked us not to (via
6887 set step-mode) or we no longer know how to get back
6888 to the call site. */
6889 if (step_stop_if_no_debug
c7ce8faa 6890 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6891 {
6892 /* If we have no line number and the step-stop-if-no-debug
6893 is set, we stop the step so that the user has a chance to
6894 switch in assembly mode. */
bdc36728 6895 end_stepping_range (ecs);
1b2bfbb9
RC
6896 return;
6897 }
6898 else
6899 {
6900 /* Set a breakpoint at callee's return address (the address
6901 at which the caller will resume). */
568d6575 6902 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6903 keep_going (ecs);
6904 return;
6905 }
6906 }
6907
16c381f0 6908 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6909 {
6910 /* It is stepi or nexti. We always want to stop stepping after
6911 one instruction. */
527159b7 6912 if (debug_infrun)
8a9de0e4 6913 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
bdc36728 6914 end_stepping_range (ecs);
1b2bfbb9
RC
6915 return;
6916 }
6917
2afb61aa 6918 if (stop_pc_sal.line == 0)
488f131b
JB
6919 {
6920 /* We have no line number information. That means to stop
6921 stepping (does this always happen right after one instruction,
6922 when we do "s" in a function with no line numbers,
6923 or can this happen as a result of a return or longjmp?). */
527159b7 6924 if (debug_infrun)
8a9de0e4 6925 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
bdc36728 6926 end_stepping_range (ecs);
488f131b
JB
6927 return;
6928 }
c906108c 6929
edb3359d
DJ
6930 /* Look for "calls" to inlined functions, part one. If the inline
6931 frame machinery detected some skipped call sites, we have entered
6932 a new inline function. */
6933
6934 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6935 ecs->event_thread->control.step_frame_id)
edb3359d
DJ
6936 && inline_skipped_frames (ecs->ptid))
6937 {
6938 struct symtab_and_line call_sal;
6939
6940 if (debug_infrun)
6941 fprintf_unfiltered (gdb_stdlog,
6942 "infrun: stepped into inlined function\n");
6943
6944 find_frame_sal (get_current_frame (), &call_sal);
6945
16c381f0 6946 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
6947 {
6948 /* For "step", we're going to stop. But if the call site
6949 for this inlined function is on the same source line as
6950 we were previously stepping, go down into the function
6951 first. Otherwise stop at the call site. */
6952
6953 if (call_sal.line == ecs->event_thread->current_line
6954 && call_sal.symtab == ecs->event_thread->current_symtab)
6955 step_into_inline_frame (ecs->ptid);
6956
bdc36728 6957 end_stepping_range (ecs);
edb3359d
DJ
6958 return;
6959 }
6960 else
6961 {
6962 /* For "next", we should stop at the call site if it is on a
6963 different source line. Otherwise continue through the
6964 inlined function. */
6965 if (call_sal.line == ecs->event_thread->current_line
6966 && call_sal.symtab == ecs->event_thread->current_symtab)
6967 keep_going (ecs);
6968 else
bdc36728 6969 end_stepping_range (ecs);
edb3359d
DJ
6970 return;
6971 }
6972 }
6973
6974 /* Look for "calls" to inlined functions, part two. If we are still
6975 in the same real function we were stepping through, but we have
6976 to go further up to find the exact frame ID, we are stepping
6977 through a more inlined call beyond its call site. */
6978
6979 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
6980 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6981 ecs->event_thread->control.step_frame_id)
edb3359d 6982 && stepped_in_from (get_current_frame (),
16c381f0 6983 ecs->event_thread->control.step_frame_id))
edb3359d
DJ
6984 {
6985 if (debug_infrun)
6986 fprintf_unfiltered (gdb_stdlog,
6987 "infrun: stepping through inlined function\n");
6988
16c381f0 6989 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
edb3359d
DJ
6990 keep_going (ecs);
6991 else
bdc36728 6992 end_stepping_range (ecs);
edb3359d
DJ
6993 return;
6994 }
6995
2afb61aa 6996 if ((stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
6997 && (ecs->event_thread->current_line != stop_pc_sal.line
6998 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b
JB
6999 {
7000 /* We are at the start of a different line. So stop. Note that
7001 we don't stop if we step into the middle of a different line.
7002 That is said to make things like for (;;) statements work
7003 better. */
527159b7 7004 if (debug_infrun)
3e43a32a
MS
7005 fprintf_unfiltered (gdb_stdlog,
7006 "infrun: stepped to a different line\n");
bdc36728 7007 end_stepping_range (ecs);
488f131b
JB
7008 return;
7009 }
c906108c 7010
488f131b 7011 /* We aren't done stepping.
c906108c 7012
488f131b
JB
7013 Optimize by setting the stepping range to the line.
7014 (We might not be in the original line, but if we entered a
7015 new line in mid-statement, we continue stepping. This makes
7016 things like for(;;) statements work better.) */
c906108c 7017
16c381f0
JK
7018 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7019 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7020 ecs->event_thread->control.may_range_step = 1;
edb3359d 7021 set_step_info (frame, stop_pc_sal);
488f131b 7022
527159b7 7023 if (debug_infrun)
8a9de0e4 7024 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
488f131b 7025 keep_going (ecs);
104c1213
JM
7026}
7027
c447ac0b
PA
7028/* In all-stop mode, if we're currently stepping but have stopped in
7029 some other thread, we may need to switch back to the stepped
7030 thread. Returns true we set the inferior running, false if we left
7031 it stopped (and the event needs further processing). */
7032
7033static int
7034switch_back_to_stepped_thread (struct execution_control_state *ecs)
7035{
fbea99ea 7036 if (!target_is_non_stop_p ())
c447ac0b
PA
7037 {
7038 struct thread_info *tp;
99619bea
PA
7039 struct thread_info *stepping_thread;
7040
7041 /* If any thread is blocked on some internal breakpoint, and we
7042 simply need to step over that breakpoint to get it going
7043 again, do that first. */
7044
7045 /* However, if we see an event for the stepping thread, then we
7046 know all other threads have been moved past their breakpoints
7047 already. Let the caller check whether the step is finished,
7048 etc., before deciding to move it past a breakpoint. */
7049 if (ecs->event_thread->control.step_range_end != 0)
7050 return 0;
7051
7052 /* Check if the current thread is blocked on an incomplete
7053 step-over, interrupted by a random signal. */
7054 if (ecs->event_thread->control.trap_expected
7055 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7056 {
99619bea
PA
7057 if (debug_infrun)
7058 {
7059 fprintf_unfiltered (gdb_stdlog,
7060 "infrun: need to finish step-over of [%s]\n",
7061 target_pid_to_str (ecs->event_thread->ptid));
7062 }
7063 keep_going (ecs);
7064 return 1;
7065 }
2adfaa28 7066
99619bea
PA
7067 /* Check if the current thread is blocked by a single-step
7068 breakpoint of another thread. */
7069 if (ecs->hit_singlestep_breakpoint)
7070 {
7071 if (debug_infrun)
7072 {
7073 fprintf_unfiltered (gdb_stdlog,
7074 "infrun: need to step [%s] over single-step "
7075 "breakpoint\n",
7076 target_pid_to_str (ecs->ptid));
7077 }
7078 keep_going (ecs);
7079 return 1;
7080 }
7081
4d9d9d04
PA
7082 /* If this thread needs yet another step-over (e.g., stepping
7083 through a delay slot), do it first before moving on to
7084 another thread. */
7085 if (thread_still_needs_step_over (ecs->event_thread))
7086 {
7087 if (debug_infrun)
7088 {
7089 fprintf_unfiltered (gdb_stdlog,
7090 "infrun: thread [%s] still needs step-over\n",
7091 target_pid_to_str (ecs->event_thread->ptid));
7092 }
7093 keep_going (ecs);
7094 return 1;
7095 }
70509625 7096
483805cf
PA
7097 /* If scheduler locking applies even if not stepping, there's no
7098 need to walk over threads. Above we've checked whether the
7099 current thread is stepping. If some other thread not the
7100 event thread is stepping, then it must be that scheduler
7101 locking is not in effect. */
856e7dd6 7102 if (schedlock_applies (ecs->event_thread))
483805cf
PA
7103 return 0;
7104
4d9d9d04
PA
7105 /* Otherwise, we no longer expect a trap in the current thread.
7106 Clear the trap_expected flag before switching back -- this is
7107 what keep_going does as well, if we call it. */
7108 ecs->event_thread->control.trap_expected = 0;
7109
7110 /* Likewise, clear the signal if it should not be passed. */
7111 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7112 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7113
7114 /* Do all pending step-overs before actually proceeding with
483805cf 7115 step/next/etc. */
4d9d9d04
PA
7116 if (start_step_over ())
7117 {
7118 prepare_to_wait (ecs);
7119 return 1;
7120 }
7121
7122 /* Look for the stepping/nexting thread. */
483805cf 7123 stepping_thread = NULL;
4d9d9d04 7124
034f788c 7125 ALL_NON_EXITED_THREADS (tp)
483805cf 7126 {
fbea99ea
PA
7127 /* Ignore threads of processes the caller is not
7128 resuming. */
483805cf 7129 if (!sched_multi
1afd5965 7130 && ptid_get_pid (tp->ptid) != ptid_get_pid (ecs->ptid))
483805cf
PA
7131 continue;
7132
7133 /* When stepping over a breakpoint, we lock all threads
7134 except the one that needs to move past the breakpoint.
7135 If a non-event thread has this set, the "incomplete
7136 step-over" check above should have caught it earlier. */
372316f1
PA
7137 if (tp->control.trap_expected)
7138 {
7139 internal_error (__FILE__, __LINE__,
7140 "[%s] has inconsistent state: "
7141 "trap_expected=%d\n",
7142 target_pid_to_str (tp->ptid),
7143 tp->control.trap_expected);
7144 }
483805cf
PA
7145
7146 /* Did we find the stepping thread? */
7147 if (tp->control.step_range_end)
7148 {
7149 /* Yep. There should only one though. */
7150 gdb_assert (stepping_thread == NULL);
7151
7152 /* The event thread is handled at the top, before we
7153 enter this loop. */
7154 gdb_assert (tp != ecs->event_thread);
7155
7156 /* If some thread other than the event thread is
7157 stepping, then scheduler locking can't be in effect,
7158 otherwise we wouldn't have resumed the current event
7159 thread in the first place. */
856e7dd6 7160 gdb_assert (!schedlock_applies (tp));
483805cf
PA
7161
7162 stepping_thread = tp;
7163 }
99619bea
PA
7164 }
7165
483805cf 7166 if (stepping_thread != NULL)
99619bea 7167 {
c447ac0b
PA
7168 if (debug_infrun)
7169 fprintf_unfiltered (gdb_stdlog,
7170 "infrun: switching back to stepped thread\n");
7171
2ac7589c
PA
7172 if (keep_going_stepped_thread (stepping_thread))
7173 {
7174 prepare_to_wait (ecs);
7175 return 1;
7176 }
7177 }
7178 }
2adfaa28 7179
2ac7589c
PA
7180 return 0;
7181}
2adfaa28 7182
2ac7589c
PA
7183/* Set a previously stepped thread back to stepping. Returns true on
7184 success, false if the resume is not possible (e.g., the thread
7185 vanished). */
7186
7187static int
7188keep_going_stepped_thread (struct thread_info *tp)
7189{
7190 struct frame_info *frame;
2ac7589c
PA
7191 struct execution_control_state ecss;
7192 struct execution_control_state *ecs = &ecss;
2adfaa28 7193
2ac7589c
PA
7194 /* If the stepping thread exited, then don't try to switch back and
7195 resume it, which could fail in several different ways depending
7196 on the target. Instead, just keep going.
2adfaa28 7197
2ac7589c
PA
7198 We can find a stepping dead thread in the thread list in two
7199 cases:
2adfaa28 7200
2ac7589c
PA
7201 - The target supports thread exit events, and when the target
7202 tries to delete the thread from the thread list, inferior_ptid
7203 pointed at the exiting thread. In such case, calling
7204 delete_thread does not really remove the thread from the list;
7205 instead, the thread is left listed, with 'exited' state.
64ce06e4 7206
2ac7589c
PA
7207 - The target's debug interface does not support thread exit
7208 events, and so we have no idea whatsoever if the previously
7209 stepping thread is still alive. For that reason, we need to
7210 synchronously query the target now. */
2adfaa28 7211
2ac7589c
PA
7212 if (is_exited (tp->ptid)
7213 || !target_thread_alive (tp->ptid))
7214 {
7215 if (debug_infrun)
7216 fprintf_unfiltered (gdb_stdlog,
7217 "infrun: not resuming previously "
7218 "stepped thread, it has vanished\n");
7219
7220 delete_thread (tp->ptid);
7221 return 0;
c447ac0b 7222 }
2ac7589c
PA
7223
7224 if (debug_infrun)
7225 fprintf_unfiltered (gdb_stdlog,
7226 "infrun: resuming previously stepped thread\n");
7227
7228 reset_ecs (ecs, tp);
7229 switch_to_thread (tp->ptid);
7230
7231 stop_pc = regcache_read_pc (get_thread_regcache (tp->ptid));
7232 frame = get_current_frame ();
2ac7589c
PA
7233
7234 /* If the PC of the thread we were trying to single-step has
7235 changed, then that thread has trapped or been signaled, but the
7236 event has not been reported to GDB yet. Re-poll the target
7237 looking for this particular thread's event (i.e. temporarily
7238 enable schedlock) by:
7239
7240 - setting a break at the current PC
7241 - resuming that particular thread, only (by setting trap
7242 expected)
7243
7244 This prevents us continuously moving the single-step breakpoint
7245 forward, one instruction at a time, overstepping. */
7246
7247 if (stop_pc != tp->prev_pc)
7248 {
7249 ptid_t resume_ptid;
7250
7251 if (debug_infrun)
7252 fprintf_unfiltered (gdb_stdlog,
7253 "infrun: expected thread advanced also (%s -> %s)\n",
7254 paddress (target_gdbarch (), tp->prev_pc),
7255 paddress (target_gdbarch (), stop_pc));
7256
7257 /* Clear the info of the previous step-over, as it's no longer
7258 valid (if the thread was trying to step over a breakpoint, it
7259 has already succeeded). It's what keep_going would do too,
7260 if we called it. Do this before trying to insert the sss
7261 breakpoint, otherwise if we were previously trying to step
7262 over this exact address in another thread, the breakpoint is
7263 skipped. */
7264 clear_step_over_info ();
7265 tp->control.trap_expected = 0;
7266
7267 insert_single_step_breakpoint (get_frame_arch (frame),
7268 get_frame_address_space (frame),
7269 stop_pc);
7270
372316f1 7271 tp->resumed = 1;
fbea99ea 7272 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7273 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7274 }
7275 else
7276 {
7277 if (debug_infrun)
7278 fprintf_unfiltered (gdb_stdlog,
7279 "infrun: expected thread still hasn't advanced\n");
7280
7281 keep_going_pass_signal (ecs);
7282 }
7283 return 1;
c447ac0b
PA
7284}
7285
8b061563
PA
7286/* Is thread TP in the middle of (software or hardware)
7287 single-stepping? (Note the result of this function must never be
7288 passed directly as target_resume's STEP parameter.) */
104c1213 7289
a289b8f6 7290static int
b3444185 7291currently_stepping (struct thread_info *tp)
a7212384 7292{
8358c15c
JK
7293 return ((tp->control.step_range_end
7294 && tp->control.step_resume_breakpoint == NULL)
7295 || tp->control.trap_expected
af48d08f 7296 || tp->stepped_breakpoint
8358c15c 7297 || bpstat_should_step ());
a7212384
UW
7298}
7299
b2175913
MS
7300/* Inferior has stepped into a subroutine call with source code that
7301 we should not step over. Do step to the first line of code in
7302 it. */
c2c6d25f
JM
7303
7304static void
568d6575
UW
7305handle_step_into_function (struct gdbarch *gdbarch,
7306 struct execution_control_state *ecs)
c2c6d25f 7307{
43f3e411 7308 struct compunit_symtab *cust;
2afb61aa 7309 struct symtab_and_line stop_func_sal, sr_sal;
c2c6d25f 7310
7e324e48
GB
7311 fill_in_stop_func (gdbarch, ecs);
7312
43f3e411
DE
7313 cust = find_pc_compunit_symtab (stop_pc);
7314 if (cust != NULL && compunit_language (cust) != language_asm)
568d6575 7315 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
b2175913 7316 ecs->stop_func_start);
c2c6d25f 7317
2afb61aa 7318 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7319 /* Use the step_resume_break to step until the end of the prologue,
7320 even if that involves jumps (as it seems to on the vax under
7321 4.2). */
7322 /* If the prologue ends in the middle of a source line, continue to
7323 the end of that source line (if it is still within the function).
7324 Otherwise, just go to end of prologue. */
2afb61aa
PA
7325 if (stop_func_sal.end
7326 && stop_func_sal.pc != ecs->stop_func_start
7327 && stop_func_sal.end < ecs->stop_func_end)
7328 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7329
2dbd5e30
KB
7330 /* Architectures which require breakpoint adjustment might not be able
7331 to place a breakpoint at the computed address. If so, the test
7332 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7333 ecs->stop_func_start to an address at which a breakpoint may be
7334 legitimately placed.
8fb3e588 7335
2dbd5e30
KB
7336 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7337 made, GDB will enter an infinite loop when stepping through
7338 optimized code consisting of VLIW instructions which contain
7339 subinstructions corresponding to different source lines. On
7340 FR-V, it's not permitted to place a breakpoint on any but the
7341 first subinstruction of a VLIW instruction. When a breakpoint is
7342 set, GDB will adjust the breakpoint address to the beginning of
7343 the VLIW instruction. Thus, we need to make the corresponding
7344 adjustment here when computing the stop address. */
8fb3e588 7345
568d6575 7346 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7347 {
7348 ecs->stop_func_start
568d6575 7349 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7350 ecs->stop_func_start);
2dbd5e30
KB
7351 }
7352
c2c6d25f
JM
7353 if (ecs->stop_func_start == stop_pc)
7354 {
7355 /* We are already there: stop now. */
bdc36728 7356 end_stepping_range (ecs);
c2c6d25f
JM
7357 return;
7358 }
7359 else
7360 {
7361 /* Put the step-breakpoint there and go until there. */
fe39c653 7362 init_sal (&sr_sal); /* initialize to zeroes */
c2c6d25f
JM
7363 sr_sal.pc = ecs->stop_func_start;
7364 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7365 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7366
c2c6d25f 7367 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7368 some machines the prologue is where the new fp value is
7369 established. */
a6d9a66e 7370 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7371
7372 /* And make sure stepping stops right away then. */
16c381f0
JK
7373 ecs->event_thread->control.step_range_end
7374 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7375 }
7376 keep_going (ecs);
7377}
d4f3574e 7378
b2175913
MS
7379/* Inferior has stepped backward into a subroutine call with source
7380 code that we should not step over. Do step to the beginning of the
7381 last line of code in it. */
7382
7383static void
568d6575
UW
7384handle_step_into_function_backward (struct gdbarch *gdbarch,
7385 struct execution_control_state *ecs)
b2175913 7386{
43f3e411 7387 struct compunit_symtab *cust;
167e4384 7388 struct symtab_and_line stop_func_sal;
b2175913 7389
7e324e48
GB
7390 fill_in_stop_func (gdbarch, ecs);
7391
43f3e411
DE
7392 cust = find_pc_compunit_symtab (stop_pc);
7393 if (cust != NULL && compunit_language (cust) != language_asm)
568d6575 7394 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
b2175913
MS
7395 ecs->stop_func_start);
7396
7397 stop_func_sal = find_pc_line (stop_pc, 0);
7398
7399 /* OK, we're just going to keep stepping here. */
7400 if (stop_func_sal.pc == stop_pc)
7401 {
7402 /* We're there already. Just stop stepping now. */
bdc36728 7403 end_stepping_range (ecs);
b2175913
MS
7404 }
7405 else
7406 {
7407 /* Else just reset the step range and keep going.
7408 No step-resume breakpoint, they don't work for
7409 epilogues, which can have multiple entry paths. */
16c381f0
JK
7410 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7411 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7412 keep_going (ecs);
7413 }
7414 return;
7415}
7416
d3169d93 7417/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7418 This is used to both functions and to skip over code. */
7419
7420static void
2c03e5be
PA
7421insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7422 struct symtab_and_line sr_sal,
7423 struct frame_id sr_id,
7424 enum bptype sr_type)
44cbf7b5 7425{
611c83ae
PA
7426 /* There should never be more than one step-resume or longjmp-resume
7427 breakpoint per thread, so we should never be setting a new
44cbf7b5 7428 step_resume_breakpoint when one is already active. */
8358c15c 7429 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7430 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93
DJ
7431
7432 if (debug_infrun)
7433 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7434 "infrun: inserting step-resume breakpoint at %s\n",
7435 paddress (gdbarch, sr_sal.pc));
d3169d93 7436
8358c15c 7437 inferior_thread ()->control.step_resume_breakpoint
2c03e5be
PA
7438 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
7439}
7440
9da8c2a0 7441void
2c03e5be
PA
7442insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7443 struct symtab_and_line sr_sal,
7444 struct frame_id sr_id)
7445{
7446 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7447 sr_sal, sr_id,
7448 bp_step_resume);
44cbf7b5 7449}
7ce450bd 7450
2c03e5be
PA
7451/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7452 This is used to skip a potential signal handler.
7ce450bd 7453
14e60db5
DJ
7454 This is called with the interrupted function's frame. The signal
7455 handler, when it returns, will resume the interrupted function at
7456 RETURN_FRAME.pc. */
d303a6c7
AC
7457
7458static void
2c03e5be 7459insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7
AC
7460{
7461 struct symtab_and_line sr_sal;
a6d9a66e 7462 struct gdbarch *gdbarch;
d303a6c7 7463
f4c1edd8 7464 gdb_assert (return_frame != NULL);
d303a6c7
AC
7465 init_sal (&sr_sal); /* initialize to zeros */
7466
a6d9a66e 7467 gdbarch = get_frame_arch (return_frame);
568d6575 7468 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7469 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7470 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7471
2c03e5be
PA
7472 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7473 get_stack_frame_id (return_frame),
7474 bp_hp_step_resume);
d303a6c7
AC
7475}
7476
2c03e5be
PA
7477/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7478 is used to skip a function after stepping into it (for "next" or if
7479 the called function has no debugging information).
14e60db5
DJ
7480
7481 The current function has almost always been reached by single
7482 stepping a call or return instruction. NEXT_FRAME belongs to the
7483 current function, and the breakpoint will be set at the caller's
7484 resume address.
7485
7486 This is a separate function rather than reusing
2c03e5be 7487 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7488 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7489 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7490
7491static void
7492insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7493{
7494 struct symtab_and_line sr_sal;
a6d9a66e 7495 struct gdbarch *gdbarch;
14e60db5
DJ
7496
7497 /* We shouldn't have gotten here if we don't know where the call site
7498 is. */
c7ce8faa 7499 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5
DJ
7500
7501 init_sal (&sr_sal); /* initialize to zeros */
7502
a6d9a66e 7503 gdbarch = frame_unwind_caller_arch (next_frame);
c7ce8faa
DJ
7504 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7505 frame_unwind_caller_pc (next_frame));
14e60db5 7506 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7507 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7508
a6d9a66e 7509 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7510 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7511}
7512
611c83ae
PA
7513/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7514 new breakpoint at the target of a jmp_buf. The handling of
7515 longjmp-resume uses the same mechanisms used for handling
7516 "step-resume" breakpoints. */
7517
7518static void
a6d9a66e 7519insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7520{
e81a37f7
TT
7521 /* There should never be more than one longjmp-resume breakpoint per
7522 thread, so we should never be setting a new
611c83ae 7523 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7524 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae
PA
7525
7526 if (debug_infrun)
7527 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7528 "infrun: inserting longjmp-resume breakpoint at %s\n",
7529 paddress (gdbarch, pc));
611c83ae 7530
e81a37f7 7531 inferior_thread ()->control.exception_resume_breakpoint =
a6d9a66e 7532 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
611c83ae
PA
7533}
7534
186c406b
TT
7535/* Insert an exception resume breakpoint. TP is the thread throwing
7536 the exception. The block B is the block of the unwinder debug hook
7537 function. FRAME is the frame corresponding to the call to this
7538 function. SYM is the symbol of the function argument holding the
7539 target PC of the exception. */
7540
7541static void
7542insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7543 const struct block *b,
186c406b
TT
7544 struct frame_info *frame,
7545 struct symbol *sym)
7546{
492d29ea 7547 TRY
186c406b 7548 {
63e43d3a 7549 struct block_symbol vsym;
186c406b
TT
7550 struct value *value;
7551 CORE_ADDR handler;
7552 struct breakpoint *bp;
7553
63e43d3a
PMR
7554 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
7555 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7556 /* If the value was optimized out, revert to the old behavior. */
7557 if (! value_optimized_out (value))
7558 {
7559 handler = value_as_address (value);
7560
7561 if (debug_infrun)
7562 fprintf_unfiltered (gdb_stdlog,
7563 "infrun: exception resume at %lx\n",
7564 (unsigned long) handler);
7565
7566 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7567 handler, bp_exception_resume);
c70a6932
JK
7568
7569 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7570 frame = NULL;
7571
5d5658a1 7572 bp->thread = tp->global_num;
186c406b
TT
7573 inferior_thread ()->control.exception_resume_breakpoint = bp;
7574 }
7575 }
492d29ea
PA
7576 CATCH (e, RETURN_MASK_ERROR)
7577 {
7578 /* We want to ignore errors here. */
7579 }
7580 END_CATCH
186c406b
TT
7581}
7582
28106bc2
SDJ
7583/* A helper for check_exception_resume that sets an
7584 exception-breakpoint based on a SystemTap probe. */
7585
7586static void
7587insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7588 const struct bound_probe *probe,
28106bc2
SDJ
7589 struct frame_info *frame)
7590{
7591 struct value *arg_value;
7592 CORE_ADDR handler;
7593 struct breakpoint *bp;
7594
7595 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7596 if (!arg_value)
7597 return;
7598
7599 handler = value_as_address (arg_value);
7600
7601 if (debug_infrun)
7602 fprintf_unfiltered (gdb_stdlog,
7603 "infrun: exception resume at %s\n",
6bac7473 7604 paddress (get_objfile_arch (probe->objfile),
28106bc2
SDJ
7605 handler));
7606
7607 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7608 handler, bp_exception_resume);
5d5658a1 7609 bp->thread = tp->global_num;
28106bc2
SDJ
7610 inferior_thread ()->control.exception_resume_breakpoint = bp;
7611}
7612
186c406b
TT
7613/* This is called when an exception has been intercepted. Check to
7614 see whether the exception's destination is of interest, and if so,
7615 set an exception resume breakpoint there. */
7616
7617static void
7618check_exception_resume (struct execution_control_state *ecs,
28106bc2 7619 struct frame_info *frame)
186c406b 7620{
729662a5 7621 struct bound_probe probe;
28106bc2
SDJ
7622 struct symbol *func;
7623
7624 /* First see if this exception unwinding breakpoint was set via a
7625 SystemTap probe point. If so, the probe has two arguments: the
7626 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7627 set a breakpoint there. */
6bac7473 7628 probe = find_probe_by_pc (get_frame_pc (frame));
729662a5 7629 if (probe.probe)
28106bc2 7630 {
729662a5 7631 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7632 return;
7633 }
7634
7635 func = get_frame_function (frame);
7636 if (!func)
7637 return;
186c406b 7638
492d29ea 7639 TRY
186c406b 7640 {
3977b71f 7641 const struct block *b;
8157b174 7642 struct block_iterator iter;
186c406b
TT
7643 struct symbol *sym;
7644 int argno = 0;
7645
7646 /* The exception breakpoint is a thread-specific breakpoint on
7647 the unwinder's debug hook, declared as:
7648
7649 void _Unwind_DebugHook (void *cfa, void *handler);
7650
7651 The CFA argument indicates the frame to which control is
7652 about to be transferred. HANDLER is the destination PC.
7653
7654 We ignore the CFA and set a temporary breakpoint at HANDLER.
7655 This is not extremely efficient but it avoids issues in gdb
7656 with computing the DWARF CFA, and it also works even in weird
7657 cases such as throwing an exception from inside a signal
7658 handler. */
7659
7660 b = SYMBOL_BLOCK_VALUE (func);
7661 ALL_BLOCK_SYMBOLS (b, iter, sym)
7662 {
7663 if (!SYMBOL_IS_ARGUMENT (sym))
7664 continue;
7665
7666 if (argno == 0)
7667 ++argno;
7668 else
7669 {
7670 insert_exception_resume_breakpoint (ecs->event_thread,
7671 b, frame, sym);
7672 break;
7673 }
7674 }
7675 }
492d29ea
PA
7676 CATCH (e, RETURN_MASK_ERROR)
7677 {
7678 }
7679 END_CATCH
186c406b
TT
7680}
7681
104c1213 7682static void
22bcd14b 7683stop_waiting (struct execution_control_state *ecs)
104c1213 7684{
527159b7 7685 if (debug_infrun)
22bcd14b 7686 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
527159b7 7687
cd0fc7c3
SS
7688 /* Let callers know we don't want to wait for the inferior anymore. */
7689 ecs->wait_some_more = 0;
fbea99ea
PA
7690
7691 /* If all-stop, but the target is always in non-stop mode, stop all
7692 threads now that we're presenting the stop to the user. */
7693 if (!non_stop && target_is_non_stop_p ())
7694 stop_all_threads ();
cd0fc7c3
SS
7695}
7696
4d9d9d04
PA
7697/* Like keep_going, but passes the signal to the inferior, even if the
7698 signal is set to nopass. */
d4f3574e
SS
7699
7700static void
4d9d9d04 7701keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7702{
c4dbc9af
PA
7703 /* Make sure normal_stop is called if we get a QUIT handled before
7704 reaching resume. */
7705 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
7706
4d9d9d04 7707 gdb_assert (ptid_equal (ecs->event_thread->ptid, inferior_ptid));
372316f1 7708 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7709
d4f3574e 7710 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b
UW
7711 ecs->event_thread->prev_pc
7712 = regcache_read_pc (get_thread_regcache (ecs->ptid));
d4f3574e 7713
4d9d9d04 7714 if (ecs->event_thread->control.trap_expected)
d4f3574e 7715 {
4d9d9d04
PA
7716 struct thread_info *tp = ecs->event_thread;
7717
7718 if (debug_infrun)
7719 fprintf_unfiltered (gdb_stdlog,
7720 "infrun: %s has trap_expected set, "
7721 "resuming to collect trap\n",
7722 target_pid_to_str (tp->ptid));
7723
a9ba6bae
PA
7724 /* We haven't yet gotten our trap, and either: intercepted a
7725 non-signal event (e.g., a fork); or took a signal which we
7726 are supposed to pass through to the inferior. Simply
7727 continue. */
c4dbc9af 7728 discard_cleanups (old_cleanups);
64ce06e4 7729 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7730 }
372316f1
PA
7731 else if (step_over_info_valid_p ())
7732 {
7733 /* Another thread is stepping over a breakpoint in-line. If
7734 this thread needs a step-over too, queue the request. In
7735 either case, this resume must be deferred for later. */
7736 struct thread_info *tp = ecs->event_thread;
7737
7738 if (ecs->hit_singlestep_breakpoint
7739 || thread_still_needs_step_over (tp))
7740 {
7741 if (debug_infrun)
7742 fprintf_unfiltered (gdb_stdlog,
7743 "infrun: step-over already in progress: "
7744 "step-over for %s deferred\n",
7745 target_pid_to_str (tp->ptid));
7746 thread_step_over_chain_enqueue (tp);
7747 }
7748 else
7749 {
7750 if (debug_infrun)
7751 fprintf_unfiltered (gdb_stdlog,
7752 "infrun: step-over in progress: "
7753 "resume of %s deferred\n",
7754 target_pid_to_str (tp->ptid));
7755 }
7756
7757 discard_cleanups (old_cleanups);
7758 }
d4f3574e
SS
7759 else
7760 {
31e77af2 7761 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7762 int remove_bp;
7763 int remove_wps;
8d297bbf 7764 step_over_what step_what;
31e77af2 7765
d4f3574e 7766 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7767 anyway (if we got a signal, the user asked it be passed to
7768 the child)
7769 -- or --
7770 We got our expected trap, but decided we should resume from
7771 it.
d4f3574e 7772
a9ba6bae 7773 We're going to run this baby now!
d4f3574e 7774
c36b740a
VP
7775 Note that insert_breakpoints won't try to re-insert
7776 already inserted breakpoints. Therefore, we don't
7777 care if breakpoints were already inserted, or not. */
a9ba6bae 7778
31e77af2
PA
7779 /* If we need to step over a breakpoint, and we're not using
7780 displaced stepping to do so, insert all breakpoints
7781 (watchpoints, etc.) but the one we're stepping over, step one
7782 instruction, and then re-insert the breakpoint when that step
7783 is finished. */
963f9c80 7784
6c4cfb24
PA
7785 step_what = thread_still_needs_step_over (ecs->event_thread);
7786
963f9c80 7787 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7788 || (step_what & STEP_OVER_BREAKPOINT));
7789 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7790
cb71640d
PA
7791 /* We can't use displaced stepping if we need to step past a
7792 watchpoint. The instruction copied to the scratch pad would
7793 still trigger the watchpoint. */
7794 if (remove_bp
3fc8eb30 7795 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7796 {
31e77af2 7797 set_step_over_info (get_regcache_aspace (regcache),
21edc42f
YQ
7798 regcache_read_pc (regcache), remove_wps,
7799 ecs->event_thread->global_num);
45e8c884 7800 }
963f9c80 7801 else if (remove_wps)
21edc42f 7802 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7803
7804 /* If we now need to do an in-line step-over, we need to stop
7805 all other threads. Note this must be done before
7806 insert_breakpoints below, because that removes the breakpoint
7807 we're about to step over, otherwise other threads could miss
7808 it. */
fbea99ea 7809 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7810 stop_all_threads ();
abbb1732 7811
31e77af2 7812 /* Stop stepping if inserting breakpoints fails. */
492d29ea 7813 TRY
31e77af2
PA
7814 {
7815 insert_breakpoints ();
7816 }
492d29ea 7817 CATCH (e, RETURN_MASK_ERROR)
31e77af2
PA
7818 {
7819 exception_print (gdb_stderr, e);
22bcd14b 7820 stop_waiting (ecs);
de1fe8c8 7821 discard_cleanups (old_cleanups);
31e77af2 7822 return;
d4f3574e 7823 }
492d29ea 7824 END_CATCH
d4f3574e 7825
963f9c80 7826 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7827
c4dbc9af 7828 discard_cleanups (old_cleanups);
64ce06e4 7829 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7830 }
7831
488f131b 7832 prepare_to_wait (ecs);
d4f3574e
SS
7833}
7834
4d9d9d04
PA
7835/* Called when we should continue running the inferior, because the
7836 current event doesn't cause a user visible stop. This does the
7837 resuming part; waiting for the next event is done elsewhere. */
7838
7839static void
7840keep_going (struct execution_control_state *ecs)
7841{
7842 if (ecs->event_thread->control.trap_expected
7843 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7844 ecs->event_thread->control.trap_expected = 0;
7845
7846 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7847 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7848 keep_going_pass_signal (ecs);
7849}
7850
104c1213
JM
7851/* This function normally comes after a resume, before
7852 handle_inferior_event exits. It takes care of any last bits of
7853 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7854
104c1213
JM
7855static void
7856prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7857{
527159b7 7858 if (debug_infrun)
8a9de0e4 7859 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
104c1213 7860
104c1213 7861 ecs->wait_some_more = 1;
0b333c5e
PA
7862
7863 if (!target_is_async_p ())
7864 mark_infrun_async_event_handler ();
c906108c 7865}
11cf8741 7866
fd664c91 7867/* We are done with the step range of a step/next/si/ni command.
b57bacec 7868 Called once for each n of a "step n" operation. */
fd664c91
PA
7869
7870static void
bdc36728 7871end_stepping_range (struct execution_control_state *ecs)
fd664c91 7872{
bdc36728 7873 ecs->event_thread->control.stop_step = 1;
bdc36728 7874 stop_waiting (ecs);
fd664c91
PA
7875}
7876
33d62d64
JK
7877/* Several print_*_reason functions to print why the inferior has stopped.
7878 We always print something when the inferior exits, or receives a signal.
7879 The rest of the cases are dealt with later on in normal_stop and
7880 print_it_typical. Ideally there should be a call to one of these
7881 print_*_reason functions functions from handle_inferior_event each time
22bcd14b 7882 stop_waiting is called.
33d62d64 7883
fd664c91
PA
7884 Note that we don't call these directly, instead we delegate that to
7885 the interpreters, through observers. Interpreters then call these
7886 with whatever uiout is right. */
33d62d64 7887
fd664c91
PA
7888void
7889print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7890{
fd664c91 7891 /* For CLI-like interpreters, print nothing. */
33d62d64 7892
112e8700 7893 if (uiout->is_mi_like_p ())
fd664c91 7894 {
112e8700 7895 uiout->field_string ("reason",
fd664c91
PA
7896 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7897 }
7898}
33d62d64 7899
fd664c91
PA
7900void
7901print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7902{
33d62d64 7903 annotate_signalled ();
112e8700
SM
7904 if (uiout->is_mi_like_p ())
7905 uiout->field_string
7906 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7907 uiout->text ("\nProgram terminated with signal ");
33d62d64 7908 annotate_signal_name ();
112e8700 7909 uiout->field_string ("signal-name",
2ea28649 7910 gdb_signal_to_name (siggnal));
33d62d64 7911 annotate_signal_name_end ();
112e8700 7912 uiout->text (", ");
33d62d64 7913 annotate_signal_string ();
112e8700 7914 uiout->field_string ("signal-meaning",
2ea28649 7915 gdb_signal_to_string (siggnal));
33d62d64 7916 annotate_signal_string_end ();
112e8700
SM
7917 uiout->text (".\n");
7918 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7919}
7920
fd664c91
PA
7921void
7922print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7923{
fda326dd
TT
7924 struct inferior *inf = current_inferior ();
7925 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
7926
33d62d64
JK
7927 annotate_exited (exitstatus);
7928 if (exitstatus)
7929 {
112e8700
SM
7930 if (uiout->is_mi_like_p ())
7931 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
7932 uiout->text ("[Inferior ");
7933 uiout->text (plongest (inf->num));
7934 uiout->text (" (");
7935 uiout->text (pidstr);
7936 uiout->text (") exited with code ");
7937 uiout->field_fmt ("exit-code", "0%o", (unsigned int) exitstatus);
7938 uiout->text ("]\n");
33d62d64
JK
7939 }
7940 else
11cf8741 7941 {
112e8700
SM
7942 if (uiout->is_mi_like_p ())
7943 uiout->field_string
7944 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
7945 uiout->text ("[Inferior ");
7946 uiout->text (plongest (inf->num));
7947 uiout->text (" (");
7948 uiout->text (pidstr);
7949 uiout->text (") exited normally]\n");
33d62d64 7950 }
33d62d64
JK
7951}
7952
012b3a21
WT
7953/* Some targets/architectures can do extra processing/display of
7954 segmentation faults. E.g., Intel MPX boundary faults.
7955 Call the architecture dependent function to handle the fault. */
7956
7957static void
7958handle_segmentation_fault (struct ui_out *uiout)
7959{
7960 struct regcache *regcache = get_current_regcache ();
7961 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7962
7963 if (gdbarch_handle_segmentation_fault_p (gdbarch))
7964 gdbarch_handle_segmentation_fault (gdbarch, uiout);
7965}
7966
fd664c91
PA
7967void
7968print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 7969{
f303dbd6
PA
7970 struct thread_info *thr = inferior_thread ();
7971
33d62d64
JK
7972 annotate_signal ();
7973
112e8700 7974 if (uiout->is_mi_like_p ())
f303dbd6
PA
7975 ;
7976 else if (show_thread_that_caused_stop ())
33d62d64 7977 {
f303dbd6 7978 const char *name;
33d62d64 7979
112e8700
SM
7980 uiout->text ("\nThread ");
7981 uiout->field_fmt ("thread-id", "%s", print_thread_id (thr));
f303dbd6
PA
7982
7983 name = thr->name != NULL ? thr->name : target_thread_name (thr);
7984 if (name != NULL)
7985 {
112e8700
SM
7986 uiout->text (" \"");
7987 uiout->field_fmt ("name", "%s", name);
7988 uiout->text ("\"");
f303dbd6 7989 }
33d62d64 7990 }
f303dbd6 7991 else
112e8700 7992 uiout->text ("\nProgram");
f303dbd6 7993
112e8700
SM
7994 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
7995 uiout->text (" stopped");
33d62d64
JK
7996 else
7997 {
112e8700 7998 uiout->text (" received signal ");
8b93c638 7999 annotate_signal_name ();
112e8700
SM
8000 if (uiout->is_mi_like_p ())
8001 uiout->field_string
8002 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8003 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8004 annotate_signal_name_end ();
112e8700 8005 uiout->text (", ");
8b93c638 8006 annotate_signal_string ();
112e8700 8007 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
8008
8009 if (siggnal == GDB_SIGNAL_SEGV)
8010 handle_segmentation_fault (uiout);
8011
8b93c638 8012 annotate_signal_string_end ();
33d62d64 8013 }
112e8700 8014 uiout->text (".\n");
33d62d64 8015}
252fbfc8 8016
fd664c91
PA
8017void
8018print_no_history_reason (struct ui_out *uiout)
33d62d64 8019{
112e8700 8020 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8021}
43ff13b4 8022
0c7e1a46
PA
8023/* Print current location without a level number, if we have changed
8024 functions or hit a breakpoint. Print source line if we have one.
8025 bpstat_print contains the logic deciding in detail what to print,
8026 based on the event(s) that just occurred. */
8027
243a9253
PA
8028static void
8029print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8030{
8031 int bpstat_ret;
f486487f 8032 enum print_what source_flag;
0c7e1a46
PA
8033 int do_frame_printing = 1;
8034 struct thread_info *tp = inferior_thread ();
8035
8036 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8037 switch (bpstat_ret)
8038 {
8039 case PRINT_UNKNOWN:
8040 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8041 should) carry around the function and does (or should) use
8042 that when doing a frame comparison. */
8043 if (tp->control.stop_step
8044 && frame_id_eq (tp->control.step_frame_id,
8045 get_frame_id (get_current_frame ()))
885eeb5b 8046 && tp->control.step_start_function == find_pc_function (stop_pc))
0c7e1a46
PA
8047 {
8048 /* Finished step, just print source line. */
8049 source_flag = SRC_LINE;
8050 }
8051 else
8052 {
8053 /* Print location and source line. */
8054 source_flag = SRC_AND_LOC;
8055 }
8056 break;
8057 case PRINT_SRC_AND_LOC:
8058 /* Print location and source line. */
8059 source_flag = SRC_AND_LOC;
8060 break;
8061 case PRINT_SRC_ONLY:
8062 source_flag = SRC_LINE;
8063 break;
8064 case PRINT_NOTHING:
8065 /* Something bogus. */
8066 source_flag = SRC_LINE;
8067 do_frame_printing = 0;
8068 break;
8069 default:
8070 internal_error (__FILE__, __LINE__, _("Unknown value."));
8071 }
8072
8073 /* The behavior of this routine with respect to the source
8074 flag is:
8075 SRC_LINE: Print only source line
8076 LOCATION: Print only location
8077 SRC_AND_LOC: Print location and source line. */
8078 if (do_frame_printing)
8079 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8080}
8081
243a9253
PA
8082/* See infrun.h. */
8083
8084void
8085print_stop_event (struct ui_out *uiout)
8086{
243a9253
PA
8087 struct target_waitstatus last;
8088 ptid_t last_ptid;
8089 struct thread_info *tp;
8090
8091 get_last_target_status (&last_ptid, &last);
8092
67ad9399
TT
8093 {
8094 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8095
67ad9399 8096 print_stop_location (&last);
243a9253 8097
67ad9399
TT
8098 /* Display the auto-display expressions. */
8099 do_displays ();
8100 }
243a9253
PA
8101
8102 tp = inferior_thread ();
8103 if (tp->thread_fsm != NULL
8104 && thread_fsm_finished_p (tp->thread_fsm))
8105 {
8106 struct return_value_info *rv;
8107
8108 rv = thread_fsm_return_value (tp->thread_fsm);
8109 if (rv != NULL)
8110 print_return_value (uiout, rv);
8111 }
0c7e1a46
PA
8112}
8113
388a7084
PA
8114/* See infrun.h. */
8115
8116void
8117maybe_remove_breakpoints (void)
8118{
8119 if (!breakpoints_should_be_inserted_now () && target_has_execution)
8120 {
8121 if (remove_breakpoints ())
8122 {
8123 target_terminal_ours_for_output ();
8124 printf_filtered (_("Cannot remove breakpoints because "
8125 "program is no longer writable.\nFurther "
8126 "execution is probably impossible.\n"));
8127 }
8128 }
8129}
8130
4c2f2a79
PA
8131/* The execution context that just caused a normal stop. */
8132
8133struct stop_context
8134{
8135 /* The stop ID. */
8136 ULONGEST stop_id;
c906108c 8137
4c2f2a79 8138 /* The event PTID. */
c906108c 8139
4c2f2a79
PA
8140 ptid_t ptid;
8141
8142 /* If stopp for a thread event, this is the thread that caused the
8143 stop. */
8144 struct thread_info *thread;
8145
8146 /* The inferior that caused the stop. */
8147 int inf_num;
8148};
8149
8150/* Returns a new stop context. If stopped for a thread event, this
8151 takes a strong reference to the thread. */
8152
8153static struct stop_context *
8154save_stop_context (void)
8155{
224c3ddb 8156 struct stop_context *sc = XNEW (struct stop_context);
4c2f2a79
PA
8157
8158 sc->stop_id = get_stop_id ();
8159 sc->ptid = inferior_ptid;
8160 sc->inf_num = current_inferior ()->num;
8161
8162 if (!ptid_equal (inferior_ptid, null_ptid))
8163 {
8164 /* Take a strong reference so that the thread can't be deleted
8165 yet. */
8166 sc->thread = inferior_thread ();
803bdfe4 8167 sc->thread->incref ();
4c2f2a79
PA
8168 }
8169 else
8170 sc->thread = NULL;
8171
8172 return sc;
8173}
8174
8175/* Release a stop context previously created with save_stop_context.
8176 Releases the strong reference to the thread as well. */
8177
8178static void
8179release_stop_context_cleanup (void *arg)
8180{
9a3c8263 8181 struct stop_context *sc = (struct stop_context *) arg;
4c2f2a79
PA
8182
8183 if (sc->thread != NULL)
803bdfe4 8184 sc->thread->decref ();
4c2f2a79
PA
8185 xfree (sc);
8186}
8187
8188/* Return true if the current context no longer matches the saved stop
8189 context. */
8190
8191static int
8192stop_context_changed (struct stop_context *prev)
8193{
8194 if (!ptid_equal (prev->ptid, inferior_ptid))
8195 return 1;
8196 if (prev->inf_num != current_inferior ()->num)
8197 return 1;
8198 if (prev->thread != NULL && prev->thread->state != THREAD_STOPPED)
8199 return 1;
8200 if (get_stop_id () != prev->stop_id)
8201 return 1;
8202 return 0;
8203}
8204
8205/* See infrun.h. */
8206
8207int
96baa820 8208normal_stop (void)
c906108c 8209{
73b65bb0
DJ
8210 struct target_waitstatus last;
8211 ptid_t last_ptid;
29f49a6a 8212 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
e1316e60 8213 ptid_t pid_ptid;
73b65bb0
DJ
8214
8215 get_last_target_status (&last_ptid, &last);
8216
4c2f2a79
PA
8217 new_stop_id ();
8218
29f49a6a
PA
8219 /* If an exception is thrown from this point on, make sure to
8220 propagate GDB's knowledge of the executing state to the
8221 frontend/user running state. A QUIT is an easy exception to see
8222 here, so do this before any filtered output. */
c35b1492
PA
8223 if (!non_stop)
8224 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
e1316e60
PA
8225 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8226 || last.kind == TARGET_WAITKIND_EXITED)
8227 {
8228 /* On some targets, we may still have live threads in the
8229 inferior when we get a process exit event. E.g., for
8230 "checkpoint", when the current checkpoint/fork exits,
8231 linux-fork.c automatically switches to another fork from
8232 within target_mourn_inferior. */
8233 if (!ptid_equal (inferior_ptid, null_ptid))
8234 {
8235 pid_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
8236 make_cleanup (finish_thread_state_cleanup, &pid_ptid);
8237 }
8238 }
8239 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
c35b1492 8240 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
29f49a6a 8241
b57bacec
PA
8242 /* As we're presenting a stop, and potentially removing breakpoints,
8243 update the thread list so we can tell whether there are threads
8244 running on the target. With target remote, for example, we can
8245 only learn about new threads when we explicitly update the thread
8246 list. Do this before notifying the interpreters about signal
8247 stops, end of stepping ranges, etc., so that the "new thread"
8248 output is emitted before e.g., "Program received signal FOO",
8249 instead of after. */
8250 update_thread_list ();
8251
8252 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
8253 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
8254
c906108c
SS
8255 /* As with the notification of thread events, we want to delay
8256 notifying the user that we've switched thread context until
8257 the inferior actually stops.
8258
73b65bb0
DJ
8259 There's no point in saying anything if the inferior has exited.
8260 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8261 "received a signal".
8262
8263 Also skip saying anything in non-stop mode. In that mode, as we
8264 don't want GDB to switch threads behind the user's back, to avoid
8265 races where the user is typing a command to apply to thread x,
8266 but GDB switches to thread y before the user finishes entering
8267 the command, fetch_inferior_event installs a cleanup to restore
8268 the current thread back to the thread the user had selected right
8269 after this event is handled, so we're not really switching, only
8270 informing of a stop. */
4f8d22e3
PA
8271 if (!non_stop
8272 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
73b65bb0
DJ
8273 && target_has_execution
8274 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8275 && last.kind != TARGET_WAITKIND_EXITED
8276 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8277 {
0e454242 8278 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8279 {
8280 target_terminal_ours_for_output ();
8281 printf_filtered (_("[Switching to %s]\n"),
8282 target_pid_to_str (inferior_ptid));
8283 annotate_thread_changed ();
8284 }
39f77062 8285 previous_inferior_ptid = inferior_ptid;
c906108c 8286 }
c906108c 8287
0e5bf2a8
PA
8288 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8289 {
0e454242 8290 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8291 if (current_ui->prompt_state == PROMPT_BLOCKED)
8292 {
8293 target_terminal_ours_for_output ();
8294 printf_filtered (_("No unwaited-for children left.\n"));
8295 }
0e5bf2a8
PA
8296 }
8297
b57bacec 8298 /* Note: this depends on the update_thread_list call above. */
388a7084 8299 maybe_remove_breakpoints ();
c906108c 8300
c906108c
SS
8301 /* If an auto-display called a function and that got a signal,
8302 delete that auto-display to avoid an infinite recursion. */
8303
8304 if (stopped_by_random_signal)
8305 disable_current_display ();
8306
0e454242 8307 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8308 {
8309 async_enable_stdin ();
8310 }
c906108c 8311
388a7084
PA
8312 /* Let the user/frontend see the threads as stopped. */
8313 do_cleanups (old_chain);
8314
8315 /* Select innermost stack frame - i.e., current frame is frame 0,
8316 and current location is based on that. Handle the case where the
8317 dummy call is returning after being stopped. E.g. the dummy call
8318 previously hit a breakpoint. (If the dummy call returns
8319 normally, we won't reach here.) Do this before the stop hook is
8320 run, so that it doesn't get to see the temporary dummy frame,
8321 which is not where we'll present the stop. */
8322 if (has_stack_frames ())
8323 {
8324 if (stop_stack_dummy == STOP_STACK_DUMMY)
8325 {
8326 /* Pop the empty frame that contains the stack dummy. This
8327 also restores inferior state prior to the call (struct
8328 infcall_suspend_state). */
8329 struct frame_info *frame = get_current_frame ();
8330
8331 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8332 frame_pop (frame);
8333 /* frame_pop calls reinit_frame_cache as the last thing it
8334 does which means there's now no selected frame. */
8335 }
8336
8337 select_frame (get_current_frame ());
8338
8339 /* Set the current source location. */
8340 set_current_sal_from_frame (get_current_frame ());
8341 }
dd7e2d2b
PA
8342
8343 /* Look up the hook_stop and run it (CLI internally handles problem
8344 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8345 if (stop_command != NULL)
8346 {
8347 struct stop_context *saved_context = save_stop_context ();
8348 struct cleanup *old_chain
8349 = make_cleanup (release_stop_context_cleanup, saved_context);
8350
8351 catch_errors (hook_stop_stub, stop_command,
8352 "Error while running hook_stop:\n", RETURN_MASK_ALL);
8353
8354 /* If the stop hook resumes the target, then there's no point in
8355 trying to notify about the previous stop; its context is
8356 gone. Likewise if the command switches thread or inferior --
8357 the observers would print a stop for the wrong
8358 thread/inferior. */
8359 if (stop_context_changed (saved_context))
8360 {
8361 do_cleanups (old_chain);
8362 return 1;
8363 }
8364 do_cleanups (old_chain);
8365 }
dd7e2d2b 8366
388a7084
PA
8367 /* Notify observers about the stop. This is where the interpreters
8368 print the stop event. */
8369 if (!ptid_equal (inferior_ptid, null_ptid))
8370 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
8371 stop_print_frame);
8372 else
8373 observer_notify_normal_stop (NULL, stop_print_frame);
347bddb7 8374
243a9253
PA
8375 annotate_stopped ();
8376
48844aa6
PA
8377 if (target_has_execution)
8378 {
8379 if (last.kind != TARGET_WAITKIND_SIGNALLED
8380 && last.kind != TARGET_WAITKIND_EXITED)
8381 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8382 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8383 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8384 }
6c95b8df
PA
8385
8386 /* Try to get rid of automatically added inferiors that are no
8387 longer needed. Keeping those around slows down things linearly.
8388 Note that this never removes the current inferior. */
8389 prune_inferiors ();
4c2f2a79
PA
8390
8391 return 0;
c906108c
SS
8392}
8393
8394static int
96baa820 8395hook_stop_stub (void *cmd)
c906108c 8396{
5913bcb0 8397 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
c906108c
SS
8398 return (0);
8399}
8400\f
c5aa993b 8401int
96baa820 8402signal_stop_state (int signo)
c906108c 8403{
d6b48e9c 8404 return signal_stop[signo];
c906108c
SS
8405}
8406
c5aa993b 8407int
96baa820 8408signal_print_state (int signo)
c906108c
SS
8409{
8410 return signal_print[signo];
8411}
8412
c5aa993b 8413int
96baa820 8414signal_pass_state (int signo)
c906108c
SS
8415{
8416 return signal_program[signo];
8417}
8418
2455069d
UW
8419static void
8420signal_cache_update (int signo)
8421{
8422 if (signo == -1)
8423 {
a493e3e2 8424 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8425 signal_cache_update (signo);
8426
8427 return;
8428 }
8429
8430 signal_pass[signo] = (signal_stop[signo] == 0
8431 && signal_print[signo] == 0
ab04a2af
TT
8432 && signal_program[signo] == 1
8433 && signal_catch[signo] == 0);
2455069d
UW
8434}
8435
488f131b 8436int
7bda5e4a 8437signal_stop_update (int signo, int state)
d4f3574e
SS
8438{
8439 int ret = signal_stop[signo];
abbb1732 8440
d4f3574e 8441 signal_stop[signo] = state;
2455069d 8442 signal_cache_update (signo);
d4f3574e
SS
8443 return ret;
8444}
8445
488f131b 8446int
7bda5e4a 8447signal_print_update (int signo, int state)
d4f3574e
SS
8448{
8449 int ret = signal_print[signo];
abbb1732 8450
d4f3574e 8451 signal_print[signo] = state;
2455069d 8452 signal_cache_update (signo);
d4f3574e
SS
8453 return ret;
8454}
8455
488f131b 8456int
7bda5e4a 8457signal_pass_update (int signo, int state)
d4f3574e
SS
8458{
8459 int ret = signal_program[signo];
abbb1732 8460
d4f3574e 8461 signal_program[signo] = state;
2455069d 8462 signal_cache_update (signo);
d4f3574e
SS
8463 return ret;
8464}
8465
ab04a2af
TT
8466/* Update the global 'signal_catch' from INFO and notify the
8467 target. */
8468
8469void
8470signal_catch_update (const unsigned int *info)
8471{
8472 int i;
8473
8474 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8475 signal_catch[i] = info[i] > 0;
8476 signal_cache_update (-1);
8477 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
8478}
8479
c906108c 8480static void
96baa820 8481sig_print_header (void)
c906108c 8482{
3e43a32a
MS
8483 printf_filtered (_("Signal Stop\tPrint\tPass "
8484 "to program\tDescription\n"));
c906108c
SS
8485}
8486
8487static void
2ea28649 8488sig_print_info (enum gdb_signal oursig)
c906108c 8489{
2ea28649 8490 const char *name = gdb_signal_to_name (oursig);
c906108c 8491 int name_padding = 13 - strlen (name);
96baa820 8492
c906108c
SS
8493 if (name_padding <= 0)
8494 name_padding = 0;
8495
8496 printf_filtered ("%s", name);
488f131b 8497 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8498 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8499 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8500 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8501 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8502}
8503
8504/* Specify how various signals in the inferior should be handled. */
8505
8506static void
96baa820 8507handle_command (char *args, int from_tty)
c906108c
SS
8508{
8509 char **argv;
8510 int digits, wordlen;
8511 int sigfirst, signum, siglast;
2ea28649 8512 enum gdb_signal oursig;
c906108c
SS
8513 int allsigs;
8514 int nsigs;
8515 unsigned char *sigs;
8516 struct cleanup *old_chain;
8517
8518 if (args == NULL)
8519 {
e2e0b3e5 8520 error_no_arg (_("signal to handle"));
c906108c
SS
8521 }
8522
1777feb0 8523 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8524
a493e3e2 8525 nsigs = (int) GDB_SIGNAL_LAST;
c906108c
SS
8526 sigs = (unsigned char *) alloca (nsigs);
8527 memset (sigs, 0, nsigs);
8528
1777feb0 8529 /* Break the command line up into args. */
c906108c 8530
d1a41061 8531 argv = gdb_buildargv (args);
7a292a7a 8532 old_chain = make_cleanup_freeargv (argv);
c906108c
SS
8533
8534 /* Walk through the args, looking for signal oursigs, signal names, and
8535 actions. Signal numbers and signal names may be interspersed with
8536 actions, with the actions being performed for all signals cumulatively
1777feb0 8537 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c
SS
8538
8539 while (*argv != NULL)
8540 {
8541 wordlen = strlen (*argv);
8542 for (digits = 0; isdigit ((*argv)[digits]); digits++)
8543 {;
8544 }
8545 allsigs = 0;
8546 sigfirst = siglast = -1;
8547
8548 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
8549 {
8550 /* Apply action to all signals except those used by the
1777feb0 8551 debugger. Silently skip those. */
c906108c
SS
8552 allsigs = 1;
8553 sigfirst = 0;
8554 siglast = nsigs - 1;
8555 }
8556 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
8557 {
8558 SET_SIGS (nsigs, sigs, signal_stop);
8559 SET_SIGS (nsigs, sigs, signal_print);
8560 }
8561 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
8562 {
8563 UNSET_SIGS (nsigs, sigs, signal_program);
8564 }
8565 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
8566 {
8567 SET_SIGS (nsigs, sigs, signal_print);
8568 }
8569 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
8570 {
8571 SET_SIGS (nsigs, sigs, signal_program);
8572 }
8573 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
8574 {
8575 UNSET_SIGS (nsigs, sigs, signal_stop);
8576 }
8577 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
8578 {
8579 SET_SIGS (nsigs, sigs, signal_program);
8580 }
8581 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
8582 {
8583 UNSET_SIGS (nsigs, sigs, signal_print);
8584 UNSET_SIGS (nsigs, sigs, signal_stop);
8585 }
8586 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
8587 {
8588 UNSET_SIGS (nsigs, sigs, signal_program);
8589 }
8590 else if (digits > 0)
8591 {
8592 /* It is numeric. The numeric signal refers to our own
8593 internal signal numbering from target.h, not to host/target
8594 signal number. This is a feature; users really should be
8595 using symbolic names anyway, and the common ones like
8596 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8597
8598 sigfirst = siglast = (int)
2ea28649 8599 gdb_signal_from_command (atoi (*argv));
c906108c
SS
8600 if ((*argv)[digits] == '-')
8601 {
8602 siglast = (int)
2ea28649 8603 gdb_signal_from_command (atoi ((*argv) + digits + 1));
c906108c
SS
8604 }
8605 if (sigfirst > siglast)
8606 {
1777feb0 8607 /* Bet he didn't figure we'd think of this case... */
c906108c
SS
8608 signum = sigfirst;
8609 sigfirst = siglast;
8610 siglast = signum;
8611 }
8612 }
8613 else
8614 {
2ea28649 8615 oursig = gdb_signal_from_name (*argv);
a493e3e2 8616 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8617 {
8618 sigfirst = siglast = (int) oursig;
8619 }
8620 else
8621 {
8622 /* Not a number and not a recognized flag word => complain. */
8a3fe4f8 8623 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
c906108c
SS
8624 }
8625 }
8626
8627 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8628 which signals to apply actions to. */
c906108c
SS
8629
8630 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
8631 {
2ea28649 8632 switch ((enum gdb_signal) signum)
c906108c 8633 {
a493e3e2
PA
8634 case GDB_SIGNAL_TRAP:
8635 case GDB_SIGNAL_INT:
c906108c
SS
8636 if (!allsigs && !sigs[signum])
8637 {
9e2f0ad4 8638 if (query (_("%s is used by the debugger.\n\
3e43a32a 8639Are you sure you want to change it? "),
2ea28649 8640 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8641 {
8642 sigs[signum] = 1;
8643 }
8644 else
8645 {
a3f17187 8646 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8647 gdb_flush (gdb_stdout);
8648 }
8649 }
8650 break;
a493e3e2
PA
8651 case GDB_SIGNAL_0:
8652 case GDB_SIGNAL_DEFAULT:
8653 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8654 /* Make sure that "all" doesn't print these. */
8655 break;
8656 default:
8657 sigs[signum] = 1;
8658 break;
8659 }
8660 }
8661
8662 argv++;
8663 }
8664
3a031f65
PA
8665 for (signum = 0; signum < nsigs; signum++)
8666 if (sigs[signum])
8667 {
2455069d 8668 signal_cache_update (-1);
a493e3e2
PA
8669 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
8670 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
c906108c 8671
3a031f65
PA
8672 if (from_tty)
8673 {
8674 /* Show the results. */
8675 sig_print_header ();
8676 for (; signum < nsigs; signum++)
8677 if (sigs[signum])
aead7601 8678 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8679 }
8680
8681 break;
8682 }
c906108c
SS
8683
8684 do_cleanups (old_chain);
8685}
8686
de0bea00
MF
8687/* Complete the "handle" command. */
8688
8689static VEC (char_ptr) *
8690handle_completer (struct cmd_list_element *ignore,
6f937416 8691 const char *text, const char *word)
de0bea00
MF
8692{
8693 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
8694 static const char * const keywords[] =
8695 {
8696 "all",
8697 "stop",
8698 "ignore",
8699 "print",
8700 "pass",
8701 "nostop",
8702 "noignore",
8703 "noprint",
8704 "nopass",
8705 NULL,
8706 };
8707
8708 vec_signals = signal_completer (ignore, text, word);
8709 vec_keywords = complete_on_enum (keywords, word, word);
8710
8711 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
8712 VEC_free (char_ptr, vec_signals);
8713 VEC_free (char_ptr, vec_keywords);
8714 return return_val;
8715}
8716
2ea28649
PA
8717enum gdb_signal
8718gdb_signal_from_command (int num)
ed01b82c
PA
8719{
8720 if (num >= 1 && num <= 15)
2ea28649 8721 return (enum gdb_signal) num;
ed01b82c
PA
8722 error (_("Only signals 1-15 are valid as numeric signals.\n\
8723Use \"info signals\" for a list of symbolic signals."));
8724}
8725
c906108c
SS
8726/* Print current contents of the tables set by the handle command.
8727 It is possible we should just be printing signals actually used
8728 by the current target (but for things to work right when switching
8729 targets, all signals should be in the signal tables). */
8730
8731static void
96baa820 8732signals_info (char *signum_exp, int from_tty)
c906108c 8733{
2ea28649 8734 enum gdb_signal oursig;
abbb1732 8735
c906108c
SS
8736 sig_print_header ();
8737
8738 if (signum_exp)
8739 {
8740 /* First see if this is a symbol name. */
2ea28649 8741 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8742 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8743 {
8744 /* No, try numeric. */
8745 oursig =
2ea28649 8746 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8747 }
8748 sig_print_info (oursig);
8749 return;
8750 }
8751
8752 printf_filtered ("\n");
8753 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8754 for (oursig = GDB_SIGNAL_FIRST;
8755 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8756 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8757 {
8758 QUIT;
8759
a493e3e2
PA
8760 if (oursig != GDB_SIGNAL_UNKNOWN
8761 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8762 sig_print_info (oursig);
8763 }
8764
3e43a32a
MS
8765 printf_filtered (_("\nUse the \"handle\" command "
8766 "to change these tables.\n"));
c906108c 8767}
4aa995e1
PA
8768
8769/* The $_siginfo convenience variable is a bit special. We don't know
8770 for sure the type of the value until we actually have a chance to
7a9dd1b2 8771 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8772 also dependent on which thread you have selected.
8773
8774 1. making $_siginfo be an internalvar that creates a new value on
8775 access.
8776
8777 2. making the value of $_siginfo be an lval_computed value. */
8778
8779/* This function implements the lval_computed support for reading a
8780 $_siginfo value. */
8781
8782static void
8783siginfo_value_read (struct value *v)
8784{
8785 LONGEST transferred;
8786
a911d87a
PA
8787 /* If we can access registers, so can we access $_siginfo. Likewise
8788 vice versa. */
8789 validate_registers_access ();
c709acd1 8790
4aa995e1
PA
8791 transferred =
8792 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
8793 NULL,
8794 value_contents_all_raw (v),
8795 value_offset (v),
8796 TYPE_LENGTH (value_type (v)));
8797
8798 if (transferred != TYPE_LENGTH (value_type (v)))
8799 error (_("Unable to read siginfo"));
8800}
8801
8802/* This function implements the lval_computed support for writing a
8803 $_siginfo value. */
8804
8805static void
8806siginfo_value_write (struct value *v, struct value *fromval)
8807{
8808 LONGEST transferred;
8809
a911d87a
PA
8810 /* If we can access registers, so can we access $_siginfo. Likewise
8811 vice versa. */
8812 validate_registers_access ();
c709acd1 8813
4aa995e1
PA
8814 transferred = target_write (&current_target,
8815 TARGET_OBJECT_SIGNAL_INFO,
8816 NULL,
8817 value_contents_all_raw (fromval),
8818 value_offset (v),
8819 TYPE_LENGTH (value_type (fromval)));
8820
8821 if (transferred != TYPE_LENGTH (value_type (fromval)))
8822 error (_("Unable to write siginfo"));
8823}
8824
c8f2448a 8825static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8826 {
8827 siginfo_value_read,
8828 siginfo_value_write
8829 };
8830
8831/* Return a new value with the correct type for the siginfo object of
78267919
UW
8832 the current thread using architecture GDBARCH. Return a void value
8833 if there's no object available. */
4aa995e1 8834
2c0b251b 8835static struct value *
22d2b532
SDJ
8836siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8837 void *ignore)
4aa995e1 8838{
4aa995e1 8839 if (target_has_stack
78267919
UW
8840 && !ptid_equal (inferior_ptid, null_ptid)
8841 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8842 {
78267919 8843 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8844
78267919 8845 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8846 }
8847
78267919 8848 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8849}
8850
c906108c 8851\f
16c381f0
JK
8852/* infcall_suspend_state contains state about the program itself like its
8853 registers and any signal it received when it last stopped.
8854 This state must be restored regardless of how the inferior function call
8855 ends (either successfully, or after it hits a breakpoint or signal)
8856 if the program is to properly continue where it left off. */
8857
8858struct infcall_suspend_state
7a292a7a 8859{
16c381f0 8860 struct thread_suspend_state thread_suspend;
16c381f0
JK
8861
8862 /* Other fields: */
7a292a7a 8863 CORE_ADDR stop_pc;
b89667eb 8864 struct regcache *registers;
1736ad11 8865
35515841 8866 /* Format of SIGINFO_DATA or NULL if it is not present. */
1736ad11
JK
8867 struct gdbarch *siginfo_gdbarch;
8868
8869 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
8870 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
8871 content would be invalid. */
8872 gdb_byte *siginfo_data;
b89667eb
DE
8873};
8874
16c381f0
JK
8875struct infcall_suspend_state *
8876save_infcall_suspend_state (void)
b89667eb 8877{
16c381f0 8878 struct infcall_suspend_state *inf_state;
b89667eb 8879 struct thread_info *tp = inferior_thread ();
1736ad11
JK
8880 struct regcache *regcache = get_current_regcache ();
8881 struct gdbarch *gdbarch = get_regcache_arch (regcache);
8882 gdb_byte *siginfo_data = NULL;
8883
8884 if (gdbarch_get_siginfo_type_p (gdbarch))
8885 {
8886 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8887 size_t len = TYPE_LENGTH (type);
8888 struct cleanup *back_to;
8889
224c3ddb 8890 siginfo_data = (gdb_byte *) xmalloc (len);
1736ad11
JK
8891 back_to = make_cleanup (xfree, siginfo_data);
8892
8893 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
8894 siginfo_data, 0, len) == len)
8895 discard_cleanups (back_to);
8896 else
8897 {
8898 /* Errors ignored. */
8899 do_cleanups (back_to);
8900 siginfo_data = NULL;
8901 }
8902 }
8903
41bf6aca 8904 inf_state = XCNEW (struct infcall_suspend_state);
1736ad11
JK
8905
8906 if (siginfo_data)
8907 {
8908 inf_state->siginfo_gdbarch = gdbarch;
8909 inf_state->siginfo_data = siginfo_data;
8910 }
b89667eb 8911
16c381f0 8912 inf_state->thread_suspend = tp->suspend;
16c381f0 8913
35515841 8914 /* run_inferior_call will not use the signal due to its `proceed' call with
a493e3e2
PA
8915 GDB_SIGNAL_0 anyway. */
8916 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8917
b89667eb
DE
8918 inf_state->stop_pc = stop_pc;
8919
1736ad11 8920 inf_state->registers = regcache_dup (regcache);
b89667eb
DE
8921
8922 return inf_state;
8923}
8924
8925/* Restore inferior session state to INF_STATE. */
8926
8927void
16c381f0 8928restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8929{
8930 struct thread_info *tp = inferior_thread ();
1736ad11
JK
8931 struct regcache *regcache = get_current_regcache ();
8932 struct gdbarch *gdbarch = get_regcache_arch (regcache);
b89667eb 8933
16c381f0 8934 tp->suspend = inf_state->thread_suspend;
16c381f0 8935
b89667eb
DE
8936 stop_pc = inf_state->stop_pc;
8937
1736ad11
JK
8938 if (inf_state->siginfo_gdbarch == gdbarch)
8939 {
8940 struct type *type = gdbarch_get_siginfo_type (gdbarch);
1736ad11
JK
8941
8942 /* Errors ignored. */
8943 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6acef6cd 8944 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
1736ad11
JK
8945 }
8946
b89667eb
DE
8947 /* The inferior can be gone if the user types "print exit(0)"
8948 (and perhaps other times). */
8949 if (target_has_execution)
8950 /* NB: The register write goes through to the target. */
1736ad11 8951 regcache_cpy (regcache, inf_state->registers);
803b5f95 8952
16c381f0 8953 discard_infcall_suspend_state (inf_state);
b89667eb
DE
8954}
8955
8956static void
16c381f0 8957do_restore_infcall_suspend_state_cleanup (void *state)
b89667eb 8958{
9a3c8263 8959 restore_infcall_suspend_state ((struct infcall_suspend_state *) state);
b89667eb
DE
8960}
8961
8962struct cleanup *
16c381f0
JK
8963make_cleanup_restore_infcall_suspend_state
8964 (struct infcall_suspend_state *inf_state)
b89667eb 8965{
16c381f0 8966 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
b89667eb
DE
8967}
8968
8969void
16c381f0 8970discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8971{
8972 regcache_xfree (inf_state->registers);
803b5f95 8973 xfree (inf_state->siginfo_data);
b89667eb
DE
8974 xfree (inf_state);
8975}
8976
8977struct regcache *
16c381f0 8978get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb
DE
8979{
8980 return inf_state->registers;
8981}
8982
16c381f0
JK
8983/* infcall_control_state contains state regarding gdb's control of the
8984 inferior itself like stepping control. It also contains session state like
8985 the user's currently selected frame. */
b89667eb 8986
16c381f0 8987struct infcall_control_state
b89667eb 8988{
16c381f0
JK
8989 struct thread_control_state thread_control;
8990 struct inferior_control_state inferior_control;
d82142e2
JK
8991
8992 /* Other fields: */
8993 enum stop_stack_kind stop_stack_dummy;
8994 int stopped_by_random_signal;
7a292a7a 8995
b89667eb 8996 /* ID if the selected frame when the inferior function call was made. */
101dcfbe 8997 struct frame_id selected_frame_id;
7a292a7a
SS
8998};
8999
c906108c 9000/* Save all of the information associated with the inferior<==>gdb
b89667eb 9001 connection. */
c906108c 9002
16c381f0
JK
9003struct infcall_control_state *
9004save_infcall_control_state (void)
c906108c 9005{
8d749320
SM
9006 struct infcall_control_state *inf_status =
9007 XNEW (struct infcall_control_state);
4e1c45ea 9008 struct thread_info *tp = inferior_thread ();
d6b48e9c 9009 struct inferior *inf = current_inferior ();
7a292a7a 9010
16c381f0
JK
9011 inf_status->thread_control = tp->control;
9012 inf_status->inferior_control = inf->control;
d82142e2 9013
8358c15c 9014 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9015 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9016
16c381f0
JK
9017 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9018 chain. If caller's caller is walking the chain, they'll be happier if we
9019 hand them back the original chain when restore_infcall_control_state is
9020 called. */
9021 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
9022
9023 /* Other fields: */
9024 inf_status->stop_stack_dummy = stop_stack_dummy;
9025 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9026
206415a3 9027 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 9028
7a292a7a 9029 return inf_status;
c906108c
SS
9030}
9031
c906108c 9032static int
96baa820 9033restore_selected_frame (void *args)
c906108c 9034{
488f131b 9035 struct frame_id *fid = (struct frame_id *) args;
c906108c 9036 struct frame_info *frame;
c906108c 9037
101dcfbe 9038 frame = frame_find_by_id (*fid);
c906108c 9039
aa0cd9c1
AC
9040 /* If inf_status->selected_frame_id is NULL, there was no previously
9041 selected frame. */
101dcfbe 9042 if (frame == NULL)
c906108c 9043 {
8a3fe4f8 9044 warning (_("Unable to restore previously selected frame."));
c906108c
SS
9045 return 0;
9046 }
9047
0f7d239c 9048 select_frame (frame);
c906108c
SS
9049
9050 return (1);
9051}
9052
b89667eb
DE
9053/* Restore inferior session state to INF_STATUS. */
9054
c906108c 9055void
16c381f0 9056restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9057{
4e1c45ea 9058 struct thread_info *tp = inferior_thread ();
d6b48e9c 9059 struct inferior *inf = current_inferior ();
4e1c45ea 9060
8358c15c
JK
9061 if (tp->control.step_resume_breakpoint)
9062 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9063
5b79abe7
TT
9064 if (tp->control.exception_resume_breakpoint)
9065 tp->control.exception_resume_breakpoint->disposition
9066 = disp_del_at_next_stop;
9067
d82142e2 9068 /* Handle the bpstat_copy of the chain. */
16c381f0 9069 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9070
16c381f0
JK
9071 tp->control = inf_status->thread_control;
9072 inf->control = inf_status->inferior_control;
d82142e2
JK
9073
9074 /* Other fields: */
9075 stop_stack_dummy = inf_status->stop_stack_dummy;
9076 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9077
b89667eb 9078 if (target_has_stack)
c906108c 9079 {
c906108c 9080 /* The point of catch_errors is that if the stack is clobbered,
101dcfbe
AC
9081 walking the stack might encounter a garbage pointer and
9082 error() trying to dereference it. */
488f131b
JB
9083 if (catch_errors
9084 (restore_selected_frame, &inf_status->selected_frame_id,
9085 "Unable to restore previously selected frame:\n",
9086 RETURN_MASK_ERROR) == 0)
c906108c
SS
9087 /* Error in restoring the selected frame. Select the innermost
9088 frame. */
0f7d239c 9089 select_frame (get_current_frame ());
c906108c 9090 }
c906108c 9091
72cec141 9092 xfree (inf_status);
7a292a7a 9093}
c906108c 9094
74b7792f 9095static void
16c381f0 9096do_restore_infcall_control_state_cleanup (void *sts)
74b7792f 9097{
9a3c8263 9098 restore_infcall_control_state ((struct infcall_control_state *) sts);
74b7792f
AC
9099}
9100
9101struct cleanup *
16c381f0
JK
9102make_cleanup_restore_infcall_control_state
9103 (struct infcall_control_state *inf_status)
74b7792f 9104{
16c381f0 9105 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
74b7792f
AC
9106}
9107
c906108c 9108void
16c381f0 9109discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9110{
8358c15c
JK
9111 if (inf_status->thread_control.step_resume_breakpoint)
9112 inf_status->thread_control.step_resume_breakpoint->disposition
9113 = disp_del_at_next_stop;
9114
5b79abe7
TT
9115 if (inf_status->thread_control.exception_resume_breakpoint)
9116 inf_status->thread_control.exception_resume_breakpoint->disposition
9117 = disp_del_at_next_stop;
9118
1777feb0 9119 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9120 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9121
72cec141 9122 xfree (inf_status);
7a292a7a 9123}
b89667eb 9124\f
ca6724c1
KB
9125/* restore_inferior_ptid() will be used by the cleanup machinery
9126 to restore the inferior_ptid value saved in a call to
9127 save_inferior_ptid(). */
ce696e05
KB
9128
9129static void
9130restore_inferior_ptid (void *arg)
9131{
9a3c8263 9132 ptid_t *saved_ptid_ptr = (ptid_t *) arg;
abbb1732 9133
ce696e05
KB
9134 inferior_ptid = *saved_ptid_ptr;
9135 xfree (arg);
9136}
9137
9138/* Save the value of inferior_ptid so that it may be restored by a
9139 later call to do_cleanups(). Returns the struct cleanup pointer
9140 needed for later doing the cleanup. */
9141
9142struct cleanup *
9143save_inferior_ptid (void)
9144{
8d749320 9145 ptid_t *saved_ptid_ptr = XNEW (ptid_t);
ce696e05 9146
ce696e05
KB
9147 *saved_ptid_ptr = inferior_ptid;
9148 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
9149}
0c557179 9150
7f89fd65 9151/* See infrun.h. */
0c557179
SDJ
9152
9153void
9154clear_exit_convenience_vars (void)
9155{
9156 clear_internalvar (lookup_internalvar ("_exitsignal"));
9157 clear_internalvar (lookup_internalvar ("_exitcode"));
9158}
c5aa993b 9159\f
488f131b 9160
b2175913
MS
9161/* User interface for reverse debugging:
9162 Set exec-direction / show exec-direction commands
9163 (returns error unless target implements to_set_exec_direction method). */
9164
170742de 9165enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
9166static const char exec_forward[] = "forward";
9167static const char exec_reverse[] = "reverse";
9168static const char *exec_direction = exec_forward;
40478521 9169static const char *const exec_direction_names[] = {
b2175913
MS
9170 exec_forward,
9171 exec_reverse,
9172 NULL
9173};
9174
9175static void
9176set_exec_direction_func (char *args, int from_tty,
9177 struct cmd_list_element *cmd)
9178{
9179 if (target_can_execute_reverse)
9180 {
9181 if (!strcmp (exec_direction, exec_forward))
9182 execution_direction = EXEC_FORWARD;
9183 else if (!strcmp (exec_direction, exec_reverse))
9184 execution_direction = EXEC_REVERSE;
9185 }
8bbed405
MS
9186 else
9187 {
9188 exec_direction = exec_forward;
9189 error (_("Target does not support this operation."));
9190 }
b2175913
MS
9191}
9192
9193static void
9194show_exec_direction_func (struct ui_file *out, int from_tty,
9195 struct cmd_list_element *cmd, const char *value)
9196{
9197 switch (execution_direction) {
9198 case EXEC_FORWARD:
9199 fprintf_filtered (out, _("Forward.\n"));
9200 break;
9201 case EXEC_REVERSE:
9202 fprintf_filtered (out, _("Reverse.\n"));
9203 break;
b2175913 9204 default:
d8b34453
PA
9205 internal_error (__FILE__, __LINE__,
9206 _("bogus execution_direction value: %d"),
9207 (int) execution_direction);
b2175913
MS
9208 }
9209}
9210
d4db2f36
PA
9211static void
9212show_schedule_multiple (struct ui_file *file, int from_tty,
9213 struct cmd_list_element *c, const char *value)
9214{
3e43a32a
MS
9215 fprintf_filtered (file, _("Resuming the execution of threads "
9216 "of all processes is %s.\n"), value);
d4db2f36 9217}
ad52ddc6 9218
22d2b532
SDJ
9219/* Implementation of `siginfo' variable. */
9220
9221static const struct internalvar_funcs siginfo_funcs =
9222{
9223 siginfo_make_value,
9224 NULL,
9225 NULL
9226};
9227
372316f1
PA
9228/* Callback for infrun's target events source. This is marked when a
9229 thread has a pending status to process. */
9230
9231static void
9232infrun_async_inferior_event_handler (gdb_client_data data)
9233{
372316f1
PA
9234 inferior_event_handler (INF_REG_EVENT, NULL);
9235}
9236
c906108c 9237void
96baa820 9238_initialize_infrun (void)
c906108c 9239{
52f0bd74
AC
9240 int i;
9241 int numsigs;
de0bea00 9242 struct cmd_list_element *c;
c906108c 9243
372316f1
PA
9244 /* Register extra event sources in the event loop. */
9245 infrun_async_inferior_event_token
9246 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
9247
1bedd215
AC
9248 add_info ("signals", signals_info, _("\
9249What debugger does when program gets various signals.\n\
9250Specify a signal as argument to print info on that signal only."));
c906108c
SS
9251 add_info_alias ("handle", "signals", 0);
9252
de0bea00 9253 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9254Specify how to handle signals.\n\
486c7739 9255Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9256Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9257If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
9258will be displayed instead.\n\
9259\n\
c906108c
SS
9260Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9261from 1-15 are allowed for compatibility with old versions of GDB.\n\
9262Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9263The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9264used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9265\n\
1bedd215 9266Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
9267\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9268Stop means reenter debugger if this signal happens (implies print).\n\
9269Print means print a message if this signal happens.\n\
9270Pass means let program see this signal; otherwise program doesn't know.\n\
9271Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
9272Pass and Stop may be combined.\n\
9273\n\
9274Multiple signals may be specified. Signal numbers and signal names\n\
9275may be interspersed with actions, with the actions being performed for\n\
9276all signals cumulatively specified."));
de0bea00 9277 set_cmd_completer (c, handle_completer);
486c7739 9278
c906108c 9279 if (!dbx_commands)
1a966eab
AC
9280 stop_command = add_cmd ("stop", class_obscure,
9281 not_just_help_class_command, _("\
9282There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9283This allows you to set a list of commands to be run each time execution\n\
1a966eab 9284of the program stops."), &cmdlist);
c906108c 9285
ccce17b0 9286 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
9287Set inferior debugging."), _("\
9288Show inferior debugging."), _("\
9289When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
9290 NULL,
9291 show_debug_infrun,
9292 &setdebuglist, &showdebuglist);
527159b7 9293
3e43a32a
MS
9294 add_setshow_boolean_cmd ("displaced", class_maintenance,
9295 &debug_displaced, _("\
237fc4c9
PA
9296Set displaced stepping debugging."), _("\
9297Show displaced stepping debugging."), _("\
9298When non-zero, displaced stepping specific debugging is enabled."),
9299 NULL,
9300 show_debug_displaced,
9301 &setdebuglist, &showdebuglist);
9302
ad52ddc6
PA
9303 add_setshow_boolean_cmd ("non-stop", no_class,
9304 &non_stop_1, _("\
9305Set whether gdb controls the inferior in non-stop mode."), _("\
9306Show whether gdb controls the inferior in non-stop mode."), _("\
9307When debugging a multi-threaded program and this setting is\n\
9308off (the default, also called all-stop mode), when one thread stops\n\
9309(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9310all other threads in the program while you interact with the thread of\n\
9311interest. When you continue or step a thread, you can allow the other\n\
9312threads to run, or have them remain stopped, but while you inspect any\n\
9313thread's state, all threads stop.\n\
9314\n\
9315In non-stop mode, when one thread stops, other threads can continue\n\
9316to run freely. You'll be able to step each thread independently,\n\
9317leave it stopped or free to run as needed."),
9318 set_non_stop,
9319 show_non_stop,
9320 &setlist,
9321 &showlist);
9322
a493e3e2 9323 numsigs = (int) GDB_SIGNAL_LAST;
8d749320
SM
9324 signal_stop = XNEWVEC (unsigned char, numsigs);
9325 signal_print = XNEWVEC (unsigned char, numsigs);
9326 signal_program = XNEWVEC (unsigned char, numsigs);
9327 signal_catch = XNEWVEC (unsigned char, numsigs);
9328 signal_pass = XNEWVEC (unsigned char, numsigs);
c906108c
SS
9329 for (i = 0; i < numsigs; i++)
9330 {
9331 signal_stop[i] = 1;
9332 signal_print[i] = 1;
9333 signal_program[i] = 1;
ab04a2af 9334 signal_catch[i] = 0;
c906108c
SS
9335 }
9336
4d9d9d04
PA
9337 /* Signals caused by debugger's own actions should not be given to
9338 the program afterwards.
9339
9340 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9341 explicitly specifies that it should be delivered to the target
9342 program. Typically, that would occur when a user is debugging a
9343 target monitor on a simulator: the target monitor sets a
9344 breakpoint; the simulator encounters this breakpoint and halts
9345 the simulation handing control to GDB; GDB, noting that the stop
9346 address doesn't map to any known breakpoint, returns control back
9347 to the simulator; the simulator then delivers the hardware
9348 equivalent of a GDB_SIGNAL_TRAP to the program being
9349 debugged. */
a493e3e2
PA
9350 signal_program[GDB_SIGNAL_TRAP] = 0;
9351 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9352
9353 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9354 signal_stop[GDB_SIGNAL_ALRM] = 0;
9355 signal_print[GDB_SIGNAL_ALRM] = 0;
9356 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9357 signal_print[GDB_SIGNAL_VTALRM] = 0;
9358 signal_stop[GDB_SIGNAL_PROF] = 0;
9359 signal_print[GDB_SIGNAL_PROF] = 0;
9360 signal_stop[GDB_SIGNAL_CHLD] = 0;
9361 signal_print[GDB_SIGNAL_CHLD] = 0;
9362 signal_stop[GDB_SIGNAL_IO] = 0;
9363 signal_print[GDB_SIGNAL_IO] = 0;
9364 signal_stop[GDB_SIGNAL_POLL] = 0;
9365 signal_print[GDB_SIGNAL_POLL] = 0;
9366 signal_stop[GDB_SIGNAL_URG] = 0;
9367 signal_print[GDB_SIGNAL_URG] = 0;
9368 signal_stop[GDB_SIGNAL_WINCH] = 0;
9369 signal_print[GDB_SIGNAL_WINCH] = 0;
9370 signal_stop[GDB_SIGNAL_PRIO] = 0;
9371 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9372
cd0fc7c3
SS
9373 /* These signals are used internally by user-level thread
9374 implementations. (See signal(5) on Solaris.) Like the above
9375 signals, a healthy program receives and handles them as part of
9376 its normal operation. */
a493e3e2
PA
9377 signal_stop[GDB_SIGNAL_LWP] = 0;
9378 signal_print[GDB_SIGNAL_LWP] = 0;
9379 signal_stop[GDB_SIGNAL_WAITING] = 0;
9380 signal_print[GDB_SIGNAL_WAITING] = 0;
9381 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9382 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9383 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9384 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9385
2455069d
UW
9386 /* Update cached state. */
9387 signal_cache_update (-1);
9388
85c07804
AC
9389 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9390 &stop_on_solib_events, _("\
9391Set stopping for shared library events."), _("\
9392Show stopping for shared library events."), _("\
c906108c
SS
9393If nonzero, gdb will give control to the user when the dynamic linker\n\
9394notifies gdb of shared library events. The most common event of interest\n\
85c07804 9395to the user would be loading/unloading of a new library."),
f9e14852 9396 set_stop_on_solib_events,
920d2a44 9397 show_stop_on_solib_events,
85c07804 9398 &setlist, &showlist);
c906108c 9399
7ab04401
AC
9400 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9401 follow_fork_mode_kind_names,
9402 &follow_fork_mode_string, _("\
9403Set debugger response to a program call of fork or vfork."), _("\
9404Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9405A fork or vfork creates a new process. follow-fork-mode can be:\n\
9406 parent - the original process is debugged after a fork\n\
9407 child - the new process is debugged after a fork\n\
ea1dd7bc 9408The unfollowed process will continue to run.\n\
7ab04401
AC
9409By default, the debugger will follow the parent process."),
9410 NULL,
920d2a44 9411 show_follow_fork_mode_string,
7ab04401
AC
9412 &setlist, &showlist);
9413
6c95b8df
PA
9414 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9415 follow_exec_mode_names,
9416 &follow_exec_mode_string, _("\
9417Set debugger response to a program call of exec."), _("\
9418Show debugger response to a program call of exec."), _("\
9419An exec call replaces the program image of a process.\n\
9420\n\
9421follow-exec-mode can be:\n\
9422\n\
cce7e648 9423 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9424to this new inferior. The program the process was running before\n\
9425the exec call can be restarted afterwards by restarting the original\n\
9426inferior.\n\
9427\n\
9428 same - the debugger keeps the process bound to the same inferior.\n\
9429The new executable image replaces the previous executable loaded in\n\
9430the inferior. Restarting the inferior after the exec call restarts\n\
9431the executable the process was running after the exec call.\n\
9432\n\
9433By default, the debugger will use the same inferior."),
9434 NULL,
9435 show_follow_exec_mode_string,
9436 &setlist, &showlist);
9437
7ab04401
AC
9438 add_setshow_enum_cmd ("scheduler-locking", class_run,
9439 scheduler_enums, &scheduler_mode, _("\
9440Set mode for locking scheduler during execution."), _("\
9441Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9442off == no locking (threads may preempt at any time)\n\
9443on == full locking (no thread except the current thread may run)\n\
9444 This applies to both normal execution and replay mode.\n\
9445step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9446 In this mode, other threads may run during other commands.\n\
9447 This applies to both normal execution and replay mode.\n\
9448replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9449 set_schedlock_func, /* traps on target vector */
920d2a44 9450 show_scheduler_mode,
7ab04401 9451 &setlist, &showlist);
5fbbeb29 9452
d4db2f36
PA
9453 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9454Set mode for resuming threads of all processes."), _("\
9455Show mode for resuming threads of all processes."), _("\
9456When on, execution commands (such as 'continue' or 'next') resume all\n\
9457threads of all processes. When off (which is the default), execution\n\
9458commands only resume the threads of the current process. The set of\n\
9459threads that are resumed is further refined by the scheduler-locking\n\
9460mode (see help set scheduler-locking)."),
9461 NULL,
9462 show_schedule_multiple,
9463 &setlist, &showlist);
9464
5bf193a2
AC
9465 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9466Set mode of the step operation."), _("\
9467Show mode of the step operation."), _("\
9468When set, doing a step over a function without debug line information\n\
9469will stop at the first instruction of that function. Otherwise, the\n\
9470function is skipped and the step command stops at a different source line."),
9471 NULL,
920d2a44 9472 show_step_stop_if_no_debug,
5bf193a2 9473 &setlist, &showlist);
ca6724c1 9474
72d0e2c5
YQ
9475 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9476 &can_use_displaced_stepping, _("\
237fc4c9
PA
9477Set debugger's willingness to use displaced stepping."), _("\
9478Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9479If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9480supported by the target architecture. If off, gdb will not use displaced\n\
9481stepping to step over breakpoints, even if such is supported by the target\n\
9482architecture. If auto (which is the default), gdb will use displaced stepping\n\
9483if the target architecture supports it and non-stop mode is active, but will not\n\
9484use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9485 NULL,
9486 show_can_use_displaced_stepping,
9487 &setlist, &showlist);
237fc4c9 9488
b2175913
MS
9489 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9490 &exec_direction, _("Set direction of execution.\n\
9491Options are 'forward' or 'reverse'."),
9492 _("Show direction of execution (forward/reverse)."),
9493 _("Tells gdb whether to execute forward or backward."),
9494 set_exec_direction_func, show_exec_direction_func,
9495 &setlist, &showlist);
9496
6c95b8df
PA
9497 /* Set/show detach-on-fork: user-settable mode. */
9498
9499 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9500Set whether gdb will detach the child of a fork."), _("\
9501Show whether gdb will detach the child of a fork."), _("\
9502Tells gdb whether to detach the child of a fork."),
9503 NULL, NULL, &setlist, &showlist);
9504
03583c20
UW
9505 /* Set/show disable address space randomization mode. */
9506
9507 add_setshow_boolean_cmd ("disable-randomization", class_support,
9508 &disable_randomization, _("\
9509Set disabling of debuggee's virtual address space randomization."), _("\
9510Show disabling of debuggee's virtual address space randomization."), _("\
9511When this mode is on (which is the default), randomization of the virtual\n\
9512address space is disabled. Standalone programs run with the randomization\n\
9513enabled by default on some platforms."),
9514 &set_disable_randomization,
9515 &show_disable_randomization,
9516 &setlist, &showlist);
9517
ca6724c1 9518 /* ptid initializations */
ca6724c1
KB
9519 inferior_ptid = null_ptid;
9520 target_last_wait_ptid = minus_one_ptid;
5231c1fd
PA
9521
9522 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
252fbfc8 9523 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
a07daef3 9524 observer_attach_thread_exit (infrun_thread_thread_exit);
fc1cf338 9525 observer_attach_inferior_exit (infrun_inferior_exit);
4aa995e1
PA
9526
9527 /* Explicitly create without lookup, since that tries to create a
9528 value with a void typed value, and when we get here, gdbarch
9529 isn't initialized yet. At this point, we're quite sure there
9530 isn't another convenience variable of the same name. */
22d2b532 9531 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9532
9533 add_setshow_boolean_cmd ("observer", no_class,
9534 &observer_mode_1, _("\
9535Set whether gdb controls the inferior in observer mode."), _("\
9536Show whether gdb controls the inferior in observer mode."), _("\
9537In observer mode, GDB can get data from the inferior, but not\n\
9538affect its execution. Registers and memory may not be changed,\n\
9539breakpoints may not be set, and the program cannot be interrupted\n\
9540or signalled."),
9541 set_observer_mode,
9542 show_observer_mode,
9543 &setlist,
9544 &showlist);
c906108c 9545}
This page took 2.275084 seconds and 4 git commands to generate.