/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdb_wait.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "cli/cli-script.h"
#include "target.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include <signal.h>
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observer.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "dictionary.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "continuations.h"
#include "interps.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "event-loop.h"

/* Prototypes for local functions */

static void signals_info (char *, int);

static void handle_command (char *, int);

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void resume_cleanups (void *);

static int hook_stop_stub (void *);

static int restore_selected_frame (void *);

static int follow_fork (void);

static int follow_fork_inferior (int follow_child, int detach_fork);

static void follow_inferior_reset_breakpoints (void);

static void set_schedlock_func (char *args, int from_tty,
                                struct cmd_list_element *c);

static int currently_stepping (struct thread_info *tp);

void _initialize_infrun (void);

void nullify_last_target_wait_ptid (void);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: infrun_async(%d)\n",
                            enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such a function.  */
int step_stop_if_no_debug = 0;
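/* "show" callback printing the current value of step_stop_if_no_debug
   (presumably the "step-mode" setting).  */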
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* In asynchronous mode, but simulating synchronous execution.  */

int sync_execution = 0;

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on the 'set follow-fork-mode'
   setting.  */

static int detach_fork = 1;

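/* When non-zero, print debugging output about displaced stepping
   ("set debug displaced").  */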
int debug_displaced = 0;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}

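/* When non-zero, print debugging output about inferior run control
   ("set debug infrun").  */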
unsigned int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}


/* Support for disabling address space randomization.  */

int disable_randomization = 1;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

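/* "set" callback for the disable-randomization setting above; errors
   out if the target cannot disable address space randomization at all.  */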
static void
set_disable_randomization (char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

int non_stop = 0;
static int non_stop_1 = 0;

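/* "set" callback for the non-stop setting above: refuse to change it
   while the inferior is running, otherwise copy the user-visible value
   (non_stop_1) into non_stop.  */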
static void
set_non_stop (char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

int observer_mode = 0;
static int observer_mode_1 = 0;

static void
set_observer_mode (char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = 1;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = 1;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  int newval;

  newval = (!may_insert_breakpoints
            && !may_insert_tracepoints
            && may_insert_fast_tracepoints
            && !may_stop
            && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char *signal_stop;
static unsigned char *signal_print;
static unsigned char *signal_program;

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  This has size GDB_SIGNAL_LAST, to accommodate all
   signals.  */
static unsigned char *signal_catch;

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char *signal_pass;

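/* Set (SET_SIGS) or clear (UNSET_SIGS) the FLAGS entry of every signal
   number that is marked in SIGS; NSIGS is the number of entries.  */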
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* Nonzero means expecting a trace trap
   and should stop the inferior and return silently when it happens.  */

int stop_after_trap;

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the pid/waitstatus of the last event
   returned by target_wait()/deprecated_target_wait_hook().  This
   information is returned by get_last_target_status().  */
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

static void context_switch (ptid_t ptid);

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

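/* The currently selected "set follow-fork-mode" value, one of the
   strings above; GDB follows the parent by default.  */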
static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}


/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static int
follow_fork_inferior (int follow_child, int detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          struct cleanup *old_chain;

          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
            }

          if (info_verbose || debug_infrun)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = pid_to_ptid (ptid_get_pid (child_ptid));

              target_terminal_ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("Detaching after %s from child %s.\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid));
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (ptid_get_pid (child_ptid));

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = child_ptid;
          add_thread (inferior_ptid);
          child_inf->symfile_flags = SYMFILE_NO_READ;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (info_verbose || debug_infrun)
        {
          target_terminal_ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("Attaching after %s %s to child %s.\n"),
                            target_pid_to_str (parent_ptid),
                            has_vforked ? "vfork" : "fork",
                            target_pid_to_str (child_ptid));
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (ptid_get_pid (child_ptid));

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        {
          if (info_verbose || debug_infrun)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = pid_to_ptid (ptid_get_pid (child_ptid));

              target_terminal_ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("Detaching after fork from "
                                  "child %s.\n"),
                                target_pid_to_str (process_ptid));
            }

          target_detach (NULL, 0);
        }

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = child_ptid;
      add_thread (inferior_ptid);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }
    }

  return target_follow_fork (follow_child, detach_fork);
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  int should_resume = 1;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };
  struct interp *command_interp = NULL;

  if (!non_stop)
    {
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (!ptid_equal (wait_ptid, minus_one_ptid)
          && !ptid_equal (inferior_ptid, wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          switch_to_thread (wait_ptid);
          should_resume = 0;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            command_interp = tp->control.command_interp;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->control.command_interp = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previously selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                switch_to_thread (child);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->control.command_interp = command_interp;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
            else
              switch_to_thread (parent);
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (ptid_get_pid (thread->ptid) == pid
      && is_running (thread->ptid)
      && !is_executing (thread->ptid)
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: resuming vfork parent thread %s\n",
                            target_pid_to_str (thread->ptid));

      switch_to_thread (thread->ptid);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  If the user wanted to
         detach from the parent, now is the time.  */

      if (inf->vfork_parent->pending_detach)
        {
          struct thread_info *tp;
          struct cleanup *old_chain;
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          inf->vfork_parent->pending_detach = 0;

          if (!exec)
            {
              /* If we're handling a child exit, then inferior_ptid
                 points at the inferior's pid, not to a thread.  */
              old_chain = save_inferior_ptid ();
              save_current_program_space ();
              save_current_inferior ();
            }
          else
            old_chain = save_current_space_and_thread ();

          /* We're letting loose of the parent.  */
          tp = any_live_thread_of_process (inf->vfork_parent->pid);
          switch_to_thread (tp->ptid);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (debug_infrun || info_verbose)
            {
              target_terminal_ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("Detaching vfork parent process "
                                      "%d after child exec.\n"),
                                    inf->vfork_parent->pid);
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("Detaching vfork parent process "
                                      "%d after child exit.\n"),
                                    inf->vfork_parent->pid);
                }
            }

          target_detach (NULL, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;

          do_cleanups (old_chain);
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = add_program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = inf->vfork_parent->pid;

          /* Break the bonds.  */
          inf->vfork_parent->vfork_child = NULL;
        }
      else
        {
          struct cleanup *old_chain;
          struct program_space *pspace;

          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to null_ptid, so that clone_program_space doesn't want
             to read the selected frame of a dead process.  */
          old_chain = save_inferior_ptid ();
          inferior_ptid = null_ptid;

          /* This inferior is dead, so avoid giving the breakpoints
             module the option to write through to it (cloning a
             program space resets breakpoints).  */
          inf->aspace = NULL;
          inf->pspace = NULL;
          pspace = add_program_space (maybe_new_address_space ());
          set_current_program_space (pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (pspace, inf->vfork_parent->pspace);
          inf->pspace = pspace;
          inf->aspace = pspace->aspace;

          /* Put back inferior_ptid.  We'll continue mourning this
             inferior.  */
          do_cleanups (old_chain);

          resume_parent = inf->vfork_parent->pid;
          /* Break the bonds.  */
          inf->vfork_parent->vfork_child = NULL;
        }

      inf->vfork_parent = NULL;

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          struct cleanup *old_chain = make_cleanup_restore_current_thread ();

          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: resuming vfork parent process %d\n",
                                resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);

          do_cleanups (old_chain);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

/* EXECD_PATHNAME is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, char *execd_pathname)
{
  struct thread_info *th, *tmp;
  struct inferior *inf = current_inferior ();
  int pid = ptid_get_pid (ptid);

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  ALL_THREADS_SAFE (th, tmp)
    if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
      delete_thread (th->ptid);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (inferior_ptid),
                     execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  if (*gdb_sysroot != '\0')
    {
      char *name = exec_file_find (execd_pathname, NULL);

      execd_pathname = alloca (strlen (name) + 1);
      strcpy (execd_pathname, name);
      xfree (name);
    }

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      struct program_space *pspace;

      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      inf = add_inferior (current_inferior ()->pid);
      pspace = add_program_space (maybe_new_address_space ());
      inf->pspace = pspace;
      inf->aspace = pspace->aspace;

      exit_inferior_num_silent (current_inferior ()->num);

      set_current_inferior (inf);
      set_current_program_space (pspace);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* That a.out is now the one to use.  */
  exec_file_attach (execd_pathname, 0);

  /* SYMFILE_DEFER_BP_RESET is used as the proper displacement for PIE
     (Position Independent Executable) main symbol file will get applied by
     solib_create_inferior_hook below.  breakpoint_re_set would fail to insert
     the breakpoints with the zero displacement.  */

  symbol_file_add (execd_pathname,
                   (inf->symfile_flags
                    | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
                   NULL, 0);

  if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
    set_initial_language ();

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *step_over_queue_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  struct address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.  */

static void
set_step_over_info (struct address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
                        "infrun: clear_step_over_info\n");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
}

/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static int
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}


/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   - gdbarch_displaced_step_free_closure provides cleanup.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_insn_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_fixup for details.  */

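/* As a rough illustration only (not a definitive description of the
   interface), the gdbarch hooks above combine roughly like this for a
   single displaced step of a thread stopped at PC; the real sequencing
   and error handling live in displaced_step_prepare and
   displaced_step_fixup below:

     copy = gdbarch_displaced_step_location (gdbarch);
     closure = gdbarch_displaced_step_copy_insn (gdbarch, pc, copy, regcache);
     ... single-step the thread at COPY ...
     gdbarch_displaced_step_fixup (gdbarch, closure, pc, copy, regcache);
     gdbarch_displaced_step_free_closure (gdbarch, closure);  */
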
fc1cf338
PA
1462/* Per-inferior displaced stepping state. */
1463struct displaced_step_inferior_state
1464{
1465 /* Pointer to next in linked list. */
1466 struct displaced_step_inferior_state *next;
1467
1468 /* The process this displaced step state refers to. */
1469 int pid;
1470
3fc8eb30
PA
1471 /* True if preparing a displaced step ever failed. If so, we won't
1472 try displaced stepping for this inferior again. */
1473 int failed_before;
1474
fc1cf338
PA
1475 /* If this is not null_ptid, this is the thread carrying out a
1476 displaced single-step in process PID. This thread's state will
1477 require fixing up once it has completed its step. */
1478 ptid_t step_ptid;
1479
1480 /* The architecture the thread had when we stepped it. */
1481 struct gdbarch *step_gdbarch;
1482
1483 /* The closure provided gdbarch_displaced_step_copy_insn, to be used
1484 for post-step cleanup. */
1485 struct displaced_step_closure *step_closure;
1486
1487 /* The address of the original instruction, and the copy we
1488 made. */
1489 CORE_ADDR step_original, step_copy;
1490
1491 /* Saved contents of copy area. */
1492 gdb_byte *step_saved_copy;
1493};
1494
1495/* The list of states of processes involved in displaced stepping
1496 presently. */
1497static struct displaced_step_inferior_state *displaced_step_inferior_states;
1498
1499/* Get the displaced stepping state of process PID. */
1500
1501static struct displaced_step_inferior_state *
1502get_displaced_stepping_state (int pid)
1503{
1504 struct displaced_step_inferior_state *state;
1505
1506 for (state = displaced_step_inferior_states;
1507 state != NULL;
1508 state = state->next)
1509 if (state->pid == pid)
1510 return state;
1511
1512 return NULL;
1513}
1514
372316f1
PA
1515/* Returns true if any inferior has a thread doing a displaced
1516 step. */
1517
1518static int
1519displaced_step_in_progress_any_inferior (void)
1520{
1521 struct displaced_step_inferior_state *state;
1522
1523 for (state = displaced_step_inferior_states;
1524 state != NULL;
1525 state = state->next)
1526 if (!ptid_equal (state->step_ptid, null_ptid))
1527 return 1;
1528
1529 return 0;
1530}
1531
8f572e5c
PA
1532/* Return true if process PID has a thread doing a displaced step. */
1533
1534static int
1535displaced_step_in_progress (int pid)
1536{
1537 struct displaced_step_inferior_state *displaced;
1538
1539 displaced = get_displaced_stepping_state (pid);
1540 if (displaced != NULL && !ptid_equal (displaced->step_ptid, null_ptid))
1541 return 1;
1542
1543 return 0;
1544}
1545
fc1cf338
PA
1546/* Add a new displaced stepping state for process PID to the displaced
1547 stepping state list, or return a pointer to an already existing
1548 entry, if it already exists. Never returns NULL. */
1549
1550static struct displaced_step_inferior_state *
1551add_displaced_stepping_state (int pid)
1552{
1553 struct displaced_step_inferior_state *state;
1554
1555 for (state = displaced_step_inferior_states;
1556 state != NULL;
1557 state = state->next)
1558 if (state->pid == pid)
1559 return state;
237fc4c9 1560
fc1cf338
PA
1561 state = xcalloc (1, sizeof (*state));
1562 state->pid = pid;
1563 state->next = displaced_step_inferior_states;
1564 displaced_step_inferior_states = state;
237fc4c9 1565
fc1cf338
PA
1566 return state;
1567}
1568
a42244db
YQ
1569/* If the inferior is in displaced stepping, and ADDR equals the starting address
1570 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1571 return NULL. */
1572
1573struct displaced_step_closure*
1574get_displaced_step_closure_by_addr (CORE_ADDR addr)
1575{
1576 struct displaced_step_inferior_state *displaced
1577 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1578
1579 /* If checking the mode of displaced instruction in copy area. */
1580 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1581 && (displaced->step_copy == addr))
1582 return displaced->step_closure;
1583
1584 return NULL;
1585}
1586
fc1cf338 1587/* Remove the displaced stepping state of process PID. */
237fc4c9 1588
fc1cf338
PA
1589static void
1590remove_displaced_stepping_state (int pid)
1591{
1592 struct displaced_step_inferior_state *it, **prev_next_p;
237fc4c9 1593
fc1cf338
PA
1594 gdb_assert (pid != 0);
1595
1596 it = displaced_step_inferior_states;
1597 prev_next_p = &displaced_step_inferior_states;
1598 while (it)
1599 {
1600 if (it->pid == pid)
1601 {
1602 *prev_next_p = it->next;
1603 xfree (it);
1604 return;
1605 }
1606
1607 prev_next_p = &it->next;
1608 it = *prev_next_p;
1609 }
1610}
1611
1612static void
1613infrun_inferior_exit (struct inferior *inf)
1614{
1615 remove_displaced_stepping_state (inf->pid);
1616}
237fc4c9 1617
fff08868
HZ
1618/* If ON, and the architecture supports it, GDB will use displaced
1619 stepping to step over breakpoints. If OFF, or if the architecture
1620 doesn't support it, GDB will instead use the traditional
1621 hold-and-step approach. If AUTO (which is the default), GDB will
1622 decide which technique to use to step over breakpoints depending on
1623 which of all-stop or non-stop mode is active --- displaced stepping
1624 in non-stop mode; hold-and-step in all-stop mode. */
1625
72d0e2c5 1626static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1627
237fc4c9
PA
1628static void
1629show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1630 struct cmd_list_element *c,
1631 const char *value)
1632{
72d0e2c5 1633 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1634 fprintf_filtered (file,
1635 _("Debugger's willingness to use displaced stepping "
1636 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1637 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1638 else
3e43a32a
MS
1639 fprintf_filtered (file,
1640 _("Debugger's willingness to use displaced stepping "
1641 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1642}
1643
fff08868 1644/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1645 over breakpoints of thread TP. */
fff08868 1646
237fc4c9 1647static int
3fc8eb30 1648use_displaced_stepping (struct thread_info *tp)
237fc4c9 1649{
3fc8eb30
PA
1650 struct regcache *regcache = get_thread_regcache (tp->ptid);
1651 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1652 struct displaced_step_inferior_state *displaced_state;
1653
1654 displaced_state = get_displaced_stepping_state (ptid_get_pid (tp->ptid));
1655
fbea99ea
PA
1656 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1657 && target_is_non_stop_p ())
72d0e2c5 1658 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
96429cc8 1659 && gdbarch_displaced_step_copy_insn_p (gdbarch)
3fc8eb30
PA
1660 && find_record_target () == NULL
1661 && (displaced_state == NULL
1662 || !displaced_state->failed_before));
237fc4c9
PA
1663}
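
/* Illustrative sketch, not part of infrun.c: the effect of the user
   setting on the decision above, for a thread TP that needs to step
   over a breakpoint.  The outcomes in the comment assume gdbarch
   support, no record target, and no prior failure.  */
#if 0
static void
example_use_displaced (struct thread_info *tp)
{
  /* "set displaced-stepping on"   -> nonzero
     "set displaced-stepping off"  -> zero
     "set displaced-stepping auto" -> nonzero only if
                                      target_is_non_stop_p ().  */
  if (use_displaced_stepping (tp))
    displaced_step_prepare (tp->ptid);	/* Out-of-line step over.  */
  else
    ;					/* In-line (hold-and-step) step over.  */
}
#endif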
1664
1665/* Clean out any stray displaced stepping state. */
1666static void
fc1cf338 1667displaced_step_clear (struct displaced_step_inferior_state *displaced)
237fc4c9
PA
1668{
1669 /* Indicate that there is no cleanup pending. */
fc1cf338 1670 displaced->step_ptid = null_ptid;
237fc4c9 1671
fc1cf338 1672 if (displaced->step_closure)
237fc4c9 1673 {
fc1cf338
PA
1674 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1675 displaced->step_closure);
1676 displaced->step_closure = NULL;
237fc4c9
PA
1677 }
1678}
1679
1680static void
fc1cf338 1681displaced_step_clear_cleanup (void *arg)
237fc4c9 1682{
fc1cf338
PA
1683 struct displaced_step_inferior_state *state = arg;
1684
1685 displaced_step_clear (state);
237fc4c9
PA
1686}
1687
1688/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1689void
1690displaced_step_dump_bytes (struct ui_file *file,
1691 const gdb_byte *buf,
1692 size_t len)
1693{
1694 int i;
1695
1696 for (i = 0; i < len; i++)
1697 fprintf_unfiltered (file, "%02x ", buf[i]);
1698 fputs_unfiltered ("\n", file);
1699}
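
/* Illustrative usage sketch, not part of infrun.c: dumping a small,
   made-up buffer with the helper above.  It prints each byte as two
   hex digits followed by a space, then a newline.  */
#if 0
static void
example_dump_bytes (void)
{
  const gdb_byte insn[4] = { 0x90, 0x90, 0xcc, 0xc3 };	/* Arbitrary bytes.  */

  displaced_step_dump_bytes (gdb_stdlog, insn, sizeof (insn));
  /* Output: "90 90 cc c3 " followed by a newline.  */
}
#endif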
1700
1701/* Prepare to single-step, using displaced stepping.
1702
1703 Note that we cannot use displaced stepping when we have a signal to
1704 deliver. If we have a signal to deliver and an instruction to step
1705 over, then after the step, there will be no indication from the
1706 target whether the thread entered a signal handler or ignored the
1707 signal and stepped over the instruction successfully --- both cases
1708 result in a simple SIGTRAP. In the first case we mustn't do a
1709 fixup, and in the second case we must --- but we can't tell which.
1710 Comments in the code for 'random signals' in handle_inferior_event
1711 explain how we handle this case instead.
1712
1713 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1714 stepped now; 0 if displaced stepping this thread got queued; or -1
1715 if this instruction can't be displaced stepped. */
1716
237fc4c9 1717static int
3fc8eb30 1718displaced_step_prepare_throw (ptid_t ptid)
237fc4c9 1719{
ad53cd71 1720 struct cleanup *old_cleanups, *ignore_cleanups;
c1e36e3e 1721 struct thread_info *tp = find_thread_ptid (ptid);
237fc4c9
PA
1722 struct regcache *regcache = get_thread_regcache (ptid);
1723 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1724 CORE_ADDR original, copy;
1725 ULONGEST len;
1726 struct displaced_step_closure *closure;
fc1cf338 1727 struct displaced_step_inferior_state *displaced;
9e529e1d 1728 int status;
237fc4c9
PA
1729
1730 /* We should never reach this function if the architecture does not
1731 support displaced stepping. */
1732 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1733
c2829269
PA
1734 /* Nor if the thread isn't meant to step over a breakpoint. */
1735 gdb_assert (tp->control.trap_expected);
1736
c1e36e3e
PA
1737 /* Disable range stepping while executing in the scratch pad. We
1738 want a single-step even if executing the displaced instruction in
1739 the scratch buffer lands within the stepping range (e.g., a
1740 jump/branch). */
1741 tp->control.may_range_step = 0;
1742
fc1cf338
PA
1743 /* We have to displaced step one thread at a time, as we only have
1744 access to a single scratch space per inferior. */
237fc4c9 1745
fc1cf338
PA
1746 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1747
1748 if (!ptid_equal (displaced->step_ptid, null_ptid))
237fc4c9
PA
1749 {
1750 /* Already waiting for a displaced step to finish. Defer this
1751 request and place it in the queue. */
237fc4c9
PA
1752
1753 if (debug_displaced)
1754 fprintf_unfiltered (gdb_stdlog,
c2829269 1755 "displaced: deferring step of %s\n",
237fc4c9
PA
1756 target_pid_to_str (ptid));
1757
c2829269 1758 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1759 return 0;
1760 }
1761 else
1762 {
1763 if (debug_displaced)
1764 fprintf_unfiltered (gdb_stdlog,
1765 "displaced: stepping %s now\n",
1766 target_pid_to_str (ptid));
1767 }
1768
fc1cf338 1769 displaced_step_clear (displaced);
237fc4c9 1770
ad53cd71
PA
1771 old_cleanups = save_inferior_ptid ();
1772 inferior_ptid = ptid;
1773
515630c5 1774 original = regcache_read_pc (regcache);
237fc4c9
PA
1775
1776 copy = gdbarch_displaced_step_location (gdbarch);
1777 len = gdbarch_max_insn_length (gdbarch);
1778
1779 /* Save the original contents of the copy area. */
fc1cf338 1780 displaced->step_saved_copy = xmalloc (len);
ad53cd71 1781 ignore_cleanups = make_cleanup (free_current_contents,
fc1cf338 1782 &displaced->step_saved_copy);
9e529e1d
JK
1783 status = target_read_memory (copy, displaced->step_saved_copy, len);
1784 if (status != 0)
1785 throw_error (MEMORY_ERROR,
1786 _("Error accessing memory address %s (%s) for "
1787 "displaced-stepping scratch space."),
1788 paddress (gdbarch, copy), safe_strerror (status));
237fc4c9
PA
1789 if (debug_displaced)
1790 {
5af949e3
UW
1791 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1792 paddress (gdbarch, copy));
fc1cf338
PA
1793 displaced_step_dump_bytes (gdb_stdlog,
1794 displaced->step_saved_copy,
1795 len);
237fc4c9
PA
1796 };
1797
1798 closure = gdbarch_displaced_step_copy_insn (gdbarch,
ad53cd71 1799 original, copy, regcache);
7f03bd92
PA
1800 if (closure == NULL)
1801 {
1802 /* The architecture doesn't know how or want to displaced step
1803 this instruction or instruction sequence. Fall back to
1804 stepping over the breakpoint in-line. */
1805 do_cleanups (old_cleanups);
1806 return -1;
1807 }
237fc4c9 1808
9f5a595d
UW
1809 /* Save the information we need to fix things up if the step
1810 succeeds. */
fc1cf338
PA
1811 displaced->step_ptid = ptid;
1812 displaced->step_gdbarch = gdbarch;
1813 displaced->step_closure = closure;
1814 displaced->step_original = original;
1815 displaced->step_copy = copy;
9f5a595d 1816
fc1cf338 1817 make_cleanup (displaced_step_clear_cleanup, displaced);
237fc4c9
PA
1818
1819 /* Resume execution at the copy. */
515630c5 1820 regcache_write_pc (regcache, copy);
237fc4c9 1821
ad53cd71
PA
1822 discard_cleanups (ignore_cleanups);
1823
1824 do_cleanups (old_cleanups);
237fc4c9
PA
1825
1826 if (debug_displaced)
5af949e3
UW
1827 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1828 paddress (gdbarch, copy));
237fc4c9 1829
237fc4c9
PA
1830 return 1;
1831}
1832
3fc8eb30
PA
1833/* Wrapper for displaced_step_prepare_throw that disables further
1834 attempts at displaced stepping if we get a memory error. */
1835
1836static int
1837displaced_step_prepare (ptid_t ptid)
1838{
1839 int prepared = -1;
1840
1841 TRY
1842 {
1843 prepared = displaced_step_prepare_throw (ptid);
1844 }
1845 CATCH (ex, RETURN_MASK_ERROR)
1846 {
1847 struct displaced_step_inferior_state *displaced_state;
1848
1849 if (ex.error != MEMORY_ERROR)
1850 throw_exception (ex);
1851
1852 if (debug_infrun)
1853 {
1854 fprintf_unfiltered (gdb_stdlog,
1855 "infrun: disabling displaced stepping: %s\n",
1856 ex.message);
1857 }
1858
1859 /* Be verbose if "set displaced-stepping" is "on", silent if
1860 "auto". */
1861 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1862 {
fd7dcb94 1863 warning (_("disabling displaced stepping: %s"),
3fc8eb30
PA
1864 ex.message);
1865 }
1866
1867 /* Disable further displaced stepping attempts. */
1868 displaced_state
1869 = get_displaced_stepping_state (ptid_get_pid (ptid));
1870 displaced_state->failed_before = 1;
1871 }
1872 END_CATCH
1873
1874 return prepared;
1875}
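
/* Illustrative sketch, not part of infrun.c: how the three possible
   return values of displaced_step_prepare are handled; this mirrors
   the real caller in resume () further below.  */
#if 0
static void
example_prepare_result (ptid_t ptid)
{
  int prepared = displaced_step_prepare (ptid);

  if (prepared == 0)
    ;	/* Scratch pad busy: the thread was queued; don't resume it yet.  */
  else if (prepared < 0)
    ;	/* Can't displaced-step this instruction (or preparation failed):
	   fall back to stepping over the breakpoint in-line.  */
  else
    ;	/* PC now points at the scratch copy: single-step the thread.  */
}
#endif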
1876
237fc4c9 1877static void
3e43a32a
MS
1878write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1879 const gdb_byte *myaddr, int len)
237fc4c9
PA
1880{
1881 struct cleanup *ptid_cleanup = save_inferior_ptid ();
abbb1732 1882
237fc4c9
PA
1883 inferior_ptid = ptid;
1884 write_memory (memaddr, myaddr, len);
1885 do_cleanups (ptid_cleanup);
1886}
1887
e2d96639
YQ
1888/* Restore the contents of the copy area for thread PTID. */
1889
1890static void
1891displaced_step_restore (struct displaced_step_inferior_state *displaced,
1892 ptid_t ptid)
1893{
1894 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1895
1896 write_memory_ptid (ptid, displaced->step_copy,
1897 displaced->step_saved_copy, len);
1898 if (debug_displaced)
1899 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1900 target_pid_to_str (ptid),
1901 paddress (displaced->step_gdbarch,
1902 displaced->step_copy));
1903}
1904
372316f1
PA
1905/* If we displaced stepped an instruction successfully, adjust
1906 registers and memory to yield the same effect the instruction would
1907 have had if we had executed it at its original address, and return
1908 1. If the instruction didn't complete, relocate the PC and return
1909 -1. If the thread wasn't displaced stepping, return 0. */
1910
1911static int
2ea28649 1912displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
237fc4c9
PA
1913{
1914 struct cleanup *old_cleanups;
fc1cf338
PA
1915 struct displaced_step_inferior_state *displaced
1916 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
372316f1 1917 int ret;
fc1cf338
PA
1918
1919 /* Was any thread of this process doing a displaced step? */
1920 if (displaced == NULL)
372316f1 1921 return 0;
237fc4c9
PA
1922
1923 /* Was this event for the pid we displaced? */
fc1cf338
PA
1924 if (ptid_equal (displaced->step_ptid, null_ptid)
1925 || ! ptid_equal (displaced->step_ptid, event_ptid))
372316f1 1926 return 0;
237fc4c9 1927
fc1cf338 1928 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
237fc4c9 1929
e2d96639 1930 displaced_step_restore (displaced, displaced->step_ptid);
237fc4c9 1931
cb71640d
PA
1932 /* Fixup may need to read memory/registers. Switch to the thread
1933 that we're fixing up. Also, target_stopped_by_watchpoint checks
1934 the current thread. */
1935 switch_to_thread (event_ptid);
1936
237fc4c9 1937 /* Did the instruction complete successfully? */
cb71640d
PA
1938 if (signal == GDB_SIGNAL_TRAP
1939 && !(target_stopped_by_watchpoint ()
1940 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1941 || target_have_steppable_watchpoint)))
237fc4c9
PA
1942 {
1943 /* Fix up the resulting state. */
fc1cf338
PA
1944 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1945 displaced->step_closure,
1946 displaced->step_original,
1947 displaced->step_copy,
1948 get_thread_regcache (displaced->step_ptid));
372316f1 1949 ret = 1;
237fc4c9
PA
1950 }
1951 else
1952 {
1953 /* Since the instruction didn't complete, all we can do is
1954 relocate the PC. */
515630c5
UW
1955 struct regcache *regcache = get_thread_regcache (event_ptid);
1956 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 1957
fc1cf338 1958 pc = displaced->step_original + (pc - displaced->step_copy);
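      /* Worked example (illustrative, made-up addresses): if the original
         instruction lived at 0x400100, the scratch copy at 0x601000, and
         the thread stopped at 0x601004, the relocated PC becomes
         0x400100 + (0x601004 - 0x601000) = 0x400104.  */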
515630c5 1959 regcache_write_pc (regcache, pc);
372316f1 1960 ret = -1;
237fc4c9
PA
1961 }
1962
1963 do_cleanups (old_cleanups);
1964
fc1cf338 1965 displaced->step_ptid = null_ptid;
372316f1
PA
1966
1967 return ret;
c2829269 1968}
1c5cfe86 1969
4d9d9d04
PA
1970/* Data to be passed around while handling an event. This data is
1971 discarded between events. */
1972struct execution_control_state
1973{
1974 ptid_t ptid;
1975 /* The thread that got the event, if this was a thread event; NULL
1976 otherwise. */
1977 struct thread_info *event_thread;
1978
1979 struct target_waitstatus ws;
1980 int stop_func_filled_in;
1981 CORE_ADDR stop_func_start;
1982 CORE_ADDR stop_func_end;
1983 const char *stop_func_name;
1984 int wait_some_more;
1985
1986 /* True if the event thread hit the single-step breakpoint of
1987 another thread. Thus the event doesn't cause a stop, the thread
1988 needs to be single-stepped past the single-step breakpoint before
1989 we can switch back to the original stepping thread. */
1990 int hit_singlestep_breakpoint;
1991};
1992
1993/* Clear ECS and set it to point at TP. */
c2829269
PA
1994
1995static void
4d9d9d04
PA
1996reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1997{
1998 memset (ecs, 0, sizeof (*ecs));
1999 ecs->event_thread = tp;
2000 ecs->ptid = tp->ptid;
2001}
2002
2003static void keep_going_pass_signal (struct execution_control_state *ecs);
2004static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 2005static int keep_going_stepped_thread (struct thread_info *tp);
4d9d9d04 2006static int thread_still_needs_step_over (struct thread_info *tp);
3fc8eb30 2007static void stop_all_threads (void);
4d9d9d04
PA
2008
2009/* Are there any pending step-over requests? If so, run all we can
2010 now and return true. Otherwise, return false. */
2011
2012static int
c2829269
PA
2013start_step_over (void)
2014{
2015 struct thread_info *tp, *next;
2016
372316f1
PA
2017 /* Don't start a new step-over if we already have an in-line
2018 step-over operation ongoing. */
2019 if (step_over_info_valid_p ())
2020 return 0;
2021
c2829269 2022 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 2023 {
4d9d9d04
PA
2024 struct execution_control_state ecss;
2025 struct execution_control_state *ecs = &ecss;
372316f1
PA
2026 enum step_over_what step_what;
2027 int must_be_in_line;
c2829269
PA
2028
2029 next = thread_step_over_chain_next (tp);
237fc4c9 2030
c2829269
PA
2031 /* If this inferior already has a displaced step in process,
2032 don't start a new one. */
4d9d9d04 2033 if (displaced_step_in_progress (ptid_get_pid (tp->ptid)))
c2829269
PA
2034 continue;
2035
372316f1
PA
2036 step_what = thread_still_needs_step_over (tp);
2037 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2038 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 2039 && !use_displaced_stepping (tp)));
372316f1
PA
2040
2041 /* We currently stop all threads of all processes to step-over
2042 in-line. If we need to start a new in-line step-over, let
2043 any pending displaced steps finish first. */
2044 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
2045 return 0;
2046
c2829269
PA
2047 thread_step_over_chain_remove (tp);
2048
2049 if (step_over_queue_head == NULL)
2050 {
2051 if (debug_infrun)
2052 fprintf_unfiltered (gdb_stdlog,
2053 "infrun: step-over queue now empty\n");
2054 }
2055
372316f1
PA
2056 if (tp->control.trap_expected
2057 || tp->resumed
2058 || tp->executing)
ad53cd71 2059 {
4d9d9d04
PA
2060 internal_error (__FILE__, __LINE__,
2061 "[%s] has inconsistent state: "
372316f1 2062 "trap_expected=%d, resumed=%d, executing=%d\n",
4d9d9d04
PA
2063 target_pid_to_str (tp->ptid),
2064 tp->control.trap_expected,
372316f1 2065 tp->resumed,
4d9d9d04 2066 tp->executing);
ad53cd71 2067 }
1c5cfe86 2068
4d9d9d04
PA
2069 if (debug_infrun)
2070 fprintf_unfiltered (gdb_stdlog,
2071 "infrun: resuming [%s] for step-over\n",
2072 target_pid_to_str (tp->ptid));
2073
2074 /* keep_going_pass_signal skips the step-over if the breakpoint
2075 is no longer inserted. In all-stop, we want to keep looking
2076 for a thread that needs a step-over instead of resuming TP,
2077 because we wouldn't be able to resume anything else until the
2078 target stops again. In non-stop, the resume always resumes
2079 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2080 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2081 continue;
8550d3b3 2082
4d9d9d04
PA
2083 switch_to_thread (tp->ptid);
2084 reset_ecs (ecs, tp);
2085 keep_going_pass_signal (ecs);
1c5cfe86 2086
4d9d9d04
PA
2087 if (!ecs->wait_some_more)
2088 error (_("Command aborted."));
1c5cfe86 2089
372316f1
PA
2090 gdb_assert (tp->resumed);
2091
2092 /* If we started a new in-line step-over, we're done. */
2093 if (step_over_info_valid_p ())
2094 {
2095 gdb_assert (tp->control.trap_expected);
2096 return 1;
2097 }
2098
fbea99ea 2099 if (!target_is_non_stop_p ())
4d9d9d04
PA
2100 {
2101 /* On all-stop, shouldn't have resumed unless we needed a
2102 step over. */
2103 gdb_assert (tp->control.trap_expected
2104 || tp->step_after_step_resume_breakpoint);
2105
2106 /* With remote targets (at least), in all-stop, we can't
2107 issue any further remote commands until the program stops
2108 again. */
2109 return 1;
1c5cfe86 2110 }
c2829269 2111
4d9d9d04
PA
2112 /* Either the thread no longer needed a step-over, or a new
2113 displaced stepping sequence started. Even in the latter
2114 case, continue looking. Maybe we can also start another
2115 displaced step on a thread of other process. */
237fc4c9 2116 }
4d9d9d04
PA
2117
2118 return 0;
237fc4c9
PA
2119}
2120
5231c1fd
PA
2121/* Update global variables holding ptids to hold NEW_PTID if they were
2122 holding OLD_PTID. */
2123static void
2124infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2125{
2126 struct displaced_step_request *it;
fc1cf338 2127 struct displaced_step_inferior_state *displaced;
5231c1fd
PA
2128
2129 if (ptid_equal (inferior_ptid, old_ptid))
2130 inferior_ptid = new_ptid;
2131
fc1cf338
PA
2132 for (displaced = displaced_step_inferior_states;
2133 displaced;
2134 displaced = displaced->next)
2135 {
2136 if (ptid_equal (displaced->step_ptid, old_ptid))
2137 displaced->step_ptid = new_ptid;
fc1cf338 2138 }
5231c1fd
PA
2139}
2140
237fc4c9
PA
2141\f
2142/* Resuming. */
c906108c
SS
2143
2144/* Things to clean up if we QUIT out of resume (). */
c906108c 2145static void
74b7792f 2146resume_cleanups (void *ignore)
c906108c 2147{
34b7e8a6
PA
2148 if (!ptid_equal (inferior_ptid, null_ptid))
2149 delete_single_step_breakpoints (inferior_thread ());
7c16b83e 2150
c906108c
SS
2151 normal_stop ();
2152}
2153
53904c9e
AC
2154static const char schedlock_off[] = "off";
2155static const char schedlock_on[] = "on";
2156static const char schedlock_step[] = "step";
40478521 2157static const char *const scheduler_enums[] = {
ef346e04
AC
2158 schedlock_off,
2159 schedlock_on,
2160 schedlock_step,
2161 NULL
2162};
920d2a44
AC
2163static const char *scheduler_mode = schedlock_off;
2164static void
2165show_scheduler_mode (struct ui_file *file, int from_tty,
2166 struct cmd_list_element *c, const char *value)
2167{
3e43a32a
MS
2168 fprintf_filtered (file,
2169 _("Mode for locking scheduler "
2170 "during execution is \"%s\".\n"),
920d2a44
AC
2171 value);
2172}
c906108c
SS
2173
2174static void
96baa820 2175set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
c906108c 2176{
eefe576e
AC
2177 if (!target_can_lock_scheduler)
2178 {
2179 scheduler_mode = schedlock_off;
2180 error (_("Target '%s' cannot support this command."), target_shortname);
2181 }
c906108c
SS
2182}
2183
d4db2f36
PA
2184/* True if execution commands resume all threads of all processes by
2185 default; otherwise, resume only threads of the current inferior
2186 process. */
2187int sched_multi = 0;
2188
2facfe5c
DD
2189/* Try to setup for software single stepping over the specified location.
2190 Return 1 if target_resume() should use hardware single step.
2191
2192 GDBARCH the current gdbarch.
2193 PC the location to step over. */
2194
2195static int
2196maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2197{
2198 int hw_step = 1;
2199
f02253f1
HZ
2200 if (execution_direction == EXEC_FORWARD
2201 && gdbarch_software_single_step_p (gdbarch)
99e40580 2202 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
2facfe5c 2203 {
99e40580 2204 hw_step = 0;
2facfe5c
DD
2205 }
2206 return hw_step;
2207}
c906108c 2208
f3263aa4
PA
2209/* See infrun.h. */
2210
09cee04b
PA
2211ptid_t
2212user_visible_resume_ptid (int step)
2213{
f3263aa4 2214 ptid_t resume_ptid;
09cee04b 2215
09cee04b
PA
2216 if (non_stop)
2217 {
2218 /* With non-stop mode on, threads are always handled
2219 individually. */
2220 resume_ptid = inferior_ptid;
2221 }
2222 else if ((scheduler_mode == schedlock_on)
03d46957 2223 || (scheduler_mode == schedlock_step && step))
09cee04b 2224 {
f3263aa4
PA
2225 /* User-settable 'scheduler' mode requires solo thread
2226 resume. */
09cee04b
PA
2227 resume_ptid = inferior_ptid;
2228 }
f3263aa4
PA
2229 else if (!sched_multi && target_supports_multi_process ())
2230 {
2231 /* Resume all threads of the current process (and none of other
2232 processes). */
2233 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2234 }
2235 else
2236 {
2237 /* Resume all threads of all processes. */
2238 resume_ptid = RESUME_ALL;
2239 }
09cee04b
PA
2240
2241 return resume_ptid;
2242}
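
/* Illustrative examples, not part of infrun.c, of what the function
   above returns: with non_stop set, or with scheduler locking in
   effect, it returns inferior_ptid (resume only the current thread);
   with sched_multi clear on a multi-process-capable target it returns
   a wildcard covering every thread of the current process; otherwise
   it returns RESUME_ALL, i.e. every thread of every process.  */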
2243
fbea99ea
PA
2244/* Return a ptid representing the set of threads that we will resume,
2245 in the perspective of the target, assuming run control handling
2246 does not require leaving some threads stopped (e.g., stepping past
2247 breakpoint). USER_STEP indicates whether we're about to start the
2248 target for a stepping command. */
2249
2250static ptid_t
2251internal_resume_ptid (int user_step)
2252{
2253 /* In non-stop, we always control threads individually. Note that
2254 the target may always work in non-stop mode even with "set
2255 non-stop off", in which case user_visible_resume_ptid could
2256 return a wildcard ptid. */
2257 if (target_is_non_stop_p ())
2258 return inferior_ptid;
2259 else
2260 return user_visible_resume_ptid (user_step);
2261}
2262
64ce06e4
PA
2263/* Wrapper for target_resume, that handles infrun-specific
2264 bookkeeping. */
2265
2266static void
2267do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2268{
2269 struct thread_info *tp = inferior_thread ();
2270
2271 /* Install inferior's terminal modes. */
2272 target_terminal_inferior ();
2273
2274 /* Avoid confusing the next resume, if the next stop/resume
2275 happens to apply to another thread. */
2276 tp->suspend.stop_signal = GDB_SIGNAL_0;
2277
8f572e5c
PA
2278 /* Advise target which signals may be handled silently.
2279
2280 If we have removed breakpoints because we are stepping over one
2281 in-line (in any thread), we need to receive all signals to avoid
2282 accidentally skipping a breakpoint during execution of a signal
2283 handler.
2284
2285 Likewise if we're displaced stepping, otherwise a trap for a
2286 breakpoint in a signal handler might be confused with the
2287 displaced step finishing. We don't make the displaced_step_fixup
2288 step distinguish the cases instead, because:
2289
2290 - a backtrace while stopped in the signal handler would show the
2291 scratch pad as a frame older than the signal handler, instead of
2292 the real mainline code.
2293
2294 - when the thread is later resumed, the signal handler would
2295 return to the scratch pad area, which would no longer be
2296 valid. */
2297 if (step_over_info_valid_p ()
2298 || displaced_step_in_progress (ptid_get_pid (tp->ptid)))
64ce06e4
PA
2299 target_pass_signals (0, NULL);
2300 else
2301 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2302
2303 target_resume (resume_ptid, step, sig);
2304}
2305
c906108c
SS
2306/* Resume the inferior, but allow a QUIT. This is useful if the user
2307 wants to interrupt some lengthy single-stepping operation
2308 (for child processes, the SIGINT goes to the inferior, and so
2309 we get a SIGINT random_signal, but for remote debugging and perhaps
2310 other targets, that's not true).
2311
c906108c
SS
2312 SIG is the signal to give the inferior (zero for none). */
2313void
64ce06e4 2314resume (enum gdb_signal sig)
c906108c 2315{
74b7792f 2316 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
515630c5
UW
2317 struct regcache *regcache = get_current_regcache ();
2318 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4e1c45ea 2319 struct thread_info *tp = inferior_thread ();
515630c5 2320 CORE_ADDR pc = regcache_read_pc (regcache);
6c95b8df 2321 struct address_space *aspace = get_regcache_aspace (regcache);
b0f16a3e 2322 ptid_t resume_ptid;
856e7dd6
PA
2323 /* This represents the user's step vs continue request. When
2324 deciding whether "set scheduler-locking step" applies, it's the
2325 user's intention that counts. */
2326 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2327 /* This represents what we'll actually request the target to do.
2328 This can decay from a step to a continue, if e.g., we need to
2329 implement single-stepping with breakpoints (software
2330 single-step). */
6b403daa 2331 int step;
c7e8a53c 2332
c2829269
PA
2333 gdb_assert (!thread_is_in_step_over_chain (tp));
2334
c906108c
SS
2335 QUIT;
2336
372316f1
PA
2337 if (tp->suspend.waitstatus_pending_p)
2338 {
2339 if (debug_infrun)
2340 {
2341 char *statstr;
2342
2343 statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
2344 fprintf_unfiltered (gdb_stdlog,
2345 "infrun: resume: thread %s has pending wait status %s "
2346 "(currently_stepping=%d).\n",
2347 target_pid_to_str (tp->ptid), statstr,
2348 currently_stepping (tp));
2349 xfree (statstr);
2350 }
2351
2352 tp->resumed = 1;
2353
2354 /* FIXME: What should we do if we are supposed to resume this
2355 thread with a signal? Maybe we should maintain a queue of
2356 pending signals to deliver. */
2357 if (sig != GDB_SIGNAL_0)
2358 {
fd7dcb94 2359 warning (_("Couldn't deliver signal %s to %s."),
372316f1
PA
2360 gdb_signal_to_name (sig), target_pid_to_str (tp->ptid));
2361 }
2362
2363 tp->suspend.stop_signal = GDB_SIGNAL_0;
2364 discard_cleanups (old_cleanups);
2365
2366 if (target_can_async_p ())
2367 target_async (1);
2368 return;
2369 }
2370
2371 tp->stepped_breakpoint = 0;
2372
6b403daa
PA
2373 /* Depends on stepped_breakpoint. */
2374 step = currently_stepping (tp);
2375
74609e71
YQ
2376 if (current_inferior ()->waiting_for_vfork_done)
2377 {
48f9886d
PA
2378 /* Don't try to single-step a vfork parent that is waiting for
2379 the child to get out of the shared memory region (by exec'ing
2380 or exiting). This is particularly important on software
2381 single-step archs, as the child process would trip on the
2382 software single step breakpoint inserted for the parent
2383 process. Since the parent will not actually execute any
2384 instruction until the child is out of the shared region (such
2385 are vfork's semantics), it is safe to simply continue it.
2386 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2387 the parent, and tell it to `keep_going', which automatically
2388 re-sets it stepping. */
74609e71
YQ
2389 if (debug_infrun)
2390 fprintf_unfiltered (gdb_stdlog,
2391 "infrun: resume : clear step\n");
a09dd441 2392 step = 0;
74609e71
YQ
2393 }
2394
527159b7 2395 if (debug_infrun)
237fc4c9 2396 fprintf_unfiltered (gdb_stdlog,
c9737c08 2397 "infrun: resume (step=%d, signal=%s), "
0d9a9a5f 2398 "trap_expected=%d, current thread [%s] at %s\n",
c9737c08
PA
2399 step, gdb_signal_to_symbol_string (sig),
2400 tp->control.trap_expected,
0d9a9a5f
PA
2401 target_pid_to_str (inferior_ptid),
2402 paddress (gdbarch, pc));
c906108c 2403
c2c6d25f
JM
2404 /* Normally, by the time we reach `resume', the breakpoints are either
2405 removed or inserted, as appropriate. The exception is if we're sitting
2406 at a permanent breakpoint; we need to step over it, but permanent
2407 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2408 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2409 {
af48d08f
PA
2410 if (sig != GDB_SIGNAL_0)
2411 {
2412 /* We have a signal to pass to the inferior. The resume
2413 may, or may not take us to the signal handler. If this
2414 is a step, we'll need to stop in the signal handler, if
2415 there's one, (if the target supports stepping into
2416 handlers), or in the next mainline instruction, if
2417 there's no handler. If this is a continue, we need to be
2418 sure to run the handler with all breakpoints inserted.
2419 In all cases, set a breakpoint at the current address
2420 (where the handler returns to), and once that breakpoint
2421 is hit, resume skipping the permanent breakpoint. If
2422 that breakpoint isn't hit, then we've stepped into the
2423 signal handler (or hit some other event). We'll delete
2424 the step-resume breakpoint then. */
2425
2426 if (debug_infrun)
2427 fprintf_unfiltered (gdb_stdlog,
2428 "infrun: resume: skipping permanent breakpoint, "
2429 "deliver signal first\n");
2430
2431 clear_step_over_info ();
2432 tp->control.trap_expected = 0;
2433
2434 if (tp->control.step_resume_breakpoint == NULL)
2435 {
2436 /* Set a "high-priority" step-resume, as we don't want
2437 user breakpoints at PC to trigger (again) when this
2438 hits. */
2439 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2440 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2441
2442 tp->step_after_step_resume_breakpoint = step;
2443 }
2444
2445 insert_breakpoints ();
2446 }
2447 else
2448 {
2449 /* There's no signal to pass, we can go ahead and skip the
2450 permanent breakpoint manually. */
2451 if (debug_infrun)
2452 fprintf_unfiltered (gdb_stdlog,
2453 "infrun: resume: skipping permanent breakpoint\n");
2454 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2455 /* Update pc to reflect the new address from which we will
2456 execute instructions. */
2457 pc = regcache_read_pc (regcache);
2458
2459 if (step)
2460 {
2461 /* We've already advanced the PC, so the stepping part
2462 is done. Now we need to arrange for a trap to be
2463 reported to handle_inferior_event. Set a breakpoint
2464 at the current PC, and run to it. Don't update
2465 prev_pc, because if we end up in
44a1ee51
PA
2466 switch_back_to_stepped_thread, we want the "expected
2467 thread advanced also" branch to be taken. IOW, we
2468 don't want this thread to step further from PC
af48d08f 2469 (overstep). */
1ac806b8 2470 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2471 insert_single_step_breakpoint (gdbarch, aspace, pc);
2472 insert_breakpoints ();
2473
fbea99ea 2474 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2475 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
af48d08f 2476 discard_cleanups (old_cleanups);
372316f1 2477 tp->resumed = 1;
af48d08f
PA
2478 return;
2479 }
2480 }
6d350bb5 2481 }
c2c6d25f 2482
c1e36e3e
PA
2483 /* If we have a breakpoint to step over, make sure to do a single
2484 step only. Same if we have software watchpoints. */
2485 if (tp->control.trap_expected || bpstat_should_step ())
2486 tp->control.may_range_step = 0;
2487
237fc4c9
PA
2488 /* If enabled, step over breakpoints by executing a copy of the
2489 instruction at a different address.
2490
2491 We can't use displaced stepping when we have a signal to deliver;
2492 the comments for displaced_step_prepare explain why. The
2493 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2494 signals' explain what we do instead.
2495
2496 We can't use displaced stepping when we are waiting for a vfork_done
2497 event, as displaced stepping breaks the vfork child similarly to a
2498 single-step software breakpoint. */
3fc8eb30
PA
2499 if (tp->control.trap_expected
2500 && use_displaced_stepping (tp)
cb71640d 2501 && !step_over_info_valid_p ()
a493e3e2 2502 && sig == GDB_SIGNAL_0
74609e71 2503 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2504 {
3fc8eb30 2505 int prepared = displaced_step_prepare (inferior_ptid);
fc1cf338 2506
3fc8eb30 2507 if (prepared == 0)
d56b7306 2508 {
4d9d9d04
PA
2509 if (debug_infrun)
2510 fprintf_unfiltered (gdb_stdlog,
2511 "Got placed in step-over queue\n");
2512
2513 tp->control.trap_expected = 0;
d56b7306
VP
2514 discard_cleanups (old_cleanups);
2515 return;
2516 }
3fc8eb30
PA
2517 else if (prepared < 0)
2518 {
2519 /* Fall back to stepping over the breakpoint in-line. */
2520
2521 if (target_is_non_stop_p ())
2522 stop_all_threads ();
2523
2524 set_step_over_info (get_regcache_aspace (regcache),
2525 regcache_read_pc (regcache), 0);
2526
2527 step = maybe_software_singlestep (gdbarch, pc);
2528
2529 insert_breakpoints ();
2530 }
2531 else if (prepared > 0)
2532 {
2533 struct displaced_step_inferior_state *displaced;
99e40580 2534
3fc8eb30
PA
2535 /* Update pc to reflect the new address from which we will
2536 execute instructions due to displaced stepping. */
2537 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
ca7781d2 2538
3fc8eb30
PA
2539 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2540 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2541 displaced->step_closure);
2542 }
237fc4c9
PA
2543 }
2544
2facfe5c 2545 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2546 else if (step)
2facfe5c 2547 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2548
30852783
UW
2549 /* Currently, our software single-step implementation leads to different
2550 results than hardware single-stepping in one situation: when stepping
2551 into delivering a signal which has an associated signal handler,
2552 hardware single-step will stop at the first instruction of the handler,
2553 while software single-step will simply skip execution of the handler.
2554
2555 For now, this difference in behavior is accepted since there is no
2556 easy way to actually implement single-stepping into a signal handler
2557 without kernel support.
2558
2559 However, there is one scenario where this difference leads to follow-on
2560 problems: if we're stepping off a breakpoint by removing all breakpoints
2561 and then single-stepping. In this case, the software single-step
2562 behavior means that even if there is a *breakpoint* in the signal
2563 handler, GDB still would not stop.
2564
2565 Fortunately, we can at least fix this particular issue. We detect
2566 here the case where we are about to deliver a signal while software
2567 single-stepping with breakpoints removed. In this situation, we
2568 revert the decisions to remove all breakpoints and insert single-
2569 step breakpoints, and instead we install a step-resume breakpoint
2570 at the current address, deliver the signal without stepping, and
2571 once we arrive back at the step-resume breakpoint, actually step
2572 over the breakpoint we originally wanted to step over. */
34b7e8a6 2573 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2574 && sig != GDB_SIGNAL_0
2575 && step_over_info_valid_p ())
30852783
UW
2576 {
2577 /* If we have nested signals or a pending signal is delivered
2578 immediately after a handler returns, we might already have
2579 a step-resume breakpoint set on the earlier handler. We cannot
2580 set another step-resume breakpoint; just continue on until the
2581 original breakpoint is hit. */
2582 if (tp->control.step_resume_breakpoint == NULL)
2583 {
2c03e5be 2584 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2585 tp->step_after_step_resume_breakpoint = 1;
2586 }
2587
34b7e8a6 2588 delete_single_step_breakpoints (tp);
30852783 2589
31e77af2 2590 clear_step_over_info ();
30852783 2591 tp->control.trap_expected = 0;
31e77af2
PA
2592
2593 insert_breakpoints ();
30852783
UW
2594 }
2595
b0f16a3e
SM
2596 /* If STEP is set, it's a request to use hardware stepping
2597 facilities. But in that case, we should never
2598 use singlestep breakpoint. */
34b7e8a6 2599 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2600
fbea99ea 2601 /* Decide the set of threads to ask the target to resume. */
34b7e8a6 2602 if ((step || thread_has_single_step_breakpoints_set (tp))
b0f16a3e
SM
2603 && tp->control.trap_expected)
2604 {
2605 /* We're allowing a thread to run past a breakpoint it has
2606 hit, by single-stepping the thread with the breakpoint
2607 removed. In which case, we need to single-step only this
2608 thread, and keep others stopped, as they can miss this
2609 breakpoint if allowed to run. */
2610 resume_ptid = inferior_ptid;
2611 }
fbea99ea
PA
2612 else
2613 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2614
7f5ef605
PA
2615 if (execution_direction != EXEC_REVERSE
2616 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2617 {
372316f1
PA
2618 /* There are two cases where we currently need to step a
2619 breakpoint instruction when we have a signal to deliver:
2620
2621 - See handle_signal_stop where we handle random signals that
2622 could take us out of the stepping range. Normally, in
2623 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2624 signal handler with a breakpoint at PC, but there are cases
2625 where we should _always_ single-step, even if we have a
2626 step-resume breakpoint, like when a software watchpoint is
2627 set. Assuming single-stepping and delivering a signal at the
2628 same time would take us to the signal handler, then we could
2629 have removed the breakpoint at PC to step over it. However,
2630 some hardware step targets (like e.g., Mac OS) can't step
2631 into signal handlers, and for those, we need to leave the
2632 breakpoint at PC inserted, as otherwise if the handler
2633 recurses and executes PC again, it'll miss the breakpoint.
2634 So we leave the breakpoint inserted anyway, but we need to
2635 record that we tried to step a breakpoint instruction, so
372316f1
PA
2636 that adjust_pc_after_break doesn't end up confused.
2637
2638 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2639 in one thread after another thread that was stepping had been
2640 momentarily paused for a step-over. When we re-resume the
2641 stepping thread, it may be resumed from that address with a
2642 breakpoint that hasn't trapped yet. Seen with
2643 gdb.threads/non-stop-fair-events.exp, on targets that don't
2644 do displaced stepping. */
2645
2646 if (debug_infrun)
2647 fprintf_unfiltered (gdb_stdlog,
2648 "infrun: resume: [%s] stepped breakpoint\n",
2649 target_pid_to_str (tp->ptid));
7f5ef605
PA
2650
2651 tp->stepped_breakpoint = 1;
2652
b0f16a3e
SM
2653 /* Most targets can step a breakpoint instruction, thus
2654 executing it normally. But if this one cannot, just
2655 continue and we will hit it anyway. */
7f5ef605 2656 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2657 step = 0;
2658 }
ef5cf84e 2659
b0f16a3e 2660 if (debug_displaced
cb71640d 2661 && tp->control.trap_expected
3fc8eb30 2662 && use_displaced_stepping (tp)
cb71640d 2663 && !step_over_info_valid_p ())
b0f16a3e 2664 {
d9b67d9f 2665 struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
b0f16a3e
SM
2666 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2667 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2668 gdb_byte buf[4];
2669
2670 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2671 paddress (resume_gdbarch, actual_pc));
2672 read_memory (actual_pc, buf, sizeof (buf));
2673 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2674 }
237fc4c9 2675
b0f16a3e
SM
2676 if (tp->control.may_range_step)
2677 {
2678 /* If we're resuming a thread with the PC out of the step
2679 range, then we're doing some nested/finer run control
2680 operation, like stepping the thread out of the dynamic
2681 linker or the displaced stepping scratch pad. We
2682 shouldn't have allowed a range step then. */
2683 gdb_assert (pc_in_thread_step_range (pc, tp));
2684 }
c1e36e3e 2685
64ce06e4 2686 do_target_resume (resume_ptid, step, sig);
372316f1 2687 tp->resumed = 1;
c906108c
SS
2688 discard_cleanups (old_cleanups);
2689}
2690\f
237fc4c9 2691/* Proceeding. */
c906108c
SS
2692
2693/* Clear out all variables saying what to do when inferior is continued.
2694 First do this, then set the ones you want, then call `proceed'. */
2695
a7212384
UW
2696static void
2697clear_proceed_status_thread (struct thread_info *tp)
c906108c 2698{
a7212384
UW
2699 if (debug_infrun)
2700 fprintf_unfiltered (gdb_stdlog,
2701 "infrun: clear_proceed_status_thread (%s)\n",
2702 target_pid_to_str (tp->ptid));
d6b48e9c 2703
372316f1
PA
2704 /* If we're starting a new sequence, then the previous finished
2705 single-step is no longer relevant. */
2706 if (tp->suspend.waitstatus_pending_p)
2707 {
2708 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2709 {
2710 if (debug_infrun)
2711 fprintf_unfiltered (gdb_stdlog,
2712 "infrun: clear_proceed_status: pending "
2713 "event of %s was a finished step. "
2714 "Discarding.\n",
2715 target_pid_to_str (tp->ptid));
2716
2717 tp->suspend.waitstatus_pending_p = 0;
2718 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2719 }
2720 else if (debug_infrun)
2721 {
2722 char *statstr;
2723
2724 statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
2725 fprintf_unfiltered (gdb_stdlog,
2726 "infrun: clear_proceed_status_thread: thread %s "
2727 "has pending wait status %s "
2728 "(currently_stepping=%d).\n",
2729 target_pid_to_str (tp->ptid), statstr,
2730 currently_stepping (tp));
2731 xfree (statstr);
2732 }
2733 }
2734
70509625
PA
2735 /* If this signal should not be seen by program, give it zero.
2736 Used for debugging signals. */
2737 if (!signal_pass_state (tp->suspend.stop_signal))
2738 tp->suspend.stop_signal = GDB_SIGNAL_0;
2739
16c381f0
JK
2740 tp->control.trap_expected = 0;
2741 tp->control.step_range_start = 0;
2742 tp->control.step_range_end = 0;
c1e36e3e 2743 tp->control.may_range_step = 0;
16c381f0
JK
2744 tp->control.step_frame_id = null_frame_id;
2745 tp->control.step_stack_frame_id = null_frame_id;
2746 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2747 tp->control.step_start_function = NULL;
a7212384 2748 tp->stop_requested = 0;
4e1c45ea 2749
16c381f0 2750 tp->control.stop_step = 0;
32400beb 2751
16c381f0 2752 tp->control.proceed_to_finish = 0;
414c69f7 2753
17b2616c 2754 tp->control.command_interp = NULL;
856e7dd6 2755 tp->control.stepping_command = 0;
17b2616c 2756
a7212384 2757 /* Discard any remaining commands or status from previous stop. */
16c381f0 2758 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2759}
32400beb 2760
a7212384 2761void
70509625 2762clear_proceed_status (int step)
a7212384 2763{
6c95b8df
PA
2764 if (!non_stop)
2765 {
70509625
PA
2766 struct thread_info *tp;
2767 ptid_t resume_ptid;
2768
2769 resume_ptid = user_visible_resume_ptid (step);
2770
2771 /* In all-stop mode, delete the per-thread status of all threads
2772 we're about to resume, implicitly and explicitly. */
2773 ALL_NON_EXITED_THREADS (tp)
2774 {
2775 if (!ptid_match (tp->ptid, resume_ptid))
2776 continue;
2777 clear_proceed_status_thread (tp);
2778 }
6c95b8df
PA
2779 }
2780
a7212384
UW
2781 if (!ptid_equal (inferior_ptid, null_ptid))
2782 {
2783 struct inferior *inferior;
2784
2785 if (non_stop)
2786 {
6c95b8df
PA
2787 /* If in non-stop mode, only delete the per-thread status of
2788 the current thread. */
a7212384
UW
2789 clear_proceed_status_thread (inferior_thread ());
2790 }
6c95b8df 2791
d6b48e9c 2792 inferior = current_inferior ();
16c381f0 2793 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2794 }
2795
c906108c 2796 stop_after_trap = 0;
f3b1572e
PA
2797
2798 observer_notify_about_to_proceed ();
c906108c
SS
2799}
2800
99619bea
PA
2801/* Returns true if TP is still stopped at a breakpoint that needs
2802 stepping-over in order to make progress. If the breakpoint is gone
2803 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2804
2805static int
6c4cfb24 2806thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2807{
2808 if (tp->stepping_over_breakpoint)
2809 {
2810 struct regcache *regcache = get_thread_regcache (tp->ptid);
2811
2812 if (breakpoint_here_p (get_regcache_aspace (regcache),
af48d08f
PA
2813 regcache_read_pc (regcache))
2814 == ordinary_breakpoint_here)
99619bea
PA
2815 return 1;
2816
2817 tp->stepping_over_breakpoint = 0;
2818 }
2819
2820 return 0;
2821}
2822
6c4cfb24
PA
2823/* Check whether thread TP still needs to start a step-over in order
2824 to make progress when resumed. Returns an bitwise or of enum
2825 step_over_what bits, indicating what needs to be stepped over. */
2826
2827static int
2828thread_still_needs_step_over (struct thread_info *tp)
2829{
2830 struct inferior *inf = find_inferior_ptid (tp->ptid);
2831 int what = 0;
2832
2833 if (thread_still_needs_step_over_bp (tp))
2834 what |= STEP_OVER_BREAKPOINT;
2835
2836 if (tp->stepping_over_watchpoint
2837 && !target_have_steppable_watchpoint)
2838 what |= STEP_OVER_WATCHPOINT;
2839
2840 return what;
2841}
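
/* Illustrative sketch, not part of infrun.c: how callers such as
   start_step_over above test the returned bit mask.  */
#if 0
static void
example_step_over_what (struct thread_info *tp)
{
  int what = thread_still_needs_step_over (tp);

  if (what & STEP_OVER_BREAKPOINT)
    ;	/* Must move TP past a breakpoint at its PC.  */
  if (what & STEP_OVER_WATCHPOINT)
    ;	/* Must step TP past a watchpoint the target can't step over itself.  */
  if (what == 0)
    ;	/* Nothing left to step over; TP can simply be resumed.  */
}
#endif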
2842
483805cf
PA
2843/* Returns true if scheduler locking applies. STEP indicates whether
2844 we're about to do a step/next-like command to a thread. */
2845
2846static int
856e7dd6 2847schedlock_applies (struct thread_info *tp)
483805cf
PA
2848{
2849 return (scheduler_mode == schedlock_on
2850 || (scheduler_mode == schedlock_step
856e7dd6 2851 && tp->control.stepping_command));
483805cf
PA
2852}
2853
c906108c
SS
2854/* Basic routine for continuing the program in various fashions.
2855
2856 ADDR is the address to resume at, or -1 for resume where stopped.
2857 SIGGNAL is the signal to give it, or 0 for none,
c5aa993b 2858 or -1 for act according to how it stopped.
c906108c 2859 STEP is nonzero if should trap after one instruction.
c5aa993b
JM
2860 -1 means return after that and print nothing.
2861 You should probably set various step_... variables
2862 before calling here, if you are stepping.
c906108c
SS
2863
2864 You should call clear_proceed_status before calling proceed. */
2865
2866void
64ce06e4 2867proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2868{
e58b0e63
PA
2869 struct regcache *regcache;
2870 struct gdbarch *gdbarch;
4e1c45ea 2871 struct thread_info *tp;
e58b0e63 2872 CORE_ADDR pc;
6c95b8df 2873 struct address_space *aspace;
4d9d9d04
PA
2874 ptid_t resume_ptid;
2875 struct execution_control_state ecss;
2876 struct execution_control_state *ecs = &ecss;
2877 struct cleanup *old_chain;
2878 int started;
c906108c 2879
e58b0e63
PA
2880 /* If we're stopped at a fork/vfork, follow the branch set by the
2881 "set follow-fork-mode" command; otherwise, we'll just proceed
2882 resuming the current thread. */
2883 if (!follow_fork ())
2884 {
2885 /* The target for some reason decided not to resume. */
2886 normal_stop ();
f148b27e
PA
2887 if (target_can_async_p ())
2888 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
e58b0e63
PA
2889 return;
2890 }
2891
842951eb
PA
2892 /* We'll update this if & when we switch to a new thread. */
2893 previous_inferior_ptid = inferior_ptid;
2894
e58b0e63
PA
2895 regcache = get_current_regcache ();
2896 gdbarch = get_regcache_arch (regcache);
6c95b8df 2897 aspace = get_regcache_aspace (regcache);
e58b0e63 2898 pc = regcache_read_pc (regcache);
2adfaa28 2899 tp = inferior_thread ();
e58b0e63 2900
99619bea
PA
2901 /* Fill in with reasonable starting values. */
2902 init_thread_stepping_state (tp);
2903
c2829269
PA
2904 gdb_assert (!thread_is_in_step_over_chain (tp));
2905
2acceee2 2906 if (addr == (CORE_ADDR) -1)
c906108c 2907 {
af48d08f
PA
2908 if (pc == stop_pc
2909 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2910 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2911 /* There is a breakpoint at the address we will resume at,
2912 step one instruction before inserting breakpoints so that
2913 we do not stop right away (and report a second hit at this
b2175913
MS
2914 breakpoint).
2915
2916 Note, we don't do this in reverse, because we won't
2917 actually be executing the breakpoint insn anyway.
2918 We'll be (un-)executing the previous instruction. */
99619bea 2919 tp->stepping_over_breakpoint = 1;
515630c5
UW
2920 else if (gdbarch_single_step_through_delay_p (gdbarch)
2921 && gdbarch_single_step_through_delay (gdbarch,
2922 get_current_frame ()))
3352ef37
AC
2923 /* We stepped onto an instruction that needs to be stepped
2924 again before re-inserting the breakpoint, do so. */
99619bea 2925 tp->stepping_over_breakpoint = 1;
c906108c
SS
2926 }
2927 else
2928 {
515630c5 2929 regcache_write_pc (regcache, addr);
c906108c
SS
2930 }
2931
70509625
PA
2932 if (siggnal != GDB_SIGNAL_DEFAULT)
2933 tp->suspend.stop_signal = siggnal;
2934
17b2616c
PA
2935 /* Record the interpreter that issued the execution command that
2936 caused this thread to resume. If the top level interpreter is
2937 MI/async, and the execution command was a CLI command
2938 (next/step/etc.), we'll want to print stop event output to the MI
2939 console channel (the stepped-to line, etc.), as if the user
2940 entered the execution command on a real GDB console. */
4d9d9d04
PA
2941 tp->control.command_interp = command_interp ();
2942
2943 resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
2944
2945 /* If an exception is thrown from this point on, make sure to
2946 propagate GDB's knowledge of the executing state to the
2947 frontend/user running state. */
2948 old_chain = make_cleanup (finish_thread_state_cleanup, &resume_ptid);
2949
2950 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2951 threads (e.g., we might need to set threads stepping over
2952 breakpoints first), from the user/frontend's point of view, all
2953 threads in RESUME_PTID are now running. Unless we're calling an
2954 inferior function, as in that case we pretend the inferior
2955 doesn't run at all. */
2956 if (!tp->control.in_infcall)
2957 set_running (resume_ptid, 1);
17b2616c 2958
527159b7 2959 if (debug_infrun)
8a9de0e4 2960 fprintf_unfiltered (gdb_stdlog,
64ce06e4 2961 "infrun: proceed (addr=%s, signal=%s)\n",
c9737c08 2962 paddress (gdbarch, addr),
64ce06e4 2963 gdb_signal_to_symbol_string (siggnal));
527159b7 2964
4d9d9d04
PA
2965 annotate_starting ();
2966
2967 /* Make sure that output from GDB appears before output from the
2968 inferior. */
2969 gdb_flush (gdb_stdout);
2970
2971 /* In a multi-threaded task we may select another thread and
2972 then continue or step.
2973
2974 But if a thread that we're resuming had stopped at a breakpoint,
2975 it will immediately cause another breakpoint stop without any
2976 execution (i.e. it will report a breakpoint hit incorrectly). So
2977 we must step over it first.
2978
2979 Look for threads other than the current (TP) that reported a
2980 breakpoint hit and haven't been resumed yet since. */
2981
2982 /* If scheduler locking applies, we can avoid iterating over all
2983 threads. */
2984 if (!non_stop && !schedlock_applies (tp))
94cc34af 2985 {
4d9d9d04
PA
2986 struct thread_info *current = tp;
2987
2988 ALL_NON_EXITED_THREADS (tp)
2989 {
2990 /* Ignore the current thread here. It's handled
2991 afterwards. */
2992 if (tp == current)
2993 continue;
99619bea 2994
4d9d9d04
PA
2995 /* Ignore threads of processes we're not resuming. */
2996 if (!ptid_match (tp->ptid, resume_ptid))
2997 continue;
c906108c 2998
4d9d9d04
PA
2999 if (!thread_still_needs_step_over (tp))
3000 continue;
3001
3002 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3003
99619bea
PA
3004 if (debug_infrun)
3005 fprintf_unfiltered (gdb_stdlog,
3006 "infrun: need to step-over [%s] first\n",
4d9d9d04 3007 target_pid_to_str (tp->ptid));
99619bea 3008
4d9d9d04 3009 thread_step_over_chain_enqueue (tp);
2adfaa28 3010 }
31e77af2 3011
4d9d9d04 3012 tp = current;
30852783
UW
3013 }
3014
4d9d9d04
PA
3015 /* Enqueue the current thread last, so that we move all other
3016 threads over their breakpoints first. */
3017 if (tp->stepping_over_breakpoint)
3018 thread_step_over_chain_enqueue (tp);
30852783 3019
4d9d9d04
PA
3020 /* If the thread isn't started, we'll still need to set its prev_pc,
3021 so that switch_back_to_stepped_thread knows the thread hasn't
3022 advanced. Must do this before resuming any thread, as in
3023 all-stop/remote, once we resume we can't send any other packet
3024 until the target stops again. */
3025 tp->prev_pc = regcache_read_pc (regcache);
99619bea 3026
4d9d9d04 3027 started = start_step_over ();
c906108c 3028
4d9d9d04
PA
3029 if (step_over_info_valid_p ())
3030 {
3031 /* Either this thread started a new in-line step over, or some
3032 other thread was already doing one. In either case, don't
3033 resume anything else until the step-over is finished. */
3034 }
fbea99ea 3035 else if (started && !target_is_non_stop_p ())
4d9d9d04
PA
3036 {
3037 /* A new displaced stepping sequence was started. In all-stop,
3038 we can't talk to the target anymore until it next stops. */
3039 }
fbea99ea
PA
3040 else if (!non_stop && target_is_non_stop_p ())
3041 {
3042 /* In all-stop, but the target is always in non-stop mode.
3043 Start all other threads that are implicitly resumed too. */
3044 ALL_NON_EXITED_THREADS (tp)
3045 {
3046 /* Ignore threads of processes we're not resuming. */
3047 if (!ptid_match (tp->ptid, resume_ptid))
3048 continue;
3049
3050 if (tp->resumed)
3051 {
3052 if (debug_infrun)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "infrun: proceed: [%s] resumed\n",
3055 target_pid_to_str (tp->ptid));
3056 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3057 continue;
3058 }
3059
3060 if (thread_is_in_step_over_chain (tp))
3061 {
3062 if (debug_infrun)
3063 fprintf_unfiltered (gdb_stdlog,
3064 "infrun: proceed: [%s] needs step-over\n",
3065 target_pid_to_str (tp->ptid));
3066 continue;
3067 }
3068
3069 if (debug_infrun)
3070 fprintf_unfiltered (gdb_stdlog,
3071 "infrun: proceed: resuming %s\n",
3072 target_pid_to_str (tp->ptid));
3073
3074 reset_ecs (ecs, tp);
3075 switch_to_thread (tp->ptid);
3076 keep_going_pass_signal (ecs);
3077 if (!ecs->wait_some_more)
fd7dcb94 3078 error (_("Command aborted."));
fbea99ea
PA
3079 }
3080 }
372316f1 3081 else if (!tp->resumed && !thread_is_in_step_over_chain (tp))
4d9d9d04
PA
3082 {
3083 /* The thread wasn't started, and isn't queued, run it now. */
3084 reset_ecs (ecs, tp);
3085 switch_to_thread (tp->ptid);
3086 keep_going_pass_signal (ecs);
3087 if (!ecs->wait_some_more)
fd7dcb94 3088 error (_("Command aborted."));
4d9d9d04 3089 }
c906108c 3090
4d9d9d04 3091 discard_cleanups (old_chain);
c906108c
SS
3092
3093 /* Wait for it to stop (if not standalone)
3094 and in any case decode why it stopped, and act accordingly. */
43ff13b4 3095 /* Do this only if we are not using the event loop, or if the target
1777feb0 3096 does not support asynchronous execution. */
362646f5 3097 if (!target_can_async_p ())
43ff13b4 3098 {
e4c8541f 3099 wait_for_inferior ();
43ff13b4
JM
3100 normal_stop ();
3101 }
c906108c 3102}
c906108c
SS
3103\f
3104
3105/* Start remote-debugging of a machine over a serial link. */
96baa820 3106
c906108c 3107void
8621d6a9 3108start_remote (int from_tty)
c906108c 3109{
d6b48e9c 3110 struct inferior *inferior;
d6b48e9c
PA
3111
3112 inferior = current_inferior ();
16c381f0 3113 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3114
1777feb0 3115 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3116 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3117 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3118 nothing is returned (instead of just blocking). Because of this,
3119 targets expecting an immediate response need to, internally, set
3120 things up so that the target_wait() is forced to eventually
1777feb0 3121 timeout. */
6426a772
JM
3122 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3123 differentiate to its caller what the state of the target is after
3124 the initial open has been performed. Here we're assuming that
3125 the target has stopped. It should be possible to eventually have
3126 target_open() return to the caller an indication that the target
3127 is currently running and GDB state should be set to the same as
1777feb0 3128 for an async run. */
e4c8541f 3129 wait_for_inferior ();
8621d6a9
DJ
3130
3131 /* Now that the inferior has stopped, do any bookkeeping like
3132 loading shared libraries. We want to do this before normal_stop,
3133 so that the displayed frame is up to date. */
3134 post_create_inferior (&current_target, from_tty);
3135
6426a772 3136 normal_stop ();
c906108c
SS
3137}
3138
3139/* Initialize static vars when a new inferior begins. */
3140
3141void
96baa820 3142init_wait_for_inferior (void)
c906108c
SS
3143{
3144 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3145
c906108c
SS
3146 breakpoint_init_inferior (inf_starting);
3147
70509625 3148 clear_proceed_status (0);
9f976b41 3149
ca005067 3150 target_last_wait_ptid = minus_one_ptid;
237fc4c9 3151
842951eb 3152 previous_inferior_ptid = inferior_ptid;
0d1e5fa7 3153
edb3359d
DJ
3154 /* Discard any skipped inlined frames. */
3155 clear_inline_frame_state (minus_one_ptid);
c906108c 3156}
237fc4c9 3157
c906108c 3158\f
488f131b 3159
ec9499be 3160static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3161
568d6575
UW
3162static void handle_step_into_function (struct gdbarch *gdbarch,
3163 struct execution_control_state *ecs);
3164static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3165 struct execution_control_state *ecs);
4f5d7f63 3166static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3167static void check_exception_resume (struct execution_control_state *,
28106bc2 3168 struct frame_info *);
611c83ae 3169
bdc36728 3170static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3171static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3172static void keep_going (struct execution_control_state *ecs);
94c57d6a 3173static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3174static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3175
252fbfc8
PA
3176/* Callback for iterate_over_threads. If the thread is stopped, but
3177 the user/frontend doesn't know about that yet, go through
3178 normal_stop, as if the thread had just stopped now. ARG points at
3179 a ptid. If PTID is MINUS_ONE_PTID, this applies to all threads. If
3180 ptid_is_pid(PTID) is true, it applies to all threads of the process
3181 pointed at by PTID. Otherwise, it applies only to the thread
3182 pointed at by PTID. */
3183
3184static int
3185infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
3186{
3187 ptid_t ptid = * (ptid_t *) arg;
3188
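 /* Act only on threads that match PTID (the exact thread, all
 threads when PTID is minus_one_ptid, or all threads of PTID's
 process), and that are marked running but are no longer
 executing, i.e. threads whose stop hasn't been reported to the
 user/frontend yet. */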
3189 if ((ptid_equal (info->ptid, ptid)
3190 || ptid_equal (minus_one_ptid, ptid)
3191 || (ptid_is_pid (ptid)
3192 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
3193 && is_running (info->ptid)
3194 && !is_executing (info->ptid))
3195 {
3196 struct cleanup *old_chain;
3197 struct execution_control_state ecss;
3198 struct execution_control_state *ecs = &ecss;
3199
3200 memset (ecs, 0, sizeof (*ecs));
3201
3202 old_chain = make_cleanup_restore_current_thread ();
3203
f15cb84a
YQ
3204 overlay_cache_invalid = 1;
3205 /* Flush target cache before starting to handle each event.
3206 Target was running and cache could be stale. This is just a
3207 heuristic. Running threads may modify target memory, but we
3208 don't get any event. */
3209 target_dcache_invalidate ();
3210
252fbfc8
PA
3211 /* Go through handle_inferior_event/normal_stop, so we always
3212 have consistent output as if the stop event had been
3213 reported. */
3214 ecs->ptid = info->ptid;
e09875d4 3215 ecs->event_thread = find_thread_ptid (info->ptid);
252fbfc8 3216 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3217 ecs->ws.value.sig = GDB_SIGNAL_0;
252fbfc8
PA
3218
3219 handle_inferior_event (ecs);
3220
3221 if (!ecs->wait_some_more)
3222 {
3223 struct thread_info *tp;
3224
3225 normal_stop ();
3226
fa4cd53f 3227 /* Finish off the continuations. */
252fbfc8 3228 tp = inferior_thread ();
fa4cd53f
PA
3229 do_all_intermediate_continuations_thread (tp, 1);
3230 do_all_continuations_thread (tp, 1);
252fbfc8
PA
3231 }
3232
3233 do_cleanups (old_chain);
3234 }
3235
3236 return 0;
3237}
3238
3239/* This function is attached as a "thread_stop_requested" observer.
3240 Cleanup local state that assumed the PTID was to be resumed, and
3241 report the stop to the frontend. */
3242
2c0b251b 3243static void
252fbfc8
PA
3244infrun_thread_stop_requested (ptid_t ptid)
3245{
c2829269 3246 struct thread_info *tp;
252fbfc8 3247
c2829269
PA
3248 /* PTID was requested to stop. Remove matching threads from the
3249 step-over queue, so we don't try to resume them
3250 automatically. */
3251 ALL_NON_EXITED_THREADS (tp)
3252 if (ptid_match (tp->ptid, ptid))
3253 {
3254 if (thread_is_in_step_over_chain (tp))
3255 thread_step_over_chain_remove (tp);
3256 }
252fbfc8
PA
3257
3258 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
3259}
3260
a07daef3
PA
3261static void
3262infrun_thread_thread_exit (struct thread_info *tp, int silent)
3263{
3264 if (ptid_equal (target_last_wait_ptid, tp->ptid))
3265 nullify_last_target_wait_ptid ();
3266}
3267
0cbcdb96
PA
3268/* Delete the step resume, single-step and longjmp/exception resume
3269 breakpoints of TP. */
4e1c45ea 3270
0cbcdb96
PA
3271static void
3272delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3273{
0cbcdb96
PA
3274 delete_step_resume_breakpoint (tp);
3275 delete_exception_resume_breakpoint (tp);
34b7e8a6 3276 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3277}
3278
0cbcdb96
PA
3279/* If the target still has execution, call FUNC for each thread that
3280 just stopped. In all-stop, that's all the non-exited threads; in
3281 non-stop, that's only the current thread. */
3282
3283typedef void (*for_each_just_stopped_thread_callback_func)
3284 (struct thread_info *tp);
4e1c45ea
PA
3285
3286static void
0cbcdb96 3287for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3288{
0cbcdb96 3289 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
4e1c45ea
PA
3290 return;
3291
fbea99ea 3292 if (target_is_non_stop_p ())
4e1c45ea 3293 {
0cbcdb96
PA
3294 /* If in non-stop mode, only the current thread stopped. */
3295 func (inferior_thread ());
4e1c45ea
PA
3296 }
3297 else
0cbcdb96
PA
3298 {
3299 struct thread_info *tp;
3300
3301 /* In all-stop mode, all threads have stopped. */
3302 ALL_NON_EXITED_THREADS (tp)
3303 {
3304 func (tp);
3305 }
3306 }
3307}
3308
3309/* Delete the step resume and longjmp/exception resume breakpoints of
3310 the threads that just stopped. */
3311
3312static void
3313delete_just_stopped_threads_infrun_breakpoints (void)
3314{
3315 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3316}
3317
3318/* Delete the single-step breakpoints of the threads that just
3319 stopped. */
7c16b83e 3320
34b7e8a6
PA
3321static void
3322delete_just_stopped_threads_single_step_breakpoints (void)
3323{
3324 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3325}
3326
1777feb0 3327/* A cleanup wrapper. */
4e1c45ea
PA
3328
3329static void
0cbcdb96 3330delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
4e1c45ea 3331{
0cbcdb96 3332 delete_just_stopped_threads_infrun_breakpoints ();
4e1c45ea
PA
3333}
3334
221e1a37 3335/* See infrun.h. */
223698f8 3336
221e1a37 3337void
223698f8
DE
3338print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3339 const struct target_waitstatus *ws)
3340{
3341 char *status_string = target_waitstatus_to_string (ws);
3342 struct ui_file *tmp_stream = mem_fileopen ();
3343 char *text;
223698f8
DE
3344
3345 /* The text is split over several lines because it was getting too long.
3346 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3347 output as a unit; we want only one timestamp printed if debug_timestamp
3348 is set. */
3349
3350 fprintf_unfiltered (tmp_stream,
1176ecec
PA
3351 "infrun: target_wait (%d.%ld.%ld",
3352 ptid_get_pid (waiton_ptid),
3353 ptid_get_lwp (waiton_ptid),
3354 ptid_get_tid (waiton_ptid));
dfd4cc63 3355 if (ptid_get_pid (waiton_ptid) != -1)
223698f8
DE
3356 fprintf_unfiltered (tmp_stream,
3357 " [%s]", target_pid_to_str (waiton_ptid));
3358 fprintf_unfiltered (tmp_stream, ", status) =\n");
3359 fprintf_unfiltered (tmp_stream,
1176ecec 3360 "infrun: %d.%ld.%ld [%s],\n",
dfd4cc63 3361 ptid_get_pid (result_ptid),
1176ecec
PA
3362 ptid_get_lwp (result_ptid),
3363 ptid_get_tid (result_ptid),
dfd4cc63 3364 target_pid_to_str (result_ptid));
223698f8
DE
3365 fprintf_unfiltered (tmp_stream,
3366 "infrun: %s\n",
3367 status_string);
3368
759ef836 3369 text = ui_file_xstrdup (tmp_stream, NULL);
223698f8
DE
3370
3371 /* This uses %s in part to handle %'s in the text, but also to avoid
3372 a gcc error: the format attribute requires a string literal. */
3373 fprintf_unfiltered (gdb_stdlog, "%s", text);
3374
3375 xfree (status_string);
3376 xfree (text);
3377 ui_file_delete (tmp_stream);
3378}
3379
372316f1
PA
3380/* Select a thread at random, out of those which are resumed and have
3381 had events. */
3382
3383static struct thread_info *
3384random_pending_event_thread (ptid_t waiton_ptid)
3385{
3386 struct thread_info *event_tp;
3387 int num_events = 0;
3388 int random_selector;
3389
3390 /* First see how many events we have. Count only resumed threads
3391 that have an event pending. */
3392 ALL_NON_EXITED_THREADS (event_tp)
3393 if (ptid_match (event_tp->ptid, waiton_ptid)
3394 && event_tp->resumed
3395 && event_tp->suspend.waitstatus_pending_p)
3396 num_events++;
3397
3398 if (num_events == 0)
3399 return NULL;
3400
3401 /* Now randomly pick a thread out of those that have had events. */
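 /* Scaling rand () by num_events / (RAND_MAX + 1.0) yields an index
 uniformly distributed in [0, num_events), so each thread with a
 pending event is equally likely to be chosen. */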
3402 random_selector = (int)
3403 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3404
3405 if (debug_infrun && num_events > 1)
3406 fprintf_unfiltered (gdb_stdlog,
3407 "infrun: Found %d events, selecting #%d\n",
3408 num_events, random_selector);
3409
3410 /* Select the Nth thread that has had an event. */
3411 ALL_NON_EXITED_THREADS (event_tp)
3412 if (ptid_match (event_tp->ptid, waiton_ptid)
3413 && event_tp->resumed
3414 && event_tp->suspend.waitstatus_pending_p)
3415 if (random_selector-- == 0)
3416 break;
3417
3418 return event_tp;
3419}
3420
3421/* Wrapper for target_wait that first checks whether threads have
3422 pending statuses to report before actually asking the target for
3423 more events. */
3424
3425static ptid_t
3426do_target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
3427{
3428 ptid_t event_ptid;
3429 struct thread_info *tp;
3430
3431 /* First check if there is a resumed thread with a wait status
3432 pending. */
3433 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3434 {
3435 tp = random_pending_event_thread (ptid);
3436 }
3437 else
3438 {
3439 if (debug_infrun)
3440 fprintf_unfiltered (gdb_stdlog,
3441 "infrun: Waiting for specific thread %s.\n",
3442 target_pid_to_str (ptid));
3443
3444 /* We have a specific thread to check. */
3445 tp = find_thread_ptid (ptid);
3446 gdb_assert (tp != NULL);
3447 if (!tp->suspend.waitstatus_pending_p)
3448 tp = NULL;
3449 }
3450
3451 if (tp != NULL
3452 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3453 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3454 {
3455 struct regcache *regcache = get_thread_regcache (tp->ptid);
3456 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3457 CORE_ADDR pc;
3458 int discard = 0;
3459
3460 pc = regcache_read_pc (regcache);
3461
3462 if (pc != tp->suspend.stop_pc)
3463 {
3464 if (debug_infrun)
3465 fprintf_unfiltered (gdb_stdlog,
3466 "infrun: PC of %s changed. was=%s, now=%s\n",
3467 target_pid_to_str (tp->ptid),
3468 paddress (gdbarch, tp->prev_pc),
3469 paddress (gdbarch, pc));
3470 discard = 1;
3471 }
3472 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3473 {
3474 if (debug_infrun)
3475 fprintf_unfiltered (gdb_stdlog,
3476 "infrun: previous breakpoint of %s, at %s gone\n",
3477 target_pid_to_str (tp->ptid),
3478 paddress (gdbarch, pc));
3479
3480 discard = 1;
3481 }
3482
3483 if (discard)
3484 {
3485 if (debug_infrun)
3486 fprintf_unfiltered (gdb_stdlog,
3487 "infrun: pending event of %s cancelled.\n",
3488 target_pid_to_str (tp->ptid));
3489
3490 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3491 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3492 }
3493 }
3494
3495 if (tp != NULL)
3496 {
3497 if (debug_infrun)
3498 {
3499 char *statstr;
3500
3501 statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
3502 fprintf_unfiltered (gdb_stdlog,
3503 "infrun: Using pending wait status %s for %s.\n",
3504 statstr,
3505 target_pid_to_str (tp->ptid));
3506 xfree (statstr);
3507 }
3508
3509 /* Now that we've selected our final event LWP, un-adjust its PC
3510 if it was a software breakpoint (and the target doesn't
3511 always adjust the PC itself). */
3512 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3513 && !target_supports_stopped_by_sw_breakpoint ())
3514 {
3515 struct regcache *regcache;
3516 struct gdbarch *gdbarch;
3517 int decr_pc;
3518
3519 regcache = get_thread_regcache (tp->ptid);
3520 gdbarch = get_regcache_arch (regcache);
3521
3522 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3523 if (decr_pc != 0)
3524 {
3525 CORE_ADDR pc;
3526
3527 pc = regcache_read_pc (regcache);
3528 regcache_write_pc (regcache, pc + decr_pc);
3529 }
3530 }
3531
3532 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3533 *status = tp->suspend.waitstatus;
3534 tp->suspend.waitstatus_pending_p = 0;
3535
3536 /* Wake up the event loop again, until all pending events are
3537 processed. */
3538 if (target_is_async_p ())
3539 mark_async_event_handler (infrun_async_inferior_event_token);
3540 return tp->ptid;
3541 }
3542
3543 /* But if we don't find one, we'll have to wait. */
3544
3545 if (deprecated_target_wait_hook)
3546 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3547 else
3548 event_ptid = target_wait (ptid, status, options);
3549
3550 return event_ptid;
3551}
3552
24291992
PA
3553/* Prepare and stabilize the inferior for detaching it. E.g.,
3554 detaching while a thread is displaced stepping is a recipe for
3555 crashing it, as nothing would readjust the PC out of the scratch
3556 pad. */
3557
3558void
3559prepare_for_detach (void)
3560{
3561 struct inferior *inf = current_inferior ();
3562 ptid_t pid_ptid = pid_to_ptid (inf->pid);
3563 struct cleanup *old_chain_1;
3564 struct displaced_step_inferior_state *displaced;
3565
3566 displaced = get_displaced_stepping_state (inf->pid);
3567
3568 /* Is any thread of this process displaced stepping? If not,
3569 there's nothing else to do. */
3570 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
3571 return;
3572
3573 if (debug_infrun)
3574 fprintf_unfiltered (gdb_stdlog,
3575 "displaced-stepping in-process while detaching");
3576
3577 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
3578 inf->detaching = 1;
3579
3580 while (!ptid_equal (displaced->step_ptid, null_ptid))
3581 {
3582 struct cleanup *old_chain_2;
3583 struct execution_control_state ecss;
3584 struct execution_control_state *ecs;
3585
3586 ecs = &ecss;
3587 memset (ecs, 0, sizeof (*ecs));
3588
3589 overlay_cache_invalid = 1;
f15cb84a
YQ
3590 /* Flush target cache before starting to handle each event.
3591 Target was running and cache could be stale. This is just a
3592 heuristic. Running threads may modify target memory, but we
3593 don't get any event. */
3594 target_dcache_invalidate ();
24291992 3595
372316f1 3596 ecs->ptid = do_target_wait (pid_ptid, &ecs->ws, 0);
24291992
PA
3597
3598 if (debug_infrun)
3599 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3600
3601 /* If an error happens while handling the event, propagate GDB's
3602 knowledge of the executing state to the frontend/user running
3603 state. */
3e43a32a
MS
3604 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
3605 &minus_one_ptid);
24291992
PA
3606
3607 /* Now figure out what to do with the result. */
3608 handle_inferior_event (ecs);
3609
3610 /* No error, don't finish the state yet. */
3611 discard_cleanups (old_chain_2);
3612
3613 /* Breakpoints and watchpoints are not installed on the target
3614 at this point, and signals are passed directly to the
3615 inferior, so this must mean the process is gone. */
3616 if (!ecs->wait_some_more)
3617 {
3618 discard_cleanups (old_chain_1);
3619 error (_("Program exited while detaching"));
3620 }
3621 }
3622
3623 discard_cleanups (old_chain_1);
3624}
3625
cd0fc7c3 3626/* Wait for control to return from the inferior to the debugger.
ae123ec6 3627
cd0fc7c3
SS
3628 If the inferior gets a signal, we may decide to start it up again
3629 instead of returning. That is why there is a loop in this function.
3630 When this function actually returns, it means the inferior
3631 should be left stopped and GDB should read more commands. */
3632
3633void
e4c8541f 3634wait_for_inferior (void)
cd0fc7c3
SS
3635{
3636 struct cleanup *old_cleanups;
e6f5c25b 3637 struct cleanup *thread_state_chain;
c906108c 3638
527159b7 3639 if (debug_infrun)
ae123ec6 3640 fprintf_unfiltered
e4c8541f 3641 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
527159b7 3642
0cbcdb96
PA
3643 old_cleanups
3644 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3645 NULL);
cd0fc7c3 3646
e6f5c25b
PA
3647 /* If an error happens while handling the event, propagate GDB's
3648 knowledge of the executing state to the frontend/user running
3649 state. */
3650 thread_state_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3651
c906108c
SS
3652 while (1)
3653 {
ae25568b
PA
3654 struct execution_control_state ecss;
3655 struct execution_control_state *ecs = &ecss;
963f9c80 3656 ptid_t waiton_ptid = minus_one_ptid;
29f49a6a 3657
ae25568b
PA
3658 memset (ecs, 0, sizeof (*ecs));
3659
ec9499be 3660 overlay_cache_invalid = 1;
ec9499be 3661
f15cb84a
YQ
3662 /* Flush target cache before starting to handle each event.
3663 Target was running and cache could be stale. This is just a
3664 heuristic. Running threads may modify target memory, but we
3665 don't get any event. */
3666 target_dcache_invalidate ();
3667
372316f1 3668 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, 0);
c906108c 3669
f00150c9 3670 if (debug_infrun)
223698f8 3671 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
f00150c9 3672
cd0fc7c3
SS
3673 /* Now figure out what to do with the result. */
3674 handle_inferior_event (ecs);
c906108c 3675
cd0fc7c3
SS
3676 if (!ecs->wait_some_more)
3677 break;
3678 }
4e1c45ea 3679
e6f5c25b
PA
3680 /* No error, don't finish the state yet. */
3681 discard_cleanups (thread_state_chain);
3682
cd0fc7c3
SS
3683 do_cleanups (old_cleanups);
3684}
c906108c 3685
d3d4baed
PA
3686/* Cleanup that reinstalls the readline callback handler, if the
3687 target is running in the background. If while handling the target
3688 event something triggered a secondary prompt, like e.g., a
3689 pagination prompt, we'll have removed the callback handler (see
3690 gdb_readline_wrapper_line). Need to do this as we go back to the
3691 event loop, ready to process further input. Note this has no
3692 effect if the handler hasn't actually been removed, because calling
3693 rl_callback_handler_install resets the line buffer, thus losing
3694 input. */
3695
3696static void
3697reinstall_readline_callback_handler_cleanup (void *arg)
3698{
6c400b59
PA
3699 if (!interpreter_async)
3700 {
3701 /* We're not going back to the top level event loop yet. Don't
3702 install the readline callback, as it'd prep the terminal,
3703 readline-style (raw, noecho) (e.g., --batch). We'll install
3704 it the next time the prompt is displayed, when we're ready
3705 for input. */
3706 return;
3707 }
3708
d3d4baed
PA
3709 if (async_command_editing_p && !sync_execution)
3710 gdb_rl_callback_handler_reinstall ();
3711}
3712
1777feb0 3713/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3714 event loop whenever a change of state is detected on the file
1777feb0
MS
3715 descriptor corresponding to the target. It can be called more than
3716 once to complete a single execution command. In such cases we need
3717 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3718 that this function is called for a single execution command, then
3719 report to the user that the inferior has stopped, and do the
1777feb0 3720 necessary cleanups. */
43ff13b4
JM
3721
3722void
fba45db2 3723fetch_inferior_event (void *client_data)
43ff13b4 3724{
0d1e5fa7 3725 struct execution_control_state ecss;
a474d7c2 3726 struct execution_control_state *ecs = &ecss;
4f8d22e3 3727 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
29f49a6a 3728 struct cleanup *ts_old_chain;
4f8d22e3 3729 int was_sync = sync_execution;
0f641c01 3730 int cmd_done = 0;
963f9c80 3731 ptid_t waiton_ptid = minus_one_ptid;
43ff13b4 3732
0d1e5fa7
PA
3733 memset (ecs, 0, sizeof (*ecs));
3734
d3d4baed
PA
3735 /* End up with readline processing input, if necessary. */
3736 make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);
3737
c5187ac6
PA
3738 /* We're handling a live event, so make sure we're doing live
3739 debugging. If we're looking at traceframes while the target is
3740 running, we're going to need to get back to that mode after
3741 handling the event. */
3742 if (non_stop)
3743 {
3744 make_cleanup_restore_current_traceframe ();
e6e4e701 3745 set_current_traceframe (-1);
c5187ac6
PA
3746 }
3747
4f8d22e3
PA
3748 if (non_stop)
3749 /* In non-stop mode, the user/frontend should not notice a thread
3750 switch due to internal events. Make sure we reverse to the
3751 user selected thread and frame after handling the event and
3752 running any breakpoint commands. */
3753 make_cleanup_restore_current_thread ();
3754
ec9499be 3755 overlay_cache_invalid = 1;
f15cb84a
YQ
3756 /* Flush target cache before starting to handle each event. Target
3757 was running and cache could be stale. This is just a heuristic.
3758 Running threads may modify target memory, but we don't get any
3759 event. */
3760 target_dcache_invalidate ();
3dd5b83d 3761
32231432
PA
3762 make_cleanup_restore_integer (&execution_direction);
3763 execution_direction = target_execution_direction ();
3764
372316f1 3765 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
43ff13b4 3766
f00150c9 3767 if (debug_infrun)
223698f8 3768 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
f00150c9 3769
29f49a6a
PA
3770 /* If an error happens while handling the event, propagate GDB's
3771 knowledge of the executing state to the frontend/user running
3772 state. */
fbea99ea 3773 if (!target_is_non_stop_p ())
29f49a6a
PA
3774 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3775 else
3776 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3777
353d1d73
JK
3778 /* Get executed before make_cleanup_restore_current_thread above to apply
3779 still for the thread which has thrown the exception. */
3780 make_bpstat_clear_actions_cleanup ();
3781
7c16b83e
PA
3782 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3783
43ff13b4 3784 /* Now figure out what to do with the result. */
a474d7c2 3785 handle_inferior_event (ecs);
43ff13b4 3786
a474d7c2 3787 if (!ecs->wait_some_more)
43ff13b4 3788 {
c9657e70 3789 struct inferior *inf = find_inferior_ptid (ecs->ptid);
d6b48e9c 3790
0cbcdb96 3791 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3792
d6b48e9c 3793 /* We may not find an inferior if this was a process exit. */
16c381f0 3794 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
83c265ab
PA
3795 normal_stop ();
3796
af679fd0 3797 if (target_has_execution
0e5bf2a8 3798 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
af679fd0
PA
3799 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3800 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3801 && ecs->event_thread->step_multi
16c381f0 3802 && ecs->event_thread->control.stop_step)
c2d11a7d
JM
3803 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3804 else
0f641c01
PA
3805 {
3806 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3807 cmd_done = 1;
3808 }
43ff13b4 3809 }
4f8d22e3 3810
29f49a6a
PA
3811 /* No error, don't finish the thread states yet. */
3812 discard_cleanups (ts_old_chain);
3813
4f8d22e3
PA
3814 /* Revert thread and frame. */
3815 do_cleanups (old_chain);
3816
3817 /* If the inferior was in sync execution mode, and now isn't,
0f641c01
PA
3818 restore the prompt (a synchronous execution command has finished,
3819 and we're ready for input). */
b4a14fd0 3820 if (interpreter_async && was_sync && !sync_execution)
92bcb5f9 3821 observer_notify_sync_execution_done ();
0f641c01
PA
3822
3823 if (cmd_done
3824 && !was_sync
3825 && exec_done_display_p
3826 && (ptid_equal (inferior_ptid, null_ptid)
3827 || !is_running (inferior_ptid)))
3828 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
3829}
3830
edb3359d
DJ
3831/* Record the frame and location we're currently stepping through. */
3832void
3833set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3834{
3835 struct thread_info *tp = inferior_thread ();
3836
16c381f0
JK
3837 tp->control.step_frame_id = get_frame_id (frame);
3838 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
3839
3840 tp->current_symtab = sal.symtab;
3841 tp->current_line = sal.line;
3842}
3843
0d1e5fa7
PA
3844/* Clear context switchable stepping state. */
3845
3846void
4e1c45ea 3847init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 3848{
7f5ef605 3849 tss->stepped_breakpoint = 0;
0d1e5fa7 3850 tss->stepping_over_breakpoint = 0;
963f9c80 3851 tss->stepping_over_watchpoint = 0;
0d1e5fa7 3852 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
3853}
3854
c32c64b7
DE
3855/* Set the cached copy of the last ptid/waitstatus. */
3856
3857static void
3858set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3859{
3860 target_last_wait_ptid = ptid;
3861 target_last_waitstatus = status;
3862}
3863
e02bc4cc 3864/* Return the cached copy of the last pid/waitstatus returned by
9a4105ab
AC
3865 target_wait()/deprecated_target_wait_hook(). The data is actually
3866 cached by handle_inferior_event(), which gets called immediately
3867 after target_wait()/deprecated_target_wait_hook(). */
e02bc4cc
DS
3868
3869void
488f131b 3870get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
e02bc4cc 3871{
39f77062 3872 *ptidp = target_last_wait_ptid;
e02bc4cc
DS
3873 *status = target_last_waitstatus;
3874}
3875
ac264b3b
MS
3876void
3877nullify_last_target_wait_ptid (void)
3878{
3879 target_last_wait_ptid = minus_one_ptid;
3880}
3881
dcf4fbde 3882/* Switch thread contexts. */
dd80620e
MS
3883
3884static void
0d1e5fa7 3885context_switch (ptid_t ptid)
dd80620e 3886{
4b51d87b 3887 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
fd48f117
DJ
3888 {
3889 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3890 target_pid_to_str (inferior_ptid));
3891 fprintf_unfiltered (gdb_stdlog, "to %s\n",
0d1e5fa7 3892 target_pid_to_str (ptid));
fd48f117
DJ
3893 }
3894
0d1e5fa7 3895 switch_to_thread (ptid);
dd80620e
MS
3896}
3897
d8dd4d5f
PA
3898/* If the target can't tell whether we've hit breakpoints
3899 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
3900 check whether that could have been caused by a breakpoint. If so,
3901 adjust the PC, per gdbarch_decr_pc_after_break. */
3902
4fa8626c 3903static void
d8dd4d5f
PA
3904adjust_pc_after_break (struct thread_info *thread,
3905 struct target_waitstatus *ws)
4fa8626c 3906{
24a73cce
UW
3907 struct regcache *regcache;
3908 struct gdbarch *gdbarch;
6c95b8df 3909 struct address_space *aspace;
118e6252 3910 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 3911
4fa8626c
DJ
3912 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3913 we aren't, just return.
9709f61c
DJ
3914
3915 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
3916 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3917 implemented by software breakpoints should be handled through the normal
3918 breakpoint layer.
8fb3e588 3919
4fa8626c
DJ
3920 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3921 different signals (SIGILL or SIGEMT for instance), but it is less
3922 clear where the PC is pointing afterwards. It may not match
b798847d
UW
3923 gdbarch_decr_pc_after_break. I don't know any specific target that
3924 generates these signals at breakpoints (the code has been in GDB since at
3925 least 1992) so I cannot guess how to handle them here.
8fb3e588 3926
e6cf7916
UW
3927 In earlier versions of GDB, a target with
3928 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
3929 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3930 target with both of these set in GDB history, and it seems unlikely to be
3931 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 3932
d8dd4d5f 3933 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
3934 return;
3935
d8dd4d5f 3936 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
3937 return;
3938
4058b839
PA
3939 /* In reverse execution, when a breakpoint is hit, the instruction
3940 under it has already been de-executed. The reported PC always
3941 points at the breakpoint address, so adjusting it further would
3942 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3943 architecture:
3944
3945 B1 0x08000000 : INSN1
3946 B2 0x08000001 : INSN2
3947 0x08000002 : INSN3
3948 PC -> 0x08000003 : INSN4
3949
3950 Say you're stopped at 0x08000003 as above. Reverse continuing
3951 from that point should hit B2 as below. Reading the PC when the
3952 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3953 been de-executed already.
3954
3955 B1 0x08000000 : INSN1
3956 B2 PC -> 0x08000001 : INSN2
3957 0x08000002 : INSN3
3958 0x08000003 : INSN4
3959
3960 We can't apply the same logic as for forward execution, because
3961 we would wrongly adjust the PC to 0x08000000, since there's a
3962 breakpoint at PC - 1. We'd then report a hit on B1, although
3963 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3964 behaviour. */
3965 if (execution_direction == EXEC_REVERSE)
3966 return;
3967
1cf4d951
PA
3968 /* If the target can tell whether the thread hit a SW breakpoint,
3969 trust it. Targets that can tell also adjust the PC
3970 themselves. */
3971 if (target_supports_stopped_by_sw_breakpoint ())
3972 return;
3973
3974 /* Note that relying on whether a breakpoint is planted in memory to
3975 determine this can fail. E.g., the breakpoint could have been
3976 removed since. Or the thread could have been told to step an
3977 instruction the size of a breakpoint instruction, and only
3978 _after_ was a breakpoint inserted at its address. */
3979
24a73cce
UW
3980 /* If this target does not decrement the PC after breakpoints, then
3981 we have nothing to do. */
d8dd4d5f 3982 regcache = get_thread_regcache (thread->ptid);
24a73cce 3983 gdbarch = get_regcache_arch (regcache);
118e6252 3984
527a273a 3985 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 3986 if (decr_pc == 0)
24a73cce
UW
3987 return;
3988
6c95b8df
PA
3989 aspace = get_regcache_aspace (regcache);
3990
8aad930b
AC
3991 /* Find the location where (if we've hit a breakpoint) the
3992 breakpoint would be. */
118e6252 3993 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 3994
1cf4d951
PA
3995 /* If the target can't tell whether a software breakpoint triggered,
3996 fall back to figuring it out based on breakpoints we think were
3997 inserted in the target, and on whether the thread was stepped or
3998 continued. */
3999
1c5cfe86
PA
4000 /* Check whether there actually is a software breakpoint inserted at
4001 that location.
4002
4003 If in non-stop mode, a race condition is possible where we've
4004 removed a breakpoint, but stop events for that breakpoint were
4005 already queued and arrive later. To suppress those spurious
4006 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4007 and retire them after a number of stop events are reported. Note
4008 this is a heuristic and can thus get confused. The real fix is
4009 to get the "stopped by SW BP and needs adjustment" info out of
4010 the target/kernel (and thus never reach here; see above). */
6c95b8df 4011 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4012 || (target_is_non_stop_p ()
4013 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4014 {
77f9e713 4015 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
abbb1732 4016
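 /* If full process record is in use, temporarily disable recording
 so that the PC rewrite below is treated as a GDB-side operation
 rather than as inferior activity. */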
8213266a 4017 if (record_full_is_used ())
77f9e713 4018 record_full_gdb_operation_disable_set ();
96429cc8 4019
1c0fdd0e
UW
4020 /* When using hardware single-step, a SIGTRAP is reported for both
4021 a completed single-step and a software breakpoint. Need to
4022 differentiate between the two, as the latter needs adjusting
4023 but the former does not.
4024
4025 The SIGTRAP can be due to a completed hardware single-step only if
4026 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4027 - this thread is currently being stepped
4028
4029 If any of these events did not occur, we must have stopped due
4030 to hitting a software breakpoint, and have to back up to the
4031 breakpoint address.
4032
4033 As a special case, we could have hardware single-stepped a
4034 software breakpoint. In this case (prev_pc == breakpoint_pc),
4035 we also need to back up to the breakpoint address. */
4036
d8dd4d5f
PA
4037 if (thread_has_single_step_breakpoints_set (thread)
4038 || !currently_stepping (thread)
4039 || (thread->stepped_breakpoint
4040 && thread->prev_pc == breakpoint_pc))
515630c5 4041 regcache_write_pc (regcache, breakpoint_pc);
96429cc8 4042
77f9e713 4043 do_cleanups (old_cleanups);
8aad930b 4044 }
4fa8626c
DJ
4045}
4046
edb3359d
DJ
4047static int
4048stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4049{
4050 for (frame = get_prev_frame (frame);
4051 frame != NULL;
4052 frame = get_prev_frame (frame))
4053 {
4054 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4055 return 1;
4056 if (get_frame_type (frame) != INLINE_FRAME)
4057 break;
4058 }
4059
4060 return 0;
4061}
4062
a96d9b2e
SDJ
4063/* Auxiliary function that handles syscall entry/return events.
4064 It returns 1 if the inferior should keep going (and GDB
4065 should ignore the event), or 0 if the event deserves to be
4066 processed. */
ca2163eb 4067
a96d9b2e 4068static int
ca2163eb 4069handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4070{
ca2163eb 4071 struct regcache *regcache;
ca2163eb
PA
4072 int syscall_number;
4073
4074 if (!ptid_equal (ecs->ptid, inferior_ptid))
4075 context_switch (ecs->ptid);
4076
4077 regcache = get_thread_regcache (ecs->ptid);
f90263c1 4078 syscall_number = ecs->ws.value.syscall_number;
ca2163eb
PA
4079 stop_pc = regcache_read_pc (regcache);
4080
a96d9b2e
SDJ
4081 if (catch_syscall_enabled () > 0
4082 && catching_syscall_number (syscall_number) > 0)
4083 {
4084 if (debug_infrun)
4085 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4086 syscall_number);
a96d9b2e 4087
16c381f0 4088 ecs->event_thread->control.stop_bpstat
6c95b8df 4089 = bpstat_stop_status (get_regcache_aspace (regcache),
09ac7c10 4090 stop_pc, ecs->ptid, &ecs->ws);
ab04a2af 4091
ce12b012 4092 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4093 {
4094 /* Catchpoint hit. */
ca2163eb
PA
4095 return 0;
4096 }
a96d9b2e 4097 }
ca2163eb
PA
4098
4099 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4100 keep_going (ecs);
4101 return 1;
a96d9b2e
SDJ
4102}
4103
7e324e48
GB
4104/* Lazily fill in the execution_control_state's stop_func_* fields. */
4105
4106static void
4107fill_in_stop_func (struct gdbarch *gdbarch,
4108 struct execution_control_state *ecs)
4109{
4110 if (!ecs->stop_func_filled_in)
4111 {
4112 /* Don't care about return value; stop_func_start and stop_func_name
4113 will both be 0 if it doesn't work. */
4114 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
4115 &ecs->stop_func_start, &ecs->stop_func_end);
4116 ecs->stop_func_start
4117 += gdbarch_deprecated_function_start_offset (gdbarch);
4118
591a12a1
UW
4119 if (gdbarch_skip_entrypoint_p (gdbarch))
4120 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
4121 ecs->stop_func_start);
4122
7e324e48
GB
4123 ecs->stop_func_filled_in = 1;
4124 }
4125}
4126
4f5d7f63
PA
4127
4128/* Return the STOP_SOON field of the inferior pointed at by PTID. */
4129
4130static enum stop_kind
4131get_inferior_stop_soon (ptid_t ptid)
4132{
c9657e70 4133 struct inferior *inf = find_inferior_ptid (ptid);
4f5d7f63
PA
4134
4135 gdb_assert (inf != NULL);
4136 return inf->control.stop_soon;
4137}
4138
372316f1
PA
4139/* Wait for one event. Store the resulting waitstatus in WS, and
4140 return the event ptid. */
4141
4142static ptid_t
4143wait_one (struct target_waitstatus *ws)
4144{
4145 ptid_t event_ptid;
4146 ptid_t wait_ptid = minus_one_ptid;
4147
4148 overlay_cache_invalid = 1;
4149
4150 /* Flush target cache before starting to handle each event.
4151 Target was running and cache could be stale. This is just a
4152 heuristic. Running threads may modify target memory, but we
4153 don't get any event. */
4154 target_dcache_invalidate ();
4155
4156 if (deprecated_target_wait_hook)
4157 event_ptid = deprecated_target_wait_hook (wait_ptid, ws, 0);
4158 else
4159 event_ptid = target_wait (wait_ptid, ws, 0);
4160
4161 if (debug_infrun)
4162 print_target_wait_results (wait_ptid, event_ptid, ws);
4163
4164 return event_ptid;
4165}
4166
4167/* Generate a wrapper for target_stopped_by_REASON that works on PTID
4168 instead of the current thread. */
4169#define THREAD_STOPPED_BY(REASON) \
4170static int \
4171thread_stopped_by_ ## REASON (ptid_t ptid) \
4172{ \
4173 struct cleanup *old_chain; \
4174 int res; \
4175 \
4176 old_chain = save_inferior_ptid (); \
4177 inferior_ptid = ptid; \
4178 \
4179 res = target_stopped_by_ ## REASON (); \
4180 \
4181 do_cleanups (old_chain); \
4182 \
4183 return res; \
4184}
4185
4186/* Generate thread_stopped_by_watchpoint. */
4187THREAD_STOPPED_BY (watchpoint)
4188/* Generate thread_stopped_by_sw_breakpoint. */
4189THREAD_STOPPED_BY (sw_breakpoint)
4190/* Generate thread_stopped_by_hw_breakpoint. */
4191THREAD_STOPPED_BY (hw_breakpoint)
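/* For reference, each expansion of THREAD_STOPPED_BY above is
 equivalent to a small wrapper along these lines (a documentation
 sketch of the generated code, not a separate definition):

 static int
 thread_stopped_by_watchpoint (ptid_t ptid)
 {
 struct cleanup *old_chain = save_inferior_ptid ();
 int res;

 inferior_ptid = ptid;
 res = target_stopped_by_watchpoint ();
 do_cleanups (old_chain);

 return res;
 }

 i.e., it temporarily switches inferior_ptid to PTID, queries the
 target, and restores the previous inferior_ptid via the cleanup. */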
4192
4193/* Cleanup that switches to the PTID pointed at by PTID_P. */
4194
4195static void
4196switch_to_thread_cleanup (void *ptid_p)
4197{
4198 ptid_t ptid = *(ptid_t *) ptid_p;
4199
4200 switch_to_thread (ptid);
4201}
4202
4203/* Save the thread's event and stop reason to process it later. */
4204
4205static void
4206save_waitstatus (struct thread_info *tp, struct target_waitstatus *ws)
4207{
4208 struct regcache *regcache;
4209 struct address_space *aspace;
4210
4211 if (debug_infrun)
4212 {
4213 char *statstr;
4214
4215 statstr = target_waitstatus_to_string (ws);
4216 fprintf_unfiltered (gdb_stdlog,
4217 "infrun: saving status %s for %d.%ld.%ld\n",
4218 statstr,
4219 ptid_get_pid (tp->ptid),
4220 ptid_get_lwp (tp->ptid),
4221 ptid_get_tid (tp->ptid));
4222 xfree (statstr);
4223 }
4224
4225 /* Record for later. */
4226 tp->suspend.waitstatus = *ws;
4227 tp->suspend.waitstatus_pending_p = 1;
4228
4229 regcache = get_thread_regcache (tp->ptid);
4230 aspace = get_regcache_aspace (regcache);
4231
4232 if (ws->kind == TARGET_WAITKIND_STOPPED
4233 && ws->value.sig == GDB_SIGNAL_TRAP)
4234 {
4235 CORE_ADDR pc = regcache_read_pc (regcache);
4236
4237 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4238
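 /* Classify the stop reason. Prefer what the target can report
 directly; when the target can't tell, fall back to checking
 whether a breakpoint is inserted at the stop PC, or whether
 the thread was being single-stepped. */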
4239 if (thread_stopped_by_watchpoint (tp->ptid))
4240 {
4241 tp->suspend.stop_reason
4242 = TARGET_STOPPED_BY_WATCHPOINT;
4243 }
4244 else if (target_supports_stopped_by_sw_breakpoint ()
4245 && thread_stopped_by_sw_breakpoint (tp->ptid))
4246 {
4247 tp->suspend.stop_reason
4248 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4249 }
4250 else if (target_supports_stopped_by_hw_breakpoint ()
4251 && thread_stopped_by_hw_breakpoint (tp->ptid))
4252 {
4253 tp->suspend.stop_reason
4254 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4255 }
4256 else if (!target_supports_stopped_by_hw_breakpoint ()
4257 && hardware_breakpoint_inserted_here_p (aspace,
4258 pc))
4259 {
4260 tp->suspend.stop_reason
4261 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4262 }
4263 else if (!target_supports_stopped_by_sw_breakpoint ()
4264 && software_breakpoint_inserted_here_p (aspace,
4265 pc))
4266 {
4267 tp->suspend.stop_reason
4268 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4269 }
4270 else if (!thread_has_single_step_breakpoints_set (tp)
4271 && currently_stepping (tp))
4272 {
4273 tp->suspend.stop_reason
4274 = TARGET_STOPPED_BY_SINGLE_STEP;
4275 }
4276 }
4277}
4278
4279/* Stop all threads. */
4280
4281static void
4282stop_all_threads (void)
4283{
4284 /* We may need multiple passes to discover all threads. */
4285 int pass;
4286 int iterations = 0;
4287 ptid_t entry_ptid;
4288 struct cleanup *old_chain;
4289
fbea99ea 4290 gdb_assert (target_is_non_stop_p ());
372316f1
PA
4291
4292 if (debug_infrun)
4293 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
4294
4295 entry_ptid = inferior_ptid;
4296 old_chain = make_cleanup (switch_to_thread_cleanup, &entry_ptid);
4297
4298 /* Request threads to stop, and then wait for the stops. Because
4299 threads we already know about can spawn more threads while we're
4300 trying to stop them, and we only learn about new threads when we
4301 update the thread list, do this in a loop, and keep iterating
4302 until two passes find no threads that need to be stopped. */
4303 for (pass = 0; pass < 2; pass++, iterations++)
4304 {
4305 if (debug_infrun)
4306 fprintf_unfiltered (gdb_stdlog,
4307 "infrun: stop_all_threads, pass=%d, "
4308 "iterations=%d\n", pass, iterations);
4309 while (1)
4310 {
4311 ptid_t event_ptid;
4312 struct target_waitstatus ws;
4313 int need_wait = 0;
4314 struct thread_info *t;
4315
4316 update_thread_list ();
4317
4318 /* Go through all threads looking for threads that we need
4319 to tell the target to stop. */
4320 ALL_NON_EXITED_THREADS (t)
4321 {
4322 if (t->executing)
4323 {
4324 /* If already stopping, don't request a stop again.
4325 We just haven't seen the notification yet. */
4326 if (!t->stop_requested)
4327 {
4328 if (debug_infrun)
4329 fprintf_unfiltered (gdb_stdlog,
4330 "infrun: %s executing, "
4331 "need stop\n",
4332 target_pid_to_str (t->ptid));
4333 target_stop (t->ptid);
4334 t->stop_requested = 1;
4335 }
4336 else
4337 {
4338 if (debug_infrun)
4339 fprintf_unfiltered (gdb_stdlog,
4340 "infrun: %s executing, "
4341 "already stopping\n",
4342 target_pid_to_str (t->ptid));
4343 }
4344
4345 if (t->stop_requested)
4346 need_wait = 1;
4347 }
4348 else
4349 {
4350 if (debug_infrun)
4351 fprintf_unfiltered (gdb_stdlog,
4352 "infrun: %s not executing\n",
4353 target_pid_to_str (t->ptid));
4354
4355 /* The thread may not be executing, but may still be
4356 resumed, with a pending status to process. */
4357 t->resumed = 0;
4358 }
4359 }
4360
4361 if (!need_wait)
4362 break;
4363
4364 /* If we find new threads on the second iteration, restart
4365 over. We want to see two iterations in a row with all
4366 threads stopped. */
4367 if (pass > 0)
4368 pass = -1;
4369
4370 event_ptid = wait_one (&ws);
4371 if (ws.kind == TARGET_WAITKIND_NO_RESUMED)
4372 {
4373 /* All resumed threads exited. */
4374 }
4375 else if (ws.kind == TARGET_WAITKIND_EXITED
4376 || ws.kind == TARGET_WAITKIND_SIGNALLED)
4377 {
4378 if (debug_infrun)
4379 {
4380 ptid_t ptid = pid_to_ptid (ws.value.integer);
4381
4382 fprintf_unfiltered (gdb_stdlog,
4383 "infrun: %s exited while "
4384 "stopping threads\n",
4385 target_pid_to_str (ptid));
4386 }
4387 }
4388 else
4389 {
4390 t = find_thread_ptid (event_ptid);
4391 if (t == NULL)
4392 t = add_thread (event_ptid);
4393
4394 t->stop_requested = 0;
4395 t->executing = 0;
4396 t->resumed = 0;
4397 t->control.may_range_step = 0;
4398
4399 if (ws.kind == TARGET_WAITKIND_STOPPED
4400 && ws.value.sig == GDB_SIGNAL_0)
4401 {
4402 /* We caught the event that we intended to catch, so
4403 there's no event pending. */
4404 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4405 t->suspend.waitstatus_pending_p = 0;
4406
4407 if (displaced_step_fixup (t->ptid, GDB_SIGNAL_0) < 0)
4408 {
4409 /* Add it back to the step-over queue. */
4410 if (debug_infrun)
4411 {
4412 fprintf_unfiltered (gdb_stdlog,
4413 "infrun: displaced-step of %s "
4414 "canceled: adding back to the "
4415 "step-over queue\n",
4416 target_pid_to_str (t->ptid));
4417 }
4418 t->control.trap_expected = 0;
4419 thread_step_over_chain_enqueue (t);
4420 }
4421 }
4422 else
4423 {
4424 enum gdb_signal sig;
4425 struct regcache *regcache;
4426 struct address_space *aspace;
4427
4428 if (debug_infrun)
4429 {
4430 char *statstr;
4431
4432 statstr = target_waitstatus_to_string (&ws);
4433 fprintf_unfiltered (gdb_stdlog,
4434 "infrun: target_wait %s, saving "
4435 "status for %d.%ld.%ld\n",
4436 statstr,
4437 ptid_get_pid (t->ptid),
4438 ptid_get_lwp (t->ptid),
4439 ptid_get_tid (t->ptid));
4440 xfree (statstr);
4441 }
4442
4443 /* Record for later. */
4444 save_waitstatus (t, &ws);
4445
4446 sig = (ws.kind == TARGET_WAITKIND_STOPPED
4447 ? ws.value.sig : GDB_SIGNAL_0);
4448
4449 if (displaced_step_fixup (t->ptid, sig) < 0)
4450 {
4451 /* Add it back to the step-over queue. */
4452 t->control.trap_expected = 0;
4453 thread_step_over_chain_enqueue (t);
4454 }
4455
4456 regcache = get_thread_regcache (t->ptid);
4457 t->suspend.stop_pc = regcache_read_pc (regcache);
4458
4459 if (debug_infrun)
4460 {
4461 fprintf_unfiltered (gdb_stdlog,
4462 "infrun: saved stop_pc=%s for %s "
4463 "(currently_stepping=%d)\n",
4464 paddress (target_gdbarch (),
4465 t->suspend.stop_pc),
4466 target_pid_to_str (t->ptid),
4467 currently_stepping (t));
4468 }
4469 }
4470 }
4471 }
4472 }
4473
4474 do_cleanups (old_chain);
4475
4476 if (debug_infrun)
4477 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
4478}
4479
05ba8510
PA
4480/* Given an execution control state that has been freshly filled in by
4481 an event from the inferior, figure out what it means and take
4482 appropriate action.
4483
4484 The alternatives are:
4485
22bcd14b 4486 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
4487 debugger.
4488
4489 2) keep_going and return; to wait for the next event (set
4490 ecs->event_thread->stepping_over_breakpoint to 1 to single step
4491 once). */
c906108c 4492
ec9499be 4493static void
0b6e5e10 4494handle_inferior_event_1 (struct execution_control_state *ecs)
cd0fc7c3 4495{
d6b48e9c
PA
4496 enum stop_kind stop_soon;
4497
28736962
PA
4498 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
4499 {
4500 /* We had an event in the inferior, but we are not interested in
4501 handling it at this level. The lower layers have already
4502 done what needs to be done, if anything.
4503
4504 One of the possible circumstances for this is when the
4505 inferior produces output for the console. The inferior has
4506 not stopped, and we are ignoring the event. Another possible
4507 circumstance is any event which the lower level knows will be
4508 reported multiple times without an intervening resume. */
4509 if (debug_infrun)
4510 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
4511 prepare_to_wait (ecs);
4512 return;
4513 }
4514
0e5bf2a8
PA
4515 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
4516 && target_can_async_p () && !sync_execution)
4517 {
4518 /* There were no unwaited-for children left in the target, but
4519 we're not synchronously waiting for events either. Just
4520 ignore. Otherwise, if we were running a synchronous
4521 execution command, we need to cancel it and give the user
4522 back the terminal. */
4523 if (debug_infrun)
4524 fprintf_unfiltered (gdb_stdlog,
4525 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
4526 prepare_to_wait (ecs);
4527 return;
4528 }
4529
1777feb0 4530 /* Cache the last pid/waitstatus. */
c32c64b7 4531 set_last_target_status (ecs->ptid, ecs->ws);
e02bc4cc 4532
ca005067 4533 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 4534 stop_stack_dummy = STOP_NONE;
ca005067 4535
0e5bf2a8
PA
4536 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
4537 {
4538 /* No unwaited-for children left. IOW, all resumed children
4539 have exited. */
4540 if (debug_infrun)
4541 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
4542
4543 stop_print_frame = 0;
22bcd14b 4544 stop_waiting (ecs);
0e5bf2a8
PA
4545 return;
4546 }
4547
8c90c137 4548 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 4549 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6
PA
4550 {
4551 ecs->event_thread = find_thread_ptid (ecs->ptid);
4552 /* If it's a new thread, add it to the thread database. */
4553 if (ecs->event_thread == NULL)
4554 ecs->event_thread = add_thread (ecs->ptid);
c1e36e3e
PA
4555
4556 /* Disable range stepping. If the next step request could use a
4557 range, it will end up re-enabled then. */
4558 ecs->event_thread->control.may_range_step = 0;
359f5fe6 4559 }
88ed393a
JK
4560
4561 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 4562 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
4563
4564 /* Dependent on the current PC value modified by adjust_pc_after_break. */
4565 reinit_frame_cache ();
4566
28736962
PA
4567 breakpoint_retire_moribund ();
4568
2b009048
DJ
4569 /* First, distinguish signals caused by the debugger from signals
4570 that have to do with the program's own actions. Note that
4571 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
4572 on the operating system version. Here we detect when a SIGILL or
4573 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
4574 something similar for SIGSEGV, since a SIGSEGV will be generated
4575 when we're trying to execute a breakpoint instruction on a
4576 non-executable stack. This happens for call dummy breakpoints
4577 for architectures like SPARC that place call dummies on the
4578 stack. */
2b009048 4579 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
4580 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
4581 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
4582 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 4583 {
de0a0249
UW
4584 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4585
4586 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
4587 regcache_read_pc (regcache)))
4588 {
4589 if (debug_infrun)
4590 fprintf_unfiltered (gdb_stdlog,
4591 "infrun: Treating signal as SIGTRAP\n");
a493e3e2 4592 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 4593 }
2b009048
DJ
4594 }
4595
28736962
PA
4596 /* Mark the non-executing threads accordingly. In all-stop, all
4597 threads of all processes are stopped when we get any event
e1316e60 4598 reported. In non-stop mode, only the event thread stops. */
372316f1
PA
4599 {
4600 ptid_t mark_ptid;
4601
fbea99ea 4602 if (!target_is_non_stop_p ())
372316f1
PA
4603 mark_ptid = minus_one_ptid;
4604 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
4605 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
4606 {
4607 /* If we're handling a process exit in non-stop mode, even
4608 though threads haven't been deleted yet, one would think
4609 that there is nothing to do, as threads of the dead process
4610 will be soon deleted, and threads of any other process were
4611 left running. However, on some targets, threads survive a
4612 process exit event. E.g., for the "checkpoint" command,
4613 when the current checkpoint/fork exits, linux-fork.c
4614 automatically switches to another fork from within
4615 target_mourn_inferior, by associating the same
4616 inferior/thread to another fork. We haven't mourned yet at
4617 this point, but we must mark any threads left in the
4618 process as not-executing so that finish_thread_state marks
4619 them stopped (in the user's perspective) if/when we present
4620 the stop to the user. */
4621 mark_ptid = pid_to_ptid (ptid_get_pid (ecs->ptid));
4622 }
4623 else
4624 mark_ptid = ecs->ptid;
4625
4626 set_executing (mark_ptid, 0);
4627
4628 /* Likewise the resumed flag. */
4629 set_resumed (mark_ptid, 0);
4630 }
8c90c137 4631
488f131b
JB
4632 switch (ecs->ws.kind)
4633 {
4634 case TARGET_WAITKIND_LOADED:
527159b7 4635 if (debug_infrun)
8a9de0e4 4636 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
5c09a2c5
PA
4637 if (!ptid_equal (ecs->ptid, inferior_ptid))
4638 context_switch (ecs->ptid);
b0f4b84b
DJ
4639 /* Ignore gracefully during startup of the inferior, as it might
4640 be the shell which has just loaded some objects; otherwise,
4641 add the symbols for the newly loaded objects. Also ignore at
4642 the beginning of an attach or remote session; we will query
4643 the full list of libraries once the connection is
4644 established. */
4f5d7f63
PA
4645
4646 stop_soon = get_inferior_stop_soon (ecs->ptid);
c0236d92 4647 if (stop_soon == NO_STOP_QUIETLY)
488f131b 4648 {
edcc5120
TT
4649 struct regcache *regcache;
4650
edcc5120
TT
4651 regcache = get_thread_regcache (ecs->ptid);
4652
4653 handle_solib_event ();
4654
4655 ecs->event_thread->control.stop_bpstat
4656 = bpstat_stop_status (get_regcache_aspace (regcache),
4657 stop_pc, ecs->ptid, &ecs->ws);
ab04a2af 4658
ce12b012 4659 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
4660 {
4661 /* A catchpoint triggered. */
94c57d6a
PA
4662 process_event_stop_test (ecs);
4663 return;
edcc5120 4664 }
488f131b 4665
b0f4b84b
DJ
4666 /* If requested, stop when the dynamic linker notifies
4667 gdb of events. This allows the user to get control
4668 and place breakpoints in initializer routines for
4669 dynamically loaded objects (among other things). */
a493e3e2 4670 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
4671 if (stop_on_solib_events)
4672 {
55409f9d
DJ
4673 /* Make sure we print "Stopped due to solib-event" in
4674 normal_stop. */
4675 stop_print_frame = 1;
4676
22bcd14b 4677 stop_waiting (ecs);
b0f4b84b
DJ
4678 return;
4679 }
488f131b 4680 }
b0f4b84b
DJ
4681
4682 /* If we are skipping through a shell, or through shared library
4683 loading that we aren't interested in, resume the program. If
5c09a2c5 4684 we're running the program normally, also resume. */
b0f4b84b
DJ
4685 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
4686 {
74960c60
VP
4687 /* Loading of shared libraries might have changed breakpoint
4688 addresses. Make sure new breakpoints are inserted. */
a25a5a45 4689 if (stop_soon == NO_STOP_QUIETLY)
74960c60 4690 insert_breakpoints ();
64ce06e4 4691 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
4692 prepare_to_wait (ecs);
4693 return;
4694 }
4695
5c09a2c5
PA
4696 /* But stop if we're attaching or setting up a remote
4697 connection. */
4698 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4699 || stop_soon == STOP_QUIETLY_REMOTE)
4700 {
4701 if (debug_infrun)
4702 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
22bcd14b 4703 stop_waiting (ecs);
5c09a2c5
PA
4704 return;
4705 }
4706
4707 internal_error (__FILE__, __LINE__,
4708 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 4709
488f131b 4710 case TARGET_WAITKIND_SPURIOUS:
527159b7 4711 if (debug_infrun)
8a9de0e4 4712 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
64776a0b 4713 if (!ptid_equal (ecs->ptid, inferior_ptid))
8b3ee56d 4714 context_switch (ecs->ptid);
64ce06e4 4715 resume (GDB_SIGNAL_0);
488f131b
JB
4716 prepare_to_wait (ecs);
4717 return;
c5aa993b 4718
488f131b 4719 case TARGET_WAITKIND_EXITED:
940c3c06 4720 case TARGET_WAITKIND_SIGNALLED:
527159b7 4721 if (debug_infrun)
940c3c06
PA
4722 {
4723 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
4724 fprintf_unfiltered (gdb_stdlog,
4725 "infrun: TARGET_WAITKIND_EXITED\n");
4726 else
4727 fprintf_unfiltered (gdb_stdlog,
4728 "infrun: TARGET_WAITKIND_SIGNALLED\n");
4729 }
4730
fb66883a 4731 inferior_ptid = ecs->ptid;
c9657e70 4732 set_current_inferior (find_inferior_ptid (ecs->ptid));
6c95b8df
PA
4733 set_current_program_space (current_inferior ()->pspace);
4734 handle_vfork_child_exec_or_exit (0);
1777feb0 4735 target_terminal_ours (); /* Must do this before mourn anyway. */
488f131b 4736
0c557179
SDJ
4737 /* Clear any previous state of convenience variables. */
4738 clear_exit_convenience_vars ();
4739
940c3c06
PA
4740 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
4741 {
4742 /* Record the exit code in the convenience variable $_exitcode, so
4743 that the user can inspect this again later. */
4744 set_internalvar_integer (lookup_internalvar ("_exitcode"),
4745 (LONGEST) ecs->ws.value.integer);
4746
4747 /* Also record this in the inferior itself. */
4748 current_inferior ()->has_exit_code = 1;
4749 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 4750
98eb56a4
PA
4751 /* Support the --return-child-result option. */
4752 return_child_result_value = ecs->ws.value.integer;
4753
fd664c91 4754 observer_notify_exited (ecs->ws.value.integer);
940c3c06
PA
4755 }
4756 else
0c557179
SDJ
4757 {
4758 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4759 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4760
4761 if (gdbarch_gdb_signal_to_target_p (gdbarch))
4762 {
4763 /* Set the value of the internal variable $_exitsignal,
4764 which holds the signal uncaught by the inferior. */
4765 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
4766 gdbarch_gdb_signal_to_target (gdbarch,
4767 ecs->ws.value.sig));
4768 }
4769 else
4770 {
4771 /* We don't have access to the target's method used for
4772 converting between signal numbers (GDB's internal
4773 representation <-> target's representation).
4774 Therefore, we cannot do a good job at displaying this
4775 information to the user. It's better to just warn
4776 her about it (if infrun debugging is enabled), and
4777 give up. */
4778 if (debug_infrun)
4779 fprintf_filtered (gdb_stdlog, _("\
4780Cannot fill $_exitsignal with the correct signal number.\n"));
4781 }
4782
fd664c91 4783 observer_notify_signal_exited (ecs->ws.value.sig);
0c557179 4784 }
8cf64490 4785
488f131b
JB
4786 gdb_flush (gdb_stdout);
4787 target_mourn_inferior ();
488f131b 4788 stop_print_frame = 0;
22bcd14b 4789 stop_waiting (ecs);
488f131b 4790 return;
c5aa993b 4791
488f131b 4792 /* The following are the only cases in which we keep going;
1777feb0 4793 the above cases end in a continue or goto. */
488f131b 4794 case TARGET_WAITKIND_FORKED:
deb3b17b 4795 case TARGET_WAITKIND_VFORKED:
527159b7 4796 if (debug_infrun)
fed708ed
PA
4797 {
4798 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
4799 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
4800 else
4801 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
4802 }
c906108c 4803
e2d96639
YQ
4804 /* Check whether the inferior is displaced stepping. */
4805 {
4806 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4807 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4808 struct displaced_step_inferior_state *displaced
4809 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
4810
4811 /* If displaced stepping is in use, and thread ecs->ptid is
4812 the one that was displaced stepping. */
4813 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
4814 {
4815 struct inferior *parent_inf
c9657e70 4816 = find_inferior_ptid (ecs->ptid);
e2d96639
YQ
4817 struct regcache *child_regcache;
4818 CORE_ADDR parent_pc;
4819
4820 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
4821 indicating that the displaced stepping of the syscall instruction
4822 has been done. Perform cleanup for the parent process here. Note
4823 that this operation also cleans up the child process for vfork,
4824 because their pages are shared. */
a493e3e2 4825 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
c2829269
PA
4826 /* Start a new step-over in another thread if there's one
4827 that needs it. */
4828 start_step_over ();
e2d96639
YQ
4829
4830 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
4831 {
4832 /* Restore scratch pad for child process. */
4833 displaced_step_restore (displaced, ecs->ws.value.related_pid);
4834 }
4835
4836 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
4837 the child's PC is also within the scratchpad. Set the child's PC
4838 to the parent's PC value, which has already been fixed up.
4839 FIXME: we use the parent's aspace here, although we're touching
4840 the child, because the child hasn't been added to the inferior
4841 list yet at this point. */
4842
4843 child_regcache
4844 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
4845 gdbarch,
4846 parent_inf->aspace);
4847 /* Read PC value of parent process. */
4848 parent_pc = regcache_read_pc (regcache);
4849
4850 if (debug_displaced)
4851 fprintf_unfiltered (gdb_stdlog,
4852 "displaced: write child pc from %s to %s\n",
4853 paddress (gdbarch,
4854 regcache_read_pc (child_regcache)),
4855 paddress (gdbarch, parent_pc));
4856
4857 regcache_write_pc (child_regcache, parent_pc);
4858 }
4859 }
4860
5a2901d9 4861 if (!ptid_equal (ecs->ptid, inferior_ptid))
c3a01a22 4862 context_switch (ecs->ptid);
5a2901d9 4863
b242c3c2
PA
4864 /* Immediately detach breakpoints from the child before there's
4865 any chance of letting the user delete breakpoints from the
4866 breakpoint lists. If we don't do this early, it's easy to
4867 leave left-over traps in the child, viz: "break foo; catch
4868 fork; c; <fork>; del; c; <child calls foo>". We only follow
4869 the fork on the last `continue', and by that time the
4870 breakpoint at "foo" is long gone from the breakpoint table.
4871 If we vforked, then we don't need to unpatch here, since both
4872 parent and child are sharing the same memory pages; we'll
4873 need to unpatch at follow/detach time instead to be certain
4874 that new breakpoints added between catchpoint hit time and
4875 vfork follow are detached. */
4876 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
4877 {
b242c3c2
PA
4878 /* This won't actually modify the breakpoint list, but will
4879 physically remove the breakpoints from the child. */
d80ee84f 4880 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
4881 }
4882
34b7e8a6 4883 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 4884
e58b0e63
PA
4885 /* In case the event is caught by a catchpoint, remember that
4886 the event is to be followed at the next resume of the thread,
4887 and not immediately. */
4888 ecs->event_thread->pending_follow = ecs->ws;
4889
fb14de7b 4890 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
675bf4cb 4891
16c381f0 4892 ecs->event_thread->control.stop_bpstat
6c95b8df 4893 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
09ac7c10 4894 stop_pc, ecs->ptid, &ecs->ws);
675bf4cb 4895
ce12b012
PA
4896 /* If no catchpoint triggered for this, then keep going. Note
4897 that we're interested in knowing whether the bpstat actually causes a
4898 stop, not just if it may explain the signal. Software
4899 watchpoints, for example, always appear in the bpstat. */
4900 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 4901 {
6c95b8df
PA
4902 ptid_t parent;
4903 ptid_t child;
e58b0e63 4904 int should_resume;
3e43a32a
MS
4905 int follow_child
4906 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 4907
a493e3e2 4908 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63
PA
4909
4910 should_resume = follow_fork ();
4911
6c95b8df
PA
4912 parent = ecs->ptid;
4913 child = ecs->ws.value.related_pid;
4914
4915 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
4916 if (!detach_fork && (non_stop
4917 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
4918 {
4919 if (follow_child)
4920 switch_to_thread (parent);
4921 else
4922 switch_to_thread (child);
4923
4924 ecs->event_thread = inferior_thread ();
4925 ecs->ptid = inferior_ptid;
4926 keep_going (ecs);
4927 }
4928
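	  /* Switch to whichever fork branch we are following; the
	     follow_fork call above decided (via should_resume) whether
	     it should keep running or stop.  */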
4929 if (follow_child)
4930 switch_to_thread (child);
4931 else
4932 switch_to_thread (parent);
4933
e58b0e63
PA
4934 ecs->event_thread = inferior_thread ();
4935 ecs->ptid = inferior_ptid;
4936
4937 if (should_resume)
4938 keep_going (ecs);
4939 else
22bcd14b 4940 stop_waiting (ecs);
04e68871
DJ
4941 return;
4942 }
94c57d6a
PA
4943 process_event_stop_test (ecs);
4944 return;
488f131b 4945
6c95b8df
PA
4946 case TARGET_WAITKIND_VFORK_DONE:
4947 /* Done with the shared memory region. Re-insert breakpoints in
4948 the parent, and keep going. */
4949
4950 if (debug_infrun)
3e43a32a
MS
4951 fprintf_unfiltered (gdb_stdlog,
4952 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
6c95b8df
PA
4953
4954 if (!ptid_equal (ecs->ptid, inferior_ptid))
4955 context_switch (ecs->ptid);
4956
4957 current_inferior ()->waiting_for_vfork_done = 0;
56710373 4958 current_inferior ()->pspace->breakpoints_not_allowed = 0;
6c95b8df
PA
4959 /* This also takes care of reinserting breakpoints in the
4960 previously locked inferior. */
4961 keep_going (ecs);
4962 return;
4963
488f131b 4964 case TARGET_WAITKIND_EXECD:
527159b7 4965 if (debug_infrun)
fc5261f2 4966 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
488f131b 4967
5a2901d9 4968 if (!ptid_equal (ecs->ptid, inferior_ptid))
c3a01a22 4969 context_switch (ecs->ptid);
5a2901d9 4970
fb14de7b 4971 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
795e548f 4972
6c95b8df
PA
4973 /* Do whatever is necessary to the parent branch of the vfork. */
4974 handle_vfork_child_exec_or_exit (1);
4975
795e548f
PA
4976 /* This causes the eventpoints and symbol table to be reset.
4977 Must do this now, before trying to determine whether to
4978 stop. */
71b43ef8 4979 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 4980
16c381f0 4981 ecs->event_thread->control.stop_bpstat
6c95b8df 4982 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
09ac7c10 4983 stop_pc, ecs->ptid, &ecs->ws);
795e548f 4984
71b43ef8
PA
4985 /* Note that this may be referenced from inside
4986 bpstat_stop_status above, through inferior_has_execd. */
4987 xfree (ecs->ws.value.execd_pathname);
4988 ecs->ws.value.execd_pathname = NULL;
4989
04e68871 4990 /* If no catchpoint triggered for this, then keep going. */
ce12b012 4991 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 4992 {
a493e3e2 4993 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
4994 keep_going (ecs);
4995 return;
4996 }
94c57d6a
PA
4997 process_event_stop_test (ecs);
4998 return;
488f131b 4999
b4dc5ffa
MK
5000 /* Be careful not to try to gather much state about a thread
5001 that's in a syscall. It's frequently a losing proposition. */
488f131b 5002 case TARGET_WAITKIND_SYSCALL_ENTRY:
527159b7 5003 if (debug_infrun)
3e43a32a
MS
5004 fprintf_unfiltered (gdb_stdlog,
5005 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
1777feb0 5006 /* Get the current syscall number. */
94c57d6a
PA
5007 if (handle_syscall_event (ecs) == 0)
5008 process_event_stop_test (ecs);
5009 return;
c906108c 5010
488f131b
JB
5011 /* Before examining the threads further, step this thread to
5012 get it entirely out of the syscall. (We get notice of the
5013 event when the thread is just on the verge of exiting a
5014 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5015 into user code.) */
488f131b 5016 case TARGET_WAITKIND_SYSCALL_RETURN:
527159b7 5017 if (debug_infrun)
3e43a32a
MS
5018 fprintf_unfiltered (gdb_stdlog,
5019 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
94c57d6a
PA
5020 if (handle_syscall_event (ecs) == 0)
5021 process_event_stop_test (ecs);
5022 return;
c906108c 5023
488f131b 5024 case TARGET_WAITKIND_STOPPED:
527159b7 5025 if (debug_infrun)
8a9de0e4 5026 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
16c381f0 5027 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4f5d7f63
PA
5028 handle_signal_stop (ecs);
5029 return;
c906108c 5030
b2175913 5031 case TARGET_WAITKIND_NO_HISTORY:
4b4e080e
PA
5032 if (debug_infrun)
5033 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
b2175913 5034 /* Reverse execution: target ran out of history info. */
eab402df 5035
34b7e8a6 5036 delete_just_stopped_threads_single_step_breakpoints ();
fb14de7b 5037 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
fd664c91 5038 observer_notify_no_history ();
22bcd14b 5039 stop_waiting (ecs);
b2175913 5040 return;
488f131b 5041 }
4f5d7f63
PA
5042}
5043
0b6e5e10
JB
5044/* A wrapper around handle_inferior_event_1, which also makes sure
5045 that all temporary struct value objects that were created during
5046 the handling of the event get deleted at the end. */
5047
5048static void
5049handle_inferior_event (struct execution_control_state *ecs)
5050{
5051 struct value *mark = value_mark ();
5052
5053 handle_inferior_event_1 (ecs);
5054 /* Purge all temporary values created during the event handling,
5055 as it could be a long time before we return to the command level
5056 where such values would otherwise be purged. */
5057 value_free_to_mark (mark);
5058}
5059
372316f1
PA
5060/* Restart threads back to what they were trying to do when we
5061 paused them for an in-line step-over. The EVENT_THREAD thread is
5062 ignored. */
4d9d9d04
PA
5063
5064static void
372316f1
PA
5065restart_threads (struct thread_info *event_thread)
5066{
5067 struct thread_info *tp;
5068 struct thread_info *step_over = NULL;
5069
5070 /* In case the instruction just stepped spawned a new thread. */
5071 update_thread_list ();
5072
5073 ALL_NON_EXITED_THREADS (tp)
5074 {
5075 if (tp == event_thread)
5076 {
5077 if (debug_infrun)
5078 fprintf_unfiltered (gdb_stdlog,
5079 "infrun: restart threads: "
5080 "[%s] is event thread\n",
5081 target_pid_to_str (tp->ptid));
5082 continue;
5083 }
5084
5085 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5086 {
5087 if (debug_infrun)
5088 fprintf_unfiltered (gdb_stdlog,
5089 "infrun: restart threads: "
5090 "[%s] not meant to be running\n",
5091 target_pid_to_str (tp->ptid));
5092 continue;
5093 }
5094
5095 if (tp->resumed)
5096 {
5097 if (debug_infrun)
5098 fprintf_unfiltered (gdb_stdlog,
5099 "infrun: restart threads: [%s] resumed\n",
5100 target_pid_to_str (tp->ptid));
5101 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5102 continue;
5103 }
5104
5105 if (thread_is_in_step_over_chain (tp))
5106 {
5107 if (debug_infrun)
5108 fprintf_unfiltered (gdb_stdlog,
5109 "infrun: restart threads: "
5110 "[%s] needs step-over\n",
5111 target_pid_to_str (tp->ptid));
5112 gdb_assert (!tp->resumed);
5113 continue;
5114 }
5115
5116
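      /* A thread with an event already waiting to be reported doesn't
	 need to be resumed; just mark it so do_target_wait picks the
	 pending event up.  */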
5117 if (tp->suspend.waitstatus_pending_p)
5118 {
5119 if (debug_infrun)
5120 fprintf_unfiltered (gdb_stdlog,
5121 "infrun: restart threads: "
5122 "[%s] has pending status\n",
5123 target_pid_to_str (tp->ptid));
5124 tp->resumed = 1;
5125 continue;
5126 }
5127
5128 /* If some thread needs to start a step-over at this point, it
5129 should still be in the step-over queue, and thus skipped
5130 above. */
5131 if (thread_still_needs_step_over (tp))
5132 {
5133 internal_error (__FILE__, __LINE__,
5134 "thread [%s] needs a step-over, but not in "
5135 "step-over queue\n",
5136 target_pid_to_str (tp->ptid));
5137 }
5138
5139 if (currently_stepping (tp))
5140 {
5141 if (debug_infrun)
5142 fprintf_unfiltered (gdb_stdlog,
5143 "infrun: restart threads: [%s] was stepping\n",
5144 target_pid_to_str (tp->ptid));
5145 keep_going_stepped_thread (tp);
5146 }
5147 else
5148 {
5149 struct execution_control_state ecss;
5150 struct execution_control_state *ecs = &ecss;
5151
5152 if (debug_infrun)
5153 fprintf_unfiltered (gdb_stdlog,
5154 "infrun: restart threads: [%s] continuing\n",
5155 target_pid_to_str (tp->ptid));
5156 reset_ecs (ecs, tp);
5157 switch_to_thread (tp->ptid);
5158 keep_going_pass_signal (ecs);
5159 }
5160 }
5161}
5162
5163/* Callback for iterate_over_threads. Find a resumed thread that has
5164 a pending waitstatus. */
5165
5166static int
5167resumed_thread_with_pending_status (struct thread_info *tp,
5168 void *arg)
5169{
5170 return (tp->resumed
5171 && tp->suspend.waitstatus_pending_p);
5172}
5173
5174/* Called when we get an event that may finish an in-line or
5175 out-of-line (displaced stepping) step-over started previously.
5176 Return true if the event is processed and we should go back to the
5177 event loop; false if the caller should continue processing the
5178 event. */
5179
5180static int
4d9d9d04
PA
5181finish_step_over (struct execution_control_state *ecs)
5182{
372316f1
PA
5183 int had_step_over_info;
5184
4d9d9d04
PA
5185 displaced_step_fixup (ecs->ptid,
5186 ecs->event_thread->suspend.stop_signal);
5187
372316f1
PA
5188 had_step_over_info = step_over_info_valid_p ();
5189
5190 if (had_step_over_info)
4d9d9d04
PA
5191 {
5192 /* If we're stepping over a breakpoint with all threads locked,
5193 then only the thread that was stepped should be reporting
5194 back an event. */
5195 gdb_assert (ecs->event_thread->control.trap_expected);
5196
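      /* If the stepped thread reports the expected SIGTRAP, the
	 step-over has completed, so the recorded step-over info is no
	 longer needed.  */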
5197 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5198 clear_step_over_info ();
5199 }
5200
fbea99ea 5201 if (!target_is_non_stop_p ())
372316f1 5202 return 0;
4d9d9d04
PA
5203
5204 /* Start a new step-over in another thread if there's one that
5205 needs it. */
5206 start_step_over ();
372316f1
PA
5207
5208 /* If we were stepping over a breakpoint before, and haven't started
5209 a new in-line step-over sequence, then restart all other threads
5210 (except the event thread). We can't do this in all-stop, as then
5211 e.g., we wouldn't be able to issue any other remote packet until
5212 these other threads stop. */
5213 if (had_step_over_info && !step_over_info_valid_p ())
5214 {
5215 struct thread_info *pending;
5216
5217 /* If we only have threads with pending statuses, the restart
5218 below won't restart any thread and so nothing re-inserts the
5219 breakpoint we just stepped over. But we need it inserted
5220 when we later process the pending events, otherwise if
5221 another thread has a pending event for this breakpoint too,
5222 we'd discard its event (because the breakpoint that
5223 originally caused the event was no longer inserted). */
5224 context_switch (ecs->ptid);
5225 insert_breakpoints ();
5226
5227 restart_threads (ecs->event_thread);
5228
5229 /* If we have events pending, go through handle_inferior_event
5230 again, picking up a pending event at random. This avoids
5231 thread starvation. */
5232
5233 /* But not if we just stepped over a watchpoint in order to let
5234 the instruction execute so we can evaluate its expression.
5235 The set of watchpoints that triggered is recorded in the
5236 breakpoint objects themselves (see bp->watchpoint_triggered).
5237 If we processed another event first, that other event could
5238 clobber this info. */
5239 if (ecs->event_thread->stepping_over_watchpoint)
5240 return 0;
5241
5242 pending = iterate_over_threads (resumed_thread_with_pending_status,
5243 NULL);
5244 if (pending != NULL)
5245 {
5246 struct thread_info *tp = ecs->event_thread;
5247 struct regcache *regcache;
5248
5249 if (debug_infrun)
5250 {
5251 fprintf_unfiltered (gdb_stdlog,
5252 "infrun: found resumed threads with "
5253 "pending events, saving status\n");
5254 }
5255
5256 gdb_assert (pending != tp);
5257
5258 /* Record the event thread's event for later. */
5259 save_waitstatus (tp, &ecs->ws);
5260 /* This was cleared early, by handle_inferior_event. Set it
5261 so this pending event is considered by
5262 do_target_wait. */
5263 tp->resumed = 1;
5264
5265 gdb_assert (!tp->executing);
5266
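	  /* Record where the thread stopped, so the saved status can
	     be reported correctly later.  */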
5267 regcache = get_thread_regcache (tp->ptid);
5268 tp->suspend.stop_pc = regcache_read_pc (regcache);
5269
5270 if (debug_infrun)
5271 {
5272 fprintf_unfiltered (gdb_stdlog,
5273 "infrun: saved stop_pc=%s for %s "
5274 "(currently_stepping=%d)\n",
5275 paddress (target_gdbarch (),
5276 tp->suspend.stop_pc),
5277 target_pid_to_str (tp->ptid),
5278 currently_stepping (tp));
5279 }
5280
5281 /* This in-line step-over finished; clear this so we won't
5282 start a new one. This is what handle_signal_stop would
5283 do, if we returned false. */
5284 tp->stepping_over_breakpoint = 0;
5285
5286 /* Wake up the event loop again. */
5287 mark_async_event_handler (infrun_async_inferior_event_token);
5288
5289 prepare_to_wait (ecs);
5290 return 1;
5291 }
5292 }
5293
5294 return 0;
4d9d9d04
PA
5295}
5296
4f5d7f63
PA
5297/* Come here when the program has stopped with a signal. */
5298
5299static void
5300handle_signal_stop (struct execution_control_state *ecs)
5301{
5302 struct frame_info *frame;
5303 struct gdbarch *gdbarch;
5304 int stopped_by_watchpoint;
5305 enum stop_kind stop_soon;
5306 int random_signal;
c906108c 5307
f0407826
DE
5308 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5309
5310 /* Do we need to clean up the state of a thread that has
5311 completed a displaced single-step? (Doing so usually affects
5312 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5313 if (finish_step_over (ecs))
5314 return;
f0407826
DE
5315
5316 /* If we either finished a single-step or hit a breakpoint, but
5317 the user wanted this thread to be stopped, pretend we got a
5318 SIG0 (generic unsignaled stop). */
5319 if (ecs->event_thread->stop_requested
5320 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5321 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5322
515630c5 5323 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
488f131b 5324
527159b7 5325 if (debug_infrun)
237fc4c9 5326 {
5af949e3
UW
5327 struct regcache *regcache = get_thread_regcache (ecs->ptid);
5328 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7f82dfc7
JK
5329 struct cleanup *old_chain = save_inferior_ptid ();
5330
5331 inferior_ptid = ecs->ptid;
5af949e3
UW
5332
5333 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
5334 paddress (gdbarch, stop_pc));
d92524f1 5335 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5336 {
5337 CORE_ADDR addr;
abbb1732 5338
237fc4c9
PA
5339 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5340
5341 if (target_stopped_data_address (&current_target, &addr))
5342 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
5343 "infrun: stopped data address = %s\n",
5344 paddress (gdbarch, addr));
237fc4c9
PA
5345 else
5346 fprintf_unfiltered (gdb_stdlog,
5347 "infrun: (no data address available)\n");
5348 }
7f82dfc7
JK
5349
5350 do_cleanups (old_chain);
237fc4c9 5351 }
527159b7 5352
36fa8042
PA
5353 /* This originates from start_remote(), start_inferior() and
5354 shared library hook functions. */
5355 stop_soon = get_inferior_stop_soon (ecs->ptid);
5356 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5357 {
5358 if (!ptid_equal (ecs->ptid, inferior_ptid))
5359 context_switch (ecs->ptid);
5360 if (debug_infrun)
5361 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5362 stop_print_frame = 1;
22bcd14b 5363 stop_waiting (ecs);
36fa8042
PA
5364 return;
5365 }
5366
5367 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5368 && stop_after_trap)
5369 {
5370 if (!ptid_equal (ecs->ptid, inferior_ptid))
5371 context_switch (ecs->ptid);
5372 if (debug_infrun)
5373 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5374 stop_print_frame = 0;
22bcd14b 5375 stop_waiting (ecs);
36fa8042
PA
5376 return;
5377 }
5378
5379 /* This originates from attach_command(). We need to overwrite
5380 the stop_signal here, because some kernels don't ignore a
5381 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5382 See more comments in inferior.h. On the other hand, if we
5383 get a non-SIGSTOP, report it to the user - assume the backend
5384 will handle the SIGSTOP if it should show up later.
5385
5386 Also consider that the attach is complete when we see a
5387 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5388 target extended-remote report it instead of a SIGSTOP
5389 (e.g. gdbserver). We already rely on SIGTRAP being our
5390 signal, so this is no exception.
5391
5392 Also consider that the attach is complete when we see a
5393 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5394 the target to stop all threads of the inferior, in case the
5395 low level attach operation doesn't stop them implicitly. If
5396 they weren't stopped implicitly, then the stub will report a
5397 GDB_SIGNAL_0, meaning: stopped for no particular reason
5398 other than GDB's request. */
5399 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5400 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5401 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5402 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5403 {
5404 stop_print_frame = 1;
22bcd14b 5405 stop_waiting (ecs);
36fa8042
PA
5406 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5407 return;
5408 }
5409
488f131b 5410 /* See if something interesting happened to the non-current thread. If
b40c7d58
DJ
5411 so, then switch to that thread. */
5412 if (!ptid_equal (ecs->ptid, inferior_ptid))
488f131b 5413 {
527159b7 5414 if (debug_infrun)
8a9de0e4 5415 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
527159b7 5416
0d1e5fa7 5417 context_switch (ecs->ptid);
c5aa993b 5418
9a4105ab
AC
5419 if (deprecated_context_hook)
5420 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
488f131b 5421 }
c906108c 5422
568d6575
UW
5423 /* At this point, get hold of the now-current thread's frame. */
5424 frame = get_current_frame ();
5425 gdbarch = get_frame_arch (frame);
5426
2adfaa28 5427 /* Pull the single step breakpoints out of the target. */
af48d08f 5428 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5429 {
af48d08f
PA
5430 struct regcache *regcache;
5431 struct address_space *aspace;
5432 CORE_ADDR pc;
2adfaa28 5433
af48d08f
PA
5434 regcache = get_thread_regcache (ecs->ptid);
5435 aspace = get_regcache_aspace (regcache);
5436 pc = regcache_read_pc (regcache);
34b7e8a6 5437
af48d08f
PA
5438 /* However, before doing so, if this single-step breakpoint was
5439 actually for another thread, set this thread up for moving
5440 past it. */
5441 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5442 aspace, pc))
5443 {
5444 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28
PA
5445 {
5446 if (debug_infrun)
5447 {
5448 fprintf_unfiltered (gdb_stdlog,
af48d08f 5449 "infrun: [%s] hit another thread's "
34b7e8a6
PA
5450 "single-step breakpoint\n",
5451 target_pid_to_str (ecs->ptid));
2adfaa28 5452 }
af48d08f
PA
5453 ecs->hit_singlestep_breakpoint = 1;
5454 }
5455 }
5456 else
5457 {
5458 if (debug_infrun)
5459 {
5460 fprintf_unfiltered (gdb_stdlog,
5461 "infrun: [%s] hit its "
5462 "single-step breakpoint\n",
5463 target_pid_to_str (ecs->ptid));
2adfaa28
PA
5464 }
5465 }
488f131b 5466 }
af48d08f 5467 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5468
963f9c80
PA
5469 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5470 && ecs->event_thread->control.trap_expected
5471 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5472 stopped_by_watchpoint = 0;
5473 else
5474 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5475
5476 /* If necessary, step over this watchpoint. We'll be back to display
5477 it in a moment. */
5478 if (stopped_by_watchpoint
d92524f1 5479 && (target_have_steppable_watchpoint
568d6575 5480 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5481 {
488f131b
JB
5482 /* At this point, we are stopped at an instruction which has
5483 attempted to write to a piece of memory under control of
5484 a watchpoint. The instruction hasn't actually executed
5485 yet. If we were to evaluate the watchpoint expression
5486 now, we would get the old value, and therefore no change
5487 would seem to have occurred.
5488
5489 In order to make watchpoints work `right', we really need
5490 to complete the memory write, and then evaluate the
d983da9c
DJ
5491 watchpoint expression. We do this by single-stepping the
5492 target.
5493
7f89fd65 5494 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5495 it. For example, the PA can (with some kernel cooperation)
5496 single step over a watchpoint without disabling the watchpoint.
5497
5498 It is far more common to need to disable a watchpoint to step
5499 the inferior over it. If we have non-steppable watchpoints,
5500 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5501 disable all watchpoints.
5502
5503 Any breakpoint at PC must also be stepped over -- if there's
5504 one, it will have already triggered before the watchpoint
5505 triggered, and we either already reported it to the user, or
5506 it didn't cause a stop and we called keep_going. In either
5507 case, if there was a breakpoint at PC, we must be trying to
5508 step past it. */
5509 ecs->event_thread->stepping_over_watchpoint = 1;
5510 keep_going (ecs);
488f131b
JB
5511 return;
5512 }
5513
4e1c45ea 5514 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5515 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5516 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5517 ecs->event_thread->control.stop_step = 0;
488f131b 5518 stop_print_frame = 1;
488f131b 5519 stopped_by_random_signal = 0;
488f131b 5520
edb3359d
DJ
5521 /* Hide inlined functions starting here, unless we just performed stepi or
5522 nexti. After stepi and nexti, always show the innermost frame (not any
5523 inline function call sites). */
16c381f0 5524 if (ecs->event_thread->control.step_range_end != 1)
0574c78f
GB
5525 {
5526 struct address_space *aspace =
5527 get_regcache_aspace (get_thread_regcache (ecs->ptid));
5528
5529 /* skip_inline_frames is expensive, so we avoid it if we can
5530 determine that the address is one where functions cannot have
5531 been inlined. This improves performance with inferiors that
5532 load a lot of shared libraries, because the solib event
5533 breakpoint is defined as the address of a function (i.e. not
5534 inline). Note that we have to check the previous PC as well
5535 as the current one to catch cases when we have just
5536 single-stepped off a breakpoint prior to reinstating it.
5537 Note that we're assuming that the code we single-step to is
5538 not inline, but that's not definitive: there's nothing
5539 preventing the event breakpoint function from containing
5540 inlined code, and the single-step ending up there. If the
5541 user had set a breakpoint on that inlined code, the missing
5542 skip_inline_frames call would break things. Fortunately
5543 that's an extremely unlikely scenario. */
09ac7c10 5544 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
a210c238
MR
5545 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5546 && ecs->event_thread->control.trap_expected
5547 && pc_at_non_inline_function (aspace,
5548 ecs->event_thread->prev_pc,
09ac7c10 5549 &ecs->ws)))
1c5a993e
MR
5550 {
5551 skip_inline_frames (ecs->ptid);
5552
5553 /* Re-fetch current thread's frame in case that invalidated
5554 the frame cache. */
5555 frame = get_current_frame ();
5556 gdbarch = get_frame_arch (frame);
5557 }
0574c78f 5558 }
edb3359d 5559
a493e3e2 5560 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 5561 && ecs->event_thread->control.trap_expected
568d6575 5562 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 5563 && currently_stepping (ecs->event_thread))
3352ef37 5564 {
b50d7442 5565 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 5566 also on an instruction that needs to be stepped multiple
1777feb0 5567 times before it's been fully executed. E.g., architectures
3352ef37
AC
5568 with a delay slot. It needs to be stepped twice, once for
5569 the instruction and once for the delay slot. */
5570 int step_through_delay
568d6575 5571 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 5572
527159b7 5573 if (debug_infrun && step_through_delay)
8a9de0e4 5574 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
16c381f0
JK
5575 if (ecs->event_thread->control.step_range_end == 0
5576 && step_through_delay)
3352ef37
AC
5577 {
5578 /* The user issued a continue when stopped at a breakpoint.
5579 Set up for another trap and get out of here. */
4e1c45ea 5580 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5581 keep_going (ecs);
5582 return;
5583 }
5584 else if (step_through_delay)
5585 {
5586 /* The user issued a step when stopped at a breakpoint.
5587 Maybe we should stop, maybe we should not - the delay
5588 slot *might* correspond to a line of source. In any
ca67fcb8
VP
5589 case, don't decide that here, just set
5590 ecs->stepping_over_breakpoint, making sure we
5591 single-step again before breakpoints are re-inserted. */
4e1c45ea 5592 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5593 }
5594 }
5595
ab04a2af
TT
5596 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
5597 handles this event. */
5598 ecs->event_thread->control.stop_bpstat
5599 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
5600 stop_pc, ecs->ptid, &ecs->ws);
db82e815 5601
ab04a2af
TT
5602 /* The following is in case a break condition called a
5603 function. */
5604 stop_print_frame = 1;
73dd234f 5605
ab04a2af
TT
5606 /* This is where we handle "moribund" watchpoints. Unlike
5607 software breakpoint traps, hardware watchpoint traps are
5608 always distinguishable from random traps. If no high-level
5609 watchpoint is associated with the reported stop data address
5610 anymore, then the bpstat does not explain the signal ---
5611 simply make sure to ignore it if `stopped_by_watchpoint' is
5612 set. */
5613
5614 if (debug_infrun
5615 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 5616 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 5617 GDB_SIGNAL_TRAP)
ab04a2af
TT
5618 && stopped_by_watchpoint)
5619 fprintf_unfiltered (gdb_stdlog,
5620 "infrun: no user watchpoint explains "
5621 "watchpoint SIGTRAP, ignoring\n");
73dd234f 5622
bac7d97b 5623 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
5624 at one stage in the past included checks for an inferior
5625 function call's call dummy's return breakpoint. The original
5626 comment, that went with the test, read:
03cebad2 5627
ab04a2af
TT
5628 ``End of a stack dummy. Some systems (e.g. Sony news) give
5629 another signal besides SIGTRAP, so check here as well as
5630 above.''
73dd234f 5631
ab04a2af
TT
5632 If someone ever tries to get call dummies on a
5633 non-executable stack to work (where the target would stop
5634 with something like a SIGSEGV), then those tests might need
5635 to be re-instated. Given, however, that the tests were only
5636 enabled when momentary breakpoints were not being used, I
5637 suspect that it won't be the case.
488f131b 5638
ab04a2af
TT
5639 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
5640 be necessary for call dummies on a non-executable stack on
5641 SPARC. */
488f131b 5642
bac7d97b 5643 /* See if the breakpoints module can explain the signal. */
47591c29
PA
5644 random_signal
5645 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
5646 ecs->event_thread->suspend.stop_signal);
bac7d97b 5647
1cf4d951
PA
5648 /* Maybe this was a trap for a software breakpoint that has since
5649 been removed. */
5650 if (random_signal && target_stopped_by_sw_breakpoint ())
5651 {
5652 if (program_breakpoint_here_p (gdbarch, stop_pc))
5653 {
5654 struct regcache *regcache;
5655 int decr_pc;
5656
5657 /* Re-adjust PC to what the program would see if GDB was not
5658 debugging it. */
5659 regcache = get_thread_regcache (ecs->event_thread->ptid);
527a273a 5660 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
5661 if (decr_pc != 0)
5662 {
5663 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
5664
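		  /* If the full record target is in use, temporarily
		     disable recording so this GDB-internal PC adjustment
		     does not end up in the execution log.  */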
5665 if (record_full_is_used ())
5666 record_full_gdb_operation_disable_set ();
5667
5668 regcache_write_pc (regcache, stop_pc + decr_pc);
5669
5670 do_cleanups (old_cleanups);
5671 }
5672 }
5673 else
5674 {
5675 /* A delayed software breakpoint event. Ignore the trap. */
5676 if (debug_infrun)
5677 fprintf_unfiltered (gdb_stdlog,
5678 "infrun: delayed software breakpoint "
5679 "trap, ignoring\n");
5680 random_signal = 0;
5681 }
5682 }
5683
5684 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
5685 has since been removed. */
5686 if (random_signal && target_stopped_by_hw_breakpoint ())
5687 {
5688 /* A delayed hardware breakpoint event. Ignore the trap. */
5689 if (debug_infrun)
5690 fprintf_unfiltered (gdb_stdlog,
5691 "infrun: delayed hardware breakpoint/watchpoint "
5692 "trap, ignoring\n");
5693 random_signal = 0;
5694 }
5695
bac7d97b
PA
5696 /* If not, perhaps stepping/nexting can. */
5697 if (random_signal)
5698 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5699 && currently_stepping (ecs->event_thread));
ab04a2af 5700
2adfaa28
PA
5701 /* Perhaps the thread hit a single-step breakpoint of _another_
5702 thread. Single-step breakpoints are transparent to the
5703 breakpoints module. */
5704 if (random_signal)
5705 random_signal = !ecs->hit_singlestep_breakpoint;
5706
bac7d97b
PA
5707 /* No? Perhaps we got a moribund watchpoint. */
5708 if (random_signal)
5709 random_signal = !stopped_by_watchpoint;
ab04a2af 5710
488f131b
JB
5711 /* For the program's own signals, act according to
5712 the signal handling tables. */
5713
ce12b012 5714 if (random_signal)
488f131b
JB
5715 {
5716 /* Signal not for debugging purposes. */
c9657e70 5717 struct inferior *inf = find_inferior_ptid (ecs->ptid);
c9737c08 5718 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 5719
527159b7 5720 if (debug_infrun)
c9737c08
PA
5721 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
5722 gdb_signal_to_symbol_string (stop_signal));
527159b7 5723
488f131b
JB
5724 stopped_by_random_signal = 1;
5725
252fbfc8
PA
5726 /* Always stop on signals if we're either just gaining control
5727 of the program, or the user explicitly requested this thread
5728 to remain stopped. */
d6b48e9c 5729 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 5730 || ecs->event_thread->stop_requested
24291992 5731 || (!inf->detaching
16c381f0 5732 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 5733 {
22bcd14b 5734 stop_waiting (ecs);
488f131b
JB
5735 return;
5736 }
b57bacec
PA
5737
5738 /* Notify observers the signal has "handle print" set. Note we
5739 returned early above if stopping; normal_stop handles the
5740 printing in that case. */
5741 if (signal_print[ecs->event_thread->suspend.stop_signal])
5742 {
5743 /* The signal table tells us to print about this signal. */
5744 target_terminal_ours_for_output ();
5745 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
5746 target_terminal_inferior ();
5747 }
488f131b
JB
5748
5749 /* Clear the signal if it should not be passed. */
16c381f0 5750 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 5751 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 5752
fb14de7b 5753 if (ecs->event_thread->prev_pc == stop_pc
16c381f0 5754 && ecs->event_thread->control.trap_expected
8358c15c 5755 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502 5756 {
372316f1
PA
5757 int was_in_line;
5758
68f53502
AC
5759 /* We were just starting a new sequence, attempting to
5760 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 5761 Instead this signal arrives. This signal will take us out
68f53502
AC
5762 of the stepping range so GDB needs to remember to, when
5763 the signal handler returns, resume stepping off that
5764 breakpoint. */
5765 /* To simplify things, "continue" is forced to use the same
5766 code paths as single-step - set a breakpoint at the
5767 signal return address and then, once hit, step off that
5768 breakpoint. */
237fc4c9
PA
5769 if (debug_infrun)
5770 fprintf_unfiltered (gdb_stdlog,
5771 "infrun: signal arrived while stepping over "
5772 "breakpoint\n");
d3169d93 5773
372316f1
PA
5774 was_in_line = step_over_info_valid_p ();
5775 clear_step_over_info ();
2c03e5be 5776 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 5777 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5778 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5779 ecs->event_thread->control.trap_expected = 0;
d137e6dc 5780
fbea99ea 5781 if (target_is_non_stop_p ())
372316f1 5782 {
fbea99ea
PA
5783 /* Either "set non-stop" is "on", or the target is
5784 always in non-stop mode. In this case, we have a bit
5785 more work to do. Resume the current thread, and if
5786 we had paused all threads, restart them while the
5787 signal handler runs. */
372316f1
PA
5788 keep_going (ecs);
5789
372316f1
PA
5790 if (was_in_line)
5791 {
372316f1
PA
5792 restart_threads (ecs->event_thread);
5793 }
5794 else if (debug_infrun)
5795 {
5796 fprintf_unfiltered (gdb_stdlog,
5797 "infrun: no need to restart threads\n");
5798 }
5799 return;
5800 }
5801
d137e6dc
PA
5802 /* If we were nexting/stepping some other thread, switch to
5803 it, so that we don't continue it, losing control. */
5804 if (!switch_back_to_stepped_thread (ecs))
5805 keep_going (ecs);
9d799f85 5806 return;
68f53502 5807 }
9d799f85 5808
e5f8a7cc
PA
5809 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
5810 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
5811 || ecs->event_thread->control.step_range_end == 1)
edb3359d 5812 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 5813 ecs->event_thread->control.step_stack_frame_id)
8358c15c 5814 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
5815 {
5816 /* The inferior is about to take a signal that will take it
5817 out of the single step range. Set a breakpoint at the
5818 current PC (which is presumably where the signal handler
5819 will eventually return) and then allow the inferior to
5820 run free.
5821
5822 Note that this is only needed for a signal delivered
5823 while in the single-step range. Nested signals aren't a
5824 problem as they eventually all return. */
237fc4c9
PA
5825 if (debug_infrun)
5826 fprintf_unfiltered (gdb_stdlog,
5827 "infrun: signal may take us out of "
5828 "single-step range\n");
5829
372316f1 5830 clear_step_over_info ();
2c03e5be 5831 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 5832 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5833 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5834 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
5835 keep_going (ecs);
5836 return;
d303a6c7 5837 }
9d799f85
AC
5838
5839 /* Note: step_resume_breakpoint may be non-NULL. This occurs
5840 when either there's a nested signal, or when there's a
5841 pending signal enabled just as the signal handler returns
5842 (leaving the inferior at the step-resume-breakpoint without
5843 actually executing it). Either way continue until the
5844 breakpoint is really hit. */
c447ac0b
PA
5845
5846 if (!switch_back_to_stepped_thread (ecs))
5847 {
5848 if (debug_infrun)
5849 fprintf_unfiltered (gdb_stdlog,
5850 "infrun: random signal, keep going\n");
5851
5852 keep_going (ecs);
5853 }
5854 return;
488f131b 5855 }
94c57d6a
PA
5856
5857 process_event_stop_test (ecs);
5858}
5859
5860/* Come here when we've got some debug event / signal we can explain
5861 (IOW, not a random signal), and test whether it should cause a
5862 stop, or whether we should resume the inferior (transparently).
5863 E.g., could be a breakpoint whose condition evaluates false; we
5864 could still be stepping within the line; etc. */
5865
5866static void
5867process_event_stop_test (struct execution_control_state *ecs)
5868{
5869 struct symtab_and_line stop_pc_sal;
5870 struct frame_info *frame;
5871 struct gdbarch *gdbarch;
cdaa5b73
PA
5872 CORE_ADDR jmp_buf_pc;
5873 struct bpstat_what what;
94c57d6a 5874
cdaa5b73 5875 /* Handle cases caused by hitting a breakpoint. */
611c83ae 5876
cdaa5b73
PA
5877 frame = get_current_frame ();
5878 gdbarch = get_frame_arch (frame);
fcf3daef 5879
cdaa5b73 5880 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 5881
cdaa5b73
PA
5882 if (what.call_dummy)
5883 {
5884 stop_stack_dummy = what.call_dummy;
5885 }
186c406b 5886
cdaa5b73
PA
5887 /* If we hit an internal event that triggers symbol changes, the
5888 current frame will be invalidated within bpstat_what (e.g., if we
5889 hit an internal solib event). Re-fetch it. */
5890 frame = get_current_frame ();
5891 gdbarch = get_frame_arch (frame);
e2e4d78b 5892
cdaa5b73
PA
5893 switch (what.main_action)
5894 {
5895 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
5896 /* If we hit the breakpoint at longjmp while stepping, we
5897 install a momentary breakpoint at the target of the
5898 jmp_buf. */
186c406b 5899
cdaa5b73
PA
5900 if (debug_infrun)
5901 fprintf_unfiltered (gdb_stdlog,
5902 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
186c406b 5903
cdaa5b73 5904 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 5905
cdaa5b73
PA
5906 if (what.is_longjmp)
5907 {
5908 struct value *arg_value;
5909
5910 /* If we set the longjmp breakpoint via a SystemTap probe,
5911 then use it to extract the arguments. The destination PC
5912 is the third argument to the probe. */
5913 arg_value = probe_safe_evaluate_at_pc (frame, 2);
5914 if (arg_value)
8fa0c4f8
AA
5915 {
5916 jmp_buf_pc = value_as_address (arg_value);
5917 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
5918 }
cdaa5b73
PA
5919 else if (!gdbarch_get_longjmp_target_p (gdbarch)
5920 || !gdbarch_get_longjmp_target (gdbarch,
5921 frame, &jmp_buf_pc))
e2e4d78b 5922 {
cdaa5b73
PA
5923 if (debug_infrun)
5924 fprintf_unfiltered (gdb_stdlog,
5925 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
5926 "(!gdbarch_get_longjmp_target)\n");
5927 keep_going (ecs);
5928 return;
e2e4d78b 5929 }
e2e4d78b 5930
cdaa5b73
PA
5931 /* Insert a breakpoint at resume address. */
5932 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
5933 }
5934 else
5935 check_exception_resume (ecs, frame);
5936 keep_going (ecs);
5937 return;
e81a37f7 5938
cdaa5b73
PA
5939 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
5940 {
5941 struct frame_info *init_frame;
e81a37f7 5942
cdaa5b73 5943 /* There are several cases to consider.
c906108c 5944
cdaa5b73
PA
5945 1. The initiating frame no longer exists. In this case we
5946 must stop, because the exception or longjmp has gone too
5947 far.
2c03e5be 5948
cdaa5b73
PA
5949 2. The initiating frame exists, and is the same as the
5950 current frame. We stop, because the exception or longjmp
5951 has been caught.
2c03e5be 5952
cdaa5b73
PA
5953 3. The initiating frame exists and is different from the
5954 current frame. This means the exception or longjmp has
5955 been caught beneath the initiating frame, so keep going.
c906108c 5956
cdaa5b73
PA
5957 4. The longjmp breakpoint has been placed just to protect
5958 against stale dummy frames and the user is not interested in
5959 stopping around longjmps. */
c5aa993b 5960
cdaa5b73
PA
5961 if (debug_infrun)
5962 fprintf_unfiltered (gdb_stdlog,
5963 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
c5aa993b 5964
cdaa5b73
PA
5965 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
5966 != NULL);
5967 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 5968
cdaa5b73
PA
5969 if (what.is_longjmp)
5970 {
b67a2c6f 5971 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 5972
cdaa5b73 5973 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 5974 {
cdaa5b73
PA
5975 /* Case 4. */
5976 keep_going (ecs);
5977 return;
e5ef252a 5978 }
cdaa5b73 5979 }
c5aa993b 5980
cdaa5b73 5981 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 5982
cdaa5b73
PA
5983 if (init_frame)
5984 {
5985 struct frame_id current_id
5986 = get_frame_id (get_current_frame ());
5987 if (frame_id_eq (current_id,
5988 ecs->event_thread->initiating_frame))
5989 {
5990 /* Case 2. Fall through. */
5991 }
5992 else
5993 {
5994 /* Case 3. */
5995 keep_going (ecs);
5996 return;
5997 }
68f53502 5998 }
488f131b 5999
cdaa5b73
PA
6000 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6001 exists. */
6002 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6003
bdc36728 6004 end_stepping_range (ecs);
cdaa5b73
PA
6005 }
6006 return;
e5ef252a 6007
cdaa5b73
PA
6008 case BPSTAT_WHAT_SINGLE:
6009 if (debug_infrun)
6010 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6011 ecs->event_thread->stepping_over_breakpoint = 1;
6012 /* Still need to check other stuff, at least the case where we
6013 are stepping and step out of the right range. */
6014 break;
e5ef252a 6015
cdaa5b73
PA
6016 case BPSTAT_WHAT_STEP_RESUME:
6017 if (debug_infrun)
6018 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
e5ef252a 6019
cdaa5b73
PA
6020 delete_step_resume_breakpoint (ecs->event_thread);
6021 if (ecs->event_thread->control.proceed_to_finish
6022 && execution_direction == EXEC_REVERSE)
6023 {
6024 struct thread_info *tp = ecs->event_thread;
6025
6026 /* We are finishing a function in reverse, and just hit the
6027 step-resume breakpoint at the start address of the
6028 function, and we're almost there -- just need to back up
6029 by one more single-step, which should take us back to the
6030 function call. */
6031 tp->control.step_range_start = tp->control.step_range_end = 1;
6032 keep_going (ecs);
e5ef252a 6033 return;
cdaa5b73
PA
6034 }
6035 fill_in_stop_func (gdbarch, ecs);
6036 if (stop_pc == ecs->stop_func_start
6037 && execution_direction == EXEC_REVERSE)
6038 {
6039 /* We are stepping over a function call in reverse, and just
6040 hit the step-resume breakpoint at the start address of
6041 the function. Go back to single-stepping, which should
6042 take us back to the function call. */
6043 ecs->event_thread->stepping_over_breakpoint = 1;
6044 keep_going (ecs);
6045 return;
6046 }
6047 break;
e5ef252a 6048
cdaa5b73
PA
6049 case BPSTAT_WHAT_STOP_NOISY:
6050 if (debug_infrun)
6051 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6052 stop_print_frame = 1;
e5ef252a 6053
99619bea
PA
6054 /* Assume the thread stopped for a breakpoint. We'll still check
6055 whether a/the breakpoint is there when the thread is next
6056 resumed. */
6057 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6058
22bcd14b 6059 stop_waiting (ecs);
cdaa5b73 6060 return;
e5ef252a 6061
cdaa5b73
PA
6062 case BPSTAT_WHAT_STOP_SILENT:
6063 if (debug_infrun)
6064 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6065 stop_print_frame = 0;
e5ef252a 6066
99619bea
PA
6067 /* Assume the thread stopped for a breakpoint. We'll still check
6068 whether a/the breakpoint is there when the thread is next
6069 resumed. */
6070 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6071 stop_waiting (ecs);
cdaa5b73
PA
6072 return;
6073
6074 case BPSTAT_WHAT_HP_STEP_RESUME:
6075 if (debug_infrun)
6076 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6077
6078 delete_step_resume_breakpoint (ecs->event_thread);
6079 if (ecs->event_thread->step_after_step_resume_breakpoint)
6080 {
6081 /* Back when the step-resume breakpoint was inserted, we
6082 were trying to single-step off a breakpoint. Go back to
6083 doing that. */
6084 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6085 ecs->event_thread->stepping_over_breakpoint = 1;
6086 keep_going (ecs);
6087 return;
e5ef252a 6088 }
cdaa5b73
PA
6089 break;
6090
6091 case BPSTAT_WHAT_KEEP_CHECKING:
6092 break;
e5ef252a 6093 }
c906108c 6094
af48d08f
PA
6095 /* If we stepped a permanent breakpoint and we had a high priority
6096 step-resume breakpoint for the address we stepped, but we didn't
6097 hit it, then we must have stepped into the signal handler. The
6098 step-resume was only necessary to catch the case of _not_
6099 stepping into the handler, so delete it, and fall through to
6100 checking whether the step finished. */
6101 if (ecs->event_thread->stepped_breakpoint)
6102 {
6103 struct breakpoint *sr_bp
6104 = ecs->event_thread->control.step_resume_breakpoint;
6105
8d707a12
PA
6106 if (sr_bp != NULL
6107 && sr_bp->loc->permanent
af48d08f
PA
6108 && sr_bp->type == bp_hp_step_resume
6109 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6110 {
6111 if (debug_infrun)
6112 fprintf_unfiltered (gdb_stdlog,
6113 "infrun: stepped permanent breakpoint, stopped in "
6114 "handler\n");
6115 delete_step_resume_breakpoint (ecs->event_thread);
6116 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6117 }
6118 }
6119
cdaa5b73
PA
6120 /* We come here if we hit a breakpoint but should not stop for it.
6121 Possibly we also were stepping and should stop for that. So fall
6122 through and test for stepping. But, if not stepping, do not
6123 stop. */
c906108c 6124
a7212384
UW
6125 /* In all-stop mode, if we're currently stepping but have stopped in
6126 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6127 if (switch_back_to_stepped_thread (ecs))
6128 return;
776f04fa 6129
8358c15c 6130 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6131 {
527159b7 6132 if (debug_infrun)
d3169d93
DJ
6133 fprintf_unfiltered (gdb_stdlog,
6134 "infrun: step-resume breakpoint is inserted\n");
527159b7 6135
488f131b
JB
6136 /* Having a step-resume breakpoint overrides anything
6137 else having to do with stepping commands until
6138 that breakpoint is reached. */
488f131b
JB
6139 keep_going (ecs);
6140 return;
6141 }
c5aa993b 6142
16c381f0 6143 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6144 {
527159b7 6145 if (debug_infrun)
8a9de0e4 6146 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
488f131b 6147 /* Likewise if we aren't even stepping. */
488f131b
JB
6148 keep_going (ecs);
6149 return;
6150 }
c5aa993b 6151
4b7703ad
JB
6152 /* Re-fetch current thread's frame in case the code above caused
6153 the frame cache to be re-initialized, making our FRAME variable
6154 a dangling pointer. */
6155 frame = get_current_frame ();
628fe4e4 6156 gdbarch = get_frame_arch (frame);
7e324e48 6157 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6158
488f131b 6159 /* If stepping through a line, keep going if still within it.
c906108c 6160
488f131b
JB
6161 Note that step_range_end is the address of the first instruction
6162 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6163 within it!
6164
6165 Note also that during reverse execution, we may be stepping
6166 through a function epilogue and therefore must detect when
6167 the current-frame changes in the middle of a line. */
6168
ce4c476a 6169 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
31410e84 6170 && (execution_direction != EXEC_REVERSE
388a8562 6171 || frame_id_eq (get_frame_id (frame),
16c381f0 6172 ecs->event_thread->control.step_frame_id)))
488f131b 6173 {
527159b7 6174 if (debug_infrun)
5af949e3
UW
6175 fprintf_unfiltered
6176 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
16c381f0
JK
6177 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6178 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6179
c1e36e3e
PA
6180 /* Tentatively re-enable range stepping; `resume' disables it if
6181 necessary (e.g., if we're stepping over a breakpoint or we
6182 have software watchpoints). */
6183 ecs->event_thread->control.may_range_step = 1;
6184
b2175913
MS
6185 /* When stepping backward, stop at beginning of line range
6186 (unless it's the function entry point, in which case
6187 keep going back to the call point). */
16c381f0 6188 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6189 && stop_pc != ecs->stop_func_start
6190 && execution_direction == EXEC_REVERSE)
bdc36728 6191 end_stepping_range (ecs);
b2175913
MS
6192 else
6193 keep_going (ecs);
6194
488f131b
JB
6195 return;
6196 }
c5aa993b 6197
488f131b 6198 /* We stepped out of the stepping range. */
c906108c 6199
488f131b 6200 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6201 loader dynamic symbol resolution code...
6202
6203 EXEC_FORWARD: we keep on single stepping until we exit the run
6204 time loader code and reach the callee's address.
6205
6206 EXEC_REVERSE: we've already executed the callee (backward), and
6207 the runtime loader code is handled just like any other
6208 undebuggable function call. Now we need only keep stepping
6209 backward through the trampoline code, and that's handled further
6210 down, so there is nothing for us to do here. */
6211
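  /* (Example, not a statement about every port: on GNU/Linux, for
     instance, the first call through a lazily bound PLT entry
     typically lands in the dynamic linker's resolver, and
     gdbarch_skip_solib_resolver reports where execution will resume
     once resolution is done, so we can breakpoint there instead of
     single-stepping through ld.so.)  */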
6212 if (execution_direction != EXEC_REVERSE
16c381f0 6213 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
cfd8ab24 6214 && in_solib_dynsym_resolve_code (stop_pc))
488f131b 6215 {
4c8c40e6 6216 CORE_ADDR pc_after_resolver =
568d6575 6217 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
c906108c 6218
527159b7 6219 if (debug_infrun)
3e43a32a
MS
6220 fprintf_unfiltered (gdb_stdlog,
6221 "infrun: stepped into dynsym resolve code\n");
527159b7 6222
488f131b
JB
6223 if (pc_after_resolver)
6224 {
6225 /* Set up a step-resume breakpoint at the address
6226 indicated by SKIP_SOLIB_RESOLVER. */
6227 struct symtab_and_line sr_sal;
abbb1732 6228
fe39c653 6229 init_sal (&sr_sal);
488f131b 6230 sr_sal.pc = pc_after_resolver;
6c95b8df 6231 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6232
a6d9a66e
UW
6233 insert_step_resume_breakpoint_at_sal (gdbarch,
6234 sr_sal, null_frame_id);
c5aa993b 6235 }
c906108c 6236
488f131b
JB
6237 keep_going (ecs);
6238 return;
6239 }
c906108c 6240
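  /* Note: a step_range_end of 1 is the marker for stepi/nexti (see the
     explicit check further down), so the test below only matches
     source-level step/next commands.  */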
16c381f0
JK
6241 if (ecs->event_thread->control.step_range_end != 1
6242 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6243 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6244 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6245 {
527159b7 6246 if (debug_infrun)
3e43a32a
MS
6247 fprintf_unfiltered (gdb_stdlog,
6248 "infrun: stepped into signal trampoline\n");
42edda50 6249 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6250 a signal trampoline (either by a signal being delivered or by
6251 the signal handler returning). Just single-step until the
6252 inferior leaves the trampoline (either by calling the handler
6253 or returning). */
488f131b
JB
6254 keep_going (ecs);
6255 return;
6256 }
c906108c 6257
14132e89
MR
6258 /* If we're in the return path from a shared library trampoline,
6259 we want to proceed through the trampoline when stepping. */
6260 /* macro/2012-04-25: This needs to come before the subroutine
6261 call check below as on some targets return trampolines look
6262 like subroutine calls (MIPS16 return thunks). */
6263 if (gdbarch_in_solib_return_trampoline (gdbarch,
6264 stop_pc, ecs->stop_func_name)
6265 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6266 {
6267 /* Determine where this trampoline returns. */
6268 CORE_ADDR real_stop_pc;
6269
6270 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
6271
6272 if (debug_infrun)
6273 fprintf_unfiltered (gdb_stdlog,
6274 "infrun: stepped into solib return tramp\n");
6275
6276 /* Only proceed through if we know where it's going. */
6277 if (real_stop_pc)
6278 {
6279 /* And put the step-breakpoint there and go until there. */
6280 struct symtab_and_line sr_sal;
6281
6282 init_sal (&sr_sal); /* initialize to zeroes */
6283 sr_sal.pc = real_stop_pc;
6284 sr_sal.section = find_pc_overlay (sr_sal.pc);
6285 sr_sal.pspace = get_frame_program_space (frame);
6286
6287 /* Do not specify what the fp should be when we stop since
6288 on some machines the prologue is where the new fp value
6289 is established. */
6290 insert_step_resume_breakpoint_at_sal (gdbarch,
6291 sr_sal, null_frame_id);
6292
6293 /* Restart without fiddling with the step ranges or
6294 other state. */
6295 keep_going (ecs);
6296 return;
6297 }
6298 }
6299
c17eaafe
DJ
6300 /* Check for subroutine calls. The check for the current frame
6301 equalling the step ID is not strictly necessary - the check of the
6302 previous frame's ID is sufficient - but it is a common case and
6303 cheaper than checking the previous frame's ID, so it is done first.
14e60db5
DJ
6304
6305 NOTE: frame_id_eq will never report two invalid frame IDs as
6306 being equal, so to get into this block, both the current and
6307 previous frame must have valid frame IDs. */
005ca36a
JB
6308 /* The outer_frame_id check is a heuristic to detect stepping
6309 through startup code. If we step over an instruction which
6310 sets the stack pointer from an invalid value to a valid value,
6311 we may detect that as a subroutine call from the mythical
6312 "outermost" function. This could be fixed by marking
6313 outermost frames as !stack_p,code_p,special_p. Then the
6314 initial outermost frame, before sp was valid, would
ce6cca6d 6315 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6316 for more. */
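  /* In plainer terms, the condition below treats this stop as a step
     into a subroutine when: the current stack frame is not the frame
     we were stepping in, its caller *is* that frame, and either the
     stepping frame is a real (non-outermost) frame or the function at
     STOP_PC is not the one we started stepping in.  */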
edb3359d 6317 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6318 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6319 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6320 ecs->event_thread->control.step_stack_frame_id)
6321 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6322 outer_frame_id)
885eeb5b
PA
6323 || (ecs->event_thread->control.step_start_function
6324 != find_pc_function (stop_pc)))))
488f131b 6325 {
95918acb 6326 CORE_ADDR real_stop_pc;
8fb3e588 6327
527159b7 6328 if (debug_infrun)
8a9de0e4 6329 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
527159b7 6330
b7a084be 6331 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6332 {
6333 /* I presume that step_over_calls is only 0 when we're
6334 supposed to be stepping at the assembly language level
6335 ("stepi"). Just stop. */
388a8562 6336 /* And this works the same backward as frontward. MVS */
bdc36728 6337 end_stepping_range (ecs);
95918acb
AC
6338 return;
6339 }
8fb3e588 6340
388a8562
MS
6341 /* Reverse stepping through solib trampolines. */
6342
6343 if (execution_direction == EXEC_REVERSE
16c381f0 6344 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6345 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6346 || (ecs->stop_func_start == 0
6347 && in_solib_dynsym_resolve_code (stop_pc))))
6348 {
6349 /* Any solib trampoline code can be handled in reverse
6350 by simply continuing to single-step. We have already
6351 executed the solib function (backwards), and a few
6352 steps will take us back through the trampoline to the
6353 caller. */
6354 keep_going (ecs);
6355 return;
6356 }
6357
16c381f0 6358 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6359 {
b2175913
MS
6360 /* We're doing a "next".
6361
6362 Normal (forward) execution: set a breakpoint at the
6363 callee's return address (the address at which the caller
6364 will resume).
6365
6366 Reverse (backward) execution: set the step-resume
6367 breakpoint at the start of the function that we just
6368 stepped into (backwards), and continue to there. When we
6130d0b7 6369 get there, we'll need to single-step back to the caller. */
b2175913
MS
6370
6371 if (execution_direction == EXEC_REVERSE)
6372 {
acf9414f
JK
6373 /* If we're already at the start of the function, we've either
6374 just stepped backward into a single instruction function,
6375 or stepped back out of a signal handler to the first instruction
6376 of the function. Just keep going, which will single-step back
6377 to the caller. */
58c48e72 6378 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f
JK
6379 {
6380 struct symtab_and_line sr_sal;
6381
6382 /* Normal function call return (static or dynamic). */
6383 init_sal (&sr_sal);
6384 sr_sal.pc = ecs->stop_func_start;
6385 sr_sal.pspace = get_frame_program_space (frame);
6386 insert_step_resume_breakpoint_at_sal (gdbarch,
6387 sr_sal, null_frame_id);
6388 }
b2175913
MS
6389 }
6390 else
568d6575 6391 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6392
8567c30f
AC
6393 keep_going (ecs);
6394 return;
6395 }
a53c66de 6396
95918acb 6397 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6398 calling routine and the real function), locate the real
6399 function. That's what tells us (a) whether we want to step
6400 into it at all, and (b) what prologue we want to run to the
6401 end of, if we do step into it. */
568d6575 6402 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6403 if (real_stop_pc == 0)
568d6575 6404 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6405 if (real_stop_pc != 0)
6406 ecs->stop_func_start = real_stop_pc;
8fb3e588 6407
db5f024e 6408 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9
RC
6409 {
6410 struct symtab_and_line sr_sal;
abbb1732 6411
1b2bfbb9
RC
6412 init_sal (&sr_sal);
6413 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6414 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6415
a6d9a66e
UW
6416 insert_step_resume_breakpoint_at_sal (gdbarch,
6417 sr_sal, null_frame_id);
8fb3e588
AC
6418 keep_going (ecs);
6419 return;
1b2bfbb9
RC
6420 }
6421
95918acb 6422 /* If we have line number information for the function we are
1bfeeb0f
JL
6423 thinking of stepping into and the function isn't on the skip
6424 list, step into it.
95918acb 6425
8fb3e588
AC
6426 If there are several symtabs at that PC (e.g. with include
6427 files), just want to know whether *any* of them have line
6428 numbers. find_pc_line handles this. */
95918acb
AC
6429 {
6430 struct symtab_and_line tmp_sal;
8fb3e588 6431
95918acb 6432 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6433 if (tmp_sal.line != 0
85817405
JK
6434 && !function_name_is_marked_for_skip (ecs->stop_func_name,
6435 &tmp_sal))
95918acb 6436 {
b2175913 6437 if (execution_direction == EXEC_REVERSE)
568d6575 6438 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6439 else
568d6575 6440 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6441 return;
6442 }
6443 }
6444
6445 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6446 set, we stop the step so that the user has a chance to switch
6447 to assembly mode. */
16c381f0 6448 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6449 && step_stop_if_no_debug)
95918acb 6450 {
bdc36728 6451 end_stepping_range (ecs);
95918acb
AC
6452 return;
6453 }
6454
b2175913
MS
6455 if (execution_direction == EXEC_REVERSE)
6456 {
acf9414f
JK
6457 /* If we're already at the start of the function, we've either just
6458 stepped backward into a single instruction function without line
6459 number info, or stepped back out of a signal handler to the first
6460 instruction of the function without line number info. Just keep
6461 going, which will single-step back to the caller. */
6462 if (ecs->stop_func_start != stop_pc)
6463 {
6464 /* Set a breakpoint at callee's start address.
6465 From there we can step once and be back in the caller. */
6466 struct symtab_and_line sr_sal;
abbb1732 6467
acf9414f
JK
6468 init_sal (&sr_sal);
6469 sr_sal.pc = ecs->stop_func_start;
6470 sr_sal.pspace = get_frame_program_space (frame);
6471 insert_step_resume_breakpoint_at_sal (gdbarch,
6472 sr_sal, null_frame_id);
6473 }
b2175913
MS
6474 }
6475 else
6476 /* Set a breakpoint at callee's return address (the address
6477 at which the caller will resume). */
568d6575 6478 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6479
95918acb 6480 keep_going (ecs);
488f131b 6481 return;
488f131b 6482 }
c906108c 6483
fdd654f3
MS
6484 /* Reverse stepping through solib trampolines. */
6485
6486 if (execution_direction == EXEC_REVERSE
16c381f0 6487 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3
MS
6488 {
6489 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6490 || (ecs->stop_func_start == 0
6491 && in_solib_dynsym_resolve_code (stop_pc)))
6492 {
6493 /* Any solib trampoline code can be handled in reverse
6494 by simply continuing to single-step. We have already
6495 executed the solib function (backwards), and a few
6496 steps will take us back through the trampoline to the
6497 caller. */
6498 keep_going (ecs);
6499 return;
6500 }
6501 else if (in_solib_dynsym_resolve_code (stop_pc))
6502 {
6503 /* Stepped backward into the solib dynsym resolver.
6504 Set a breakpoint at its start and continue, then
6505 one more step will take us out. */
6506 struct symtab_and_line sr_sal;
abbb1732 6507
fdd654f3
MS
6508 init_sal (&sr_sal);
6509 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6510 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6511 insert_step_resume_breakpoint_at_sal (gdbarch,
6512 sr_sal, null_frame_id);
6513 keep_going (ecs);
6514 return;
6515 }
6516 }
6517
2afb61aa 6518 stop_pc_sal = find_pc_line (stop_pc, 0);
7ed0fe66 6519
1b2bfbb9
RC
6520 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6521 the trampoline processing logic, however, there are some trampolines
6522 that have no names, so we should do trampoline handling first. */
16c381f0 6523 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6524 && ecs->stop_func_name == NULL
2afb61aa 6525 && stop_pc_sal.line == 0)
1b2bfbb9 6526 {
527159b7 6527 if (debug_infrun)
3e43a32a
MS
6528 fprintf_unfiltered (gdb_stdlog,
6529 "infrun: stepped into undebuggable function\n");
527159b7 6530
1b2bfbb9 6531 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
6532 undebuggable function (where there is no debugging information
6533 and no line number corresponding to the address where the
1b2bfbb9
RC
6534 inferior stopped). Since we want to skip this kind of code,
6535 we keep going until the inferior returns from this
14e60db5
DJ
6536 function - unless the user has asked us not to (via
6537 set step-mode) or we no longer know how to get back
6538 to the call site. */
6539 if (step_stop_if_no_debug
c7ce8faa 6540 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6541 {
6542 /* If we have no line number and the step-stop-if-no-debug
6543 is set, we stop the step so that the user has a chance to
6544 switch to assembly mode. */
bdc36728 6545 end_stepping_range (ecs);
1b2bfbb9
RC
6546 return;
6547 }
6548 else
6549 {
6550 /* Set a breakpoint at callee's return address (the address
6551 at which the caller will resume). */
568d6575 6552 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6553 keep_going (ecs);
6554 return;
6555 }
6556 }
6557
16c381f0 6558 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6559 {
6560 /* It is stepi or nexti. We always want to stop stepping after
6561 one instruction. */
527159b7 6562 if (debug_infrun)
8a9de0e4 6563 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
bdc36728 6564 end_stepping_range (ecs);
1b2bfbb9
RC
6565 return;
6566 }
6567
2afb61aa 6568 if (stop_pc_sal.line == 0)
488f131b
JB
6569 {
6570 /* We have no line number information. That means to stop
6571 stepping (does this always happen right after one instruction,
6572 when we do "s" in a function with no line numbers,
6573 or can this happen as a result of a return or longjmp?). */
527159b7 6574 if (debug_infrun)
8a9de0e4 6575 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
bdc36728 6576 end_stepping_range (ecs);
488f131b
JB
6577 return;
6578 }
c906108c 6579
edb3359d
DJ
6580 /* Look for "calls" to inlined functions, part one. If the inline
6581 frame machinery detected some skipped call sites, we have entered
6582 a new inline function. */
6583
6584 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6585 ecs->event_thread->control.step_frame_id)
edb3359d
DJ
6586 && inline_skipped_frames (ecs->ptid))
6587 {
6588 struct symtab_and_line call_sal;
6589
6590 if (debug_infrun)
6591 fprintf_unfiltered (gdb_stdlog,
6592 "infrun: stepped into inlined function\n");
6593
6594 find_frame_sal (get_current_frame (), &call_sal);
6595
16c381f0 6596 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
6597 {
6598 /* For "step", we're going to stop. But if the call site
6599 for this inlined function is on the same source line as
6600 we were previously stepping, go down into the function
6601 first. Otherwise stop at the call site. */
6602
6603 if (call_sal.line == ecs->event_thread->current_line
6604 && call_sal.symtab == ecs->event_thread->current_symtab)
6605 step_into_inline_frame (ecs->ptid);
6606
bdc36728 6607 end_stepping_range (ecs);
edb3359d
DJ
6608 return;
6609 }
6610 else
6611 {
6612 /* For "next", we should stop at the call site if it is on a
6613 different source line. Otherwise continue through the
6614 inlined function. */
6615 if (call_sal.line == ecs->event_thread->current_line
6616 && call_sal.symtab == ecs->event_thread->current_symtab)
6617 keep_going (ecs);
6618 else
bdc36728 6619 end_stepping_range (ecs);
edb3359d
DJ
6620 return;
6621 }
6622 }
6623
6624 /* Look for "calls" to inlined functions, part two. If we are still
6625 in the same real function we were stepping through, but we have
6626 to go further up to find the exact frame ID, we are stepping
6627 through a more inlined call beyond its call site. */
6628
6629 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
6630 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6631 ecs->event_thread->control.step_frame_id)
edb3359d 6632 && stepped_in_from (get_current_frame (),
16c381f0 6633 ecs->event_thread->control.step_frame_id))
edb3359d
DJ
6634 {
6635 if (debug_infrun)
6636 fprintf_unfiltered (gdb_stdlog,
6637 "infrun: stepping through inlined function\n");
6638
16c381f0 6639 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
edb3359d
DJ
6640 keep_going (ecs);
6641 else
bdc36728 6642 end_stepping_range (ecs);
edb3359d
DJ
6643 return;
6644 }
6645
2afb61aa 6646 if ((stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
6647 && (ecs->event_thread->current_line != stop_pc_sal.line
6648 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b
JB
6649 {
6650 /* We are at the start of a different line. So stop. Note that
6651 we don't stop if we step into the middle of a different line.
6652 That is said to make things like for (;;) statements work
6653 better. */
527159b7 6654 if (debug_infrun)
3e43a32a
MS
6655 fprintf_unfiltered (gdb_stdlog,
6656 "infrun: stepped to a different line\n");
bdc36728 6657 end_stepping_range (ecs);
488f131b
JB
6658 return;
6659 }
c906108c 6660
488f131b 6661 /* We aren't done stepping.
c906108c 6662
488f131b
JB
6663 Optimize by setting the stepping range to the line.
6664 (We might not be in the original line, but if we entered a
6665 new line in mid-statement, we continue stepping. This makes
6666 things like for(;;) statements work better.) */
c906108c 6667
16c381f0
JK
6668 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
6669 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 6670 ecs->event_thread->control.may_range_step = 1;
edb3359d 6671 set_step_info (frame, stop_pc_sal);
488f131b 6672
527159b7 6673 if (debug_infrun)
8a9de0e4 6674 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
488f131b 6675 keep_going (ecs);
104c1213
JM
6676}
6677
c447ac0b
PA
6678/* In all-stop mode, if we're currently stepping but have stopped in
6679 some other thread, we may need to switch back to the stepped
6680 thread. Returns true if we set the inferior running, false if we left
6681 it stopped (and the event needs further processing). */
6682
6683static int
6684switch_back_to_stepped_thread (struct execution_control_state *ecs)
6685{
fbea99ea 6686 if (!target_is_non_stop_p ())
c447ac0b
PA
6687 {
6688 struct thread_info *tp;
99619bea
PA
6689 struct thread_info *stepping_thread;
6690
6691 /* If any thread is blocked on some internal breakpoint, and we
6692 simply need to step over that breakpoint to get it going
6693 again, do that first. */
6694
6695 /* However, if we see an event for the stepping thread, then we
6696 know all other threads have been moved past their breakpoints
6697 already. Let the caller check whether the step is finished,
6698 etc., before deciding to move it past a breakpoint. */
6699 if (ecs->event_thread->control.step_range_end != 0)
6700 return 0;
6701
6702 /* Check if the current thread is blocked on an incomplete
6703 step-over, interrupted by a random signal. */
6704 if (ecs->event_thread->control.trap_expected
6705 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 6706 {
99619bea
PA
6707 if (debug_infrun)
6708 {
6709 fprintf_unfiltered (gdb_stdlog,
6710 "infrun: need to finish step-over of [%s]\n",
6711 target_pid_to_str (ecs->event_thread->ptid));
6712 }
6713 keep_going (ecs);
6714 return 1;
6715 }
2adfaa28 6716
99619bea
PA
6717 /* Check if the current thread is blocked by a single-step
6718 breakpoint of another thread. */
6719 if (ecs->hit_singlestep_breakpoint)
6720 {
6721 if (debug_infrun)
6722 {
6723 fprintf_unfiltered (gdb_stdlog,
6724 "infrun: need to step [%s] over single-step "
6725 "breakpoint\n",
6726 target_pid_to_str (ecs->ptid));
6727 }
6728 keep_going (ecs);
6729 return 1;
6730 }
6731
4d9d9d04
PA
6732 /* If this thread needs yet another step-over (e.g., stepping
6733 through a delay slot), do it first before moving on to
6734 another thread. */
6735 if (thread_still_needs_step_over (ecs->event_thread))
6736 {
6737 if (debug_infrun)
6738 {
6739 fprintf_unfiltered (gdb_stdlog,
6740 "infrun: thread [%s] still needs step-over\n",
6741 target_pid_to_str (ecs->event_thread->ptid));
6742 }
6743 keep_going (ecs);
6744 return 1;
6745 }
70509625 6746
483805cf
PA
6747 /* If scheduler locking applies even if not stepping, there's no
6748 need to walk over threads. Above we've checked whether the
6749 current thread is stepping. If some other thread not the
6750 event thread is stepping, then it must be that scheduler
6751 locking is not in effect. */
856e7dd6 6752 if (schedlock_applies (ecs->event_thread))
483805cf
PA
6753 return 0;
6754
4d9d9d04
PA
6755 /* Otherwise, we no longer expect a trap in the current thread.
6756 Clear the trap_expected flag before switching back -- this is
6757 what keep_going does as well, if we call it. */
6758 ecs->event_thread->control.trap_expected = 0;
6759
6760 /* Likewise, clear the signal if it should not be passed. */
6761 if (!signal_program[ecs->event_thread->suspend.stop_signal])
6762 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6763
6764 /* Do all pending step-overs before actually proceeding with
483805cf 6765 step/next/etc. */
4d9d9d04
PA
6766 if (start_step_over ())
6767 {
6768 prepare_to_wait (ecs);
6769 return 1;
6770 }
6771
6772 /* Look for the stepping/nexting thread. */
483805cf 6773 stepping_thread = NULL;
4d9d9d04 6774
034f788c 6775 ALL_NON_EXITED_THREADS (tp)
483805cf 6776 {
fbea99ea
PA
6777 /* Ignore threads of processes the caller is not
6778 resuming. */
483805cf 6779 if (!sched_multi
1afd5965 6780 && ptid_get_pid (tp->ptid) != ptid_get_pid (ecs->ptid))
483805cf
PA
6781 continue;
6782
6783 /* When stepping over a breakpoint, we lock all threads
6784 except the one that needs to move past the breakpoint.
6785 If a non-event thread has this set, the "incomplete
6786 step-over" check above should have caught it earlier. */
372316f1
PA
6787 if (tp->control.trap_expected)
6788 {
6789 internal_error (__FILE__, __LINE__,
6790 "[%s] has inconsistent state: "
6791 "trap_expected=%d\n",
6792 target_pid_to_str (tp->ptid),
6793 tp->control.trap_expected);
6794 }
483805cf
PA
6795
6796 /* Did we find the stepping thread? */
6797 if (tp->control.step_range_end)
6798 {
6799 /* Yep. There should only be one though. */
6800 gdb_assert (stepping_thread == NULL);
6801
6802 /* The event thread is handled at the top, before we
6803 enter this loop. */
6804 gdb_assert (tp != ecs->event_thread);
6805
6806 /* If some thread other than the event thread is
6807 stepping, then scheduler locking can't be in effect,
6808 otherwise we wouldn't have resumed the current event
6809 thread in the first place. */
856e7dd6 6810 gdb_assert (!schedlock_applies (tp));
483805cf
PA
6811
6812 stepping_thread = tp;
6813 }
99619bea
PA
6814 }
6815
483805cf 6816 if (stepping_thread != NULL)
99619bea 6817 {
c447ac0b
PA
6818 if (debug_infrun)
6819 fprintf_unfiltered (gdb_stdlog,
6820 "infrun: switching back to stepped thread\n");
6821
2ac7589c
PA
6822 if (keep_going_stepped_thread (stepping_thread))
6823 {
6824 prepare_to_wait (ecs);
6825 return 1;
6826 }
6827 }
6828 }
2adfaa28 6829
2ac7589c
PA
6830 return 0;
6831}
2adfaa28 6832
2ac7589c
PA
6833/* Set a previously stepped thread back to stepping. Returns true on
6834 success, false if the resume is not possible (e.g., the thread
6835 vanished). */
6836
6837static int
6838keep_going_stepped_thread (struct thread_info *tp)
6839{
6840 struct frame_info *frame;
6841 struct gdbarch *gdbarch;
6842 struct execution_control_state ecss;
6843 struct execution_control_state *ecs = &ecss;
2adfaa28 6844
2ac7589c
PA
6845 /* If the stepping thread exited, then don't try to switch back and
6846 resume it, which could fail in several different ways depending
6847 on the target. Instead, just keep going.
2adfaa28 6848
2ac7589c
PA
6849 We can find a stepping dead thread in the thread list in two
6850 cases:
2adfaa28 6851
2ac7589c
PA
6852 - The target supports thread exit events, and when the target
6853 tries to delete the thread from the thread list, inferior_ptid
6854 pointed at the exiting thread. In such case, calling
6855 delete_thread does not really remove the thread from the list;
6856 instead, the thread is left listed, with 'exited' state.
64ce06e4 6857
2ac7589c
PA
6858 - The target's debug interface does not support thread exit
6859 events, and so we have no idea whatsoever if the previously
6860 stepping thread is still alive. For that reason, we need to
6861 synchronously query the target now. */
2adfaa28 6862
2ac7589c
PA
6863 if (is_exited (tp->ptid)
6864 || !target_thread_alive (tp->ptid))
6865 {
6866 if (debug_infrun)
6867 fprintf_unfiltered (gdb_stdlog,
6868 "infrun: not resuming previously "
6869 "stepped thread, it has vanished\n");
6870
6871 delete_thread (tp->ptid);
6872 return 0;
c447ac0b 6873 }
2ac7589c
PA
6874
6875 if (debug_infrun)
6876 fprintf_unfiltered (gdb_stdlog,
6877 "infrun: resuming previously stepped thread\n");
6878
6879 reset_ecs (ecs, tp);
6880 switch_to_thread (tp->ptid);
6881
6882 stop_pc = regcache_read_pc (get_thread_regcache (tp->ptid));
6883 frame = get_current_frame ();
6884 gdbarch = get_frame_arch (frame);
6885
6886 /* If the PC of the thread we were trying to single-step has
6887 changed, then that thread has trapped or been signaled, but the
6888 event has not been reported to GDB yet. Re-poll the target
6889 looking for this particular thread's event (i.e. temporarily
6890 enable schedlock) by:
6891
6892 - setting a break at the current PC
6893 - resuming that particular thread, only (by setting trap
6894 expected)
6895
6896 This prevents us from continuously moving the single-step breakpoint
6897 forward, one instruction at a time, overstepping. */
6898
6899 if (stop_pc != tp->prev_pc)
6900 {
6901 ptid_t resume_ptid;
6902
6903 if (debug_infrun)
6904 fprintf_unfiltered (gdb_stdlog,
6905 "infrun: expected thread advanced also (%s -> %s)\n",
6906 paddress (target_gdbarch (), tp->prev_pc),
6907 paddress (target_gdbarch (), stop_pc));
6908
6909 /* Clear the info of the previous step-over, as it's no longer
6910 valid (if the thread was trying to step over a breakpoint, it
6911 has already succeeded). It's what keep_going would do too,
6912 if we called it. Do this before trying to insert the sss
6913 breakpoint, otherwise if we were previously trying to step
6914 over this exact address in another thread, the breakpoint is
6915 skipped. */
6916 clear_step_over_info ();
6917 tp->control.trap_expected = 0;
6918
6919 insert_single_step_breakpoint (get_frame_arch (frame),
6920 get_frame_address_space (frame),
6921 stop_pc);
6922
372316f1 6923 tp->resumed = 1;
fbea99ea 6924 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
6925 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
6926 }
6927 else
6928 {
6929 if (debug_infrun)
6930 fprintf_unfiltered (gdb_stdlog,
6931 "infrun: expected thread still hasn't advanced\n");
6932
6933 keep_going_pass_signal (ecs);
6934 }
6935 return 1;
c447ac0b
PA
6936}
6937
8b061563
PA
6938/* Is thread TP in the middle of (software or hardware)
6939 single-stepping? (Note the result of this function must never be
6940 passed directly as target_resume's STEP parameter.) */
104c1213 6941
a289b8f6 6942static int
b3444185 6943currently_stepping (struct thread_info *tp)
a7212384 6944{
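  /* A thread counts as stepping if it has an active step range with no
     step-resume breakpoint pending, is expecting the trap of a
     step-over (trap_expected), just single-stepped past a breakpoint
     (stepped_breakpoint), or a software watchpoint forces
     single-stepping (bpstat_should_step).  */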
8358c15c
JK
6945 return ((tp->control.step_range_end
6946 && tp->control.step_resume_breakpoint == NULL)
6947 || tp->control.trap_expected
af48d08f 6948 || tp->stepped_breakpoint
8358c15c 6949 || bpstat_should_step ());
a7212384
UW
6950}
6951
b2175913
MS
6952/* Inferior has stepped into a subroutine call with source code that
6953 we should not step over. Do step to the first line of code in
6954 it. */
c2c6d25f
JM
6955
6956static void
568d6575
UW
6957handle_step_into_function (struct gdbarch *gdbarch,
6958 struct execution_control_state *ecs)
c2c6d25f 6959{
43f3e411 6960 struct compunit_symtab *cust;
2afb61aa 6961 struct symtab_and_line stop_func_sal, sr_sal;
c2c6d25f 6962
7e324e48
GB
6963 fill_in_stop_func (gdbarch, ecs);
6964
43f3e411
DE
6965 cust = find_pc_compunit_symtab (stop_pc);
6966 if (cust != NULL && compunit_language (cust) != language_asm)
568d6575 6967 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
b2175913 6968 ecs->stop_func_start);
c2c6d25f 6969
2afb61aa 6970 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
6971 /* Use the step_resume_break to step until the end of the prologue,
6972 even if that involves jumps (as it seems to on the vax under
6973 4.2). */
6974 /* If the prologue ends in the middle of a source line, continue to
6975 the end of that source line (if it is still within the function).
6976 Otherwise, just go to end of prologue. */
2afb61aa
PA
6977 if (stop_func_sal.end
6978 && stop_func_sal.pc != ecs->stop_func_start
6979 && stop_func_sal.end < ecs->stop_func_end)
6980 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 6981
2dbd5e30
KB
6982 /* Architectures which require breakpoint adjustment might not be able
6983 to place a breakpoint at the computed address. If so, the test
6984 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
6985 ecs->stop_func_start to an address at which a breakpoint may be
6986 legitimately placed.
8fb3e588 6987
2dbd5e30
KB
6988 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
6989 made, GDB will enter an infinite loop when stepping through
6990 optimized code consisting of VLIW instructions which contain
6991 subinstructions corresponding to different source lines. On
6992 FR-V, it's not permitted to place a breakpoint on any but the
6993 first subinstruction of a VLIW instruction. When a breakpoint is
6994 set, GDB will adjust the breakpoint address to the beginning of
6995 the VLIW instruction. Thus, we need to make the corresponding
6996 adjustment here when computing the stop address. */
8fb3e588 6997
568d6575 6998 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
6999 {
7000 ecs->stop_func_start
568d6575 7001 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7002 ecs->stop_func_start);
2dbd5e30
KB
7003 }
7004
c2c6d25f
JM
7005 if (ecs->stop_func_start == stop_pc)
7006 {
7007 /* We are already there: stop now. */
bdc36728 7008 end_stepping_range (ecs);
c2c6d25f
JM
7009 return;
7010 }
7011 else
7012 {
7013 /* Put the step-breakpoint there and go until there. */
fe39c653 7014 init_sal (&sr_sal); /* initialize to zeroes */
c2c6d25f
JM
7015 sr_sal.pc = ecs->stop_func_start;
7016 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7017 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7018
c2c6d25f 7019 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7020 some machines the prologue is where the new fp value is
7021 established. */
a6d9a66e 7022 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7023
7024 /* And make sure stepping stops right away then. */
16c381f0
JK
7025 ecs->event_thread->control.step_range_end
7026 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7027 }
7028 keep_going (ecs);
7029}
d4f3574e 7030
b2175913
MS
7031/* Inferior has stepped backward into a subroutine call with source
7032 code that we should not step over. Do step to the beginning of the
7033 last line of code in it. */
7034
7035static void
568d6575
UW
7036handle_step_into_function_backward (struct gdbarch *gdbarch,
7037 struct execution_control_state *ecs)
b2175913 7038{
43f3e411 7039 struct compunit_symtab *cust;
167e4384 7040 struct symtab_and_line stop_func_sal;
b2175913 7041
7e324e48
GB
7042 fill_in_stop_func (gdbarch, ecs);
7043
43f3e411
DE
7044 cust = find_pc_compunit_symtab (stop_pc);
7045 if (cust != NULL && compunit_language (cust) != language_asm)
568d6575 7046 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
b2175913
MS
7047 ecs->stop_func_start);
7048
7049 stop_func_sal = find_pc_line (stop_pc, 0);
7050
7051 /* OK, we're just going to keep stepping here. */
7052 if (stop_func_sal.pc == stop_pc)
7053 {
7054 /* We're there already. Just stop stepping now. */
bdc36728 7055 end_stepping_range (ecs);
b2175913
MS
7056 }
7057 else
7058 {
7059 /* Else just reset the step range and keep going.
7060 No step-resume breakpoint, they don't work for
7061 epilogues, which can have multiple entry paths. */
16c381f0
JK
7062 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7063 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7064 keep_going (ecs);
7065 }
7066 return;
7067}
7068
d3169d93 7069/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7070 This is used both to step over functions and to skip over code. */
7071
7072static void
2c03e5be
PA
7073insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7074 struct symtab_and_line sr_sal,
7075 struct frame_id sr_id,
7076 enum bptype sr_type)
44cbf7b5 7077{
611c83ae
PA
7078 /* There should never be more than one step-resume or longjmp-resume
7079 breakpoint per thread, so we should never be setting a new
44cbf7b5 7080 step_resume_breakpoint when one is already active. */
8358c15c 7081 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7082 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93
DJ
7083
7084 if (debug_infrun)
7085 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7086 "infrun: inserting step-resume breakpoint at %s\n",
7087 paddress (gdbarch, sr_sal.pc));
d3169d93 7088
8358c15c 7089 inferior_thread ()->control.step_resume_breakpoint
2c03e5be
PA
7090 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
7091}
7092
9da8c2a0 7093void
2c03e5be
PA
7094insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7095 struct symtab_and_line sr_sal,
7096 struct frame_id sr_id)
7097{
7098 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7099 sr_sal, sr_id,
7100 bp_step_resume);
44cbf7b5 7101}
7ce450bd 7102
2c03e5be
PA
7103/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7104 This is used to skip a potential signal handler.
7ce450bd 7105
14e60db5
DJ
7106 This is called with the interrupted function's frame. The signal
7107 handler, when it returns, will resume the interrupted function at
7108 RETURN_FRAME.pc. */
d303a6c7
AC
7109
7110static void
2c03e5be 7111insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7
AC
7112{
7113 struct symtab_and_line sr_sal;
a6d9a66e 7114 struct gdbarch *gdbarch;
d303a6c7 7115
f4c1edd8 7116 gdb_assert (return_frame != NULL);
d303a6c7
AC
7117 init_sal (&sr_sal); /* initialize to zeros */
7118
a6d9a66e 7119 gdbarch = get_frame_arch (return_frame);
568d6575 7120 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7121 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7122 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7123
2c03e5be
PA
7124 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7125 get_stack_frame_id (return_frame),
7126 bp_hp_step_resume);
d303a6c7
AC
7127}
7128
2c03e5be
PA
7129/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7130 is used to skip a function after stepping into it (for "next" or if
7131 the called function has no debugging information).
14e60db5
DJ
7132
7133 The current function has almost always been reached by single
7134 stepping a call or return instruction. NEXT_FRAME belongs to the
7135 current function, and the breakpoint will be set at the caller's
7136 resume address.
7137
7138 This is a separate function rather than reusing
2c03e5be 7139 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7140 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7141 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7142
7143static void
7144insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7145{
7146 struct symtab_and_line sr_sal;
a6d9a66e 7147 struct gdbarch *gdbarch;
14e60db5
DJ
7148
7149 /* We shouldn't have gotten here if we don't know where the call site
7150 is. */
c7ce8faa 7151 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5
DJ
7152
7153 init_sal (&sr_sal); /* initialize to zeros */
7154
a6d9a66e 7155 gdbarch = frame_unwind_caller_arch (next_frame);
c7ce8faa
DJ
7156 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7157 frame_unwind_caller_pc (next_frame));
14e60db5 7158 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7159 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7160
a6d9a66e 7161 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7162 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7163}
7164
611c83ae
PA
7165/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7166 new breakpoint at the target of a jmp_buf. The handling of
7167 longjmp-resume uses the same mechanisms used for handling
7168 "step-resume" breakpoints. */
7169
7170static void
a6d9a66e 7171insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7172{
e81a37f7
TT
7173 /* There should never be more than one longjmp-resume breakpoint per
7174 thread, so we should never be setting a new
611c83ae 7175 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7176 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae
PA
7177
7178 if (debug_infrun)
7179 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7180 "infrun: inserting longjmp-resume breakpoint at %s\n",
7181 paddress (gdbarch, pc));
611c83ae 7182
e81a37f7 7183 inferior_thread ()->control.exception_resume_breakpoint =
a6d9a66e 7184 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
611c83ae
PA
7185}
7186
186c406b
TT
7187/* Insert an exception resume breakpoint. TP is the thread throwing
7188 the exception. The block B is the block of the unwinder debug hook
7189 function. FRAME is the frame corresponding to the call to this
7190 function. SYM is the symbol of the function argument holding the
7191 target PC of the exception. */
7192
7193static void
7194insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7195 const struct block *b,
186c406b
TT
7196 struct frame_info *frame,
7197 struct symbol *sym)
7198{
492d29ea 7199 TRY
186c406b
TT
7200 {
7201 struct symbol *vsym;
7202 struct value *value;
7203 CORE_ADDR handler;
7204 struct breakpoint *bp;
7205
d12307c1
PMR
7206 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN,
7207 NULL).symbol;
186c406b
TT
7208 value = read_var_value (vsym, frame);
7209 /* If the value was optimized out, revert to the old behavior. */
7210 if (! value_optimized_out (value))
7211 {
7212 handler = value_as_address (value);
7213
7214 if (debug_infrun)
7215 fprintf_unfiltered (gdb_stdlog,
7216 "infrun: exception resume at %lx\n",
7217 (unsigned long) handler);
7218
7219 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7220 handler, bp_exception_resume);
c70a6932
JK
7221
7222 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7223 frame = NULL;
7224
186c406b
TT
7225 bp->thread = tp->num;
7226 inferior_thread ()->control.exception_resume_breakpoint = bp;
7227 }
7228 }
492d29ea
PA
7229 CATCH (e, RETURN_MASK_ERROR)
7230 {
7231 /* We want to ignore errors here. */
7232 }
7233 END_CATCH
186c406b
TT
7234}
7235
28106bc2
SDJ
7236/* A helper for check_exception_resume that sets an
7237 exception-breakpoint based on a SystemTap probe. */
7238
7239static void
7240insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7241 const struct bound_probe *probe,
28106bc2
SDJ
7242 struct frame_info *frame)
7243{
7244 struct value *arg_value;
7245 CORE_ADDR handler;
7246 struct breakpoint *bp;
7247
7248 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7249 if (!arg_value)
7250 return;
7251
7252 handler = value_as_address (arg_value);
7253
7254 if (debug_infrun)
7255 fprintf_unfiltered (gdb_stdlog,
7256 "infrun: exception resume at %s\n",
6bac7473 7257 paddress (get_objfile_arch (probe->objfile),
28106bc2
SDJ
7258 handler));
7259
7260 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7261 handler, bp_exception_resume);
7262 bp->thread = tp->num;
7263 inferior_thread ()->control.exception_resume_breakpoint = bp;
7264}
7265
186c406b
TT
7266/* This is called when an exception has been intercepted. Check to
7267 see whether the exception's destination is of interest, and if so,
7268 set an exception resume breakpoint there. */
7269
7270static void
7271check_exception_resume (struct execution_control_state *ecs,
28106bc2 7272 struct frame_info *frame)
186c406b 7273{
729662a5 7274 struct bound_probe probe;
28106bc2
SDJ
7275 struct symbol *func;
7276
7277 /* First see if this exception unwinding breakpoint was set via a
7278 SystemTap probe point. If so, the probe has two arguments: the
7279 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7280 set a breakpoint there. */
6bac7473 7281 probe = find_probe_by_pc (get_frame_pc (frame));
729662a5 7282 if (probe.probe)
28106bc2 7283 {
729662a5 7284 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7285 return;
7286 }
7287
7288 func = get_frame_function (frame);
7289 if (!func)
7290 return;
186c406b 7291
492d29ea 7292 TRY
186c406b 7293 {
3977b71f 7294 const struct block *b;
8157b174 7295 struct block_iterator iter;
186c406b
TT
7296 struct symbol *sym;
7297 int argno = 0;
7298
7299 /* The exception breakpoint is a thread-specific breakpoint on
7300 the unwinder's debug hook, declared as:
7301
7302 void _Unwind_DebugHook (void *cfa, void *handler);
7303
7304 The CFA argument indicates the frame to which control is
7305 about to be transferred. HANDLER is the destination PC.
7306
7307 We ignore the CFA and set a temporary breakpoint at HANDLER.
7308 This is not extremely efficient but it avoids issues in gdb
7309 with computing the DWARF CFA, and it also works even in weird
7310 cases such as throwing an exception from inside a signal
7311 handler. */
7312
7313 b = SYMBOL_BLOCK_VALUE (func);
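      /* Walk the hook's formal arguments: skip the first (the CFA) and
         set the exception-resume breakpoint from the second (the
         HANDLER).  */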
7314 ALL_BLOCK_SYMBOLS (b, iter, sym)
7315 {
7316 if (!SYMBOL_IS_ARGUMENT (sym))
7317 continue;
7318
7319 if (argno == 0)
7320 ++argno;
7321 else
7322 {
7323 insert_exception_resume_breakpoint (ecs->event_thread,
7324 b, frame, sym);
7325 break;
7326 }
7327 }
7328 }
492d29ea
PA
7329 CATCH (e, RETURN_MASK_ERROR)
7330 {
7331 }
7332 END_CATCH
186c406b
TT
7333}
7334
104c1213 7335static void
22bcd14b 7336stop_waiting (struct execution_control_state *ecs)
104c1213 7337{
527159b7 7338 if (debug_infrun)
22bcd14b 7339 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
527159b7 7340
31e77af2
PA
7341 clear_step_over_info ();
7342
cd0fc7c3
SS
7343 /* Let callers know we don't want to wait for the inferior anymore. */
7344 ecs->wait_some_more = 0;
fbea99ea
PA
7345
7346 /* If all-stop, but the target is always in non-stop mode, stop all
7347 threads now that we're presenting the stop to the user. */
7348 if (!non_stop && target_is_non_stop_p ())
7349 stop_all_threads ();
cd0fc7c3
SS
7350}
7351
4d9d9d04
PA
7352/* Like keep_going, but passes the signal to the inferior, even if the
7353 signal is set to nopass. */
d4f3574e
SS
7354
7355static void
4d9d9d04 7356keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7357{
c4dbc9af
PA
7358 /* Make sure normal_stop is called if we get a QUIT handled before
7359 reaching resume. */
7360 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
7361
4d9d9d04 7362 gdb_assert (ptid_equal (ecs->event_thread->ptid, inferior_ptid));
372316f1 7363 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7364
d4f3574e 7365 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b
UW
7366 ecs->event_thread->prev_pc
7367 = regcache_read_pc (get_thread_regcache (ecs->ptid));
d4f3574e 7368
4d9d9d04 7369 if (ecs->event_thread->control.trap_expected)
d4f3574e 7370 {
4d9d9d04
PA
7371 struct thread_info *tp = ecs->event_thread;
7372
7373 if (debug_infrun)
7374 fprintf_unfiltered (gdb_stdlog,
7375 "infrun: %s has trap_expected set, "
7376 "resuming to collect trap\n",
7377 target_pid_to_str (tp->ptid));
7378
a9ba6bae
PA
7379 /* We haven't yet gotten our trap, and either: intercepted a
7380 non-signal event (e.g., a fork); or took a signal which we
7381 are supposed to pass through to the inferior. Simply
7382 continue. */
c4dbc9af 7383 discard_cleanups (old_cleanups);
64ce06e4 7384 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7385 }
372316f1
PA
7386 else if (step_over_info_valid_p ())
7387 {
7388 /* Another thread is stepping over a breakpoint in-line. If
7389 this thread needs a step-over too, queue the request. In
7390 either case, this resume must be deferred for later. */
7391 struct thread_info *tp = ecs->event_thread;
7392
7393 if (ecs->hit_singlestep_breakpoint
7394 || thread_still_needs_step_over (tp))
7395 {
7396 if (debug_infrun)
7397 fprintf_unfiltered (gdb_stdlog,
7398 "infrun: step-over already in progress: "
7399 "step-over for %s deferred\n",
7400 target_pid_to_str (tp->ptid));
7401 thread_step_over_chain_enqueue (tp);
7402 }
7403 else
7404 {
7405 if (debug_infrun)
7406 fprintf_unfiltered (gdb_stdlog,
7407 "infrun: step-over in progress: "
7408 "resume of %s deferred\n",
7409 target_pid_to_str (tp->ptid));
7410 }
7411
7412 discard_cleanups (old_cleanups);
7413 }
d4f3574e
SS
7414 else
7415 {
31e77af2 7416 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7417 int remove_bp;
7418 int remove_wps;
6c4cfb24 7419 enum step_over_what step_what;
31e77af2 7420
d4f3574e 7421 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7422 anyway (if we got a signal, the user asked it be passed to
7423 the child)
7424 -- or --
7425 We got our expected trap, but decided we should resume from
7426 it.
d4f3574e 7427
a9ba6bae 7428 We're going to run this baby now!
d4f3574e 7429
c36b740a
VP
7430 Note that insert_breakpoints won't try to re-insert
7431 already inserted breakpoints. Therefore, we don't
7432 care if breakpoints were already inserted, or not. */
a9ba6bae 7433
31e77af2
PA
7434 /* If we need to step over a breakpoint, and we're not using
7435 displaced stepping to do so, insert all breakpoints
7436 (watchpoints, etc.) but the one we're stepping over, step one
7437 instruction, and then re-insert the breakpoint when that step
7438 is finished. */
963f9c80 7439
6c4cfb24
PA
7440 step_what = thread_still_needs_step_over (ecs->event_thread);
7441
963f9c80 7442 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7443 || (step_what & STEP_OVER_BREAKPOINT));
7444 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7445
cb71640d
PA
7446 /* We can't use displaced stepping if we need to step past a
7447 watchpoint. The instruction copied to the scratch pad would
7448 still trigger the watchpoint. */
7449 if (remove_bp
3fc8eb30 7450 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7451 {
31e77af2 7452 set_step_over_info (get_regcache_aspace (regcache),
963f9c80 7453 regcache_read_pc (regcache), remove_wps);
45e8c884 7454 }
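      /* Otherwise only watchpoints need to come out for this step;
         record a step-over with no breakpoint location to avoid.  */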
963f9c80
PA
7455 else if (remove_wps)
7456 set_step_over_info (NULL, 0, remove_wps);
372316f1
PA
7457
7458 /* If we now need to do an in-line step-over, we need to stop
7459 all other threads. Note this must be done before
7460 insert_breakpoints below, because that removes the breakpoint
7461 we're about to step over, otherwise other threads could miss
7462 it. */
fbea99ea 7463 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7464 stop_all_threads ();
abbb1732 7465
31e77af2 7466 /* Stop stepping if inserting breakpoints fails. */
492d29ea 7467 TRY
31e77af2
PA
7468 {
7469 insert_breakpoints ();
7470 }
492d29ea 7471 CATCH (e, RETURN_MASK_ERROR)
31e77af2
PA
7472 {
7473 exception_print (gdb_stderr, e);
22bcd14b 7474 stop_waiting (ecs);
de1fe8c8 7475 discard_cleanups (old_cleanups);
31e77af2 7476 return;
d4f3574e 7477 }
492d29ea 7478 END_CATCH
d4f3574e 7479
963f9c80 7480 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7481
c4dbc9af 7482 discard_cleanups (old_cleanups);
64ce06e4 7483 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7484 }
7485
488f131b 7486 prepare_to_wait (ecs);
d4f3574e
SS
7487}
7488
4d9d9d04
PA
7489/* Called when we should continue running the inferior, because the
7490 current event doesn't cause a user visible stop. This does the
7491 resuming part; waiting for the next event is done elsewhere. */
7492
7493static void
7494keep_going (struct execution_control_state *ecs)
7495{
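  /* If we got the trap we were expecting (e.g. from stepping over a
     breakpoint), we are no longer waiting for it.  Likewise, drop any
     signal the user asked not to be passed to the program;
     keep_going_pass_signal is the variant that forwards it anyway.  */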
7496 if (ecs->event_thread->control.trap_expected
7497 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7498 ecs->event_thread->control.trap_expected = 0;
7499
7500 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7501 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7502 keep_going_pass_signal (ecs);
7503}
7504
104c1213
JM
7505/* This function normally comes after a resume, before
7506 handle_inferior_event exits. It takes care of any last bits of
7507 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7508
104c1213
JM
7509static void
7510prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7511{
527159b7 7512 if (debug_infrun)
8a9de0e4 7513 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
104c1213 7514
104c1213
JM
7515 /* This is the old end of the while loop. Let everybody know we
7516 want to wait for the inferior some more and get called again
7517 soon. */
7518 ecs->wait_some_more = 1;
c906108c 7519}
11cf8741 7520
fd664c91 7521/* We are done with the step range of a step/next/si/ni command.
b57bacec 7522 Called once for each n of a "step n" operation. */
fd664c91
PA
7523
7524static void
bdc36728 7525end_stepping_range (struct execution_control_state *ecs)
fd664c91 7526{
bdc36728 7527 ecs->event_thread->control.stop_step = 1;
bdc36728 7528 stop_waiting (ecs);
fd664c91
PA
7529}
7530
33d62d64
JK
7531/* Several print_*_reason functions to print why the inferior has stopped.
7532 We always print something when the inferior exits, or receives a signal.
7533 The rest of the cases are dealt with later on in normal_stop and
7534 print_it_typical. Ideally there should be a call to one of these
7535 print_*_reason functions from handle_inferior_event each time
22bcd14b 7536 stop_waiting is called.
33d62d64 7537
fd664c91
PA
7538 Note that we don't call these directly, instead we delegate that to
7539 the interpreters, through observers. Interpreters then call these
7540 with whatever uiout is right. */
33d62d64 7541
fd664c91
PA
7542void
7543print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7544{
fd664c91 7545 /* For CLI-like interpreters, print nothing. */
33d62d64 7546
fd664c91
PA
7547 if (ui_out_is_mi_like_p (uiout))
7548 {
7549 ui_out_field_string (uiout, "reason",
7550 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7551 }
7552}
33d62d64 7553
fd664c91
PA
7554void
7555print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7556{
33d62d64
JK
7557 annotate_signalled ();
7558 if (ui_out_is_mi_like_p (uiout))
7559 ui_out_field_string
7560 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7561 ui_out_text (uiout, "\nProgram terminated with signal ");
7562 annotate_signal_name ();
7563 ui_out_field_string (uiout, "signal-name",
2ea28649 7564 gdb_signal_to_name (siggnal));
33d62d64
JK
7565 annotate_signal_name_end ();
7566 ui_out_text (uiout, ", ");
7567 annotate_signal_string ();
7568 ui_out_field_string (uiout, "signal-meaning",
2ea28649 7569 gdb_signal_to_string (siggnal));
33d62d64
JK
7570 annotate_signal_string_end ();
7571 ui_out_text (uiout, ".\n");
7572 ui_out_text (uiout, "The program no longer exists.\n");
7573}
7574
fd664c91
PA
7575void
7576print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7577{
fda326dd
TT
7578 struct inferior *inf = current_inferior ();
7579 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
7580
33d62d64
JK
7581 annotate_exited (exitstatus);
7582 if (exitstatus)
7583 {
7584 if (ui_out_is_mi_like_p (uiout))
7585 ui_out_field_string (uiout, "reason",
7586 async_reason_lookup (EXEC_ASYNC_EXITED));
fda326dd
TT
7587 ui_out_text (uiout, "[Inferior ");
7588 ui_out_text (uiout, plongest (inf->num));
7589 ui_out_text (uiout, " (");
7590 ui_out_text (uiout, pidstr);
7591 ui_out_text (uiout, ") exited with code ");
33d62d64 7592 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
fda326dd 7593 ui_out_text (uiout, "]\n");
33d62d64
JK
7594 }
7595 else
11cf8741 7596 {
9dc5e2a9 7597 if (ui_out_is_mi_like_p (uiout))
034dad6f 7598 ui_out_field_string
33d62d64 7599 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
fda326dd
TT
7600 ui_out_text (uiout, "[Inferior ");
7601 ui_out_text (uiout, plongest (inf->num));
7602 ui_out_text (uiout, " (");
7603 ui_out_text (uiout, pidstr);
7604 ui_out_text (uiout, ") exited normally]\n");
33d62d64 7605 }
33d62d64
JK
7606}
7607
fd664c91
PA
7608void
7609print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64
JK
7610{
7611 annotate_signal ();
7612
a493e3e2 7613 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
33d62d64
JK
7614 {
7615 struct thread_info *t = inferior_thread ();
7616
7617 ui_out_text (uiout, "\n[");
7618 ui_out_field_string (uiout, "thread-name",
7619 target_pid_to_str (t->ptid));
7620 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
7621 ui_out_text (uiout, " stopped");
7622 }
7623 else
7624 {
7625 ui_out_text (uiout, "\nProgram received signal ");
8b93c638 7626 annotate_signal_name ();
33d62d64
JK
7627 if (ui_out_is_mi_like_p (uiout))
7628 ui_out_field_string
7629 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
488f131b 7630 ui_out_field_string (uiout, "signal-name",
2ea28649 7631 gdb_signal_to_name (siggnal));
8b93c638
JM
7632 annotate_signal_name_end ();
7633 ui_out_text (uiout, ", ");
7634 annotate_signal_string ();
488f131b 7635 ui_out_field_string (uiout, "signal-meaning",
2ea28649 7636 gdb_signal_to_string (siggnal));
8b93c638 7637 annotate_signal_string_end ();
33d62d64
JK
7638 }
7639 ui_out_text (uiout, ".\n");
7640}
252fbfc8 7641
fd664c91
PA
7642void
7643print_no_history_reason (struct ui_out *uiout)
33d62d64 7644{
fd664c91 7645 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
11cf8741 7646}
43ff13b4 7647
0c7e1a46
PA
7648/* Print current location without a level number, if we have changed
7649 functions or hit a breakpoint. Print source line if we have one.
7650 bpstat_print contains the logic deciding in detail what to print,
7651 based on the event(s) that just occurred. */
7652
7653void
7654print_stop_event (struct target_waitstatus *ws)
7655{
7656 int bpstat_ret;
f486487f 7657 enum print_what source_flag;
0c7e1a46
PA
7658 int do_frame_printing = 1;
7659 struct thread_info *tp = inferior_thread ();
7660
7661 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
7662 switch (bpstat_ret)
7663 {
7664 case PRINT_UNKNOWN:
7665 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
7666 should) carry around the function and does (or should) use
7667 that when doing a frame comparison. */
7668 if (tp->control.stop_step
7669 && frame_id_eq (tp->control.step_frame_id,
7670 get_frame_id (get_current_frame ()))
885eeb5b 7671 && tp->control.step_start_function == find_pc_function (stop_pc))
0c7e1a46
PA
7672 {
7673 /* Finished step, just print source line. */
7674 source_flag = SRC_LINE;
7675 }
7676 else
7677 {
7678 /* Print location and source line. */
7679 source_flag = SRC_AND_LOC;
7680 }
7681 break;
7682 case PRINT_SRC_AND_LOC:
7683 /* Print location and source line. */
7684 source_flag = SRC_AND_LOC;
7685 break;
7686 case PRINT_SRC_ONLY:
7687 source_flag = SRC_LINE;
7688 break;
7689 case PRINT_NOTHING:
7690 /* Something bogus. */
7691 source_flag = SRC_LINE;
7692 do_frame_printing = 0;
7693 break;
7694 default:
7695 internal_error (__FILE__, __LINE__, _("Unknown value."));
7696 }
7697
7698 /* The behavior of this routine with respect to the source
7699 flag is:
7700 SRC_LINE: Print only source line
7701 LOCATION: Print only location
7702 SRC_AND_LOC: Print location and source line. */
7703 if (do_frame_printing)
7704 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
7705
7706 /* Display the auto-display expressions. */
7707 do_displays ();
7708}
7709
c906108c
SS
7710/* Here to return control to GDB when the inferior stops for real.
7711 Print appropriate messages, remove breakpoints, give terminal our modes.
7712
7713 STOP_PRINT_FRAME nonzero means print the executing frame
7714 (pc, function, args, file, line number and line text).
7715 BREAKPOINTS_FAILED nonzero means stop was due to error
7716 attempting to insert breakpoints. */
7717
7718void
96baa820 7719normal_stop (void)
c906108c 7720{
73b65bb0
DJ
7721 struct target_waitstatus last;
7722 ptid_t last_ptid;
29f49a6a 7723 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
e1316e60 7724 ptid_t pid_ptid;
73b65bb0
DJ
7725
7726 get_last_target_status (&last_ptid, &last);
7727
29f49a6a
PA
7728 /* If an exception is thrown from this point on, make sure to
7729 propagate GDB's knowledge of the executing state to the
7730 frontend/user running state. A QUIT is an easy exception to see
7731 here, so do this before any filtered output. */
c35b1492
PA
7732 if (!non_stop)
7733 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
e1316e60
PA
7734 else if (last.kind == TARGET_WAITKIND_SIGNALLED
7735 || last.kind == TARGET_WAITKIND_EXITED)
7736 {
7737 /* On some targets, we may still have live threads in the
7738 inferior when we get a process exit event. E.g., for
7739 "checkpoint", when the current checkpoint/fork exits,
7740 linux-fork.c automatically switches to another fork from
7741 within target_mourn_inferior. */
7742 if (!ptid_equal (inferior_ptid, null_ptid))
7743 {
7744 pid_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
7745 make_cleanup (finish_thread_state_cleanup, &pid_ptid);
7746 }
7747 }
7748 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
c35b1492 7749 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
29f49a6a 7750
b57bacec
PA
7751 /* As we're presenting a stop, and potentially removing breakpoints,
7752 update the thread list so we can tell whether there are threads
7753 running on the target. With target remote, for example, we can
7754 only learn about new threads when we explicitly update the thread
7755 list. Do this before notifying the interpreters about signal
7756 stops, end of stepping ranges, etc., so that the "new thread"
7757 output is emitted before e.g., "Program received signal FOO",
7758 instead of after. */
7759 update_thread_list ();
7760
7761 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
7762 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
7763
c906108c
SS
7764 /* As with the notification of thread events, we want to delay
7765 notifying the user that we've switched thread context until
7766 the inferior actually stops.
7767
73b65bb0
DJ
7768 There's no point in saying anything if the inferior has exited.
7769 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
7770 "received a signal".
7771
7772 Also skip saying anything in non-stop mode. In that mode, we
7773 don't want GDB to switch threads behind the user's back (to avoid
7774 races where the user is typing a command to apply to thread x,
7775 but GDB switches to thread y before the user finishes entering
7776 the command), so fetch_inferior_event installs a cleanup to restore
7777 the current thread back to the thread the user had selected right
7778 after this event is handled. So we're not really switching, only
7779 informing of a stop. */
4f8d22e3
PA
7780 if (!non_stop
7781 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
73b65bb0
DJ
7782 && target_has_execution
7783 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
7784 && last.kind != TARGET_WAITKIND_EXITED
7785 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c
SS
7786 {
7787 target_terminal_ours_for_output ();
a3f17187 7788 printf_filtered (_("[Switching to %s]\n"),
c95310c6 7789 target_pid_to_str (inferior_ptid));
b8fa951a 7790 annotate_thread_changed ();
39f77062 7791 previous_inferior_ptid = inferior_ptid;
c906108c 7792 }
c906108c 7793
0e5bf2a8
PA
7794 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
7795 {
7796 gdb_assert (sync_execution || !target_can_async_p ());
7797
7798 target_terminal_ours_for_output ();
7799 printf_filtered (_("No unwaited-for children left.\n"));
7800 }
7801
b57bacec 7802 /* Note: this depends on the update_thread_list call above. */
a25a5a45 7803 if (!breakpoints_should_be_inserted_now () && target_has_execution)
c906108c
SS
7804 {
7805 if (remove_breakpoints ())
7806 {
7807 target_terminal_ours_for_output ();
3e43a32a
MS
7808 printf_filtered (_("Cannot remove breakpoints because "
7809 "program is no longer writable.\nFurther "
7810 "execution is probably impossible.\n"));
c906108c
SS
7811 }
7812 }
c906108c 7813
c906108c
SS
7814 /* If an auto-display called a function and that got a signal,
7815 delete that auto-display to avoid an infinite recursion. */
7816
7817 if (stopped_by_random_signal)
7818 disable_current_display ();
7819
b57bacec 7820 /* Notify observers if we finished a "step"-like command, etc. */
af679fd0
PA
7821 if (target_has_execution
7822 && last.kind != TARGET_WAITKIND_SIGNALLED
7823 && last.kind != TARGET_WAITKIND_EXITED
16c381f0 7824 && inferior_thread ()->control.stop_step)
b57bacec 7825 {
31cc0b80 7826 /* But not if we're in the middle of doing a "step n" operation for
b57bacec
PA
7827 n > 1. */
7828 if (inferior_thread ()->step_multi)
7829 goto done;
7830
7831 observer_notify_end_stepping_range ();
7832 }
c906108c
SS
7833
7834 target_terminal_ours ();
0f641c01 7835 async_enable_stdin ();
c906108c 7836
7abfe014
DJ
7837 /* Set the current source location. This will also happen if we
7838 display the frame below, but the current SAL will be incorrect
7839 during a user hook-stop function. */
d729566a 7840 if (has_stack_frames () && !stop_stack_dummy)
5166082f 7841 set_current_sal_from_frame (get_current_frame ());
7abfe014 7842
28bf096c
PA
7843 /* Let the user/frontend see the threads as stopped, but defer to
7844 call_function_by_hand if the thread finished an infcall
7845 successfully. We may be e.g., evaluating a breakpoint condition.
7846 In that case, the thread had state THREAD_RUNNING before the
7847 infcall, and shall remain marked running, all without informing
7848 the user/frontend about state transition changes. */
7849 if (target_has_execution
7850 && inferior_thread ()->control.in_infcall
7851 && stop_stack_dummy == STOP_STACK_DUMMY)
251bde03
PA
7852 discard_cleanups (old_chain);
7853 else
7854 do_cleanups (old_chain);
dd7e2d2b
PA
7855
7856 /* Look up the hook_stop and run it (CLI internally handles problem
7857 of stop_command's pre-hook not existing). */
7858 if (stop_command)
7859 catch_errors (hook_stop_stub, stop_command,
7860 "Error while running hook_stop:\n", RETURN_MASK_ALL);
7861
d729566a 7862 if (!has_stack_frames ())
d51fd4c8 7863 goto done;
c906108c 7864
32400beb
PA
7865 if (last.kind == TARGET_WAITKIND_SIGNALLED
7866 || last.kind == TARGET_WAITKIND_EXITED)
7867 goto done;
7868
c906108c
SS
7869 /* Select innermost stack frame - i.e., current frame is frame 0,
7870 and current location is based on that.
7871 Don't do this on return from a stack dummy routine,
1777feb0 7872 or if the program has exited. */
c906108c
SS
7873
7874 if (!stop_stack_dummy)
7875 {
0f7d239c 7876 select_frame (get_current_frame ());
c906108c 7877
d01a8610
AS
7878 /* If --batch-silent is enabled then there's no need to print the current
7879 source location, and to try risks causing an error message about
7880 missing source files. */
7881 if (stop_print_frame && !batch_silent)
0c7e1a46 7882 print_stop_event (&last);
c906108c
SS
7883 }
7884
aa7d318d 7885 if (stop_stack_dummy == STOP_STACK_DUMMY)
c906108c 7886 {
b89667eb
DE
7887 /* Pop the empty frame that contains the stack dummy.
7888 This also restores inferior state prior to the call
16c381f0 7889 (struct infcall_suspend_state). */
b89667eb 7890 struct frame_info *frame = get_current_frame ();
abbb1732 7891
b89667eb
DE
7892 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
7893 frame_pop (frame);
3e43a32a
MS
7894 /* frame_pop() calls reinit_frame_cache as the last thing it
7895 does which means there's currently no selected frame. We
7896 don't need to re-establish a selected frame if the dummy call
7897 returns normally, that will be done by
7898 restore_infcall_control_state. However, we do have to handle
7899 the case where the dummy call is returning after being
7900 stopped (e.g. the dummy call previously hit a breakpoint).
7901 We can't know which case we have so just always re-establish
7902 a selected frame here. */
0f7d239c 7903 select_frame (get_current_frame ());
c906108c
SS
7904 }
7905
c906108c
SS
7906done:
7907 annotate_stopped ();
41d2bdb4
PA
7908
7909 /* Suppress the stop observer if we're in the middle of:
7910
7911 - a step n (n > 1), as there are still more steps to be done.
7912
7913 - a "finish" command, as the observer will be called in
7914 finish_command_continuation, so it can include the inferior
7915 function's return value.
7916
7917 - calling an inferior function, as we pretend the inferior didn't
7918 run at all. The return value of the call is handled by the
7919 expression evaluator, through call_function_by_hand. */
7920
7921 if (!target_has_execution
7922 || last.kind == TARGET_WAITKIND_SIGNALLED
7923 || last.kind == TARGET_WAITKIND_EXITED
0e5bf2a8 7924 || last.kind == TARGET_WAITKIND_NO_RESUMED
2ca0b532
PA
7925 || (!(inferior_thread ()->step_multi
7926 && inferior_thread ()->control.stop_step)
16c381f0
JK
7927 && !(inferior_thread ()->control.stop_bpstat
7928 && inferior_thread ()->control.proceed_to_finish)
7929 && !inferior_thread ()->control.in_infcall))
347bddb7
PA
7930 {
7931 if (!ptid_equal (inferior_ptid, null_ptid))
16c381f0 7932 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
1d33d6ba 7933 stop_print_frame);
347bddb7 7934 else
1d33d6ba 7935 observer_notify_normal_stop (NULL, stop_print_frame);
347bddb7 7936 }
347bddb7 7937
48844aa6
PA
7938 if (target_has_execution)
7939 {
7940 if (last.kind != TARGET_WAITKIND_SIGNALLED
7941 && last.kind != TARGET_WAITKIND_EXITED)
7942 /* Delete the breakpoint we stopped at, if it wants to be deleted.
7943 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 7944 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 7945 }
6c95b8df
PA
7946
7947 /* Try to get rid of automatically added inferiors that are no
7948 longer needed. Keeping those around slows things down linearly.
7949 Note that this never removes the current inferior. */
7950 prune_inferiors ();
c906108c
SS
7951}
7952
7953static int
96baa820 7954hook_stop_stub (void *cmd)
c906108c 7955{
5913bcb0 7956 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
c906108c
SS
7957 return (0);
7958}
7959\f
c5aa993b 7960int
96baa820 7961signal_stop_state (int signo)
c906108c 7962{
d6b48e9c 7963 return signal_stop[signo];
c906108c
SS
7964}
7965
c5aa993b 7966int
96baa820 7967signal_print_state (int signo)
c906108c
SS
7968{
7969 return signal_print[signo];
7970}
7971
c5aa993b 7972int
96baa820 7973signal_pass_state (int signo)
c906108c
SS
7974{
7975 return signal_program[signo];
7976}
7977
2455069d
UW
7978static void
7979signal_cache_update (int signo)
7980{
7981 if (signo == -1)
7982 {
a493e3e2 7983 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
7984 signal_cache_update (signo);
7985
7986 return;
7987 }
7988
7989 signal_pass[signo] = (signal_stop[signo] == 0
7990 && signal_print[signo] == 0
ab04a2af
TT
7991 && signal_program[signo] == 1
7992 && signal_catch[signo] == 0);
2455069d
UW
7993}
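/* Worked example (an added illustrative note): _initialize_infrun below
   clears signal_stop[GDB_SIGNAL_ALRM] and signal_print[GDB_SIGNAL_ALRM]
   while leaving signal_program[GDB_SIGNAL_ALRM] = 1 and
   signal_catch[GDB_SIGNAL_ALRM] = 0, so the expression above yields
   signal_pass[GDB_SIGNAL_ALRM] = 1; the target may then deliver SIGALRM
   straight to the inferior without reporting a stop to GDB.  */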
7994
488f131b 7995int
7bda5e4a 7996signal_stop_update (int signo, int state)
d4f3574e
SS
7997{
7998 int ret = signal_stop[signo];
abbb1732 7999
d4f3574e 8000 signal_stop[signo] = state;
2455069d 8001 signal_cache_update (signo);
d4f3574e
SS
8002 return ret;
8003}
8004
488f131b 8005int
7bda5e4a 8006signal_print_update (int signo, int state)
d4f3574e
SS
8007{
8008 int ret = signal_print[signo];
abbb1732 8009
d4f3574e 8010 signal_print[signo] = state;
2455069d 8011 signal_cache_update (signo);
d4f3574e
SS
8012 return ret;
8013}
8014
488f131b 8015int
7bda5e4a 8016signal_pass_update (int signo, int state)
d4f3574e
SS
8017{
8018 int ret = signal_program[signo];
abbb1732 8019
d4f3574e 8020 signal_program[signo] = state;
2455069d 8021 signal_cache_update (signo);
d4f3574e
SS
8022 return ret;
8023}
8024
ab04a2af
TT
8025/* Update the global 'signal_catch' from INFO and notify the
8026 target. */
8027
8028void
8029signal_catch_update (const unsigned int *info)
8030{
8031 int i;
8032
8033 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8034 signal_catch[i] = info[i] > 0;
8035 signal_cache_update (-1);
8036 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
8037}
8038
c906108c 8039static void
96baa820 8040sig_print_header (void)
c906108c 8041{
3e43a32a
MS
8042 printf_filtered (_("Signal Stop\tPrint\tPass "
8043 "to program\tDescription\n"));
c906108c
SS
8044}
8045
8046static void
2ea28649 8047sig_print_info (enum gdb_signal oursig)
c906108c 8048{
2ea28649 8049 const char *name = gdb_signal_to_name (oursig);
c906108c 8050 int name_padding = 13 - strlen (name);
96baa820 8051
c906108c
SS
8052 if (name_padding <= 0)
8053 name_padding = 0;
8054
8055 printf_filtered ("%s", name);
488f131b 8056 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8057 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8058 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8059 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8060 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8061}
8062
8063/* Specify how various signals in the inferior should be handled. */
8064
8065static void
96baa820 8066handle_command (char *args, int from_tty)
c906108c
SS
8067{
8068 char **argv;
8069 int digits, wordlen;
8070 int sigfirst, signum, siglast;
2ea28649 8071 enum gdb_signal oursig;
c906108c
SS
8072 int allsigs;
8073 int nsigs;
8074 unsigned char *sigs;
8075 struct cleanup *old_chain;
8076
8077 if (args == NULL)
8078 {
e2e0b3e5 8079 error_no_arg (_("signal to handle"));
c906108c
SS
8080 }
8081
1777feb0 8082 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8083
a493e3e2 8084 nsigs = (int) GDB_SIGNAL_LAST;
c906108c
SS
8085 sigs = (unsigned char *) alloca (nsigs);
8086 memset (sigs, 0, nsigs);
8087
1777feb0 8088 /* Break the command line up into args. */
c906108c 8089
d1a41061 8090 argv = gdb_buildargv (args);
7a292a7a 8091 old_chain = make_cleanup_freeargv (argv);
c906108c
SS
8092
8093 /* Walk through the args, looking for signal oursigs, signal names, and
8094 actions. Signal numbers and signal names may be interspersed with
8095 actions, with the actions being performed for all signals cumulatively
1777feb0 8096 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
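  /* For example (an added illustration, mirroring the help text installed
     in _initialize_infrun below): "handle SIGALRM nostop noprint pass",
     or, with a numeric range, "handle 5-9 stop print".  */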
c906108c
SS
8097
8098 while (*argv != NULL)
8099 {
8100 wordlen = strlen (*argv);
8101 for (digits = 0; isdigit ((*argv)[digits]); digits++)
8102 {;
8103 }
8104 allsigs = 0;
8105 sigfirst = siglast = -1;
8106
8107 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
8108 {
8109 /* Apply action to all signals except those used by the
1777feb0 8110 debugger. Silently skip those. */
c906108c
SS
8111 allsigs = 1;
8112 sigfirst = 0;
8113 siglast = nsigs - 1;
8114 }
8115 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
8116 {
8117 SET_SIGS (nsigs, sigs, signal_stop);
8118 SET_SIGS (nsigs, sigs, signal_print);
8119 }
8120 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
8121 {
8122 UNSET_SIGS (nsigs, sigs, signal_program);
8123 }
8124 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
8125 {
8126 SET_SIGS (nsigs, sigs, signal_print);
8127 }
8128 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
8129 {
8130 SET_SIGS (nsigs, sigs, signal_program);
8131 }
8132 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
8133 {
8134 UNSET_SIGS (nsigs, sigs, signal_stop);
8135 }
8136 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
8137 {
8138 SET_SIGS (nsigs, sigs, signal_program);
8139 }
8140 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
8141 {
8142 UNSET_SIGS (nsigs, sigs, signal_print);
8143 UNSET_SIGS (nsigs, sigs, signal_stop);
8144 }
8145 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
8146 {
8147 UNSET_SIGS (nsigs, sigs, signal_program);
8148 }
8149 else if (digits > 0)
8150 {
8151 /* It is numeric. The numeric signal refers to our own
8152 internal signal numbering from target.h, not to host/target
8153 signal number. This is a feature; users really should be
8154 using symbolic names anyway, and the common ones like
8155 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8156
8157 sigfirst = siglast = (int)
2ea28649 8158 gdb_signal_from_command (atoi (*argv));
c906108c
SS
8159 if ((*argv)[digits] == '-')
8160 {
8161 siglast = (int)
2ea28649 8162 gdb_signal_from_command (atoi ((*argv) + digits + 1));
c906108c
SS
8163 }
8164 if (sigfirst > siglast)
8165 {
1777feb0 8166 /* Bet he didn't figure we'd think of this case... */
c906108c
SS
8167 signum = sigfirst;
8168 sigfirst = siglast;
8169 siglast = signum;
8170 }
8171 }
8172 else
8173 {
2ea28649 8174 oursig = gdb_signal_from_name (*argv);
a493e3e2 8175 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8176 {
8177 sigfirst = siglast = (int) oursig;
8178 }
8179 else
8180 {
8181 /* Not a number and not a recognized flag word => complain. */
8a3fe4f8 8182 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
c906108c
SS
8183 }
8184 }
8185
8186 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8187 which signals to apply actions to. */
c906108c
SS
8188
8189 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
8190 {
2ea28649 8191 switch ((enum gdb_signal) signum)
c906108c 8192 {
a493e3e2
PA
8193 case GDB_SIGNAL_TRAP:
8194 case GDB_SIGNAL_INT:
c906108c
SS
8195 if (!allsigs && !sigs[signum])
8196 {
9e2f0ad4 8197 if (query (_("%s is used by the debugger.\n\
3e43a32a 8198Are you sure you want to change it? "),
2ea28649 8199 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8200 {
8201 sigs[signum] = 1;
8202 }
8203 else
8204 {
a3f17187 8205 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8206 gdb_flush (gdb_stdout);
8207 }
8208 }
8209 break;
a493e3e2
PA
8210 case GDB_SIGNAL_0:
8211 case GDB_SIGNAL_DEFAULT:
8212 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8213 /* Make sure that "all" doesn't print these. */
8214 break;
8215 default:
8216 sigs[signum] = 1;
8217 break;
8218 }
8219 }
8220
8221 argv++;
8222 }
8223
3a031f65
PA
8224 for (signum = 0; signum < nsigs; signum++)
8225 if (sigs[signum])
8226 {
2455069d 8227 signal_cache_update (-1);
a493e3e2
PA
8228 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
8229 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
c906108c 8230
3a031f65
PA
8231 if (from_tty)
8232 {
8233 /* Show the results. */
8234 sig_print_header ();
8235 for (; signum < nsigs; signum++)
8236 if (sigs[signum])
aead7601 8237 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8238 }
8239
8240 break;
8241 }
c906108c
SS
8242
8243 do_cleanups (old_chain);
8244}
8245
de0bea00
MF
8246/* Complete the "handle" command. */
8247
8248static VEC (char_ptr) *
8249handle_completer (struct cmd_list_element *ignore,
6f937416 8250 const char *text, const char *word)
de0bea00
MF
8251{
8252 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
8253 static const char * const keywords[] =
8254 {
8255 "all",
8256 "stop",
8257 "ignore",
8258 "print",
8259 "pass",
8260 "nostop",
8261 "noignore",
8262 "noprint",
8263 "nopass",
8264 NULL,
8265 };
8266
8267 vec_signals = signal_completer (ignore, text, word);
8268 vec_keywords = complete_on_enum (keywords, word, word);
8269
8270 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
8271 VEC_free (char_ptr, vec_signals);
8272 VEC_free (char_ptr, vec_keywords);
8273 return return_val;
8274}
8275
2ea28649
PA
8276enum gdb_signal
8277gdb_signal_from_command (int num)
ed01b82c
PA
8278{
8279 if (num >= 1 && num <= 15)
2ea28649 8280 return (enum gdb_signal) num;
ed01b82c
PA
8281 error (_("Only signals 1-15 are valid as numeric signals.\n\
8282Use \"info signals\" for a list of symbolic signals."));
8283}
8284
c906108c
SS
8285/* Print current contents of the tables set by the handle command.
8286 It is possible we should just be printing signals actually used
8287 by the current target (but for things to work right when switching
8288 targets, all signals should be in the signal tables). */
8289
8290static void
96baa820 8291signals_info (char *signum_exp, int from_tty)
c906108c 8292{
2ea28649 8293 enum gdb_signal oursig;
abbb1732 8294
c906108c
SS
8295 sig_print_header ();
8296
8297 if (signum_exp)
8298 {
8299 /* First see if this is a symbol name. */
2ea28649 8300 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8301 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8302 {
8303 /* No, try numeric. */
8304 oursig =
2ea28649 8305 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8306 }
8307 sig_print_info (oursig);
8308 return;
8309 }
8310
8311 printf_filtered ("\n");
8312 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8313 for (oursig = GDB_SIGNAL_FIRST;
8314 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8315 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8316 {
8317 QUIT;
8318
a493e3e2
PA
8319 if (oursig != GDB_SIGNAL_UNKNOWN
8320 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8321 sig_print_info (oursig);
8322 }
8323
3e43a32a
MS
8324 printf_filtered (_("\nUse the \"handle\" command "
8325 "to change these tables.\n"));
c906108c 8326}
4aa995e1 8327
c709acd1
PA
8328/* Check if it makes sense to read $_siginfo from the current thread
8329 at this point. If not, throw an error. */
8330
8331static void
8332validate_siginfo_access (void)
8333{
8334 /* No current inferior, no siginfo. */
8335 if (ptid_equal (inferior_ptid, null_ptid))
8336 error (_("No thread selected."));
8337
8338 /* Don't try to read from a dead thread. */
8339 if (is_exited (inferior_ptid))
8340 error (_("The current thread has terminated."));
8341
8342 /* ... or from a spinning thread. */
8343 if (is_running (inferior_ptid))
8344 error (_("Selected thread is running."));
8345}
8346
4aa995e1
PA
8347/* The $_siginfo convenience variable is a bit special. We don't know
8348 for sure the type of the value until we actually have a chance to
7a9dd1b2 8349 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8350 also dependent on which thread you have selected. This is handled by:
8351
8352 1. making $_siginfo be an internalvar that creates a new value on
8353 access.
8354
8355 2. making the value of $_siginfo be an lval_computed value. */
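/* Illustrative usage (an added sketch, not part of the original file): on
   a target whose gdbarch supplies a siginfo type, the user can inspect the
   lazily-fetched value, e.g.

       (gdb) print $_siginfo
       (gdb) print $_siginfo.si_signo

   The member names (si_signo here) come from the gdbarch's siginfo type
   and differ between platforms; the actual transfers are done by
   siginfo_value_read and siginfo_value_write below.  */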
8356
8357/* This function implements the lval_computed support for reading a
8358 $_siginfo value. */
8359
8360static void
8361siginfo_value_read (struct value *v)
8362{
8363 LONGEST transferred;
8364
c709acd1
PA
8365 validate_siginfo_access ();
8366
4aa995e1
PA
8367 transferred =
8368 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
8369 NULL,
8370 value_contents_all_raw (v),
8371 value_offset (v),
8372 TYPE_LENGTH (value_type (v)));
8373
8374 if (transferred != TYPE_LENGTH (value_type (v)))
8375 error (_("Unable to read siginfo"));
8376}
8377
8378/* This function implements the lval_computed support for writing a
8379 $_siginfo value. */
8380
8381static void
8382siginfo_value_write (struct value *v, struct value *fromval)
8383{
8384 LONGEST transferred;
8385
c709acd1
PA
8386 validate_siginfo_access ();
8387
4aa995e1
PA
8388 transferred = target_write (&current_target,
8389 TARGET_OBJECT_SIGNAL_INFO,
8390 NULL,
8391 value_contents_all_raw (fromval),
8392 value_offset (v),
8393 TYPE_LENGTH (value_type (fromval)));
8394
8395 if (transferred != TYPE_LENGTH (value_type (fromval)))
8396 error (_("Unable to write siginfo"));
8397}
8398
c8f2448a 8399static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8400 {
8401 siginfo_value_read,
8402 siginfo_value_write
8403 };
8404
8405/* Return a new value with the correct type for the siginfo object of
78267919
UW
8406 the current thread using architecture GDBARCH. Return a void value
8407 if there's no object available. */
4aa995e1 8408
2c0b251b 8409static struct value *
22d2b532
SDJ
8410siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8411 void *ignore)
4aa995e1 8412{
4aa995e1 8413 if (target_has_stack
78267919
UW
8414 && !ptid_equal (inferior_ptid, null_ptid)
8415 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8416 {
78267919 8417 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8418
78267919 8419 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8420 }
8421
78267919 8422 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8423}
8424
c906108c 8425\f
16c381f0
JK
8426/* infcall_suspend_state contains state about the program itself like its
8427 registers and any signal it received when it last stopped.
8428 This state must be restored regardless of how the inferior function call
8429 ends (either successfully, or after it hits a breakpoint or signal)
8430 if the program is to properly continue where it left off. */
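/* Usage sketch (an added illustration; the literal caller lives in the
   infcall code):

       struct infcall_suspend_state *st = save_infcall_suspend_state ();
       struct cleanup *back_to
         = make_cleanup_restore_infcall_suspend_state (st);
       ... run the inferior function call ...
       do_cleanups (back_to);

   The cleanup runs restore_infcall_suspend_state, which puts the
   registers, stop signal and stop_pc back and then frees ST.  A caller
   that wants to keep the post-call machine state instead discards the
   cleanup and calls discard_infcall_suspend_state itself.  */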
8431
8432struct infcall_suspend_state
7a292a7a 8433{
16c381f0 8434 struct thread_suspend_state thread_suspend;
16c381f0
JK
8435
8436 /* Other fields: */
7a292a7a 8437 CORE_ADDR stop_pc;
b89667eb 8438 struct regcache *registers;
1736ad11 8439
35515841 8440 /* Format of SIGINFO_DATA or NULL if it is not present. */
1736ad11
JK
8441 struct gdbarch *siginfo_gdbarch;
8442
8443 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
8444 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
8445 content would be invalid. */
8446 gdb_byte *siginfo_data;
b89667eb
DE
8447};
8448
16c381f0
JK
8449struct infcall_suspend_state *
8450save_infcall_suspend_state (void)
b89667eb 8451{
16c381f0 8452 struct infcall_suspend_state *inf_state;
b89667eb 8453 struct thread_info *tp = inferior_thread ();
1736ad11
JK
8454 struct regcache *regcache = get_current_regcache ();
8455 struct gdbarch *gdbarch = get_regcache_arch (regcache);
8456 gdb_byte *siginfo_data = NULL;
8457
8458 if (gdbarch_get_siginfo_type_p (gdbarch))
8459 {
8460 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8461 size_t len = TYPE_LENGTH (type);
8462 struct cleanup *back_to;
8463
8464 siginfo_data = xmalloc (len);
8465 back_to = make_cleanup (xfree, siginfo_data);
8466
8467 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
8468 siginfo_data, 0, len) == len)
8469 discard_cleanups (back_to);
8470 else
8471 {
8472 /* Errors ignored. */
8473 do_cleanups (back_to);
8474 siginfo_data = NULL;
8475 }
8476 }
8477
41bf6aca 8478 inf_state = XCNEW (struct infcall_suspend_state);
1736ad11
JK
8479
8480 if (siginfo_data)
8481 {
8482 inf_state->siginfo_gdbarch = gdbarch;
8483 inf_state->siginfo_data = siginfo_data;
8484 }
b89667eb 8485
16c381f0 8486 inf_state->thread_suspend = tp->suspend;
16c381f0 8487
35515841 8488 /* run_inferior_call will not use the signal due to its `proceed' call with
a493e3e2
PA
8489 GDB_SIGNAL_0 anyway. */
8490 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8491
b89667eb
DE
8492 inf_state->stop_pc = stop_pc;
8493
1736ad11 8494 inf_state->registers = regcache_dup (regcache);
b89667eb
DE
8495
8496 return inf_state;
8497}
8498
8499/* Restore inferior session state to INF_STATE. */
8500
8501void
16c381f0 8502restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8503{
8504 struct thread_info *tp = inferior_thread ();
1736ad11
JK
8505 struct regcache *regcache = get_current_regcache ();
8506 struct gdbarch *gdbarch = get_regcache_arch (regcache);
b89667eb 8507
16c381f0 8508 tp->suspend = inf_state->thread_suspend;
16c381f0 8509
b89667eb
DE
8510 stop_pc = inf_state->stop_pc;
8511
1736ad11
JK
8512 if (inf_state->siginfo_gdbarch == gdbarch)
8513 {
8514 struct type *type = gdbarch_get_siginfo_type (gdbarch);
1736ad11
JK
8515
8516 /* Errors ignored. */
8517 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6acef6cd 8518 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
1736ad11
JK
8519 }
8520
b89667eb
DE
8521 /* The inferior can be gone if the user types "print exit(0)"
8522 (and perhaps other times). */
8523 if (target_has_execution)
8524 /* NB: The register write goes through to the target. */
1736ad11 8525 regcache_cpy (regcache, inf_state->registers);
803b5f95 8526
16c381f0 8527 discard_infcall_suspend_state (inf_state);
b89667eb
DE
8528}
8529
8530static void
16c381f0 8531do_restore_infcall_suspend_state_cleanup (void *state)
b89667eb 8532{
16c381f0 8533 restore_infcall_suspend_state (state);
b89667eb
DE
8534}
8535
8536struct cleanup *
16c381f0
JK
8537make_cleanup_restore_infcall_suspend_state
8538 (struct infcall_suspend_state *inf_state)
b89667eb 8539{
16c381f0 8540 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
b89667eb
DE
8541}
8542
8543void
16c381f0 8544discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8545{
8546 regcache_xfree (inf_state->registers);
803b5f95 8547 xfree (inf_state->siginfo_data);
b89667eb
DE
8548 xfree (inf_state);
8549}
8550
8551struct regcache *
16c381f0 8552get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb
DE
8553{
8554 return inf_state->registers;
8555}
8556
16c381f0
JK
8557/* infcall_control_state contains state regarding gdb's control of the
8558 inferior itself like stepping control. It also contains session state like
8559 the user's currently selected frame. */
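/* Added note: the save/restore discipline mirrors infcall_suspend_state
   above; save_infcall_control_state pairs with either
   restore_infcall_control_state (typically installed as a cleanup via
   make_cleanup_restore_infcall_control_state) or, if the caller decides
   to keep the new control state, discard_infcall_control_state.  */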
b89667eb 8560
16c381f0 8561struct infcall_control_state
b89667eb 8562{
16c381f0
JK
8563 struct thread_control_state thread_control;
8564 struct inferior_control_state inferior_control;
d82142e2
JK
8565
8566 /* Other fields: */
8567 enum stop_stack_kind stop_stack_dummy;
8568 int stopped_by_random_signal;
7a292a7a 8569 int stop_after_trap;
7a292a7a 8570
b89667eb 8571 /* ID if the selected frame when the inferior function call was made. */
101dcfbe 8572 struct frame_id selected_frame_id;
7a292a7a
SS
8573};
8574
c906108c 8575/* Save all of the information associated with the inferior<==>gdb
b89667eb 8576 connection. */
c906108c 8577
16c381f0
JK
8578struct infcall_control_state *
8579save_infcall_control_state (void)
c906108c 8580{
16c381f0 8581 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
4e1c45ea 8582 struct thread_info *tp = inferior_thread ();
d6b48e9c 8583 struct inferior *inf = current_inferior ();
7a292a7a 8584
16c381f0
JK
8585 inf_status->thread_control = tp->control;
8586 inf_status->inferior_control = inf->control;
d82142e2 8587
8358c15c 8588 tp->control.step_resume_breakpoint = NULL;
5b79abe7 8589 tp->control.exception_resume_breakpoint = NULL;
8358c15c 8590
16c381f0
JK
8591 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
8592 chain. If caller's caller is walking the chain, they'll be happier if we
8593 hand them back the original chain when restore_infcall_control_state is
8594 called. */
8595 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
8596
8597 /* Other fields: */
8598 inf_status->stop_stack_dummy = stop_stack_dummy;
8599 inf_status->stopped_by_random_signal = stopped_by_random_signal;
8600 inf_status->stop_after_trap = stop_after_trap;
c5aa993b 8601
206415a3 8602 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 8603
7a292a7a 8604 return inf_status;
c906108c
SS
8605}
8606
c906108c 8607static int
96baa820 8608restore_selected_frame (void *args)
c906108c 8609{
488f131b 8610 struct frame_id *fid = (struct frame_id *) args;
c906108c 8611 struct frame_info *frame;
c906108c 8612
101dcfbe 8613 frame = frame_find_by_id (*fid);
c906108c 8614
aa0cd9c1
AC
8615 /* If inf_status->selected_frame_id is NULL, there was no previously
8616 selected frame. */
101dcfbe 8617 if (frame == NULL)
c906108c 8618 {
8a3fe4f8 8619 warning (_("Unable to restore previously selected frame."));
c906108c
SS
8620 return 0;
8621 }
8622
0f7d239c 8623 select_frame (frame);
c906108c
SS
8624
8625 return (1);
8626}
8627
b89667eb
DE
8628/* Restore inferior session state to INF_STATUS. */
8629
c906108c 8630void
16c381f0 8631restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 8632{
4e1c45ea 8633 struct thread_info *tp = inferior_thread ();
d6b48e9c 8634 struct inferior *inf = current_inferior ();
4e1c45ea 8635
8358c15c
JK
8636 if (tp->control.step_resume_breakpoint)
8637 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
8638
5b79abe7
TT
8639 if (tp->control.exception_resume_breakpoint)
8640 tp->control.exception_resume_breakpoint->disposition
8641 = disp_del_at_next_stop;
8642
d82142e2 8643 /* Handle the bpstat_copy of the chain. */
16c381f0 8644 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 8645
16c381f0
JK
8646 tp->control = inf_status->thread_control;
8647 inf->control = inf_status->inferior_control;
d82142e2
JK
8648
8649 /* Other fields: */
8650 stop_stack_dummy = inf_status->stop_stack_dummy;
8651 stopped_by_random_signal = inf_status->stopped_by_random_signal;
8652 stop_after_trap = inf_status->stop_after_trap;
c906108c 8653
b89667eb 8654 if (target_has_stack)
c906108c 8655 {
c906108c 8656 /* The point of catch_errors is that if the stack is clobbered,
101dcfbe
AC
8657 walking the stack might encounter a garbage pointer and
8658 error() trying to dereference it. */
488f131b
JB
8659 if (catch_errors
8660 (restore_selected_frame, &inf_status->selected_frame_id,
8661 "Unable to restore previously selected frame:\n",
8662 RETURN_MASK_ERROR) == 0)
c906108c
SS
8663 /* Error in restoring the selected frame. Select the innermost
8664 frame. */
0f7d239c 8665 select_frame (get_current_frame ());
c906108c 8666 }
c906108c 8667
72cec141 8668 xfree (inf_status);
7a292a7a 8669}
c906108c 8670
74b7792f 8671static void
16c381f0 8672do_restore_infcall_control_state_cleanup (void *sts)
74b7792f 8673{
16c381f0 8674 restore_infcall_control_state (sts);
74b7792f
AC
8675}
8676
8677struct cleanup *
16c381f0
JK
8678make_cleanup_restore_infcall_control_state
8679 (struct infcall_control_state *inf_status)
74b7792f 8680{
16c381f0 8681 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
74b7792f
AC
8682}
8683
c906108c 8684void
16c381f0 8685discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 8686{
8358c15c
JK
8687 if (inf_status->thread_control.step_resume_breakpoint)
8688 inf_status->thread_control.step_resume_breakpoint->disposition
8689 = disp_del_at_next_stop;
8690
5b79abe7
TT
8691 if (inf_status->thread_control.exception_resume_breakpoint)
8692 inf_status->thread_control.exception_resume_breakpoint->disposition
8693 = disp_del_at_next_stop;
8694
1777feb0 8695 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 8696 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 8697
72cec141 8698 xfree (inf_status);
7a292a7a 8699}
b89667eb 8700\f
ca6724c1
KB
8701/* restore_inferior_ptid() will be used by the cleanup machinery
8702 to restore the inferior_ptid value saved in a call to
8703 save_inferior_ptid(). */
ce696e05
KB
8704
8705static void
8706restore_inferior_ptid (void *arg)
8707{
8708 ptid_t *saved_ptid_ptr = arg;
abbb1732 8709
ce696e05
KB
8710 inferior_ptid = *saved_ptid_ptr;
8711 xfree (arg);
8712}
8713
8714/* Save the value of inferior_ptid so that it may be restored by a
8715 later call to do_cleanups(). Returns the struct cleanup pointer
8716 needed for later doing the cleanup. */
8717
8718struct cleanup *
8719save_inferior_ptid (void)
8720{
8721 ptid_t *saved_ptid_ptr;
8722
8723 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
8724 *saved_ptid_ptr = inferior_ptid;
8725 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
8726}
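/* Typical usage (an added sketch; OTHER_PTID stands for whatever thread
   the caller needs to switch to temporarily):

       struct cleanup *old_chain = save_inferior_ptid ();
       inferior_ptid = other_ptid;
       ... operate on the other thread ...
       do_cleanups (old_chain);

   The cleanup restores the saved inferior_ptid and frees the heap copy
   made above.  */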
0c557179 8727
7f89fd65 8728/* See infrun.h. */
0c557179
SDJ
8729
8730void
8731clear_exit_convenience_vars (void)
8732{
8733 clear_internalvar (lookup_internalvar ("_exitsignal"));
8734 clear_internalvar (lookup_internalvar ("_exitcode"));
8735}
c5aa993b 8736\f
488f131b 8737
b2175913
MS
8738/* User interface for reverse debugging:
8739 Set exec-direction / show exec-direction commands
8740 (returns error unless target implements to_set_exec_direction method). */
8741
32231432 8742int execution_direction = EXEC_FORWARD;
b2175913
MS
8743static const char exec_forward[] = "forward";
8744static const char exec_reverse[] = "reverse";
8745static const char *exec_direction = exec_forward;
40478521 8746static const char *const exec_direction_names[] = {
b2175913
MS
8747 exec_forward,
8748 exec_reverse,
8749 NULL
8750};
8751
8752static void
8753set_exec_direction_func (char *args, int from_tty,
8754 struct cmd_list_element *cmd)
8755{
8756 if (target_can_execute_reverse)
8757 {
8758 if (!strcmp (exec_direction, exec_forward))
8759 execution_direction = EXEC_FORWARD;
8760 else if (!strcmp (exec_direction, exec_reverse))
8761 execution_direction = EXEC_REVERSE;
8762 }
8bbed405
MS
8763 else
8764 {
8765 exec_direction = exec_forward;
8766 error (_("Target does not support this operation."));
8767 }
b2175913
MS
8768}
8769
8770static void
8771show_exec_direction_func (struct ui_file *out, int from_tty,
8772 struct cmd_list_element *cmd, const char *value)
8773{
8774 switch (execution_direction) {
8775 case EXEC_FORWARD:
8776 fprintf_filtered (out, _("Forward.\n"));
8777 break;
8778 case EXEC_REVERSE:
8779 fprintf_filtered (out, _("Reverse.\n"));
8780 break;
b2175913 8781 default:
d8b34453
PA
8782 internal_error (__FILE__, __LINE__,
8783 _("bogus execution_direction value: %d"),
8784 (int) execution_direction);
b2175913
MS
8785 }
8786}
8787
d4db2f36
PA
8788static void
8789show_schedule_multiple (struct ui_file *file, int from_tty,
8790 struct cmd_list_element *c, const char *value)
8791{
3e43a32a
MS
8792 fprintf_filtered (file, _("Resuming the execution of threads "
8793 "of all processes is %s.\n"), value);
d4db2f36 8794}
ad52ddc6 8795
22d2b532
SDJ
8796/* Implementation of `siginfo' variable. */
8797
8798static const struct internalvar_funcs siginfo_funcs =
8799{
8800 siginfo_make_value,
8801 NULL,
8802 NULL
8803};
8804
372316f1
PA
8805/* Callback for infrun's target events source. This is marked when a
8806 thread has a pending status to process. */
8807
8808static void
8809infrun_async_inferior_event_handler (gdb_client_data data)
8810{
8811 /* If the target is closed while this event source is marked, we
8812 will reach here without execution, or a target to call
8813 target_wait on, which is an error. Instead of tracking whether
8814 the target has been popped already, or whether we have threads
8815 with pending statuses, simply ignore the event. */
8816 if (!target_is_async_p ())
8817 return;
8818
8819 inferior_event_handler (INF_REG_EVENT, NULL);
8820}
8821
c906108c 8822void
96baa820 8823_initialize_infrun (void)
c906108c 8824{
52f0bd74
AC
8825 int i;
8826 int numsigs;
de0bea00 8827 struct cmd_list_element *c;
c906108c 8828
372316f1
PA
8829 /* Register extra event sources in the event loop. */
8830 infrun_async_inferior_event_token
8831 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
8832
1bedd215
AC
8833 add_info ("signals", signals_info, _("\
8834What debugger does when program gets various signals.\n\
8835Specify a signal as argument to print info on that signal only."));
c906108c
SS
8836 add_info_alias ("handle", "signals", 0);
8837
de0bea00 8838 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 8839Specify how to handle signals.\n\
486c7739 8840Usage: handle SIGNAL [ACTIONS]\n\
c906108c 8841Args are signals and actions to apply to those signals.\n\
dfbd5e7b 8842If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
8843will be displayed instead.\n\
8844\n\
c906108c
SS
8845Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
8846from 1-15 are allowed for compatibility with old versions of GDB.\n\
8847Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
8848The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 8849used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 8850\n\
1bedd215 8851Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
8852\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
8853Stop means reenter debugger if this signal happens (implies print).\n\
8854Print means print a message if this signal happens.\n\
8855Pass means let program see this signal; otherwise program doesn't know.\n\
8856Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
8857Pass and Stop may be combined.\n\
8858\n\
8859Multiple signals may be specified. Signal numbers and signal names\n\
8860may be interspersed with actions, with the actions being performed for\n\
8861all signals cumulatively specified."));
de0bea00 8862 set_cmd_completer (c, handle_completer);
486c7739 8863
c906108c 8864 if (!dbx_commands)
1a966eab
AC
8865 stop_command = add_cmd ("stop", class_obscure,
8866 not_just_help_class_command, _("\
8867There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 8868This allows you to set a list of commands to be run each time execution\n\
1a966eab 8869of the program stops."), &cmdlist);
c906108c 8870
ccce17b0 8871 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
8872Set inferior debugging."), _("\
8873Show inferior debugging."), _("\
8874When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
8875 NULL,
8876 show_debug_infrun,
8877 &setdebuglist, &showdebuglist);
527159b7 8878
3e43a32a
MS
8879 add_setshow_boolean_cmd ("displaced", class_maintenance,
8880 &debug_displaced, _("\
237fc4c9
PA
8881Set displaced stepping debugging."), _("\
8882Show displaced stepping debugging."), _("\
8883When non-zero, displaced stepping specific debugging is enabled."),
8884 NULL,
8885 show_debug_displaced,
8886 &setdebuglist, &showdebuglist);
8887
ad52ddc6
PA
8888 add_setshow_boolean_cmd ("non-stop", no_class,
8889 &non_stop_1, _("\
8890Set whether gdb controls the inferior in non-stop mode."), _("\
8891Show whether gdb controls the inferior in non-stop mode."), _("\
8892When debugging a multi-threaded program and this setting is\n\
8893off (the default, also called all-stop mode), when one thread stops\n\
8894(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
8895all other threads in the program while you interact with the thread of\n\
8896interest. When you continue or step a thread, you can allow the other\n\
8897threads to run, or have them remain stopped, but while you inspect any\n\
8898thread's state, all threads stop.\n\
8899\n\
8900In non-stop mode, when one thread stops, other threads can continue\n\
8901to run freely. You'll be able to step each thread independently,\n\
8902leave it stopped or free to run as needed."),
8903 set_non_stop,
8904 show_non_stop,
8905 &setlist,
8906 &showlist);
8907
a493e3e2 8908 numsigs = (int) GDB_SIGNAL_LAST;
488f131b 8909 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
c906108c
SS
8910 signal_print = (unsigned char *)
8911 xmalloc (sizeof (signal_print[0]) * numsigs);
8912 signal_program = (unsigned char *)
8913 xmalloc (sizeof (signal_program[0]) * numsigs);
ab04a2af
TT
8914 signal_catch = (unsigned char *)
8915 xmalloc (sizeof (signal_catch[0]) * numsigs);
2455069d 8916 signal_pass = (unsigned char *)
4395285e 8917 xmalloc (sizeof (signal_pass[0]) * numsigs);
c906108c
SS
8918 for (i = 0; i < numsigs; i++)
8919 {
8920 signal_stop[i] = 1;
8921 signal_print[i] = 1;
8922 signal_program[i] = 1;
ab04a2af 8923 signal_catch[i] = 0;
c906108c
SS
8924 }
8925
4d9d9d04
PA
8926 /* Signals caused by debugger's own actions should not be given to
8927 the program afterwards.
8928
8929 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
8930 explicitly specifies that it should be delivered to the target
8931 program. Typically, that would occur when a user is debugging a
8932 target monitor on a simulator: the target monitor sets a
8933 breakpoint; the simulator encounters this breakpoint and halts
8934 the simulation handing control to GDB; GDB, noting that the stop
8935 address doesn't map to any known breakpoint, returns control back
8936 to the simulator; the simulator then delivers the hardware
8937 equivalent of a GDB_SIGNAL_TRAP to the program being
8938 debugged. */
a493e3e2
PA
8939 signal_program[GDB_SIGNAL_TRAP] = 0;
8940 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
8941
8942 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
8943 signal_stop[GDB_SIGNAL_ALRM] = 0;
8944 signal_print[GDB_SIGNAL_ALRM] = 0;
8945 signal_stop[GDB_SIGNAL_VTALRM] = 0;
8946 signal_print[GDB_SIGNAL_VTALRM] = 0;
8947 signal_stop[GDB_SIGNAL_PROF] = 0;
8948 signal_print[GDB_SIGNAL_PROF] = 0;
8949 signal_stop[GDB_SIGNAL_CHLD] = 0;
8950 signal_print[GDB_SIGNAL_CHLD] = 0;
8951 signal_stop[GDB_SIGNAL_IO] = 0;
8952 signal_print[GDB_SIGNAL_IO] = 0;
8953 signal_stop[GDB_SIGNAL_POLL] = 0;
8954 signal_print[GDB_SIGNAL_POLL] = 0;
8955 signal_stop[GDB_SIGNAL_URG] = 0;
8956 signal_print[GDB_SIGNAL_URG] = 0;
8957 signal_stop[GDB_SIGNAL_WINCH] = 0;
8958 signal_print[GDB_SIGNAL_WINCH] = 0;
8959 signal_stop[GDB_SIGNAL_PRIO] = 0;
8960 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 8961
cd0fc7c3
SS
8962 /* These signals are used internally by user-level thread
8963 implementations. (See signal(5) on Solaris.) Like the above
8964 signals, a healthy program receives and handles them as part of
8965 its normal operation. */
a493e3e2
PA
8966 signal_stop[GDB_SIGNAL_LWP] = 0;
8967 signal_print[GDB_SIGNAL_LWP] = 0;
8968 signal_stop[GDB_SIGNAL_WAITING] = 0;
8969 signal_print[GDB_SIGNAL_WAITING] = 0;
8970 signal_stop[GDB_SIGNAL_CANCEL] = 0;
8971 signal_print[GDB_SIGNAL_CANCEL] = 0;
cd0fc7c3 8972
2455069d
UW
8973 /* Update cached state. */
8974 signal_cache_update (-1);
8975
85c07804
AC
8976 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
8977 &stop_on_solib_events, _("\
8978Set stopping for shared library events."), _("\
8979Show stopping for shared library events."), _("\
c906108c
SS
8980If nonzero, gdb will give control to the user when the dynamic linker\n\
8981notifies gdb of shared library events. The most common event of interest\n\
85c07804 8982to the user would be loading/unloading of a new library."),
f9e14852 8983 set_stop_on_solib_events,
920d2a44 8984 show_stop_on_solib_events,
85c07804 8985 &setlist, &showlist);
c906108c 8986
7ab04401
AC
8987 add_setshow_enum_cmd ("follow-fork-mode", class_run,
8988 follow_fork_mode_kind_names,
8989 &follow_fork_mode_string, _("\
8990Set debugger response to a program call of fork or vfork."), _("\
8991Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
8992A fork or vfork creates a new process. follow-fork-mode can be:\n\
8993 parent - the original process is debugged after a fork\n\
8994 child - the new process is debugged after a fork\n\
ea1dd7bc 8995The unfollowed process will continue to run.\n\
7ab04401
AC
8996By default, the debugger will follow the parent process."),
8997 NULL,
920d2a44 8998 show_follow_fork_mode_string,
7ab04401
AC
8999 &setlist, &showlist);
9000
6c95b8df
PA
9001 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9002 follow_exec_mode_names,
9003 &follow_exec_mode_string, _("\
9004Set debugger response to a program call of exec."), _("\
9005Show debugger response to a program call of exec."), _("\
9006An exec call replaces the program image of a process.\n\
9007\n\
9008follow-exec-mode can be:\n\
9009\n\
cce7e648 9010 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9011to this new inferior. The program the process was running before\n\
9012the exec call can be restarted afterwards by restarting the original\n\
9013inferior.\n\
9014\n\
9015 same - the debugger keeps the process bound to the same inferior.\n\
9016The new executable image replaces the previous executable loaded in\n\
9017the inferior. Restarting the inferior after the exec call restarts\n\
9018the executable the process was running after the exec call.\n\
9019\n\
9020By default, the debugger will use the same inferior."),
9021 NULL,
9022 show_follow_exec_mode_string,
9023 &setlist, &showlist);
9024
7ab04401
AC
9025 add_setshow_enum_cmd ("scheduler-locking", class_run,
9026 scheduler_enums, &scheduler_mode, _("\
9027Set mode for locking scheduler during execution."), _("\
9028Show mode for locking scheduler during execution."), _("\
c906108c
SS
9029off == no locking (threads may preempt at any time)\n\
9030on == full locking (no thread except the current thread may run)\n\
856e7dd6
PA
9031step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9032 In this mode, other threads may run during other commands."),
7ab04401 9033 set_schedlock_func, /* traps on target vector */
920d2a44 9034 show_scheduler_mode,
7ab04401 9035 &setlist, &showlist);
5fbbeb29 9036
d4db2f36
PA
9037 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9038Set mode for resuming threads of all processes."), _("\
9039Show mode for resuming threads of all processes."), _("\
9040When on, execution commands (such as 'continue' or 'next') resume all\n\
9041threads of all processes. When off (which is the default), execution\n\
9042commands only resume the threads of the current process. The set of\n\
9043threads that are resumed is further refined by the scheduler-locking\n\
9044mode (see help set scheduler-locking)."),
9045 NULL,
9046 show_schedule_multiple,
9047 &setlist, &showlist);
9048
5bf193a2
AC
9049 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9050Set mode of the step operation."), _("\
9051Show mode of the step operation."), _("\
9052When set, doing a step over a function without debug line information\n\
9053will stop at the first instruction of that function. Otherwise, the\n\
9054function is skipped and the step command stops at a different source line."),
9055 NULL,
920d2a44 9056 show_step_stop_if_no_debug,
5bf193a2 9057 &setlist, &showlist);
ca6724c1 9058
72d0e2c5
YQ
9059 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9060 &can_use_displaced_stepping, _("\
237fc4c9
PA
9061Set debugger's willingness to use displaced stepping."), _("\
9062Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9063If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9064supported by the target architecture. If off, gdb will not use displaced\n\
9065stepping to step over breakpoints, even if such is supported by the target\n\
9066architecture. If auto (which is the default), gdb will use displaced stepping\n\
9067if the target architecture supports it and non-stop mode is active, but will not\n\
9068use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9069 NULL,
9070 show_can_use_displaced_stepping,
9071 &setlist, &showlist);
237fc4c9 9072
b2175913
MS
9073 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9074 &exec_direction, _("Set direction of execution.\n\
9075Options are 'forward' or 'reverse'."),
9076 _("Show direction of execution (forward/reverse)."),
9077 _("Tells gdb whether to execute forward or backward."),
9078 set_exec_direction_func, show_exec_direction_func,
9079 &setlist, &showlist);
9080
6c95b8df
PA
9081 /* Set/show detach-on-fork: user-settable mode. */
9082
9083 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9084Set whether gdb will detach the child of a fork."), _("\
9085Show whether gdb will detach the child of a fork."), _("\
9086Tells gdb whether to detach the child of a fork."),
9087 NULL, NULL, &setlist, &showlist);
9088
03583c20
UW
9089 /* Set/show disable address space randomization mode. */
9090
9091 add_setshow_boolean_cmd ("disable-randomization", class_support,
9092 &disable_randomization, _("\
9093Set disabling of debuggee's virtual address space randomization."), _("\
9094Show disabling of debuggee's virtual address space randomization."), _("\
9095When this mode is on (which is the default), randomization of the virtual\n\
9096address space is disabled. Standalone programs run with the randomization\n\
9097enabled by default on some platforms."),
9098 &set_disable_randomization,
9099 &show_disable_randomization,
9100 &setlist, &showlist);
9101
ca6724c1 9102 /* ptid initializations */
ca6724c1
KB
9103 inferior_ptid = null_ptid;
9104 target_last_wait_ptid = minus_one_ptid;
5231c1fd
PA
9105
9106 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
252fbfc8 9107 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
a07daef3 9108 observer_attach_thread_exit (infrun_thread_thread_exit);
fc1cf338 9109 observer_attach_inferior_exit (infrun_inferior_exit);
4aa995e1
PA
9110
9111 /* Explicitly create without lookup, since that tries to create a
9112 value with a void typed value, and when we get here, gdbarch
9113 isn't initialized yet. At this point, we're quite sure there
9114 isn't another convenience variable of the same name. */
22d2b532 9115 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9116
9117 add_setshow_boolean_cmd ("observer", no_class,
9118 &observer_mode_1, _("\
9119Set whether gdb controls the inferior in observer mode."), _("\
9120Show whether gdb controls the inferior in observer mode."), _("\
9121In observer mode, GDB can get data from the inferior, but not\n\
9122affect its execution. Registers and memory may not be changed,\n\
9123breakpoints may not be set, and the program cannot be interrupted\n\
9124or signalled."),
9125 set_observer_mode,
9126 show_observer_mode,
9127 &setlist,
9128 &showlist);
c906108c 9129}