follow-fork: don't lose the ptids as set by the target
gdb/infrun.c  (deliverable/binutils-gdb.git)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2015 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105 over such a function. */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
296 #define SET_SIGS(nsigs,sigs,flags) \
297 do { \
298 int signum = (nsigs); \
299 while (signum-- > 0) \
300 if ((sigs)[signum]) \
301 (flags)[signum] = 1; \
302 } while (0)
303
304 #define UNSET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 0; \
310 } while (0)
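/* A minimal usage sketch of the SET_SIGS / UNSET_SIGS helpers above,
   kept out of the build with "#if 0"; the table size and the signal
   numbers below are hypothetical, chosen only for illustration.  */
#if 0
static void
example_mark_stop_signals (void)
{
  unsigned char sigs[16] = { 0 };
  unsigned char stop_flags[16] = { 0 };

  /* Pretend the user asked to change the handling of signals 2 and 11.  */
  sigs[2] = 1;
  sigs[11] = 1;

  /* Every signal selected in SIGS gets its flag set in STOP_FLAGS; all
     other entries are left untouched.  */
  SET_SIGS (16, sigs, stop_flags);

  /* ... and UNSET_SIGS clears the same selection again.  */
  UNSET_SIGS (16, sigs, stop_flags);
}
#endif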
311
312 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
313 this function is to avoid exporting `signal_program'. */
314
315 void
316 update_signals_program_target (void)
317 {
318 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
319 }
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Function inferior was in as of last step command. */
330
331 static struct symbol *step_start_function;
332
333 /* Nonzero if we want to give control to the user when we're notified
334 of shared library events by the dynamic linker. */
335 int stop_on_solib_events;
336
337 /* Enable or disable optional shared library event breakpoints
338 as appropriate when the above flag is changed. */
339
340 static void
341 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
342 {
343 update_solib_breakpoints ();
344 }
345
346 static void
347 show_stop_on_solib_events (struct ui_file *file, int from_tty,
348 struct cmd_list_element *c, const char *value)
349 {
350 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
351 value);
352 }
353
354 /* Nonzero means we are expecting a trace trap, and should stop the
355 inferior and return silently when it happens. */
356
357 int stop_after_trap;
358
359 /* Save register contents here when executing a "finish" command, or when
360 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
361 Thus this contains the return value from the called function (assuming
362 values are returned in a register). */
363
364 struct regcache *stop_registers;
365
366 /* Nonzero after stop if current stack frame should be printed. */
367
368 static int stop_print_frame;
369
370 /* This is a cached copy of the pid/waitstatus of the last event
371 returned by target_wait()/deprecated_target_wait_hook(). This
372 information is returned by get_last_target_status(). */
373 static ptid_t target_last_wait_ptid;
374 static struct target_waitstatus target_last_waitstatus;
375
376 static void context_switch (ptid_t ptid);
377
378 void init_thread_stepping_state (struct thread_info *tss);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
390 static void
391 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
393 {
394 fprintf_filtered (file,
395 _("Debugger response to a program "
396 "call of fork or vfork is \"%s\".\n"),
397 value);
398 }
399 \f
400
401 /* Handle changes to the inferior list based on the type of fork,
402 which process is being followed, and whether the other process
403 should be detached. On entry inferior_ptid must be the ptid of
404 the fork parent. At return inferior_ptid is the ptid of the
405 followed inferior. */
406
407 static int
408 follow_fork_inferior (int follow_child, int detach_fork)
409 {
410 int has_vforked;
411 ptid_t parent_ptid, child_ptid;
412
413 has_vforked = (inferior_thread ()->pending_follow.kind
414 == TARGET_WAITKIND_VFORKED);
415 parent_ptid = inferior_ptid;
416 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
417
418 if (has_vforked
419 && !non_stop /* Non-stop always resumes both branches. */
420 && (!target_is_async_p () || sync_execution)
421 && !(follow_child || detach_fork || sched_multi))
422 {
423 /* The parent stays blocked inside the vfork syscall until the
424 child execs or exits. If we don't let the child run, then
425 the parent stays blocked. If we're telling the parent to run
426 in the foreground, the user will not be able to ctrl-c to get
427 back the terminal, effectively hanging the debug session. */
428 fprintf_filtered (gdb_stderr, _("\
429 Cannot resume the parent process over vfork in the foreground while\n\
430 holding the child stopped. Try \"set detach-on-fork\" or \
431 \"set schedule-multiple\".\n"));
432 /* FIXME output string > 80 columns. */
433 return 1;
434 }
435
436 if (!follow_child)
437 {
438 /* Detach new forked process? */
439 if (detach_fork)
440 {
441 struct cleanup *old_chain;
442
443 /* Before detaching from the child, remove all breakpoints
444 from it. If we forked, then this has already been taken
445 care of by infrun.c. If we vforked however, any
446 breakpoint inserted in the parent is visible in the
447 child, even those added while stopped in a vfork
448 catchpoint. This will remove the breakpoints from the
449 parent also, but they'll be reinserted below. */
450 if (has_vforked)
451 {
452 /* Keep breakpoints list in sync. */
453 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
454 }
455
456 if (info_verbose || debug_infrun)
457 {
458 target_terminal_ours_for_output ();
459 fprintf_filtered (gdb_stdlog,
460 _("Detaching after %s from child %s.\n"),
461 has_vforked ? "vfork" : "fork",
462 target_pid_to_str (child_ptid));
463 }
464 }
465 else
466 {
467 struct inferior *parent_inf, *child_inf;
468 struct cleanup *old_chain;
469
470 /* Add process to GDB's tables. */
471 child_inf = add_inferior (ptid_get_pid (child_ptid));
472
473 parent_inf = current_inferior ();
474 child_inf->attach_flag = parent_inf->attach_flag;
475 copy_terminal_info (child_inf, parent_inf);
476 child_inf->gdbarch = parent_inf->gdbarch;
477 copy_inferior_target_desc_info (child_inf, parent_inf);
478
479 old_chain = save_inferior_ptid ();
480 save_current_program_space ();
481
482 inferior_ptid = child_ptid;
483 add_thread (inferior_ptid);
484 child_inf->symfile_flags = SYMFILE_NO_READ;
485
486 /* If this is a vfork child, then the address-space is
487 shared with the parent. */
488 if (has_vforked)
489 {
490 child_inf->pspace = parent_inf->pspace;
491 child_inf->aspace = parent_inf->aspace;
492
493 /* The parent will be frozen until the child is done
494 with the shared region. Keep track of the
495 parent. */
496 child_inf->vfork_parent = parent_inf;
497 child_inf->pending_detach = 0;
498 parent_inf->vfork_child = child_inf;
499 parent_inf->pending_detach = 0;
500 }
501 else
502 {
503 child_inf->aspace = new_address_space ();
504 child_inf->pspace = add_program_space (child_inf->aspace);
505 child_inf->removable = 1;
506 set_current_program_space (child_inf->pspace);
507 clone_program_space (child_inf->pspace, parent_inf->pspace);
508
509 /* Let the shared library layer (e.g., solib-svr4) learn
510 about this new process, relocate the cloned exec, pull
511 in shared libraries, and install the solib event
512 breakpoint. If a "cloned-VM" event was propagated
513 better throughout the core, this wouldn't be
514 required. */
515 solib_create_inferior_hook (0);
516 }
517
518 do_cleanups (old_chain);
519 }
520
521 if (has_vforked)
522 {
523 struct inferior *parent_inf;
524
525 parent_inf = current_inferior ();
526
527 /* If we detached from the child, then we have to be careful
528 to not insert breakpoints in the parent until the child
529 is done with the shared memory region. However, if we're
530 staying attached to the child, then we can and should
531 insert breakpoints, so that we can debug it. A
532 subsequent child exec or exit is enough to know when the
533 child stops using the parent's address space. */
534 parent_inf->waiting_for_vfork_done = detach_fork;
535 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
536 }
537 }
538 else
539 {
540 /* Follow the child. */
541 struct inferior *parent_inf, *child_inf;
542 struct program_space *parent_pspace;
543
544 if (info_verbose || debug_infrun)
545 {
546 target_terminal_ours_for_output ();
547 fprintf_filtered (gdb_stdlog,
548 _("Attaching after %s %s to child %s.\n"),
549 target_pid_to_str (parent_ptid),
550 has_vforked ? "vfork" : "fork",
551 target_pid_to_str (child_ptid));
552 }
553
554 /* Add the new inferior first, so that the target_detach below
555 doesn't unpush the target. */
556
557 child_inf = add_inferior (ptid_get_pid (child_ptid));
558
559 parent_inf = current_inferior ();
560 child_inf->attach_flag = parent_inf->attach_flag;
561 copy_terminal_info (child_inf, parent_inf);
562 child_inf->gdbarch = parent_inf->gdbarch;
563 copy_inferior_target_desc_info (child_inf, parent_inf);
564
565 parent_pspace = parent_inf->pspace;
566
567 /* If we're vforking, we want to hold on to the parent until the
568 child exits or execs. At child exec or exit time we can
569 remove the old breakpoints from the parent and detach or
570 resume debugging it. Otherwise, detach the parent now; we'll
571 want to reuse its program/address spaces, but we can't set
572 them to the child before removing breakpoints from the
573 parent; otherwise, the breakpoints module could decide to
574 remove breakpoints from the wrong process (since they'd be
575 assigned to the same address space). */
576
577 if (has_vforked)
578 {
579 gdb_assert (child_inf->vfork_parent == NULL);
580 gdb_assert (parent_inf->vfork_child == NULL);
581 child_inf->vfork_parent = parent_inf;
582 child_inf->pending_detach = 0;
583 parent_inf->vfork_child = child_inf;
584 parent_inf->pending_detach = detach_fork;
585 parent_inf->waiting_for_vfork_done = 0;
586 }
587 else if (detach_fork)
588 {
589 if (info_verbose || debug_infrun)
590 {
591 target_terminal_ours_for_output ();
592 fprintf_filtered (gdb_stdlog,
593 _("Detaching after fork from "
594 "child %s.\n"),
595 target_pid_to_str (child_ptid));
596 }
597
598 target_detach (NULL, 0);
599 }
600
601 /* Note that the detach above makes PARENT_INF dangling. */
602
603 /* Add the child thread to the appropriate lists, and switch to
604 this new thread, before cloning the program space, and
605 informing the solib layer about this new process. */
606
607 inferior_ptid = child_ptid;
608 add_thread (inferior_ptid);
609
610 /* If this is a vfork child, then the address-space is shared
611 with the parent. If we detached from the parent, then we can
612 reuse the parent's program/address spaces. */
613 if (has_vforked || detach_fork)
614 {
615 child_inf->pspace = parent_pspace;
616 child_inf->aspace = child_inf->pspace->aspace;
617 }
618 else
619 {
620 child_inf->aspace = new_address_space ();
621 child_inf->pspace = add_program_space (child_inf->aspace);
622 child_inf->removable = 1;
623 child_inf->symfile_flags = SYMFILE_NO_READ;
624 set_current_program_space (child_inf->pspace);
625 clone_program_space (child_inf->pspace, parent_pspace);
626
627 /* Let the shared library layer (e.g., solib-svr4) learn
628 about this new process, relocate the cloned exec, pull in
629 shared libraries, and install the solib event breakpoint.
630 If a "cloned-VM" event was propagated better throughout
631 the core, this wouldn't be required. */
632 solib_create_inferior_hook (0);
633 }
634 }
635
636 return target_follow_fork (follow_child, detach_fork);
637 }
638
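/* An illustrative, standalone sketch of the OS-level fork event that
   follow_fork_inferior deals with, guarded out of the build and not part
   of GDB: after fork, the parent sees the child's pid while the child
   sees 0, and it is this parent/child pair that the target reports as
   the ptids handled above.  Build it separately to experiment.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int
main (void)
{
  pid_t child = fork ();

  if (child < 0)
    {
      perror ("fork");
      return EXIT_FAILURE;
    }
  else if (child == 0)
    {
      /* Child branch: the process a debugger following the child would
         add as its new inferior.  */
      printf ("child: pid %ld\n", (long) getpid ());
      _exit (0);
    }

  /* Parent branch: with "follow-fork-mode parent" and detach-on-fork
     on, the debugger stays here and simply lets the child go.  */
  printf ("parent: pid %ld forked child %ld\n",
          (long) getpid (), (long) child);
  waitpid (child, NULL, 0);
  return EXIT_SUCCESS;
}
#endif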
639 /* Tell the target to follow the fork we're stopped at. Returns true
640 if the inferior should be resumed; false, if the target for some
641 reason decided it's best not to resume. */
642
643 static int
644 follow_fork (void)
645 {
646 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
647 int should_resume = 1;
648 struct thread_info *tp;
649
650 /* Copy user stepping state to the new inferior thread. FIXME: the
651 followed fork child thread should have a copy of most of the
652 parent thread structure's run control related fields, not just these.
653 Initialized to avoid "may be used uninitialized" warnings from gcc. */
654 struct breakpoint *step_resume_breakpoint = NULL;
655 struct breakpoint *exception_resume_breakpoint = NULL;
656 CORE_ADDR step_range_start = 0;
657 CORE_ADDR step_range_end = 0;
658 struct frame_id step_frame_id = { 0 };
659 struct interp *command_interp = NULL;
660
661 if (!non_stop)
662 {
663 ptid_t wait_ptid;
664 struct target_waitstatus wait_status;
665
666 /* Get the last target status returned by target_wait(). */
667 get_last_target_status (&wait_ptid, &wait_status);
668
669 /* If not stopped at a fork event, then there's nothing else to
670 do. */
671 if (wait_status.kind != TARGET_WAITKIND_FORKED
672 && wait_status.kind != TARGET_WAITKIND_VFORKED)
673 return 1;
674
675 /* Check if we switched over from WAIT_PTID, since the event was
676 reported. */
677 if (!ptid_equal (wait_ptid, minus_one_ptid)
678 && !ptid_equal (inferior_ptid, wait_ptid))
679 {
680 /* We did. Switch back to WAIT_PTID thread, to tell the
681 target to follow it (in either direction). We'll
682 afterwards refuse to resume, and inform the user what
683 happened. */
684 switch_to_thread (wait_ptid);
685 should_resume = 0;
686 }
687 }
688
689 tp = inferior_thread ();
690
691 /* If there were any forks/vforks that were caught and are now to be
692 followed, then do so now. */
693 switch (tp->pending_follow.kind)
694 {
695 case TARGET_WAITKIND_FORKED:
696 case TARGET_WAITKIND_VFORKED:
697 {
698 ptid_t parent, child;
699
700 /* If the user did a next/step, etc, over a fork call,
701 preserve the stepping state in the fork child. */
702 if (follow_child && should_resume)
703 {
704 step_resume_breakpoint = clone_momentary_breakpoint
705 (tp->control.step_resume_breakpoint);
706 step_range_start = tp->control.step_range_start;
707 step_range_end = tp->control.step_range_end;
708 step_frame_id = tp->control.step_frame_id;
709 exception_resume_breakpoint
710 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
711 command_interp = tp->control.command_interp;
712
713 /* For now, delete the parent's sr breakpoint, otherwise,
714 parent/child sr breakpoints are considered duplicates,
715 and the child version will not be installed. Remove
716 this when the breakpoints module becomes aware of
717 inferiors and address spaces. */
718 delete_step_resume_breakpoint (tp);
719 tp->control.step_range_start = 0;
720 tp->control.step_range_end = 0;
721 tp->control.step_frame_id = null_frame_id;
722 delete_exception_resume_breakpoint (tp);
723 tp->control.command_interp = NULL;
724 }
725
726 parent = inferior_ptid;
727 child = tp->pending_follow.value.related_pid;
728
729 /* Set up inferior(s) as specified by the caller, and tell the
730 target to do whatever is necessary to follow either parent
731 or child. */
732 if (follow_fork_inferior (follow_child, detach_fork))
733 {
734 /* Target refused to follow, or there's some other reason
735 we shouldn't resume. */
736 should_resume = 0;
737 }
738 else
739 {
740 /* This pending follow fork event is now handled, one way
741 or another. The previously selected thread may be gone
742 from the lists by now, but if it is still around, we need
743 to clear the pending follow request. */
744 tp = find_thread_ptid (parent);
745 if (tp)
746 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
747
748 /* This makes sure we don't try to apply the "Switched
749 over from WAIT_PTID" logic above. */
750 nullify_last_target_wait_ptid ();
751
752 /* If we followed the child, switch to it... */
753 if (follow_child)
754 {
755 switch_to_thread (child);
756
757 /* ... and preserve the stepping state, in case the
758 user was stepping over the fork call. */
759 if (should_resume)
760 {
761 tp = inferior_thread ();
762 tp->control.step_resume_breakpoint
763 = step_resume_breakpoint;
764 tp->control.step_range_start = step_range_start;
765 tp->control.step_range_end = step_range_end;
766 tp->control.step_frame_id = step_frame_id;
767 tp->control.exception_resume_breakpoint
768 = exception_resume_breakpoint;
769 tp->control.command_interp = command_interp;
770 }
771 else
772 {
773 /* If we get here, it was because we're trying to
774 resume from a fork catchpoint, but, the user
775 has switched threads away from the thread that
776 forked. In that case, the resume command
777 issued is most likely not applicable to the
778 child, so just warn, and refuse to resume. */
779 warning (_("Not resuming: switched threads "
780 "before following fork child.\n"));
781 }
782
783 /* Reset breakpoints in the child as appropriate. */
784 follow_inferior_reset_breakpoints ();
785 }
786 else
787 switch_to_thread (parent);
788 }
789 }
790 break;
791 case TARGET_WAITKIND_SPURIOUS:
792 /* Nothing to follow. */
793 break;
794 default:
795 internal_error (__FILE__, __LINE__,
796 "Unexpected pending_follow.kind %d\n",
797 tp->pending_follow.kind);
798 break;
799 }
800
801 return should_resume;
802 }
803
804 static void
805 follow_inferior_reset_breakpoints (void)
806 {
807 struct thread_info *tp = inferior_thread ();
808
809 /* Was there a step_resume breakpoint? (There was if the user
810 did a "next" at the fork() call.) If so, explicitly reset its
811 thread number. Cloned step_resume breakpoints are disabled on
812 creation, so enable it here now that it is associated with the
813 correct thread.
814
815 step_resumes are a form of bp that are made to be per-thread.
816 Since we created the step_resume bp when the parent process
817 was being debugged, and now are switching to the child process,
818 from the breakpoint package's viewpoint, that's a switch of
819 "threads". We must update the bp's notion of which thread
820 it is for, or it'll be ignored when it triggers. */
821
822 if (tp->control.step_resume_breakpoint)
823 {
824 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
825 tp->control.step_resume_breakpoint->loc->enabled = 1;
826 }
827
828 /* Treat exception_resume breakpoints like step_resume breakpoints. */
829 if (tp->control.exception_resume_breakpoint)
830 {
831 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
832 tp->control.exception_resume_breakpoint->loc->enabled = 1;
833 }
834
835 /* Reinsert all breakpoints in the child. The user may have set
836 breakpoints after catching the fork, in which case those
837 were never set in the child, but only in the parent. This makes
838 sure the inserted breakpoints match the breakpoint list. */
839
840 breakpoint_re_set ();
841 insert_breakpoints ();
842 }
843
844 /* The child has exited or execed: resume threads of the parent the
845 user wanted to be executing. */
846
847 static int
848 proceed_after_vfork_done (struct thread_info *thread,
849 void *arg)
850 {
851 int pid = * (int *) arg;
852
853 if (ptid_get_pid (thread->ptid) == pid
854 && is_running (thread->ptid)
855 && !is_executing (thread->ptid)
856 && !thread->stop_requested
857 && thread->suspend.stop_signal == GDB_SIGNAL_0)
858 {
859 if (debug_infrun)
860 fprintf_unfiltered (gdb_stdlog,
861 "infrun: resuming vfork parent thread %s\n",
862 target_pid_to_str (thread->ptid));
863
864 switch_to_thread (thread->ptid);
865 clear_proceed_status (0);
866 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
867 }
868
869 return 0;
870 }
871
872 /* Called whenever we notice an exec or exit event, to handle
873 detaching or resuming a vfork parent. */
874
875 static void
876 handle_vfork_child_exec_or_exit (int exec)
877 {
878 struct inferior *inf = current_inferior ();
879
880 if (inf->vfork_parent)
881 {
882 int resume_parent = -1;
883
884 /* This exec or exit marks the end of the shared memory region
885 between the parent and the child. If the user wanted to
886 detach from the parent, now is the time. */
887
888 if (inf->vfork_parent->pending_detach)
889 {
890 struct thread_info *tp;
891 struct cleanup *old_chain;
892 struct program_space *pspace;
893 struct address_space *aspace;
894
895 /* follow-fork child, detach-on-fork on. */
896
897 inf->vfork_parent->pending_detach = 0;
898
899 if (!exec)
900 {
901 /* If we're handling a child exit, then inferior_ptid
902 points at the inferior's pid, not to a thread. */
903 old_chain = save_inferior_ptid ();
904 save_current_program_space ();
905 save_current_inferior ();
906 }
907 else
908 old_chain = save_current_space_and_thread ();
909
910 /* We're letting go of the parent. */
911 tp = any_live_thread_of_process (inf->vfork_parent->pid);
912 switch_to_thread (tp->ptid);
913
914 /* We're about to detach from the parent, which implicitly
915 removes breakpoints from its address space. There's a
916 catch here: we want to reuse the spaces for the child,
917 but, parent/child are still sharing the pspace at this
918 point, although the exec in reality makes the kernel give
919 the child a fresh set of new pages. The problem here is
920 that the breakpoints module, being unaware of this, would
921 likely choose the child process to write to the parent
922 address space. Swapping the child temporarily away from
923 the spaces has the desired effect. Yes, this is "sort
924 of" a hack. */
925
926 pspace = inf->pspace;
927 aspace = inf->aspace;
928 inf->aspace = NULL;
929 inf->pspace = NULL;
930
931 if (debug_infrun || info_verbose)
932 {
933 target_terminal_ours_for_output ();
934
935 if (exec)
936 {
937 fprintf_filtered (gdb_stdlog,
938 _("Detaching vfork parent process "
939 "%d after child exec.\n"),
940 inf->vfork_parent->pid);
941 }
942 else
943 {
944 fprintf_filtered (gdb_stdlog,
945 _("Detaching vfork parent process "
946 "%d after child exit.\n"),
947 inf->vfork_parent->pid);
948 }
949 }
950
951 target_detach (NULL, 0);
952
953 /* Put it back. */
954 inf->pspace = pspace;
955 inf->aspace = aspace;
956
957 do_cleanups (old_chain);
958 }
959 else if (exec)
960 {
961 /* We're staying attached to the parent, so, really give the
962 child a new address space. */
963 inf->pspace = add_program_space (maybe_new_address_space ());
964 inf->aspace = inf->pspace->aspace;
965 inf->removable = 1;
966 set_current_program_space (inf->pspace);
967
968 resume_parent = inf->vfork_parent->pid;
969
970 /* Break the bonds. */
971 inf->vfork_parent->vfork_child = NULL;
972 }
973 else
974 {
975 struct cleanup *old_chain;
976 struct program_space *pspace;
977
978 /* If this is a vfork child exiting, then the pspace and
979 aspaces were shared with the parent. Since we're
980 reporting the process exit, we'll be mourning all that is
981 found in the address space, and switching to null_ptid,
982 preparing to start a new inferior. But, since we don't
983 want to clobber the parent's address/program spaces, we
984 go ahead and create a new one for this exiting
985 inferior. */
986
987 /* Switch to null_ptid, so that clone_program_space doesn't want
988 to read the selected frame of a dead process. */
989 old_chain = save_inferior_ptid ();
990 inferior_ptid = null_ptid;
991
992 /* This inferior is dead, so avoid giving the breakpoints
993 module the option to write through to it (cloning a
994 program space resets breakpoints). */
995 inf->aspace = NULL;
996 inf->pspace = NULL;
997 pspace = add_program_space (maybe_new_address_space ());
998 set_current_program_space (pspace);
999 inf->removable = 1;
1000 inf->symfile_flags = SYMFILE_NO_READ;
1001 clone_program_space (pspace, inf->vfork_parent->pspace);
1002 inf->pspace = pspace;
1003 inf->aspace = pspace->aspace;
1004
1005 /* Put back inferior_ptid. We'll continue mourning this
1006 inferior. */
1007 do_cleanups (old_chain);
1008
1009 resume_parent = inf->vfork_parent->pid;
1010 /* Break the bonds. */
1011 inf->vfork_parent->vfork_child = NULL;
1012 }
1013
1014 inf->vfork_parent = NULL;
1015
1016 gdb_assert (current_program_space == inf->pspace);
1017
1018 if (non_stop && resume_parent != -1)
1019 {
1020 /* If the user wanted the parent to be running, let it go
1021 free now. */
1022 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1023
1024 if (debug_infrun)
1025 fprintf_unfiltered (gdb_stdlog,
1026 "infrun: resuming vfork parent process %d\n",
1027 resume_parent);
1028
1029 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1030
1031 do_cleanups (old_chain);
1032 }
1033 }
1034 }
1035
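/* An illustrative, standalone sketch of the vfork semantics that
   handle_vfork_child_exec_or_exit above relies on, guarded out of the
   build and not part of GDB: the child borrows the parent's address
   space, and the parent stays suspended until the child execs or exits,
   which is exactly when the code above detaches or resumes it.  */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int
main (void)
{
  pid_t child = vfork ();

  if (child == 0)
    {
      /* In the child, only exec or _exit are safe here; the exec is the
         point that releases the blocked parent.  */
      execlp ("true", "true", (char *) NULL);
      _exit (127);              /* Reached only if the exec failed.  */
    }

  /* Not reached until the child has exec'd or exited.  */
  printf ("parent resumed after child %ld exec'd or exited\n", (long) child);
  waitpid (child, NULL, 0);
  return 0;
}
#endif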
1036 /* Enum strings for "set|show follow-exec-mode". */
1037
1038 static const char follow_exec_mode_new[] = "new";
1039 static const char follow_exec_mode_same[] = "same";
1040 static const char *const follow_exec_mode_names[] =
1041 {
1042 follow_exec_mode_new,
1043 follow_exec_mode_same,
1044 NULL,
1045 };
1046
1047 static const char *follow_exec_mode_string = follow_exec_mode_same;
1048 static void
1049 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1050 struct cmd_list_element *c, const char *value)
1051 {
1052 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1053 }
1054
1055 /* EXECD_PATHNAME is assumed to be non-NULL. */
1056
1057 static void
1058 follow_exec (ptid_t ptid, char *execd_pathname)
1059 {
1060 struct thread_info *th, *tmp;
1061 struct inferior *inf = current_inferior ();
1062 int pid = ptid_get_pid (ptid);
1063
1064 /* This is an exec event that we actually wish to pay attention to.
1065 Refresh our symbol table to the newly exec'd program, remove any
1066 momentary bp's, etc.
1067
1068 If there are breakpoints, they aren't really inserted now,
1069 since the exec() transformed our inferior into a fresh set
1070 of instructions.
1071
1072 We want to preserve symbolic breakpoints on the list, since
1073 we have hopes that they can be reset after the new a.out's
1074 symbol table is read.
1075
1076 However, any "raw" breakpoints must be removed from the list
1077 (e.g., the solib bp's), since their address is probably invalid
1078 now.
1079
1080 And, we DON'T want to call delete_breakpoints() here, since
1081 that may write the bp's "shadow contents" (the instruction
1082 value that was overwritten with a TRAP instruction). Since
1083 we now have a new a.out, those shadow contents aren't valid. */
1084
1085 mark_breakpoints_out ();
1086
1087 /* The target reports the exec event to the main thread, even if
1088 some other thread does the exec, and even if the main thread was
1089 stopped or already gone. We may still have non-leader threads of
1090 the process on our list. E.g., on targets that don't have thread
1091 exit events (like remote); or on native Linux in non-stop mode if
1092 there were only two threads in the inferior and the non-leader
1093 one is the one that execs (and nothing forces an update of the
1094 thread list up to here). When debugging remotely, it's best to
1095 avoid extra traffic, when possible, so avoid syncing the thread
1096 list with the target, and instead go ahead and delete all threads
1097 of the process but the one that reported the event. Note this must
1098 be done before calling update_breakpoints_after_exec, as
1099 otherwise clearing the threads' resources would reference stale
1100 thread breakpoints -- it may have been one of these threads that
1101 stepped across the exec. We could just clear their stepping
1102 states, but as long as we're iterating, might as well delete
1103 them. Deleting them now rather than at the next user-visible
1104 stop provides a nicer sequence of events for user and MI
1105 notifications. */
1106 ALL_NON_EXITED_THREADS_SAFE (th, tmp)
1107 if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
1108 delete_thread (th->ptid);
1109
1110 /* We also need to clear any left over stale state for the
1111 leader/event thread. E.g., if there was any step-resume
1112 breakpoint or similar, it's gone now. We cannot truly
1113 step-to-next statement through an exec(). */
1114 th = inferior_thread ();
1115 th->control.step_resume_breakpoint = NULL;
1116 th->control.exception_resume_breakpoint = NULL;
1117 th->control.single_step_breakpoints = NULL;
1118 th->control.step_range_start = 0;
1119 th->control.step_range_end = 0;
1120
1121 /* The user may have had the main thread held stopped in the
1122 previous image (e.g., schedlock on, or non-stop). Release
1123 it now. */
1124 th->stop_requested = 0;
1125
1126 update_breakpoints_after_exec ();
1127
1128 /* What is this a.out's name? */
1129 printf_unfiltered (_("%s is executing new program: %s\n"),
1130 target_pid_to_str (inferior_ptid),
1131 execd_pathname);
1132
1133 /* We've followed the inferior through an exec. Therefore, the
1134 inferior has essentially been killed & reborn. */
1135
1136 gdb_flush (gdb_stdout);
1137
1138 breakpoint_init_inferior (inf_execd);
1139
1140 if (gdb_sysroot && *gdb_sysroot)
1141 {
1142 char *name = alloca (strlen (gdb_sysroot)
1143 + strlen (execd_pathname)
1144 + 1);
1145
1146 strcpy (name, gdb_sysroot);
1147 strcat (name, execd_pathname);
1148 execd_pathname = name;
1149 }
1150
1151 /* Reset the shared library package. This ensures that we get a
1152 shlib event when the child reaches "_start", at which point the
1153 dld will have had a chance to initialize the child. */
1154 /* Also, loading a symbol file below may trigger symbol lookups, and
1155 we don't want those to be satisfied by the libraries of the
1156 previous incarnation of this process. */
1157 no_shared_libraries (NULL, 0);
1158
1159 if (follow_exec_mode_string == follow_exec_mode_new)
1160 {
1161 struct program_space *pspace;
1162
1163 /* The user wants to keep the old inferior and program spaces
1164 around. Create a new fresh one, and switch to it. */
1165
1166 inf = add_inferior (current_inferior ()->pid);
1167 pspace = add_program_space (maybe_new_address_space ());
1168 inf->pspace = pspace;
1169 inf->aspace = pspace->aspace;
1170
1171 exit_inferior_num_silent (current_inferior ()->num);
1172
1173 set_current_inferior (inf);
1174 set_current_program_space (pspace);
1175 }
1176 else
1177 {
1178 /* The old description may no longer be fit for the new image.
1179 E.g., a 64-bit process exec'd a 32-bit process. Clear the
1180 old description; we'll read a new one below. No need to do
1181 this on "follow-exec-mode new", as the old inferior stays
1182 around (its description is later cleared/refetched on
1183 restart). */
1184 target_clear_description ();
1185 }
1186
1187 gdb_assert (current_program_space == inf->pspace);
1188
1189 /* That a.out is now the one to use. */
1190 exec_file_attach (execd_pathname, 0);
1191
1192 /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a PIE
1193 (Position Independent Executable) main symbol file will only be applied by
1194 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
1195 the breakpoints with the zero displacement. */
1196
1197 symbol_file_add (execd_pathname,
1198 (inf->symfile_flags
1199 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
1200 NULL, 0);
1201
1202 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
1203 set_initial_language ();
1204
1205 /* If the target can specify a description, read it. Must do this
1206 after flipping to the new executable (because the target supplied
1207 description must be compatible with the executable's
1208 architecture, and the old executable may e.g., be 32-bit, while
1209 the new one 64-bit), and before anything involving memory or
1210 registers. */
1211 target_find_description ();
1212
1213 solib_create_inferior_hook (0);
1214
1215 jit_inferior_created_hook ();
1216
1217 breakpoint_re_set ();
1218
1219 /* Reinsert all breakpoints. (Those which were symbolic have
1220 been reset to the proper address in the new a.out, thanks
1221 to symbol_file_command...). */
1222 insert_breakpoints ();
1223
1224 /* The next resume of this inferior should bring it to the shlib
1225 startup breakpoints. (If the user had also set bp's on
1226 "main" from the old (parent) process, then they'll auto-
1227 matically get reset there in the new process.). */
1228 }
1229
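/* An illustrative, standalone sketch of the exec semantics follow_exec
   above recovers from, guarded out of the build and not part of GDB:
   the pid stays the same, but the program image is replaced, so raw
   breakpoint addresses and the old symbol table are no longer valid.  */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

int
main (void)
{
  printf ("before exec: pid %ld\n", (long) getpid ());

  /* On success this never returns; the same pid now runs "echo".  */
  execlp ("echo", "echo", "after exec: same pid, new program", (char *) NULL);

  perror ("execlp");            /* Reached only if the exec failed.  */
  return 1;
}
#endif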
1230 /* Info about an instruction that is being stepped over. */
1231
1232 struct step_over_info
1233 {
1234 /* If we're stepping past a breakpoint, this is the address space
1235 and address of the instruction the breakpoint is set at. We'll
1236 skip inserting all breakpoints here. Valid iff ASPACE is
1237 non-NULL. */
1238 struct address_space *aspace;
1239 CORE_ADDR address;
1240
1241 /* The instruction being stepped over triggers a nonsteppable
1242 watchpoint. If true, we'll skip inserting watchpoints. */
1243 int nonsteppable_watchpoint_p;
1244 };
1245
1246 /* The step-over info of the location that is being stepped over.
1247
1248 Note that with async/breakpoint always-inserted mode, a user might
1249 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1250 being stepped over. As setting a new breakpoint inserts all
1251 breakpoints, we need to make sure the breakpoint being stepped over
1252 isn't inserted then. We do that by only clearing the step-over
1253 info when the step-over is actually finished (or aborted).
1254
1255 Presently GDB can only step over one breakpoint at any given time.
1256 Given that threads that can't run code in the same address space as the
1257 breakpoint's can't really miss the breakpoint, GDB could be taught
1258 to step-over at most one breakpoint per address space (so this info
1259 could move to the address space object if/when GDB is extended).
1260 The set of breakpoints being stepped over will normally be much
1261 smaller than the set of all breakpoints, so a flag in the
1262 breakpoint location structure would be wasteful. A separate list
1263 also saves complexity and run-time, as otherwise we'd have to go
1264 through all breakpoint locations clearing their flag whenever we
1265 start a new sequence. Similar considerations weigh against storing
1266 this info in the thread object. Plus, not all step overs actually
1267 have breakpoint locations -- e.g., stepping past a single-step
1268 breakpoint, or stepping to complete a non-continuable
1269 watchpoint. */
1270 static struct step_over_info step_over_info;
1271
1272 /* Record the address of the breakpoint/instruction we're currently
1273 stepping over. */
1274
1275 static void
1276 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1277 int nonsteppable_watchpoint_p)
1278 {
1279 step_over_info.aspace = aspace;
1280 step_over_info.address = address;
1281 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1282 }
1283
1284 /* Called when we're no longer stepping over a breakpoint / an
1285 instruction, so all breakpoints are free to be (re)inserted. */
1286
1287 static void
1288 clear_step_over_info (void)
1289 {
1290 step_over_info.aspace = NULL;
1291 step_over_info.address = 0;
1292 step_over_info.nonsteppable_watchpoint_p = 0;
1293 }
1294
1295 /* See infrun.h. */
1296
1297 int
1298 stepping_past_instruction_at (struct address_space *aspace,
1299 CORE_ADDR address)
1300 {
1301 return (step_over_info.aspace != NULL
1302 && breakpoint_address_match (aspace, address,
1303 step_over_info.aspace,
1304 step_over_info.address));
1305 }
1306
1307 /* See infrun.h. */
1308
1309 int
1310 stepping_past_nonsteppable_watchpoint (void)
1311 {
1312 return step_over_info.nonsteppable_watchpoint_p;
1313 }
1314
1315 /* Returns true if step-over info is valid. */
1316
1317 static int
1318 step_over_info_valid_p (void)
1319 {
1320 return (step_over_info.aspace != NULL
1321 || stepping_past_nonsteppable_watchpoint ());
1322 }
1323
1324 \f
1325 /* Displaced stepping. */
1326
1327 /* In non-stop debugging mode, we must take special care to manage
1328 breakpoints properly; in particular, the traditional strategy for
1329 stepping a thread past a breakpoint it has hit is unsuitable.
1330 'Displaced stepping' is a tactic for stepping one thread past a
1331 breakpoint it has hit while ensuring that other threads running
1332 concurrently will hit the breakpoint as they should.
1333
1334 The traditional way to step a thread T off a breakpoint in a
1335 multi-threaded program in all-stop mode is as follows:
1336
1337 a0) Initially, all threads are stopped, and breakpoints are not
1338 inserted.
1339 a1) We single-step T, leaving breakpoints uninserted.
1340 a2) We insert breakpoints, and resume all threads.
1341
1342 In non-stop debugging, however, this strategy is unsuitable: we
1343 don't want to have to stop all threads in the system in order to
1344 continue or step T past a breakpoint. Instead, we use displaced
1345 stepping:
1346
1347 n0) Initially, T is stopped, other threads are running, and
1348 breakpoints are inserted.
1349 n1) We copy the instruction "under" the breakpoint to a separate
1350 location, outside the main code stream, making any adjustments
1351 to the instruction, register, and memory state as directed by
1352 T's architecture.
1353 n2) We single-step T over the instruction at its new location.
1354 n3) We adjust the resulting register and memory state as directed
1355 by T's architecture. This includes resetting T's PC to point
1356 back into the main instruction stream.
1357 n4) We resume T.
1358
1359 This approach depends on the following gdbarch methods:
1360
1361 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1362 indicate where to copy the instruction, and how much space must
1363 be reserved there. We use these in step n1.
1364
1365 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1366 address, and makes any necessary adjustments to the instruction,
1367 register contents, and memory. We use this in step n1.
1368
1369 - gdbarch_displaced_step_fixup adjusts registers and memory after
1370 we have successfully single-stepped the instruction, to yield the
1371 same effect the instruction would have had if we had executed it
1372 at its original address. We use this in step n3.
1373
1374 - gdbarch_displaced_step_free_closure provides cleanup.
1375
1376 The gdbarch_displaced_step_copy_insn and
1377 gdbarch_displaced_step_fixup functions must be written so that
1378 copying an instruction with gdbarch_displaced_step_copy_insn,
1379 single-stepping across the copied instruction, and then applying
1380 gdbarch_displaced_step_fixup should have the same effects on the
1381 thread's memory and registers as stepping the instruction in place
1382 would have. Exactly which responsibilities fall to the copy and
1383 which fall to the fixup is up to the author of those functions.
1384
1385 See the comments in gdbarch.sh for details.
1386
1387 Note that displaced stepping and software single-step cannot
1388 currently be used in combination, although with some care I think
1389 they could be made to. Software single-step works by placing
1390 breakpoints on all possible subsequent instructions; if the
1391 displaced instruction is a PC-relative jump, those breakpoints
1392 could fall in very strange places --- on pages that aren't
1393 executable, or at addresses that are not proper instruction
1394 boundaries. (We do generally let other threads run while we wait
1395 to hit the software single-step breakpoint, and they might
1396 encounter such a corrupted instruction.) One way to work around
1397 this would be to have gdbarch_displaced_step_copy_insn fully
1398 simulate the effect of PC-relative instructions (and return NULL)
1399 on architectures that use software single-stepping.
1400
1401 In non-stop mode, we can have independent and simultaneous step
1402 requests, so more than one thread may need to simultaneously step
1403 over a breakpoint. The current implementation assumes there is
1404 only one scratch space per process. In this case, we have to
1405 serialize access to the scratch space. If thread A wants to step
1406 over a breakpoint, but we are currently waiting for some other
1407 thread to complete a displaced step, we leave thread A stopped and
1408 place it in the displaced_step_request_queue. Whenever a displaced
1409 step finishes, we pick the next thread in the queue and start a new
1410 displaced step operation on it. See displaced_step_prepare and
1411 displaced_step_fixup for details. */
1412
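/* A toy model of the bookkeeping behind steps n1 and n3 above, kept out
   of the build with "#if 0".  The toy_* names are hypothetical and this
   is not GDB's API: a byte array stands in for target memory, and a
   plain integer stands in for the PC.  */
#if 0
static unsigned char toy_mem[64];       /* Fake target "memory".  */
#define TOY_SCRATCH 48                  /* Fixed scratch slot inside it.  */

struct toy_displaced_state
{
  unsigned int pc_original;     /* Where the instruction really lives.  */
  unsigned int pc_copy;         /* Where we copied it to (the scratch).  */
  unsigned char saved;          /* Old contents of the scratch slot.  */
};

/* n1: save the scratch area, copy the instruction there, and point the
   "PC" at the copy before single-stepping.  */
static void
toy_prepare (struct toy_displaced_state *st, unsigned int *pc)
{
  st->pc_original = *pc;
  st->pc_copy = TOY_SCRATCH;
  st->saved = toy_mem[TOY_SCRATCH];
  toy_mem[TOY_SCRATCH] = toy_mem[*pc];
  *pc = st->pc_copy;
}

/* n3: after the single-step, restore the scratch area and move the
   "PC" back into the main instruction stream.  */
static void
toy_fixup (struct toy_displaced_state *st, unsigned int *pc)
{
  toy_mem[TOY_SCRATCH] = st->saved;
  *pc = st->pc_original + 1;    /* Pretend every instruction is one byte.  */
}
#endif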
1413 struct displaced_step_request
1414 {
1415 ptid_t ptid;
1416 struct displaced_step_request *next;
1417 };
1418
1419 /* Per-inferior displaced stepping state. */
1420 struct displaced_step_inferior_state
1421 {
1422 /* Pointer to next in linked list. */
1423 struct displaced_step_inferior_state *next;
1424
1425 /* The process this displaced step state refers to. */
1426 int pid;
1427
1428 /* A queue of pending displaced stepping requests. One entry per
1429 thread that needs to do a displaced step. */
1430 struct displaced_step_request *step_request_queue;
1431
1432 /* If this is not null_ptid, this is the thread carrying out a
1433 displaced single-step in process PID. This thread's state will
1434 require fixing up once it has completed its step. */
1435 ptid_t step_ptid;
1436
1437 /* The architecture the thread had when we stepped it. */
1438 struct gdbarch *step_gdbarch;
1439
1440 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1441 for post-step cleanup. */
1442 struct displaced_step_closure *step_closure;
1443
1444 /* The address of the original instruction, and the copy we
1445 made. */
1446 CORE_ADDR step_original, step_copy;
1447
1448 /* Saved contents of copy area. */
1449 gdb_byte *step_saved_copy;
1450 };
1451
1452 /* The list of states of processes involved in displaced stepping
1453 presently. */
1454 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1455
1456 /* Get the displaced stepping state of process PID. */
1457
1458 static struct displaced_step_inferior_state *
1459 get_displaced_stepping_state (int pid)
1460 {
1461 struct displaced_step_inferior_state *state;
1462
1463 for (state = displaced_step_inferior_states;
1464 state != NULL;
1465 state = state->next)
1466 if (state->pid == pid)
1467 return state;
1468
1469 return NULL;
1470 }
1471
1472 /* Add a new displaced stepping state for process PID to the displaced
1473 stepping state list, or return a pointer to an already existing
1474 entry, if it already exists. Never returns NULL. */
1475
1476 static struct displaced_step_inferior_state *
1477 add_displaced_stepping_state (int pid)
1478 {
1479 struct displaced_step_inferior_state *state;
1480
1481 for (state = displaced_step_inferior_states;
1482 state != NULL;
1483 state = state->next)
1484 if (state->pid == pid)
1485 return state;
1486
1487 state = xcalloc (1, sizeof (*state));
1488 state->pid = pid;
1489 state->next = displaced_step_inferior_states;
1490 displaced_step_inferior_states = state;
1491
1492 return state;
1493 }
1494
1495 /* If the inferior is in displaced stepping, and ADDR equals the starting
1496 address of the copy area, return the corresponding displaced_step_closure.
1497 Otherwise, return NULL. */
1498
1499 struct displaced_step_closure*
1500 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1501 {
1502 struct displaced_step_inferior_state *displaced
1503 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1504
1505 /* Used when checking the mode of the displaced instruction in the copy area. */
1506 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1507 && (displaced->step_copy == addr))
1508 return displaced->step_closure;
1509
1510 return NULL;
1511 }
1512
1513 /* Remove the displaced stepping state of process PID. */
1514
1515 static void
1516 remove_displaced_stepping_state (int pid)
1517 {
1518 struct displaced_step_inferior_state *it, **prev_next_p;
1519
1520 gdb_assert (pid != 0);
1521
1522 it = displaced_step_inferior_states;
1523 prev_next_p = &displaced_step_inferior_states;
1524 while (it)
1525 {
1526 if (it->pid == pid)
1527 {
1528 *prev_next_p = it->next;
1529 xfree (it);
1530 return;
1531 }
1532
1533 prev_next_p = &it->next;
1534 it = *prev_next_p;
1535 }
1536 }
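/* A minimal sketch of the pointer-to-pointer unlink idiom used by
   remove_displaced_stepping_state above, kept out of the build; the
   toy_* names are hypothetical.  Walking a pointer to the previous
   "next" field means removing the list head needs no special case.  */
#if 0
struct toy_node
{
  int key;
  struct toy_node *next;
};

/* Unlink and return the first node with KEY from *HEAD, or return NULL
   if no such node exists.  The caller frees the returned node.  */
static struct toy_node *
toy_unlink (struct toy_node **head, int key)
{
  struct toy_node **prev_next_p = head;
  struct toy_node *it = *head;

  while (it != NULL)
    {
      if (it->key == key)
        {
          *prev_next_p = it->next;
          return it;
        }
      prev_next_p = &it->next;
      it = *prev_next_p;
    }

  return NULL;
}
#endif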
1537
1538 static void
1539 infrun_inferior_exit (struct inferior *inf)
1540 {
1541 remove_displaced_stepping_state (inf->pid);
1542 }
1543
1544 /* If ON, and the architecture supports it, GDB will use displaced
1545 stepping to step over breakpoints. If OFF, or if the architecture
1546 doesn't support it, GDB will instead use the traditional
1547 hold-and-step approach. If AUTO (which is the default), GDB will
1548 decide which technique to use to step over breakpoints depending on
1549 which of all-stop or non-stop mode is active --- displaced stepping
1550 in non-stop mode; hold-and-step in all-stop mode. */
1551
1552 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1553
1554 static void
1555 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1556 struct cmd_list_element *c,
1557 const char *value)
1558 {
1559 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1560 fprintf_filtered (file,
1561 _("Debugger's willingness to use displaced stepping "
1562 "to step over breakpoints is %s (currently %s).\n"),
1563 value, non_stop ? "on" : "off");
1564 else
1565 fprintf_filtered (file,
1566 _("Debugger's willingness to use displaced stepping "
1567 "to step over breakpoints is %s.\n"), value);
1568 }
1569
1570 /* Return non-zero if displaced stepping can/should be used to step
1571 over breakpoints. */
1572
1573 static int
1574 use_displaced_stepping (struct gdbarch *gdbarch)
1575 {
1576 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1577 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1578 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1579 && find_record_target () == NULL);
1580 }
1581
1582 /* Clean out any stray displaced stepping state. */
1583 static void
1584 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1585 {
1586 /* Indicate that there is no cleanup pending. */
1587 displaced->step_ptid = null_ptid;
1588
1589 if (displaced->step_closure)
1590 {
1591 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1592 displaced->step_closure);
1593 displaced->step_closure = NULL;
1594 }
1595 }
1596
1597 static void
1598 displaced_step_clear_cleanup (void *arg)
1599 {
1600 struct displaced_step_inferior_state *state = arg;
1601
1602 displaced_step_clear (state);
1603 }
1604
1605 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1606 void
1607 displaced_step_dump_bytes (struct ui_file *file,
1608 const gdb_byte *buf,
1609 size_t len)
1610 {
1611 int i;
1612
1613 for (i = 0; i < len; i++)
1614 fprintf_unfiltered (file, "%02x ", buf[i]);
1615 fputs_unfiltered ("\n", file);
1616 }
1617
1618 /* Prepare to single-step, using displaced stepping.
1619
1620 Note that we cannot use displaced stepping when we have a signal to
1621 deliver. If we have a signal to deliver and an instruction to step
1622 over, then after the step, there will be no indication from the
1623 target whether the thread entered a signal handler or ignored the
1624 signal and stepped over the instruction successfully --- both cases
1625 result in a simple SIGTRAP. In the first case we mustn't do a
1626 fixup, and in the second case we must --- but we can't tell which.
1627 Comments in the code for 'random signals' in handle_inferior_event
1628 explain how we handle this case instead.
1629
1630 Returns 1 if preparing was successful -- this thread is going to be
1631 stepped now; or 0 if displaced stepping this thread got queued. */
1632 static int
1633 displaced_step_prepare (ptid_t ptid)
1634 {
1635 struct cleanup *old_cleanups, *ignore_cleanups;
1636 struct thread_info *tp = find_thread_ptid (ptid);
1637 struct regcache *regcache = get_thread_regcache (ptid);
1638 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1639 CORE_ADDR original, copy;
1640 ULONGEST len;
1641 struct displaced_step_closure *closure;
1642 struct displaced_step_inferior_state *displaced;
1643 int status;
1644
1645 /* We should never reach this function if the architecture does not
1646 support displaced stepping. */
1647 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1648
1649 /* Disable range stepping while executing in the scratch pad. We
1650 want a single-step even if executing the displaced instruction in
1651 the scratch buffer lands within the stepping range (e.g., a
1652 jump/branch). */
1653 tp->control.may_range_step = 0;
1654
1655 /* We have to displaced step one thread at a time, as we only have
1656 access to a single scratch space per inferior. */
1657
1658 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1659
1660 if (!ptid_equal (displaced->step_ptid, null_ptid))
1661 {
1662 /* Already waiting for a displaced step to finish. Defer this
1663 request and place it in the queue. */
1664 struct displaced_step_request *req, *new_req;
1665
1666 if (debug_displaced)
1667 fprintf_unfiltered (gdb_stdlog,
1668 "displaced: defering step of %s\n",
1669 target_pid_to_str (ptid));
1670
1671 new_req = xmalloc (sizeof (*new_req));
1672 new_req->ptid = ptid;
1673 new_req->next = NULL;
1674
1675 if (displaced->step_request_queue)
1676 {
1677 for (req = displaced->step_request_queue;
1678 req && req->next;
1679 req = req->next)
1680 ;
1681 req->next = new_req;
1682 }
1683 else
1684 displaced->step_request_queue = new_req;
1685
1686 return 0;
1687 }
1688 else
1689 {
1690 if (debug_displaced)
1691 fprintf_unfiltered (gdb_stdlog,
1692 "displaced: stepping %s now\n",
1693 target_pid_to_str (ptid));
1694 }
1695
1696 displaced_step_clear (displaced);
1697
1698 old_cleanups = save_inferior_ptid ();
1699 inferior_ptid = ptid;
1700
1701 original = regcache_read_pc (regcache);
1702
1703 copy = gdbarch_displaced_step_location (gdbarch);
1704 len = gdbarch_max_insn_length (gdbarch);
1705
1706 /* Save the original contents of the copy area. */
1707 displaced->step_saved_copy = xmalloc (len);
1708 ignore_cleanups = make_cleanup (free_current_contents,
1709 &displaced->step_saved_copy);
1710 status = target_read_memory (copy, displaced->step_saved_copy, len);
1711 if (status != 0)
1712 throw_error (MEMORY_ERROR,
1713 _("Error accessing memory address %s (%s) for "
1714 "displaced-stepping scratch space."),
1715 paddress (gdbarch, copy), safe_strerror (status));
1716 if (debug_displaced)
1717 {
1718 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1719 paddress (gdbarch, copy));
1720 displaced_step_dump_bytes (gdb_stdlog,
1721 displaced->step_saved_copy,
1722 len);
1723 }
1724
1725 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1726 original, copy, regcache);
1727
1728 /* We don't support the fully-simulated case at present. */
1729 gdb_assert (closure);
1730
1731 /* Save the information we need to fix things up if the step
1732 succeeds. */
1733 displaced->step_ptid = ptid;
1734 displaced->step_gdbarch = gdbarch;
1735 displaced->step_closure = closure;
1736 displaced->step_original = original;
1737 displaced->step_copy = copy;
1738
1739 make_cleanup (displaced_step_clear_cleanup, displaced);
1740
1741 /* Resume execution at the copy. */
1742 regcache_write_pc (regcache, copy);
1743
1744 discard_cleanups (ignore_cleanups);
1745
1746 do_cleanups (old_cleanups);
1747
1748 if (debug_displaced)
1749 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1750 paddress (gdbarch, copy));
1751
1752 return 1;
1753 }
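
/* Illustrative summary, not part of the original source, of a
   successful displaced_step_prepare, with hypothetical addresses
   ORIGINAL = 0x400500 (the PC under the breakpoint) and
   COPY = 0x601000 (the per-inferior scratch pad):

     1. the LEN (= gdbarch_max_insn_length) bytes at COPY are saved in
        displaced->step_saved_copy;
     2. gdbarch_displaced_step_copy_insn copies, and possibly rewrites,
        the instruction from ORIGINAL to COPY, returning a closure that
        describes what it did;
     3. ptid, gdbarch, closure, ORIGINAL and COPY are recorded in
        DISPLACED so that displaced_step_fixup can undo everything;
     4. the thread's PC is set to COPY, and the caller single-steps the
        thread from there.  */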
1754
1755 static void
1756 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1757 const gdb_byte *myaddr, int len)
1758 {
1759 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1760
1761 inferior_ptid = ptid;
1762 write_memory (memaddr, myaddr, len);
1763 do_cleanups (ptid_cleanup);
1764 }
1765
1766 /* Restore the contents of the copy area for thread PTID. */
1767
1768 static void
1769 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1770 ptid_t ptid)
1771 {
1772 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1773
1774 write_memory_ptid (ptid, displaced->step_copy,
1775 displaced->step_saved_copy, len);
1776 if (debug_displaced)
1777 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1778 target_pid_to_str (ptid),
1779 paddress (displaced->step_gdbarch,
1780 displaced->step_copy));
1781 }
1782
1783 static void
1784 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1785 {
1786 struct cleanup *old_cleanups;
1787 struct displaced_step_inferior_state *displaced
1788 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1789
1790 /* Was any thread of this process doing a displaced step? */
1791 if (displaced == NULL)
1792 return;
1793
1794 /* Was this event for the thread we displaced? */
1795 if (ptid_equal (displaced->step_ptid, null_ptid)
1796 || ! ptid_equal (displaced->step_ptid, event_ptid))
1797 return;
1798
1799 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1800
1801 displaced_step_restore (displaced, displaced->step_ptid);
1802
1803 /* Did the instruction complete successfully? */
1804 if (signal == GDB_SIGNAL_TRAP)
1805 {
1806 /* Fixup may need to read memory/registers. Switch to the
1807 thread that we're fixing up. */
1808 switch_to_thread (event_ptid);
1809
1810 /* Fix up the resulting state. */
1811 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1812 displaced->step_closure,
1813 displaced->step_original,
1814 displaced->step_copy,
1815 get_thread_regcache (displaced->step_ptid));
1816 }
1817 else
1818 {
1819 /* Since the instruction didn't complete, all we can do is
1820 relocate the PC. */
1821 struct regcache *regcache = get_thread_regcache (event_ptid);
1822 CORE_ADDR pc = regcache_read_pc (regcache);
1823
1824 pc = displaced->step_original + (pc - displaced->step_copy);
1825 regcache_write_pc (regcache, pc);
1826 }
1827
1828 do_cleanups (old_cleanups);
1829
1830 displaced->step_ptid = null_ptid;
1831
1832 /* Are there any pending displaced stepping requests? If so, run
1833 one now. Leave the state object around, since we're likely to
1834 need it again soon. */
1835 while (displaced->step_request_queue)
1836 {
1837 struct displaced_step_request *head;
1838 ptid_t ptid;
1839 struct regcache *regcache;
1840 struct gdbarch *gdbarch;
1841 CORE_ADDR actual_pc;
1842 struct address_space *aspace;
1843
1844 head = displaced->step_request_queue;
1845 ptid = head->ptid;
1846 displaced->step_request_queue = head->next;
1847 xfree (head);
1848
1849 context_switch (ptid);
1850
1851 regcache = get_thread_regcache (ptid);
1852 actual_pc = regcache_read_pc (regcache);
1853 aspace = get_regcache_aspace (regcache);
1854
1855 if (breakpoint_here_p (aspace, actual_pc))
1856 {
1857 if (debug_displaced)
1858 fprintf_unfiltered (gdb_stdlog,
1859 "displaced: stepping queued %s now\n",
1860 target_pid_to_str (ptid));
1861
1862 displaced_step_prepare (ptid);
1863
1864 gdbarch = get_regcache_arch (regcache);
1865
1866 if (debug_displaced)
1867 {
1868 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1869 gdb_byte buf[4];
1870
1871 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1872 paddress (gdbarch, actual_pc));
1873 read_memory (actual_pc, buf, sizeof (buf));
1874 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1875 }
1876
1877 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1878 displaced->step_closure))
1879 target_resume (ptid, 1, GDB_SIGNAL_0);
1880 else
1881 target_resume (ptid, 0, GDB_SIGNAL_0);
1882
1883 /* Done, we're stepping a thread. */
1884 break;
1885 }
1886 else
1887 {
1888 int step;
1889 struct thread_info *tp = inferior_thread ();
1890
1891 /* The breakpoint we were sitting under has since been
1892 removed. */
1893 tp->control.trap_expected = 0;
1894
1895 /* Go back to what we were trying to do. */
1896 step = currently_stepping (tp);
1897
1898 if (debug_displaced)
1899 fprintf_unfiltered (gdb_stdlog,
1900 "displaced: breakpoint is gone: %s, step(%d)\n",
1901 target_pid_to_str (tp->ptid), step);
1902
1903 target_resume (ptid, step, GDB_SIGNAL_0);
1904 tp->suspend.stop_signal = GDB_SIGNAL_0;
1905
1906 /* This request was discarded. See if there's any other
1907 thread waiting for its turn. */
1908 }
1909 }
1910 }
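
/* Worked example for the "instruction didn't complete" branch above,
   with hypothetical addresses: if step_original = 0x400500 and
   step_copy = 0x601000, and the thread stopped at pc = 0x601000 (say a
   signal arrived before the copied instruction executed), then

     pc = 0x400500 + (0x601000 - 0x601000) = 0x400500

   i.e. the PC is translated from scratch-pad coordinates back to the
   original code, so the step-over can be re-attempted later.  */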
1911
1912 /* Update global variables holding ptids to hold NEW_PTID if they were
1913 holding OLD_PTID. */
1914 static void
1915 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1916 {
1917 struct displaced_step_request *it;
1918 struct displaced_step_inferior_state *displaced;
1919
1920 if (ptid_equal (inferior_ptid, old_ptid))
1921 inferior_ptid = new_ptid;
1922
1923 for (displaced = displaced_step_inferior_states;
1924 displaced;
1925 displaced = displaced->next)
1926 {
1927 if (ptid_equal (displaced->step_ptid, old_ptid))
1928 displaced->step_ptid = new_ptid;
1929
1930 for (it = displaced->step_request_queue; it; it = it->next)
1931 if (ptid_equal (it->ptid, old_ptid))
1932 it->ptid = new_ptid;
1933 }
1934 }
1935
1936 \f
1937 /* Resuming. */
1938
1939 /* Things to clean up if we QUIT out of resume (). */
1940 static void
1941 resume_cleanups (void *ignore)
1942 {
1943 if (!ptid_equal (inferior_ptid, null_ptid))
1944 delete_single_step_breakpoints (inferior_thread ());
1945
1946 normal_stop ();
1947 }
1948
1949 static const char schedlock_off[] = "off";
1950 static const char schedlock_on[] = "on";
1951 static const char schedlock_step[] = "step";
1952 static const char *const scheduler_enums[] = {
1953 schedlock_off,
1954 schedlock_on,
1955 schedlock_step,
1956 NULL
1957 };
1958 static const char *scheduler_mode = schedlock_off;
1959 static void
1960 show_scheduler_mode (struct ui_file *file, int from_tty,
1961 struct cmd_list_element *c, const char *value)
1962 {
1963 fprintf_filtered (file,
1964 _("Mode for locking scheduler "
1965 "during execution is \"%s\".\n"),
1966 value);
1967 }
1968
1969 static void
1970 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1971 {
1972 if (!target_can_lock_scheduler)
1973 {
1974 scheduler_mode = schedlock_off;
1975 error (_("Target '%s' cannot support this command."), target_shortname);
1976 }
1977 }
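
/* Illustrative usage, not part of the original source, of the setting
   validated above:

     (gdb) set scheduler-locking off    # all threads may run (default)
     (gdb) set scheduler-locking on     # only the current thread runs
     (gdb) set scheduler-locking step   # lock threads only for
                                        # step/next-like commands

   If the target cannot lock the scheduler, the mode is forced back to
   "off" and an error is reported.  */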
1978
1979 /* True if execution commands resume all threads of all processes by
1980 default; otherwise, resume only threads of the current inferior
1981 process. */
1982 int sched_multi = 0;
1983
1984 /* Try to set up software single-stepping over the specified location.
1985 Return 1 if target_resume() should use hardware single step.
1986
1987 GDBARCH the current gdbarch.
1988 PC the location to step over. */
1989
1990 static int
1991 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1992 {
1993 int hw_step = 1;
1994
1995 if (execution_direction == EXEC_FORWARD
1996 && gdbarch_software_single_step_p (gdbarch)
1997 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1998 {
1999 hw_step = 0;
2000 }
2001 return hw_step;
2002 }
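
/* Illustrative note, not part of the original source: when the
   architecture provides software single-step, the
   gdbarch_software_single_step call above plants temporary breakpoints
   at the possible next PCs, and the caller then resumes with a plain
   continue (return value 0); otherwise the caller falls back to a
   hardware single-step request (return value 1).  */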
2003
2004 ptid_t
2005 user_visible_resume_ptid (int step)
2006 {
2007 /* By default, resume all threads of all processes. */
2008 ptid_t resume_ptid = RESUME_ALL;
2009
2010 /* Maybe resume only all threads of the current process. */
2011 if (!sched_multi && target_supports_multi_process ())
2012 {
2013 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2014 }
2015
2016 /* Maybe resume a single thread after all. */
2017 if (non_stop)
2018 {
2019 /* With non-stop mode on, threads are always handled
2020 individually. */
2021 resume_ptid = inferior_ptid;
2022 }
2023 else if ((scheduler_mode == schedlock_on)
2024 || (scheduler_mode == schedlock_step && step))
2025 {
2026 /* User-settable 'scheduler' mode requires solo thread resume. */
2027 resume_ptid = inferior_ptid;
2028 }
2029
2030 /* We may actually resume fewer threads at first, e.g., if a thread
2031 is stopped at a breakpoint that needs stepping-off, but that
2032 should not be visible to the user/frontend, and neither should
2033 the frontend/user be allowed to proceed any of the threads that
2034 happen to be stopped for internal run control handling, if a
2035 previous command wanted them resumed. */
2036 return resume_ptid;
2037 }
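
/* Illustrative mapping, not part of the original source, from the
   run-control settings to the ptid returned above (assuming the target
   supports multi-process), in decreasing order of precedence:

     non-stop mode                            -> inferior_ptid (one thread)
     "set scheduler-locking on"               -> inferior_ptid
     "set scheduler-locking step" + stepping  -> inferior_ptid
     "set schedule-multiple off" (default)    -> all threads of the
                                                 current process
     "set schedule-multiple on"               -> RESUME_ALL, i.e. every
                                                 thread of every process  */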
2038
2039 /* Resume the inferior, but allow a QUIT. This is useful if the user
2040 wants to interrupt some lengthy single-stepping operation
2041 (for child processes, the SIGINT goes to the inferior, and so
2042 we get a SIGINT random_signal, but for remote debugging and perhaps
2043 other targets, that's not true).
2044
2045 STEP nonzero if we should step (zero to continue instead).
2046 SIG is the signal to give the inferior (zero for none). */
2047 void
2048 resume (int step, enum gdb_signal sig)
2049 {
2050 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
2051 struct regcache *regcache = get_current_regcache ();
2052 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2053 struct thread_info *tp = inferior_thread ();
2054 CORE_ADDR pc = regcache_read_pc (regcache);
2055 struct address_space *aspace = get_regcache_aspace (regcache);
2056 ptid_t resume_ptid;
2057 /* From here on, this represents the caller's step vs continue
2058 request, while STEP represents what we'll actually request the
2059 target to do. STEP can decay from a step to a continue, if e.g.,
2060 we need to implement single-stepping with breakpoints (software
2061 single-step). When deciding whether "set scheduler-locking step"
2062 applies, it's the caller's intention that counts. */
2063 const int entry_step = step;
2064
2065 tp->stepped_breakpoint = 0;
2066
2067 QUIT;
2068
2069 if (current_inferior ()->waiting_for_vfork_done)
2070 {
2071 /* Don't try to single-step a vfork parent that is waiting for
2072 the child to get out of the shared memory region (by exec'ing
2073 or exiting). This is particularly important on software
2074 single-step archs, as the child process would trip on the
2075 software single step breakpoint inserted for the parent
2076 process. Since the parent will not actually execute any
2077 instruction until the child is out of the shared region (such
2078 are vfork's semantics), it is safe to simply continue it.
2079 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2080 the parent, and tell it to `keep_going', which automatically
2081 re-enables stepping for it. */
2082 if (debug_infrun)
2083 fprintf_unfiltered (gdb_stdlog,
2084 "infrun: resume : clear step\n");
2085 step = 0;
2086 }
2087
2088 if (debug_infrun)
2089 fprintf_unfiltered (gdb_stdlog,
2090 "infrun: resume (step=%d, signal=%s), "
2091 "trap_expected=%d, current thread [%s] at %s\n",
2092 step, gdb_signal_to_symbol_string (sig),
2093 tp->control.trap_expected,
2094 target_pid_to_str (inferior_ptid),
2095 paddress (gdbarch, pc));
2096
2097 /* Normally, by the time we reach `resume', the breakpoints are either
2098 removed or inserted, as appropriate. The exception is if we're sitting
2099 at a permanent breakpoint; we need to step over it, but permanent
2100 breakpoints can't be removed. So we have to test for it here. */
2101 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2102 {
2103 if (sig != GDB_SIGNAL_0)
2104 {
2105 /* We have a signal to pass to the inferior. The resume
2106 may, or may not take us to the signal handler. If this
2107 is a step, we'll need to stop in the signal handler, if
2108 there's one, (if the target supports stepping into
2109 handlers), or in the next mainline instruction, if
2110 there's no handler. If this is a continue, we need to be
2111 sure to run the handler with all breakpoints inserted.
2112 In all cases, set a breakpoint at the current address
2113 (where the handler returns to), and once that breakpoint
2114 is hit, resume skipping the permanent breakpoint. If
2115 that breakpoint isn't hit, then we've stepped into the
2116 signal handler (or hit some other event). We'll delete
2117 the step-resume breakpoint then. */
2118
2119 if (debug_infrun)
2120 fprintf_unfiltered (gdb_stdlog,
2121 "infrun: resume: skipping permanent breakpoint, "
2122 "deliver signal first\n");
2123
2124 clear_step_over_info ();
2125 tp->control.trap_expected = 0;
2126
2127 if (tp->control.step_resume_breakpoint == NULL)
2128 {
2129 /* Set a "high-priority" step-resume, as we don't want
2130 user breakpoints at PC to trigger (again) when this
2131 hits. */
2132 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2133 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2134
2135 tp->step_after_step_resume_breakpoint = step;
2136 }
2137
2138 insert_breakpoints ();
2139 }
2140 else
2141 {
2142 /* There's no signal to pass, we can go ahead and skip the
2143 permanent breakpoint manually. */
2144 if (debug_infrun)
2145 fprintf_unfiltered (gdb_stdlog,
2146 "infrun: resume: skipping permanent breakpoint\n");
2147 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2148 /* Update pc to reflect the new address from which we will
2149 execute instructions. */
2150 pc = regcache_read_pc (regcache);
2151
2152 if (step)
2153 {
2154 /* We've already advanced the PC, so the stepping part
2155 is done. Now we need to arrange for a trap to be
2156 reported to handle_inferior_event. Set a breakpoint
2157 at the current PC, and run to it. Don't update
2158 prev_pc, because if we end in
2159 switch_back_to_stepped_thread, we want the "expected thread
2160 advanced also" branch to be taken. IOW, we don't
2161 want this thread to step further from PC
2162 (overstep). */
2163 insert_single_step_breakpoint (gdbarch, aspace, pc);
2164 insert_breakpoints ();
2165
2166 tp->suspend.stop_signal = GDB_SIGNAL_0;
2167 /* We're continuing with all breakpoints inserted. It's
2168 safe to let the target bypass signals. */
2169 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2170 /* ... and safe to let other threads run, according to
2171 schedlock. */
2172 resume_ptid = user_visible_resume_ptid (entry_step);
2173 target_resume (resume_ptid, 0, GDB_SIGNAL_0);
2174 discard_cleanups (old_cleanups);
2175 return;
2176 }
2177 }
2178 }
2179
2180 /* If we have a breakpoint to step over, make sure to do a single
2181 step only. Same if we have software watchpoints. */
2182 if (tp->control.trap_expected || bpstat_should_step ())
2183 tp->control.may_range_step = 0;
2184
2185 /* If enabled, step over breakpoints by executing a copy of the
2186 instruction at a different address.
2187
2188 We can't use displaced stepping when we have a signal to deliver;
2189 the comments for displaced_step_prepare explain why. The
2190 comments in the handle_inferior event for dealing with 'random
2191 signals' explain what we do instead.
2192
2193 We can't use displaced stepping while we are waiting for a
2194 vfork_done event either; displaced stepping would break the vfork
2195 child in the same way a software single-step breakpoint would. */
2196 if (use_displaced_stepping (gdbarch)
2197 && tp->control.trap_expected
2198 && sig == GDB_SIGNAL_0
2199 && !current_inferior ()->waiting_for_vfork_done)
2200 {
2201 struct displaced_step_inferior_state *displaced;
2202
2203 if (!displaced_step_prepare (inferior_ptid))
2204 {
2205 /* Got placed in displaced stepping queue. Will be resumed
2206 later when all the currently queued displaced stepping
2207 requests finish. The thread is not executing at this
2208 point, and the call to set_executing will be made later.
2209 But we need to call set_running here, since from the
2210 user/frontend's point of view, threads were set running.
2211 Unless we're calling an inferior function, as in that
2212 case we pretend the inferior doesn't run at all. */
2213 if (!tp->control.in_infcall)
2214 set_running (user_visible_resume_ptid (entry_step), 1);
2215 discard_cleanups (old_cleanups);
2216 return;
2217 }
2218
2219 /* Update pc to reflect the new address from which we will execute
2220 instructions due to displaced stepping. */
2221 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
2222
2223 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2224 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2225 displaced->step_closure);
2226 }
2227
2228 /* Do we need to do it the hard way, w/temp breakpoints? */
2229 else if (step)
2230 step = maybe_software_singlestep (gdbarch, pc);
2231
2232 /* Currently, our software single-step implementation leads to different
2233 results than hardware single-stepping in one situation: when stepping
2234 into delivering a signal which has an associated signal handler,
2235 hardware single-step will stop at the first instruction of the handler,
2236 while software single-step will simply skip execution of the handler.
2237
2238 For now, this difference in behavior is accepted since there is no
2239 easy way to actually implement single-stepping into a signal handler
2240 without kernel support.
2241
2242 However, there is one scenario where this difference leads to follow-on
2243 problems: if we're stepping off a breakpoint by removing all breakpoints
2244 and then single-stepping. In this case, the software single-step
2245 behavior means that even if there is a *breakpoint* in the signal
2246 handler, GDB still would not stop.
2247
2248 Fortunately, we can at least fix this particular issue. We detect
2249 here the case where we are about to deliver a signal while software
2250 single-stepping with breakpoints removed. In this situation, we
2251 revert the decisions to remove all breakpoints and insert single-
2252 step breakpoints, and instead we install a step-resume breakpoint
2253 at the current address, deliver the signal without stepping, and
2254 once we arrive back at the step-resume breakpoint, actually step
2255 over the breakpoint we originally wanted to step over. */
2256 if (thread_has_single_step_breakpoints_set (tp)
2257 && sig != GDB_SIGNAL_0
2258 && step_over_info_valid_p ())
2259 {
2260 /* If we have nested signals or a pending signal is delivered
2261 immediately after a handler returns, we might already have
2262 a step-resume breakpoint set on the earlier handler. We cannot
2263 set another step-resume breakpoint; just continue on until the
2264 original breakpoint is hit. */
2265 if (tp->control.step_resume_breakpoint == NULL)
2266 {
2267 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2268 tp->step_after_step_resume_breakpoint = 1;
2269 }
2270
2271 delete_single_step_breakpoints (tp);
2272
2273 clear_step_over_info ();
2274 tp->control.trap_expected = 0;
2275
2276 insert_breakpoints ();
2277 }
2278
2279 /* If STEP is set, it's a request to use hardware stepping
2280 facilities. But in that case, we should never
2281 use singlestep breakpoint. */
2282 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2283
2284 /* Decide the set of threads to ask the target to resume. Start
2285 by assuming everything will be resumed, then narrow the set
2286 by applying increasingly restrictive conditions. */
2287 resume_ptid = user_visible_resume_ptid (entry_step);
2288
2289 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
2290 (e.g., we might need to step over a breakpoint), from the
2291 user/frontend's point of view, all threads in RESUME_PTID are now
2292 running. Unless we're calling an inferior function, as in that
2293 case we pretend the inferior doesn't run at all. */
2294 if (!tp->control.in_infcall)
2295 set_running (resume_ptid, 1);
2296
2297 /* Maybe resume a single thread after all. */
2298 if ((step || thread_has_single_step_breakpoints_set (tp))
2299 && tp->control.trap_expected)
2300 {
2301 /* We're allowing a thread to run past a breakpoint it has
2302 hit, by single-stepping the thread with the breakpoint
2303 removed. In which case, we need to single-step only this
2304 thread, and keep others stopped, as they can miss this
2305 breakpoint if allowed to run. */
2306 resume_ptid = inferior_ptid;
2307 }
2308
2309 if (execution_direction != EXEC_REVERSE
2310 && step && breakpoint_inserted_here_p (aspace, pc))
2311 {
2312 /* The only case we currently need to step a breakpoint
2313 instruction is when we have a signal to deliver. See
2314 handle_signal_stop where we handle random signals that could
2315 take us out of the stepping range. Normally, in that
2316 case we end up continuing (instead of stepping) over the
2317 signal handler with a breakpoint at PC, but there are cases
2318 where we should _always_ single-step, even if we have a
2319 step-resume breakpoint, like when a software watchpoint is
2320 set. Assuming single-stepping and delivering a signal at the
2321 same time would take us to the signal handler, then we could
2322 have removed the breakpoint at PC to step over it. However,
2323 some hardware step targets (like e.g., Mac OS) can't step
2324 into signal handlers, and for those, we need to leave the
2325 breakpoint at PC inserted, as otherwise if the handler
2326 recurses and executes PC again, it'll miss the breakpoint.
2327 So we leave the breakpoint inserted anyway, but we need to
2328 record that we tried to step a breakpoint instruction, so
2329 that adjust_pc_after_break doesn't end up confused. */
2330 gdb_assert (sig != GDB_SIGNAL_0);
2331
2332 tp->stepped_breakpoint = 1;
2333
2334 /* Most targets can step a breakpoint instruction, thus
2335 executing it normally. But if this one cannot, just
2336 continue and we will hit it anyway. */
2337 if (gdbarch_cannot_step_breakpoint (gdbarch))
2338 step = 0;
2339 }
2340
2341 if (debug_displaced
2342 && use_displaced_stepping (gdbarch)
2343 && tp->control.trap_expected)
2344 {
2345 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
2346 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2347 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2348 gdb_byte buf[4];
2349
2350 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2351 paddress (resume_gdbarch, actual_pc));
2352 read_memory (actual_pc, buf, sizeof (buf));
2353 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2354 }
2355
2356 if (tp->control.may_range_step)
2357 {
2358 /* If we're resuming a thread with the PC out of the step
2359 range, then we're doing some nested/finer run control
2360 operation, like stepping the thread out of the dynamic
2361 linker or the displaced stepping scratch pad. We
2362 shouldn't have allowed a range step then. */
2363 gdb_assert (pc_in_thread_step_range (pc, tp));
2364 }
2365
2366 /* Install inferior's terminal modes. */
2367 target_terminal_inferior ();
2368
2369 /* Avoid confusing the next resume, if the next stop/resume
2370 happens to apply to another thread. */
2371 tp->suspend.stop_signal = GDB_SIGNAL_0;
2372
2373 /* Advise target which signals may be handled silently. If we have
2374 removed breakpoints because we are stepping over one (in any
2375 thread), we need to receive all signals to avoid accidentally
2376 skipping a breakpoint during execution of a signal handler. */
2377 if (step_over_info_valid_p ())
2378 target_pass_signals (0, NULL);
2379 else
2380 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2381
2382 target_resume (resume_ptid, step, sig);
2383
2384 discard_cleanups (old_cleanups);
2385 }
2386 \f
2387 /* Proceeding. */
2388
2389 /* Clear out all variables saying what to do when inferior is continued.
2390 First do this, then set the ones you want, then call `proceed'. */
2391
2392 static void
2393 clear_proceed_status_thread (struct thread_info *tp)
2394 {
2395 if (debug_infrun)
2396 fprintf_unfiltered (gdb_stdlog,
2397 "infrun: clear_proceed_status_thread (%s)\n",
2398 target_pid_to_str (tp->ptid));
2399
2400 /* If this signal should not be seen by program, give it zero.
2401 Used for debugging signals. */
2402 if (!signal_pass_state (tp->suspend.stop_signal))
2403 tp->suspend.stop_signal = GDB_SIGNAL_0;
2404
2405 tp->control.trap_expected = 0;
2406 tp->control.step_range_start = 0;
2407 tp->control.step_range_end = 0;
2408 tp->control.may_range_step = 0;
2409 tp->control.step_frame_id = null_frame_id;
2410 tp->control.step_stack_frame_id = null_frame_id;
2411 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2412 tp->stop_requested = 0;
2413
2414 tp->control.stop_step = 0;
2415
2416 tp->control.proceed_to_finish = 0;
2417
2418 tp->control.command_interp = NULL;
2419
2420 /* Discard any remaining commands or status from previous stop. */
2421 bpstat_clear (&tp->control.stop_bpstat);
2422 }
2423
2424 void
2425 clear_proceed_status (int step)
2426 {
2427 if (!non_stop)
2428 {
2429 struct thread_info *tp;
2430 ptid_t resume_ptid;
2431
2432 resume_ptid = user_visible_resume_ptid (step);
2433
2434 /* In all-stop mode, delete the per-thread status of all threads
2435 we're about to resume, implicitly and explicitly. */
2436 ALL_NON_EXITED_THREADS (tp)
2437 {
2438 if (!ptid_match (tp->ptid, resume_ptid))
2439 continue;
2440 clear_proceed_status_thread (tp);
2441 }
2442 }
2443
2444 if (!ptid_equal (inferior_ptid, null_ptid))
2445 {
2446 struct inferior *inferior;
2447
2448 if (non_stop)
2449 {
2450 /* If in non-stop mode, only delete the per-thread status of
2451 the current thread. */
2452 clear_proceed_status_thread (inferior_thread ());
2453 }
2454
2455 inferior = current_inferior ();
2456 inferior->control.stop_soon = NO_STOP_QUIETLY;
2457 }
2458
2459 stop_after_trap = 0;
2460
2461 clear_step_over_info ();
2462
2463 observer_notify_about_to_proceed ();
2464
2465 if (stop_registers)
2466 {
2467 regcache_xfree (stop_registers);
2468 stop_registers = NULL;
2469 }
2470 }
2471
2472 /* Returns true if TP is still stopped at a breakpoint that needs
2473 stepping-over in order to make progress. If the breakpoint is gone
2474 meanwhile, we can skip the whole step-over dance. */
2475
2476 static int
2477 thread_still_needs_step_over (struct thread_info *tp)
2478 {
2479 if (tp->stepping_over_breakpoint)
2480 {
2481 struct regcache *regcache = get_thread_regcache (tp->ptid);
2482
2483 if (breakpoint_here_p (get_regcache_aspace (regcache),
2484 regcache_read_pc (regcache))
2485 == ordinary_breakpoint_here)
2486 return 1;
2487
2488 tp->stepping_over_breakpoint = 0;
2489 }
2490
2491 return 0;
2492 }
2493
2494 /* Returns true if scheduler locking applies. STEP indicates whether
2495 we're about to do a step/next-like command to a thread. */
2496
2497 static int
2498 schedlock_applies (int step)
2499 {
2500 return (scheduler_mode == schedlock_on
2501 || (scheduler_mode == schedlock_step
2502 && step));
2503 }
2504
2505 /* Look for a thread other than EXCEPT that has previously reported a
2506 breakpoint event, and thus needs a step-over in order to make
2507 progress. Returns NULL if none is found. STEP indicates whether
2508 we're about to step the current thread, in order to decide whether
2509 "set scheduler-locking step" applies. */
2510
2511 static struct thread_info *
2512 find_thread_needs_step_over (int step, struct thread_info *except)
2513 {
2514 struct thread_info *tp, *current;
2515
2516 /* With non-stop mode on, threads are always handled individually. */
2517 gdb_assert (! non_stop);
2518
2519 current = inferior_thread ();
2520
2521 /* If scheduler locking applies, we can avoid iterating over all
2522 threads. */
2523 if (schedlock_applies (step))
2524 {
2525 if (except != current
2526 && thread_still_needs_step_over (current))
2527 return current;
2528
2529 return NULL;
2530 }
2531
2532 ALL_NON_EXITED_THREADS (tp)
2533 {
2534 /* Ignore the EXCEPT thread. */
2535 if (tp == except)
2536 continue;
2537 /* Ignore threads of processes we're not resuming. */
2538 if (!sched_multi
2539 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2540 continue;
2541
2542 if (thread_still_needs_step_over (tp))
2543 return tp;
2544 }
2545
2546 return NULL;
2547 }
2548
2549 /* Basic routine for continuing the program in various fashions.
2550
2551 ADDR is the address to resume at, or -1 for resume where stopped.
2552 SIGGNAL is the signal to give it, or 0 for none,
2553 or -1 for act according to how it stopped.
2554 STEP is nonzero if we should trap after one instruction.
2555 -1 means return after that and print nothing.
2556 You should probably set various step_... variables
2557 before calling here, if you are stepping.
2558
2559 You should call clear_proceed_status before calling proceed. */
2560
2561 void
2562 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2563 {
2564 struct regcache *regcache;
2565 struct gdbarch *gdbarch;
2566 struct thread_info *tp;
2567 CORE_ADDR pc;
2568 struct address_space *aspace;
2569
2570 /* If we're stopped at a fork/vfork, follow the branch set by the
2571 "set follow-fork-mode" command; otherwise, we'll just proceed
2572 resuming the current thread. */
2573 if (!follow_fork ())
2574 {
2575 /* The target for some reason decided not to resume. */
2576 normal_stop ();
2577 if (target_can_async_p ())
2578 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2579 return;
2580 }
2581
2582 /* We'll update this if & when we switch to a new thread. */
2583 previous_inferior_ptid = inferior_ptid;
2584
2585 regcache = get_current_regcache ();
2586 gdbarch = get_regcache_arch (regcache);
2587 aspace = get_regcache_aspace (regcache);
2588 pc = regcache_read_pc (regcache);
2589 tp = inferior_thread ();
2590
2591 if (step > 0)
2592 step_start_function = find_pc_function (pc);
2593 if (step < 0)
2594 stop_after_trap = 1;
2595
2596 /* Fill in with reasonable starting values. */
2597 init_thread_stepping_state (tp);
2598
2599 if (addr == (CORE_ADDR) -1)
2600 {
2601 if (pc == stop_pc
2602 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
2603 && execution_direction != EXEC_REVERSE)
2604 /* There is a breakpoint at the address we will resume at,
2605 step one instruction before inserting breakpoints so that
2606 we do not stop right away (and report a second hit at this
2607 breakpoint).
2608
2609 Note, we don't do this in reverse, because we won't
2610 actually be executing the breakpoint insn anyway.
2611 We'll be (un-)executing the previous instruction. */
2612 tp->stepping_over_breakpoint = 1;
2613 else if (gdbarch_single_step_through_delay_p (gdbarch)
2614 && gdbarch_single_step_through_delay (gdbarch,
2615 get_current_frame ()))
2616 /* We stepped onto an instruction that needs to be stepped
2617 again before re-inserting the breakpoint, do so. */
2618 tp->stepping_over_breakpoint = 1;
2619 }
2620 else
2621 {
2622 regcache_write_pc (regcache, addr);
2623 }
2624
2625 if (siggnal != GDB_SIGNAL_DEFAULT)
2626 tp->suspend.stop_signal = siggnal;
2627
2628 /* Record the interpreter that issued the execution command that
2629 caused this thread to resume. If the top level interpreter is
2630 MI/async, and the execution command was a CLI command
2631 (next/step/etc.), we'll want to print stop event output to the MI
2632 console channel (the stepped-to line, etc.), as if the user
2633 entered the execution command on a real GDB console. */
2634 inferior_thread ()->control.command_interp = command_interp ();
2635
2636 if (debug_infrun)
2637 fprintf_unfiltered (gdb_stdlog,
2638 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2639 paddress (gdbarch, addr),
2640 gdb_signal_to_symbol_string (siggnal), step);
2641
2642 if (non_stop)
2643 /* In non-stop, each thread is handled individually. The context
2644 must already be set to the right thread here. */
2645 ;
2646 else
2647 {
2648 struct thread_info *step_over;
2649
2650 /* In a multi-threaded task we may select another thread and
2651 then continue or step.
2652
2653 But if the old thread was stopped at a breakpoint, it will
2654 immediately cause another breakpoint stop without any
2655 execution (i.e. it will report a breakpoint hit incorrectly).
2656 So we must step over it first.
2657
2658 Look for a thread other than the current (TP) that reported a
2659 breakpoint hit and hasn't been resumed yet since. */
2660 step_over = find_thread_needs_step_over (step, tp);
2661 if (step_over != NULL)
2662 {
2663 if (debug_infrun)
2664 fprintf_unfiltered (gdb_stdlog,
2665 "infrun: need to step-over [%s] first\n",
2666 target_pid_to_str (step_over->ptid));
2667
2668 /* Store the prev_pc for the stepping thread too, needed by
2669 switch_back_to_stepped_thread. */
2670 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2671 switch_to_thread (step_over->ptid);
2672 tp = step_over;
2673 }
2674 }
2675
2676 /* If we need to step over a breakpoint, and we're not using
2677 displaced stepping to do so, insert all breakpoints (watchpoints,
2678 etc.) but the one we're stepping over, step one instruction, and
2679 then re-insert the breakpoint when that step is finished. */
2680 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2681 {
2682 struct regcache *regcache = get_current_regcache ();
2683
2684 set_step_over_info (get_regcache_aspace (regcache),
2685 regcache_read_pc (regcache), 0);
2686 }
2687 else
2688 clear_step_over_info ();
2689
2690 insert_breakpoints ();
2691
2692 tp->control.trap_expected = tp->stepping_over_breakpoint;
2693
2694 annotate_starting ();
2695
2696 /* Make sure that output from GDB appears before output from the
2697 inferior. */
2698 gdb_flush (gdb_stdout);
2699
2700 /* Refresh prev_pc value just prior to resuming. This used to be
2701 done in stop_waiting, however, setting prev_pc there did not handle
2702 scenarios such as inferior function calls or returning from
2703 a function via the return command. In those cases, the prev_pc
2704 value was not set properly for subsequent commands. The prev_pc value
2705 is used to initialize the starting line number in the ecs. With an
2706 invalid value, the gdb next command ends up stopping at the position
2707 represented by the next line table entry past our start position.
2708 On platforms that generate one line table entry per line, this
2709 is not a problem. However, on the ia64, the compiler generates
2710 extraneous line table entries that do not increase the line number.
2711 When we issue the gdb next command on the ia64 after an inferior call
2712 or a return command, we often end up a few instructions forward, still
2713 within the original line we started.
2714
2715 An attempt was made to refresh the prev_pc at the same time the
2716 execution_control_state is initialized (for instance, just before
2717 waiting for an inferior event). But this approach did not work
2718 because of platforms that use ptrace, where the pc register cannot
2719 be read unless the inferior is stopped. At that point, we are not
2720 guaranteed the inferior is stopped and so the regcache_read_pc() call
2721 can fail. Setting the prev_pc value here ensures the value is updated
2722 correctly when the inferior is stopped. */
2723 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2724
2725 /* Resume inferior. */
2726 resume (tp->control.trap_expected || step || bpstat_should_step (),
2727 tp->suspend.stop_signal);
2728
2729 /* Wait for it to stop (if not standalone)
2730 and in any case decode why it stopped, and act accordingly. */
2731 /* Do this only if we are not using the event loop, or if the target
2732 does not support asynchronous execution. */
2733 if (!target_can_async_p ())
2734 {
2735 wait_for_inferior ();
2736 normal_stop ();
2737 }
2738 }
2739 \f
2740
2741 /* Start remote-debugging of a machine over a serial link. */
2742
2743 void
2744 start_remote (int from_tty)
2745 {
2746 struct inferior *inferior;
2747
2748 inferior = current_inferior ();
2749 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2750
2751 /* Always go on waiting for the target, regardless of the mode. */
2752 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2753 indicate to wait_for_inferior that a target should timeout if
2754 nothing is returned (instead of just blocking). Because of this,
2755 targets expecting an immediate response need to, internally, set
2756 things up so that the target_wait() is forced to eventually
2757 timeout. */
2758 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2759 differentiate to its caller what the state of the target is after
2760 the initial open has been performed. Here we're assuming that
2761 the target has stopped. It should be possible to eventually have
2762 target_open() return to the caller an indication that the target
2763 is currently running and GDB state should be set to the same as
2764 for an async run. */
2765 wait_for_inferior ();
2766
2767 /* Now that the inferior has stopped, do any bookkeeping like
2768 loading shared libraries. We want to do this before normal_stop,
2769 so that the displayed frame is up to date. */
2770 post_create_inferior (&current_target, from_tty);
2771
2772 normal_stop ();
2773 }
2774
2775 /* Initialize static vars when a new inferior begins. */
2776
2777 void
2778 init_wait_for_inferior (void)
2779 {
2780 /* These are meaningless until the first time through wait_for_inferior. */
2781
2782 breakpoint_init_inferior (inf_starting);
2783
2784 clear_proceed_status (0);
2785
2786 target_last_wait_ptid = minus_one_ptid;
2787
2788 previous_inferior_ptid = inferior_ptid;
2789
2790 /* Discard any skipped inlined frames. */
2791 clear_inline_frame_state (minus_one_ptid);
2792 }
2793
2794 \f
2795 /* Data to be passed around while handling an event. This data is
2796 discarded between events. */
2797 struct execution_control_state
2798 {
2799 ptid_t ptid;
2800 /* The thread that got the event, if this was a thread event; NULL
2801 otherwise. */
2802 struct thread_info *event_thread;
2803
2804 struct target_waitstatus ws;
2805 int stop_func_filled_in;
2806 CORE_ADDR stop_func_start;
2807 CORE_ADDR stop_func_end;
2808 const char *stop_func_name;
2809 int wait_some_more;
2810
2811 /* True if the event thread hit the single-step breakpoint of
2812 another thread. Thus the event doesn't cause a stop; the thread
2813 needs to be single-stepped past the single-step breakpoint before
2814 we can switch back to the original stepping thread. */
2815 int hit_singlestep_breakpoint;
2816 };
2817
2818 static void handle_inferior_event (struct execution_control_state *ecs);
2819
2820 static void handle_step_into_function (struct gdbarch *gdbarch,
2821 struct execution_control_state *ecs);
2822 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2823 struct execution_control_state *ecs);
2824 static void handle_signal_stop (struct execution_control_state *ecs);
2825 static void check_exception_resume (struct execution_control_state *,
2826 struct frame_info *);
2827
2828 static void end_stepping_range (struct execution_control_state *ecs);
2829 static void stop_waiting (struct execution_control_state *ecs);
2830 static void prepare_to_wait (struct execution_control_state *ecs);
2831 static void keep_going (struct execution_control_state *ecs);
2832 static void process_event_stop_test (struct execution_control_state *ecs);
2833 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2834
2835 /* Callback for iterate_over_threads. If the thread is stopped, but
2836 the user/frontend doesn't know about that yet, go through
2837 normal_stop, as if the thread had just stopped now. ARG points at
2838 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2839 ptid_is_pid(PTID) is true, applies to all threads of the process
2840 pointed at by PTID. Otherwise, apply only to the thread pointed by
2841 PTID. */
2842
2843 static int
2844 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2845 {
2846 ptid_t ptid = * (ptid_t *) arg;
2847
2848 if ((ptid_equal (info->ptid, ptid)
2849 || ptid_equal (minus_one_ptid, ptid)
2850 || (ptid_is_pid (ptid)
2851 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2852 && is_running (info->ptid)
2853 && !is_executing (info->ptid))
2854 {
2855 struct cleanup *old_chain;
2856 struct execution_control_state ecss;
2857 struct execution_control_state *ecs = &ecss;
2858
2859 memset (ecs, 0, sizeof (*ecs));
2860
2861 old_chain = make_cleanup_restore_current_thread ();
2862
2863 overlay_cache_invalid = 1;
2864 /* Flush target cache before starting to handle each event.
2865 Target was running and cache could be stale. This is just a
2866 heuristic. Running threads may modify target memory, but we
2867 don't get any event. */
2868 target_dcache_invalidate ();
2869
2870 /* Go through handle_inferior_event/normal_stop, so we always
2871 have consistent output as if the stop event had been
2872 reported. */
2873 ecs->ptid = info->ptid;
2874 ecs->event_thread = find_thread_ptid (info->ptid);
2875 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2876 ecs->ws.value.sig = GDB_SIGNAL_0;
2877
2878 handle_inferior_event (ecs);
2879
2880 if (!ecs->wait_some_more)
2881 {
2882 struct thread_info *tp;
2883
2884 normal_stop ();
2885
2886 /* Finish off the continuations. */
2887 tp = inferior_thread ();
2888 do_all_intermediate_continuations_thread (tp, 1);
2889 do_all_continuations_thread (tp, 1);
2890 }
2891
2892 do_cleanups (old_chain);
2893 }
2894
2895 return 0;
2896 }
2897
2898 /* This function is attached as a "thread_stop_requested" observer.
2899 Cleanup local state that assumed the PTID was to be resumed, and
2900 report the stop to the frontend. */
2901
2902 static void
2903 infrun_thread_stop_requested (ptid_t ptid)
2904 {
2905 struct displaced_step_inferior_state *displaced;
2906
2907 /* PTID was requested to stop. Remove it from the displaced
2908 stepping queue, so we don't try to resume it automatically. */
2909
2910 for (displaced = displaced_step_inferior_states;
2911 displaced;
2912 displaced = displaced->next)
2913 {
2914 struct displaced_step_request *it, **prev_next_p;
2915
2916 it = displaced->step_request_queue;
2917 prev_next_p = &displaced->step_request_queue;
2918 while (it)
2919 {
2920 if (ptid_match (it->ptid, ptid))
2921 {
2922 *prev_next_p = it->next;
2923 it->next = NULL;
2924 xfree (it);
2925 }
2926 else
2927 {
2928 prev_next_p = &it->next;
2929 }
2930
2931 it = *prev_next_p;
2932 }
2933 }
2934
2935 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2936 }
2937
2938 static void
2939 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2940 {
2941 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2942 nullify_last_target_wait_ptid ();
2943 }
2944
2945 /* Delete the step resume, single-step and longjmp/exception resume
2946 breakpoints of TP. */
2947
2948 static void
2949 delete_thread_infrun_breakpoints (struct thread_info *tp)
2950 {
2951 delete_step_resume_breakpoint (tp);
2952 delete_exception_resume_breakpoint (tp);
2953 delete_single_step_breakpoints (tp);
2954 }
2955
2956 /* If the target still has execution, call FUNC for each thread that
2957 just stopped. In all-stop, that's all the non-exited threads; in
2958 non-stop, that's the current thread, only. */
2959
2960 typedef void (*for_each_just_stopped_thread_callback_func)
2961 (struct thread_info *tp);
2962
2963 static void
2964 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2965 {
2966 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
2967 return;
2968
2969 if (non_stop)
2970 {
2971 /* If in non-stop mode, only the current thread stopped. */
2972 func (inferior_thread ());
2973 }
2974 else
2975 {
2976 struct thread_info *tp;
2977
2978 /* In all-stop mode, all threads have stopped. */
2979 ALL_NON_EXITED_THREADS (tp)
2980 {
2981 func (tp);
2982 }
2983 }
2984 }
2985
2986 /* Delete the step resume and longjmp/exception resume breakpoints of
2987 the threads that just stopped. */
2988
2989 static void
2990 delete_just_stopped_threads_infrun_breakpoints (void)
2991 {
2992 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
2993 }
2994
2995 /* Delete the single-step breakpoints of the threads that just
2996 stopped. */
2997
2998 static void
2999 delete_just_stopped_threads_single_step_breakpoints (void)
3000 {
3001 for_each_just_stopped_thread (delete_single_step_breakpoints);
3002 }
3003
3004 /* A cleanup wrapper. */
3005
3006 static void
3007 delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
3008 {
3009 delete_just_stopped_threads_infrun_breakpoints ();
3010 }
3011
3012 /* Pretty print the results of target_wait, for debugging purposes. */
3013
3014 static void
3015 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3016 const struct target_waitstatus *ws)
3017 {
3018 char *status_string = target_waitstatus_to_string (ws);
3019 struct ui_file *tmp_stream = mem_fileopen ();
3020 char *text;
3021
3022 /* The text is split over several lines because it was getting too long.
3023 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3024 output as a unit; we want only one timestamp printed if debug_timestamp
3025 is set. */
3026
3027 fprintf_unfiltered (tmp_stream,
3028 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
3029 if (ptid_get_pid (waiton_ptid) != -1)
3030 fprintf_unfiltered (tmp_stream,
3031 " [%s]", target_pid_to_str (waiton_ptid));
3032 fprintf_unfiltered (tmp_stream, ", status) =\n");
3033 fprintf_unfiltered (tmp_stream,
3034 "infrun: %d [%s],\n",
3035 ptid_get_pid (result_ptid),
3036 target_pid_to_str (result_ptid));
3037 fprintf_unfiltered (tmp_stream,
3038 "infrun: %s\n",
3039 status_string);
3040
3041 text = ui_file_xstrdup (tmp_stream, NULL);
3042
3043 /* This uses %s in part to handle %'s in the text, but also to avoid
3044 a gcc error: the format attribute requires a string literal. */
3045 fprintf_unfiltered (gdb_stdlog, "%s", text);
3046
3047 xfree (status_string);
3048 xfree (text);
3049 ui_file_delete (tmp_stream);
3050 }
3051
3052 /* Prepare and stabilize the inferior for detaching it. E.g.,
3053 detaching while a thread is displaced stepping is a recipe for
3054 crashing it, as nothing would readjust the PC out of the scratch
3055 pad. */
3056
3057 void
3058 prepare_for_detach (void)
3059 {
3060 struct inferior *inf = current_inferior ();
3061 ptid_t pid_ptid = pid_to_ptid (inf->pid);
3062 struct cleanup *old_chain_1;
3063 struct displaced_step_inferior_state *displaced;
3064
3065 displaced = get_displaced_stepping_state (inf->pid);
3066
3067 /* Is any thread of this process displaced stepping? If not,
3068 there's nothing else to do. */
3069 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
3070 return;
3071
3072 if (debug_infrun)
3073 fprintf_unfiltered (gdb_stdlog,
3074 "displaced-stepping in-process while detaching");
3075
3076 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
3077 inf->detaching = 1;
3078
3079 while (!ptid_equal (displaced->step_ptid, null_ptid))
3080 {
3081 struct cleanup *old_chain_2;
3082 struct execution_control_state ecss;
3083 struct execution_control_state *ecs;
3084
3085 ecs = &ecss;
3086 memset (ecs, 0, sizeof (*ecs));
3087
3088 overlay_cache_invalid = 1;
3089 /* Flush target cache before starting to handle each event.
3090 Target was running and cache could be stale. This is just a
3091 heuristic. Running threads may modify target memory, but we
3092 don't get any event. */
3093 target_dcache_invalidate ();
3094
3095 if (deprecated_target_wait_hook)
3096 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
3097 else
3098 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
3099
3100 if (debug_infrun)
3101 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3102
3103 /* If an error happens while handling the event, propagate GDB's
3104 knowledge of the executing state to the frontend/user running
3105 state. */
3106 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
3107 &minus_one_ptid);
3108
3109 /* Now figure out what to do with the result of the wait. */
3110 handle_inferior_event (ecs);
3111
3112 /* No error, don't finish the state yet. */
3113 discard_cleanups (old_chain_2);
3114
3115 /* Breakpoints and watchpoints are not installed on the target
3116 at this point, and signals are passed directly to the
3117 inferior, so this must mean the process is gone. */
3118 if (!ecs->wait_some_more)
3119 {
3120 discard_cleanups (old_chain_1);
3121 error (_("Program exited while detaching"));
3122 }
3123 }
3124
3125 discard_cleanups (old_chain_1);
3126 }
3127
3128 /* Wait for control to return from inferior to debugger.
3129
3130 If inferior gets a signal, we may decide to start it up again
3131 instead of returning. That is why there is a loop in this function.
3132 When this function actually returns it means the inferior
3133 should be left stopped and GDB should read more commands. */
3134
3135 void
3136 wait_for_inferior (void)
3137 {
3138 struct cleanup *old_cleanups;
3139
3140 if (debug_infrun)
3141 fprintf_unfiltered
3142 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3143
3144 old_cleanups
3145 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3146 NULL);
3147
3148 while (1)
3149 {
3150 struct execution_control_state ecss;
3151 struct execution_control_state *ecs = &ecss;
3152 struct cleanup *old_chain;
3153 ptid_t waiton_ptid = minus_one_ptid;
3154
3155 memset (ecs, 0, sizeof (*ecs));
3156
3157 overlay_cache_invalid = 1;
3158
3159 /* Flush target cache before starting to handle each event.
3160 Target was running and cache could be stale. This is just a
3161 heuristic. Running threads may modify target memory, but we
3162 don't get any event. */
3163 target_dcache_invalidate ();
3164
3165 if (deprecated_target_wait_hook)
3166 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3167 else
3168 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3169
3170 if (debug_infrun)
3171 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3172
3173 /* If an error happens while handling the event, propagate GDB's
3174 knowledge of the executing state to the frontend/user running
3175 state. */
3176 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3177
3178 /* Now figure out what to do with the result of the wait. */
3179 handle_inferior_event (ecs);
3180
3181 /* No error, don't finish the state yet. */
3182 discard_cleanups (old_chain);
3183
3184 if (!ecs->wait_some_more)
3185 break;
3186 }
3187
3188 do_cleanups (old_cleanups);
3189 }
3190
3191 /* Cleanup that reinstalls the readline callback handler, if the
3192 target is running in the background. If while handling the target
3193 event something triggered a secondary prompt, like e.g., a
3194 pagination prompt, we'll have removed the callback handler (see
3195 gdb_readline_wrapper_line). Need to do this as we go back to the
3196 event loop, ready to process further input. Note this has no
3197 effect if the handler hasn't actually been removed, because calling
3198 rl_callback_handler_install resets the line buffer, thus losing
3199 input. */
3200
3201 static void
3202 reinstall_readline_callback_handler_cleanup (void *arg)
3203 {
3204 if (!interpreter_async)
3205 {
3206 /* We're not going back to the top level event loop yet. Don't
3207 install the readline callback, as it'd prep the terminal,
3208 readline-style (raw, noecho) (e.g., --batch). We'll install
3209 it the next time the prompt is displayed, when we're ready
3210 for input. */
3211 return;
3212 }
3213
3214 if (async_command_editing_p && !sync_execution)
3215 gdb_rl_callback_handler_reinstall ();
3216 }
3217
3218 /* Asynchronous version of wait_for_inferior. It is called by the
3219 event loop whenever a change of state is detected on the file
3220 descriptor corresponding to the target. It can be called more than
3221 once to complete a single execution command. In such cases we need
3222 to keep the state in a global variable ECSS. If it is the last time
3223 that this function is called for a single execution command, then
3224 report to the user that the inferior has stopped, and do the
3225 necessary cleanups. */
3226
3227 void
3228 fetch_inferior_event (void *client_data)
3229 {
3230 struct execution_control_state ecss;
3231 struct execution_control_state *ecs = &ecss;
3232 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
3233 struct cleanup *ts_old_chain;
3234 int was_sync = sync_execution;
3235 int cmd_done = 0;
3236 ptid_t waiton_ptid = minus_one_ptid;
3237
3238 memset (ecs, 0, sizeof (*ecs));
3239
3240 /* End up with readline processing input, if necessary. */
3241 make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);
3242
3243 /* We're handling a live event, so make sure we're doing live
3244 debugging. If we're looking at traceframes while the target is
3245 running, we're going to need to get back to that mode after
3246 handling the event. */
3247 if (non_stop)
3248 {
3249 make_cleanup_restore_current_traceframe ();
3250 set_current_traceframe (-1);
3251 }
3252
3253 if (non_stop)
3254 /* In non-stop mode, the user/frontend should not notice a thread
3255 switch due to internal events. Make sure we reverse to the
3256 user selected thread and frame after handling the event and
3257 running any breakpoint commands. */
3258 make_cleanup_restore_current_thread ();
3259
3260 overlay_cache_invalid = 1;
3261 /* Flush target cache before starting to handle each event. Target
3262 was running and cache could be stale. This is just a heuristic.
3263 Running threads may modify target memory, but we don't get any
3264 event. */
3265 target_dcache_invalidate ();
3266
3267 make_cleanup_restore_integer (&execution_direction);
3268 execution_direction = target_execution_direction ();
3269
3270 if (deprecated_target_wait_hook)
3271 ecs->ptid =
3272 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3273 else
3274 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3275
3276 if (debug_infrun)
3277 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3278
3279 /* If an error happens while handling the event, propagate GDB's
3280 knowledge of the executing state to the frontend/user running
3281 state. */
3282 if (!non_stop)
3283 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3284 else
3285 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3286
3287 /* This cleanup runs before the make_cleanup_restore_current_thread above,
3288 so it still applies to the thread which has thrown the exception. */
3289 make_bpstat_clear_actions_cleanup ();
3290
3291 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3292
3293 /* Now figure out what to do with the result. */
3294 handle_inferior_event (ecs);
3295
3296 if (!ecs->wait_some_more)
3297 {
3298 struct inferior *inf = find_inferior_ptid (ecs->ptid);
3299
3300 delete_just_stopped_threads_infrun_breakpoints ();
3301
3302 /* We may not find an inferior if this was a process exit. */
3303 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3304 normal_stop ();
3305
3306 if (target_has_execution
3307 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
3308 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3309 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3310 && ecs->event_thread->step_multi
3311 && ecs->event_thread->control.stop_step)
3312 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3313 else
3314 {
3315 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3316 cmd_done = 1;
3317 }
3318 }
3319
3320 /* No error, don't finish the thread states yet. */
3321 discard_cleanups (ts_old_chain);
3322
3323 /* Revert thread and frame. */
3324 do_cleanups (old_chain);
3325
3326 /* If the inferior was in sync execution mode, and now isn't,
3327 restore the prompt (a synchronous execution command has finished,
3328 and we're ready for input). */
3329 if (interpreter_async && was_sync && !sync_execution)
3330 observer_notify_sync_execution_done ();
3331
3332 if (cmd_done
3333 && !was_sync
3334 && exec_done_display_p
3335 && (ptid_equal (inferior_ptid, null_ptid)
3336 || !is_running (inferior_ptid)))
3337 printf_unfiltered (_("completed.\n"));
3338 }
3339
3340 /* Record the frame and location we're currently stepping through. */
3341 void
3342 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3343 {
3344 struct thread_info *tp = inferior_thread ();
3345
3346 tp->control.step_frame_id = get_frame_id (frame);
3347 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3348
3349 tp->current_symtab = sal.symtab;
3350 tp->current_line = sal.line;
3351 }
3352
3353 /* Clear context switchable stepping state. */
3354
3355 void
3356 init_thread_stepping_state (struct thread_info *tss)
3357 {
3358 tss->stepped_breakpoint = 0;
3359 tss->stepping_over_breakpoint = 0;
3360 tss->stepping_over_watchpoint = 0;
3361 tss->step_after_step_resume_breakpoint = 0;
3362 }
3363
3364 /* Set the cached copy of the last ptid/waitstatus. */
3365
3366 static void
3367 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3368 {
3369 target_last_wait_ptid = ptid;
3370 target_last_waitstatus = status;
3371 }
3372
3373 /* Return the cached copy of the last pid/waitstatus returned by
3374 target_wait()/deprecated_target_wait_hook(). The data is actually
3375 cached by handle_inferior_event(), which gets called immediately
3376 after target_wait()/deprecated_target_wait_hook(). */
3377
3378 void
3379 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3380 {
3381 *ptidp = target_last_wait_ptid;
3382 *status = target_last_waitstatus;
3383 }
3384
3385 void
3386 nullify_last_target_wait_ptid (void)
3387 {
3388 target_last_wait_ptid = minus_one_ptid;
3389 }
3390
3391 /* Switch thread contexts. */
3392
3393 static void
3394 context_switch (ptid_t ptid)
3395 {
3396 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3397 {
3398 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3399 target_pid_to_str (inferior_ptid));
3400 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3401 target_pid_to_str (ptid));
3402 }
3403
3404 switch_to_thread (ptid);
3405 }
3406
3407 static void
3408 adjust_pc_after_break (struct execution_control_state *ecs)
3409 {
3410 struct regcache *regcache;
3411 struct gdbarch *gdbarch;
3412 struct address_space *aspace;
3413 CORE_ADDR breakpoint_pc, decr_pc;
3414
3415 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3416 we aren't, just return.
3417
3418 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3419 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3420 implemented by software breakpoints should be handled through the normal
3421 breakpoint layer.
3422
3423 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3424 different signals (SIGILL or SIGEMT for instance), but it is less
3425 clear where the PC is pointing afterwards. It may not match
3426 gdbarch_decr_pc_after_break. I don't know any specific target that
3427 generates these signals at breakpoints (the code has been in GDB since at
3428 least 1992) so I can not guess how to handle them here.
3429
3430 In earlier versions of GDB, a target with
3431 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3432 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3433 target with both of these set in GDB history, and it seems unlikely to be
3434 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3435
3436 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3437 return;
3438
3439 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3440 return;
3441
3442 /* In reverse execution, when a breakpoint is hit, the instruction
3443 under it has already been de-executed. The reported PC always
3444 points at the breakpoint address, so adjusting it further would
3445 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3446 architecture:
3447
3448 B1 0x08000000 : INSN1
3449 B2 0x08000001 : INSN2
3450 0x08000002 : INSN3
3451 PC -> 0x08000003 : INSN4
3452
3453 Say you're stopped at 0x08000003 as above. Reverse continuing
3454 from that point should hit B2 as below. Reading the PC when the
3455 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3456 been de-executed already.
3457
3458 B1 0x08000000 : INSN1
3459 B2 PC -> 0x08000001 : INSN2
3460 0x08000002 : INSN3
3461 0x08000003 : INSN4
3462
3463 We can't apply the same logic as for forward execution, because
3464 we would wrongly adjust the PC to 0x08000000, since there's a
3465 breakpoint at PC - 1. We'd then report a hit on B1, although
3466 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3467 behaviour. */
3468 if (execution_direction == EXEC_REVERSE)
3469 return;
3470
3471 /* If this target does not decrement the PC after breakpoints, then
3472 we have nothing to do. */
3473 regcache = get_thread_regcache (ecs->ptid);
3474 gdbarch = get_regcache_arch (regcache);
3475
3476 decr_pc = target_decr_pc_after_break (gdbarch);
3477 if (decr_pc == 0)
3478 return;
3479
3480 aspace = get_regcache_aspace (regcache);
3481
3482 /* Find the location where (if we've hit a breakpoint) the
3483 breakpoint would be. */
3484 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3485
3486 /* Check whether there actually is a software breakpoint inserted at
3487 that location.
3488
3489 If in non-stop mode, a race condition is possible where we've
3490 removed a breakpoint, but stop events for that breakpoint were
3491 already queued and arrive later. To suppress those spurious
3492 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3493 and retire them after a number of stop events are reported. */
3494 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3495 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3496 {
3497 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3498
3499 if (record_full_is_used ())
3500 record_full_gdb_operation_disable_set ();
3501
3502 /* When using hardware single-step, a SIGTRAP is reported for both
3503 a completed single-step and a software breakpoint. Need to
3504 differentiate between the two, as the latter needs adjusting
3505 but the former does not.
3506
3507 The SIGTRAP can be due to a completed hardware single-step only if
3508 - we didn't insert software single-step breakpoints
3509 - this thread is currently being stepped
3510
3511 If either of these conditions does not hold, we must have stopped due
3512 to hitting a software breakpoint, and have to back up to the
3513 breakpoint address.
3514
3515 As a special case, we could have hardware single-stepped a
3516 software breakpoint. In this case (prev_pc == breakpoint_pc),
3517 we also need to back up to the breakpoint address. */
3518
3519 if (thread_has_single_step_breakpoints_set (ecs->event_thread)
3520 || !currently_stepping (ecs->event_thread)
3521 || (ecs->event_thread->stepped_breakpoint
3522 && ecs->event_thread->prev_pc == breakpoint_pc))
3523 regcache_write_pc (regcache, breakpoint_pc);
3524
3525 do_cleanups (old_cleanups);
3526 }
3527 }
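/* Illustrative example, not target-specific code in this file: on a
target whose breakpoint instruction is one byte and whose
decr_pc_after_break is 1 (x86's int3 behaves this way), a breakpoint
planted at 0x1000 reports a stop with PC = 0x1001.  adjust_pc_after_break
rewinds the PC to 0x1000 so the stop is attributed to that breakpoint,
unless the SIGTRAP was really a completed hardware single-step.  */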
3528
3529 static int
3530 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3531 {
3532 for (frame = get_prev_frame (frame);
3533 frame != NULL;
3534 frame = get_prev_frame (frame))
3535 {
3536 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3537 return 1;
3538 if (get_frame_type (frame) != INLINE_FRAME)
3539 break;
3540 }
3541
3542 return 0;
3543 }
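/* Example reading of the above: if the user was stepping in function
f (the frame recorded in STEP_FRAME_ID) and execution is now in g,
which is inlined into f, walking up from g through the chain of inline
callers reaches f and we return 1; for an unrelated frame we return 0.  */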
3544
3545 /* Auxiliary function that handles syscall entry/return events.
3546 It returns 1 if the inferior should keep going (and GDB
3547 should ignore the event), or 0 if the event deserves to be
3548 processed. */
3549
3550 static int
3551 handle_syscall_event (struct execution_control_state *ecs)
3552 {
3553 struct regcache *regcache;
3554 int syscall_number;
3555
3556 if (!ptid_equal (ecs->ptid, inferior_ptid))
3557 context_switch (ecs->ptid);
3558
3559 regcache = get_thread_regcache (ecs->ptid);
3560 syscall_number = ecs->ws.value.syscall_number;
3561 stop_pc = regcache_read_pc (regcache);
3562
3563 if (catch_syscall_enabled () > 0
3564 && catching_syscall_number (syscall_number) > 0)
3565 {
3566 if (debug_infrun)
3567 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3568 syscall_number);
3569
3570 ecs->event_thread->control.stop_bpstat
3571 = bpstat_stop_status (get_regcache_aspace (regcache),
3572 stop_pc, ecs->ptid, &ecs->ws);
3573
3574 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3575 {
3576 /* Catchpoint hit. */
3577 return 0;
3578 }
3579 }
3580
3581 /* If no catchpoint triggered for this, then keep going. */
3582 keep_going (ecs);
3583 return 1;
3584 }
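/* Usage sketch, assuming a target with syscall catchpoint support
(e.g. GNU/Linux native): after "catch syscall", each syscall entry and
return comes in as TARGET_WAITKIND_SYSCALL_ENTRY / _RETURN and is
funneled through handle_syscall_event above; syscalls the user is not
catching are transparently resumed via keep_going.  */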
3585
3586 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3587
3588 static void
3589 fill_in_stop_func (struct gdbarch *gdbarch,
3590 struct execution_control_state *ecs)
3591 {
3592 if (!ecs->stop_func_filled_in)
3593 {
3594 /* Don't care about return value; stop_func_start and stop_func_name
3595 will both be 0 if it doesn't work. */
3596 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3597 &ecs->stop_func_start, &ecs->stop_func_end);
3598 ecs->stop_func_start
3599 += gdbarch_deprecated_function_start_offset (gdbarch);
3600
3601 if (gdbarch_skip_entrypoint_p (gdbarch))
3602 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3603 ecs->stop_func_start);
3604
3605 ecs->stop_func_filled_in = 1;
3606 }
3607 }
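/* For example, for a stop inside function foo this leaves
stop_func_name as "foo" and stop_func_start/stop_func_end as foo's
address range (possibly adjusted by the gdbarch hooks above), or all
zero when no symbol covers stop_pc; the stop_func_filled_in flag
avoids redoing the lookup for every check made while handling one
event.  */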
3608
3609
3610 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3611
3612 static enum stop_kind
3613 get_inferior_stop_soon (ptid_t ptid)
3614 {
3615 struct inferior *inf = find_inferior_ptid (ptid);
3616
3617 gdb_assert (inf != NULL);
3618 return inf->control.stop_soon;
3619 }
3620
3621 /* Given an execution control state that has been freshly filled in by
3622 an event from the inferior, figure out what it means and take
3623 appropriate action.
3624
3625 The alternatives are:
3626
3627 1) stop_waiting and return; to really stop and return to the
3628 debugger.
3629
3630 2) keep_going and return; to wait for the next event (set
3631 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3632 once). */
3633
3634 static void
3635 handle_inferior_event (struct execution_control_state *ecs)
3636 {
3637 enum stop_kind stop_soon;
3638
3639 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3640 {
3641 /* We had an event in the inferior, but we are not interested in
3642 handling it at this level. The lower layers have already
3643 done what needs to be done, if anything.
3644
3645 One of the possible circumstances for this is when the
3646 inferior produces output for the console. The inferior has
3647 not stopped, and we are ignoring the event. Another possible
3648 circumstance is any event which the lower level knows will be
3649 reported multiple times without an intervening resume. */
3650 if (debug_infrun)
3651 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3652 prepare_to_wait (ecs);
3653 return;
3654 }
3655
3656 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3657 && target_can_async_p () && !sync_execution)
3658 {
3659 /* There were no unwaited-for children left in the target, but,
3660 we're not synchronously waiting for events either. Just
3661 ignore. Otherwise, if we were running a synchronous
3662 execution command, we need to cancel it and give the user
3663 back the terminal. */
3664 if (debug_infrun)
3665 fprintf_unfiltered (gdb_stdlog,
3666 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3667 prepare_to_wait (ecs);
3668 return;
3669 }
3670
3671 /* Cache the last pid/waitstatus. */
3672 set_last_target_status (ecs->ptid, ecs->ws);
3673
3674 /* Always clear state belonging to the previous time we stopped. */
3675 stop_stack_dummy = STOP_NONE;
3676
3677 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3678 {
3679 /* No unwaited-for children left. IOW, all resumed children
3680 have exited. */
3681 if (debug_infrun)
3682 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3683
3684 stop_print_frame = 0;
3685 stop_waiting (ecs);
3686 return;
3687 }
3688
3689 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3690 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3691 {
3692 ecs->event_thread = find_thread_ptid (ecs->ptid);
3693 /* If it's a new thread, add it to the thread database. */
3694 if (ecs->event_thread == NULL)
3695 ecs->event_thread = add_thread (ecs->ptid);
3696
3697 /* Disable range stepping. If the next step request could use a
3698 range, this will end up re-enabled then. */
3699 ecs->event_thread->control.may_range_step = 0;
3700 }
3701
3702 /* Dependent on valid ECS->EVENT_THREAD. */
3703 adjust_pc_after_break (ecs);
3704
3705 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3706 reinit_frame_cache ();
3707
3708 breakpoint_retire_moribund ();
3709
3710 /* First, distinguish signals caused by the debugger from signals
3711 that have to do with the program's own actions. Note that
3712 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3713 on the operating system version. Here we detect when a SIGILL or
3714 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3715 something similar for SIGSEGV, since a SIGSEGV will be generated
3716 when we're trying to execute a breakpoint instruction on a
3717 non-executable stack. This happens for call dummy breakpoints
3718 for architectures like SPARC that place call dummies on the
3719 stack. */
3720 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3721 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3722 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3723 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3724 {
3725 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3726
3727 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3728 regcache_read_pc (regcache)))
3729 {
3730 if (debug_infrun)
3731 fprintf_unfiltered (gdb_stdlog,
3732 "infrun: Treating signal as SIGTRAP\n");
3733 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3734 }
3735 }
3736
3737 /* Mark the non-executing threads accordingly. In all-stop, all
3738 threads of all processes are stopped when we get any event
3739 reported. In non-stop mode, only the event thread stops. If
3740 we're handling a process exit in non-stop mode, there's nothing
3741 to do, as threads of the dead process are gone, and threads of
3742 any other process were left running. */
3743 if (!non_stop)
3744 set_executing (minus_one_ptid, 0);
3745 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3746 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3747 set_executing (ecs->ptid, 0);
3748
3749 switch (ecs->ws.kind)
3750 {
3751 case TARGET_WAITKIND_LOADED:
3752 if (debug_infrun)
3753 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3754 if (!ptid_equal (ecs->ptid, inferior_ptid))
3755 context_switch (ecs->ptid);
3756 /* Ignore gracefully during startup of the inferior, as it might
3757 be the shell which has just loaded some objects; otherwise,
3758 add the symbols for the newly loaded objects. Also ignore at
3759 the beginning of an attach or remote session; we will query
3760 the full list of libraries once the connection is
3761 established. */
3762
3763 stop_soon = get_inferior_stop_soon (ecs->ptid);
3764 if (stop_soon == NO_STOP_QUIETLY)
3765 {
3766 struct regcache *regcache;
3767
3768 regcache = get_thread_regcache (ecs->ptid);
3769
3770 handle_solib_event ();
3771
3772 ecs->event_thread->control.stop_bpstat
3773 = bpstat_stop_status (get_regcache_aspace (regcache),
3774 stop_pc, ecs->ptid, &ecs->ws);
3775
3776 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3777 {
3778 /* A catchpoint triggered. */
3779 process_event_stop_test (ecs);
3780 return;
3781 }
3782
3783 /* If requested, stop when the dynamic linker notifies
3784 gdb of events. This allows the user to get control
3785 and place breakpoints in initializer routines for
3786 dynamically loaded objects (among other things). */
3787 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3788 if (stop_on_solib_events)
3789 {
3790 /* Make sure we print "Stopped due to solib-event" in
3791 normal_stop. */
3792 stop_print_frame = 1;
3793
3794 stop_waiting (ecs);
3795 return;
3796 }
3797 }
3798
3799 /* If we are skipping through a shell, or through shared library
3800 loading that we aren't interested in, resume the program. If
3801 we're running the program normally, also resume. */
3802 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3803 {
3804 /* Loading of shared libraries might have changed breakpoint
3805 addresses. Make sure new breakpoints are inserted. */
3806 if (stop_soon == NO_STOP_QUIETLY)
3807 insert_breakpoints ();
3808 resume (0, GDB_SIGNAL_0);
3809 prepare_to_wait (ecs);
3810 return;
3811 }
3812
3813 /* But stop if we're attaching or setting up a remote
3814 connection. */
3815 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3816 || stop_soon == STOP_QUIETLY_REMOTE)
3817 {
3818 if (debug_infrun)
3819 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3820 stop_waiting (ecs);
3821 return;
3822 }
3823
3824 internal_error (__FILE__, __LINE__,
3825 _("unhandled stop_soon: %d"), (int) stop_soon);
3826
3827 case TARGET_WAITKIND_SPURIOUS:
3828 if (debug_infrun)
3829 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3830 if (!ptid_equal (ecs->ptid, inferior_ptid))
3831 context_switch (ecs->ptid);
3832 resume (0, GDB_SIGNAL_0);
3833 prepare_to_wait (ecs);
3834 return;
3835
3836 case TARGET_WAITKIND_EXITED:
3837 case TARGET_WAITKIND_SIGNALLED:
3838 if (debug_infrun)
3839 {
3840 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3841 fprintf_unfiltered (gdb_stdlog,
3842 "infrun: TARGET_WAITKIND_EXITED\n");
3843 else
3844 fprintf_unfiltered (gdb_stdlog,
3845 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3846 }
3847
3848 inferior_ptid = ecs->ptid;
3849 set_current_inferior (find_inferior_ptid (ecs->ptid));
3850 set_current_program_space (current_inferior ()->pspace);
3851 handle_vfork_child_exec_or_exit (0);
3852 target_terminal_ours (); /* Must do this before mourn anyway. */
3853
3854 /* Clearing any previous state of convenience variables. */
3855 clear_exit_convenience_vars ();
3856
3857 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3858 {
3859 /* Record the exit code in the convenience variable $_exitcode, so
3860 that the user can inspect this again later. */
3861 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3862 (LONGEST) ecs->ws.value.integer);
3863
3864 /* Also record this in the inferior itself. */
3865 current_inferior ()->has_exit_code = 1;
3866 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3867
3868 /* Support the --return-child-result option. */
3869 return_child_result_value = ecs->ws.value.integer;
3870
3871 observer_notify_exited (ecs->ws.value.integer);
3872 }
3873 else
3874 {
3875 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3876 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3877
3878 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3879 {
3880 /* Set the value of the internal variable $_exitsignal,
3881 which holds the signal uncaught by the inferior. */
3882 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3883 gdbarch_gdb_signal_to_target (gdbarch,
3884 ecs->ws.value.sig));
3885 }
3886 else
3887 {
3888 /* We don't have access to the target's method used for
3889 converting between signal numbers (GDB's internal
3890 representation <-> target's representation).
3891 Therefore, we cannot do a good job at displaying this
3892 information to the user. It's better to just warn
3893 her about it (if infrun debugging is enabled), and
3894 give up. */
3895 if (debug_infrun)
3896 fprintf_filtered (gdb_stdlog, _("\
3897 Cannot fill $_exitsignal with the correct signal number.\n"));
3898 }
3899
3900 observer_notify_signal_exited (ecs->ws.value.sig);
3901 }
3902
3903 gdb_flush (gdb_stdout);
3904 target_mourn_inferior ();
3905 stop_print_frame = 0;
3906 stop_waiting (ecs);
3907 return;
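/* The convenience variables set above remain available after the
inferior is gone; e.g. "print $_exitcode" after a normal exit, or
"print $_exitsignal" after the inferior was killed by a signal (only
one of the two is set for a given run). */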
3908
3909 /* The following are the only cases in which we keep going;
3910 the above cases end in a continue or goto. */
3911 case TARGET_WAITKIND_FORKED:
3912 case TARGET_WAITKIND_VFORKED:
3913 if (debug_infrun)
3914 {
3915 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3916 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3917 else
3918 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3919 }
3920
3921 /* Check whether the inferior is displaced stepping. */
3922 {
3923 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3924 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3925 struct displaced_step_inferior_state *displaced
3926 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3927
3928 /* If displaced stepping is in use for this inferior, and thread
3929 ecs->ptid is the one that is displaced stepping. */
3930 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3931 {
3932 struct inferior *parent_inf
3933 = find_inferior_ptid (ecs->ptid);
3934 struct regcache *child_regcache;
3935 CORE_ADDR parent_pc;
3936
3937 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3938 indicating that the displaced stepping of the syscall instruction
3939 has been done. Perform cleanup for the parent process here. Note
3940 that this operation also cleans up the child process for vfork,
3941 because their pages are shared. */
3942 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3943
3944 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3945 {
3946 /* Restore scratch pad for child process. */
3947 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3948 }
3949
3950 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3951 the child's PC is also within the scratchpad. Set the child's PC
3952 to the parent's PC value, which has already been fixed up.
3953 FIXME: we use the parent's aspace here, although we're touching
3954 the child, because the child hasn't been added to the inferior
3955 list yet at this point. */
3956
3957 child_regcache
3958 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3959 gdbarch,
3960 parent_inf->aspace);
3961 /* Read PC value of parent process. */
3962 parent_pc = regcache_read_pc (regcache);
3963
3964 if (debug_displaced)
3965 fprintf_unfiltered (gdb_stdlog,
3966 "displaced: write child pc from %s to %s\n",
3967 paddress (gdbarch,
3968 regcache_read_pc (child_regcache)),
3969 paddress (gdbarch, parent_pc));
3970
3971 regcache_write_pc (child_regcache, parent_pc);
3972 }
3973 }
3974
3975 if (!ptid_equal (ecs->ptid, inferior_ptid))
3976 context_switch (ecs->ptid);
3977
3978 /* Immediately detach breakpoints from the child before there's
3979 any chance of letting the user delete breakpoints from the
3980 breakpoint lists. If we don't do this early, it's easy to
3981 leave left over traps in the child, vis: "break foo; catch
3982 fork; c; <fork>; del; c; <child calls foo>". We only follow
3983 the fork on the last `continue', and by that time the
3984 breakpoint at "foo" is long gone from the breakpoint table.
3985 If we vforked, then we don't need to unpatch here, since both
3986 parent and child are sharing the same memory pages; we'll
3987 need to unpatch at follow/detach time instead to be certain
3988 that new breakpoints added between catchpoint hit time and
3989 vfork follow are detached. */
3990 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3991 {
3992 /* This won't actually modify the breakpoint list, but will
3993 physically remove the breakpoints from the child. */
3994 detach_breakpoints (ecs->ws.value.related_pid);
3995 }
3996
3997 delete_just_stopped_threads_single_step_breakpoints ();
3998
3999 /* In case the event is caught by a catchpoint, remember that
4000 the event is to be followed at the next resume of the thread,
4001 and not immediately. */
4002 ecs->event_thread->pending_follow = ecs->ws;
4003
4004 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4005
4006 ecs->event_thread->control.stop_bpstat
4007 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4008 stop_pc, ecs->ptid, &ecs->ws);
4009
4010 /* If no catchpoint triggered for this, then keep going. Note
4011 that we're interested in knowing whether the bpstat actually causes a
4012 stop, not just if it may explain the signal. Software
4013 watchpoints, for example, always appear in the bpstat. */
4014 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4015 {
4016 ptid_t parent;
4017 ptid_t child;
4018 int should_resume;
4019 int follow_child
4020 = (follow_fork_mode_string == follow_fork_mode_child);
4021
4022 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4023
4024 should_resume = follow_fork ();
4025
4026 parent = ecs->ptid;
4027 child = ecs->ws.value.related_pid;
4028
4029 /* In non-stop mode, also resume the other branch. */
4030 if (non_stop && !detach_fork)
4031 {
4032 if (follow_child)
4033 switch_to_thread (parent);
4034 else
4035 switch_to_thread (child);
4036
4037 ecs->event_thread = inferior_thread ();
4038 ecs->ptid = inferior_ptid;
4039 keep_going (ecs);
4040 }
4041
4042 if (follow_child)
4043 switch_to_thread (child);
4044 else
4045 switch_to_thread (parent);
4046
4047 ecs->event_thread = inferior_thread ();
4048 ecs->ptid = inferior_ptid;
4049
4050 if (should_resume)
4051 keep_going (ecs);
4052 else
4053 stop_waiting (ecs);
4054 return;
4055 }
4056 process_event_stop_test (ecs);
4057 return;
4058
4059 case TARGET_WAITKIND_VFORK_DONE:
4060 /* Done with the shared memory region. Re-insert breakpoints in
4061 the parent, and keep going. */
4062
4063 if (debug_infrun)
4064 fprintf_unfiltered (gdb_stdlog,
4065 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
4066
4067 if (!ptid_equal (ecs->ptid, inferior_ptid))
4068 context_switch (ecs->ptid);
4069
4070 current_inferior ()->waiting_for_vfork_done = 0;
4071 current_inferior ()->pspace->breakpoints_not_allowed = 0;
4072 /* This also takes care of reinserting breakpoints in the
4073 previously locked inferior. */
4074 keep_going (ecs);
4075 return;
4076
4077 case TARGET_WAITKIND_EXECD:
4078 if (debug_infrun)
4079 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
4080
4081 if (!ptid_equal (ecs->ptid, inferior_ptid))
4082 context_switch (ecs->ptid);
4083
4084 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4085
4086 /* Do whatever is necessary to the parent branch of the vfork. */
4087 handle_vfork_child_exec_or_exit (1);
4088
4089 /* This causes the eventpoints and symbol table to be reset.
4090 Must do this now, before trying to determine whether to
4091 stop. */
4092 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
4093
4094 ecs->event_thread->control.stop_bpstat
4095 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4096 stop_pc, ecs->ptid, &ecs->ws);
4097
4098 /* Note that this may be referenced from inside
4099 bpstat_stop_status above, through inferior_has_execd. */
4100 xfree (ecs->ws.value.execd_pathname);
4101 ecs->ws.value.execd_pathname = NULL;
4102
4103 /* If no catchpoint triggered for this, then keep going. */
4104 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4105 {
4106 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4107 keep_going (ecs);
4108 return;
4109 }
4110 process_event_stop_test (ecs);
4111 return;
4112
4113 /* Be careful not to try to gather much state about a thread
4114 that's in a syscall. It's frequently a losing proposition. */
4115 case TARGET_WAITKIND_SYSCALL_ENTRY:
4116 if (debug_infrun)
4117 fprintf_unfiltered (gdb_stdlog,
4118 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
4119 /* Getting the current syscall number. */
4120 if (handle_syscall_event (ecs) == 0)
4121 process_event_stop_test (ecs);
4122 return;
4123
4124 /* Before examining the threads further, step this thread to
4125 get it entirely out of the syscall. (We get notice of the
4126 event when the thread is just on the verge of exiting a
4127 syscall. Stepping one instruction seems to get it back
4128 into user code.) */
4129 case TARGET_WAITKIND_SYSCALL_RETURN:
4130 if (debug_infrun)
4131 fprintf_unfiltered (gdb_stdlog,
4132 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
4133 if (handle_syscall_event (ecs) == 0)
4134 process_event_stop_test (ecs);
4135 return;
4136
4137 case TARGET_WAITKIND_STOPPED:
4138 if (debug_infrun)
4139 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4140 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4141 handle_signal_stop (ecs);
4142 return;
4143
4144 case TARGET_WAITKIND_NO_HISTORY:
4145 if (debug_infrun)
4146 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4147 /* Reverse execution: target ran out of history info. */
4148
4149 delete_just_stopped_threads_single_step_breakpoints ();
4150 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4151 observer_notify_no_history ();
4152 stop_waiting (ecs);
4153 return;
4154 }
4155 }
4156
4157 /* Come here when the program has stopped with a signal. */
4158
4159 static void
4160 handle_signal_stop (struct execution_control_state *ecs)
4161 {
4162 struct frame_info *frame;
4163 struct gdbarch *gdbarch;
4164 int stopped_by_watchpoint;
4165 enum stop_kind stop_soon;
4166 int random_signal;
4167
4168 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4169
4170 /* Do we need to clean up the state of a thread that has
4171 completed a displaced single-step? (Doing so usually affects
4172 the PC, so do it here, before we set stop_pc.) */
4173 displaced_step_fixup (ecs->ptid,
4174 ecs->event_thread->suspend.stop_signal);
4175
4176 /* If we either finished a single-step or hit a breakpoint, but
4177 the user wanted this thread to be stopped, pretend we got a
4178 SIG0 (generic unsignaled stop). */
4179 if (ecs->event_thread->stop_requested
4180 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4181 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4182
4183 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4184
4185 if (debug_infrun)
4186 {
4187 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4188 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4189 struct cleanup *old_chain = save_inferior_ptid ();
4190
4191 inferior_ptid = ecs->ptid;
4192
4193 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4194 paddress (gdbarch, stop_pc));
4195 if (target_stopped_by_watchpoint ())
4196 {
4197 CORE_ADDR addr;
4198
4199 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4200
4201 if (target_stopped_data_address (&current_target, &addr))
4202 fprintf_unfiltered (gdb_stdlog,
4203 "infrun: stopped data address = %s\n",
4204 paddress (gdbarch, addr));
4205 else
4206 fprintf_unfiltered (gdb_stdlog,
4207 "infrun: (no data address available)\n");
4208 }
4209
4210 do_cleanups (old_chain);
4211 }
4212
4213 /* This originates from start_remote(), start_inferior() and
4214 shared library hook functions. */
4215 stop_soon = get_inferior_stop_soon (ecs->ptid);
4216 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4217 {
4218 if (!ptid_equal (ecs->ptid, inferior_ptid))
4219 context_switch (ecs->ptid);
4220 if (debug_infrun)
4221 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4222 stop_print_frame = 1;
4223 stop_waiting (ecs);
4224 return;
4225 }
4226
4227 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4228 && stop_after_trap)
4229 {
4230 if (!ptid_equal (ecs->ptid, inferior_ptid))
4231 context_switch (ecs->ptid);
4232 if (debug_infrun)
4233 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4234 stop_print_frame = 0;
4235 stop_waiting (ecs);
4236 return;
4237 }
4238
4239 /* This originates from attach_command(). We need to overwrite
4240 the stop_signal here, because some kernels don't ignore a
4241 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4242 See more comments in inferior.h. On the other hand, if we
4243 get a non-SIGSTOP, report it to the user - assume the backend
4244 will handle the SIGSTOP if it should show up later.
4245
4246 Also consider that the attach is complete when we see a
4247 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4248 target extended-remote report it instead of a SIGSTOP
4249 (e.g. gdbserver). We already rely on SIGTRAP being our
4250 signal, so this is no exception.
4251
4252 Also consider that the attach is complete when we see a
4253 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4254 the target to stop all threads of the inferior, in case the
4255 low level attach operation doesn't stop them implicitly. If
4256 they weren't stopped implicitly, then the stub will report a
4257 GDB_SIGNAL_0, meaning: stopped for no particular reason
4258 other than GDB's request. */
4259 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4260 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4261 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4262 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4263 {
4264 stop_print_frame = 1;
4265 stop_waiting (ecs);
4266 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4267 return;
4268 }
4269
4270 /* See if something interesting happened to the non-current thread. If
4271 so, then switch to that thread. */
4272 if (!ptid_equal (ecs->ptid, inferior_ptid))
4273 {
4274 if (debug_infrun)
4275 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4276
4277 context_switch (ecs->ptid);
4278
4279 if (deprecated_context_hook)
4280 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4281 }
4282
4283 /* At this point, get hold of the now-current thread's frame. */
4284 frame = get_current_frame ();
4285 gdbarch = get_frame_arch (frame);
4286
4287 /* Pull the single step breakpoints out of the target. */
4288 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4289 {
4290 struct regcache *regcache;
4291 struct address_space *aspace;
4292 CORE_ADDR pc;
4293
4294 regcache = get_thread_regcache (ecs->ptid);
4295 aspace = get_regcache_aspace (regcache);
4296 pc = regcache_read_pc (regcache);
4297
4298 /* However, before doing so, if this single-step breakpoint was
4299 actually for another thread, set this thread up for moving
4300 past it. */
4301 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4302 aspace, pc))
4303 {
4304 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4305 {
4306 if (debug_infrun)
4307 {
4308 fprintf_unfiltered (gdb_stdlog,
4309 "infrun: [%s] hit another thread's "
4310 "single-step breakpoint\n",
4311 target_pid_to_str (ecs->ptid));
4312 }
4313 ecs->hit_singlestep_breakpoint = 1;
4314 }
4315 }
4316 else
4317 {
4318 if (debug_infrun)
4319 {
4320 fprintf_unfiltered (gdb_stdlog,
4321 "infrun: [%s] hit its "
4322 "single-step breakpoint\n",
4323 target_pid_to_str (ecs->ptid));
4324 }
4325 }
4326 }
4327 delete_just_stopped_threads_single_step_breakpoints ();
4328
4329 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4330 && ecs->event_thread->control.trap_expected
4331 && ecs->event_thread->stepping_over_watchpoint)
4332 stopped_by_watchpoint = 0;
4333 else
4334 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4335
4336 /* If necessary, step over this watchpoint. We'll be back to display
4337 it in a moment. */
4338 if (stopped_by_watchpoint
4339 && (target_have_steppable_watchpoint
4340 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4341 {
4342 /* At this point, we are stopped at an instruction which has
4343 attempted to write to a piece of memory under control of
4344 a watchpoint. The instruction hasn't actually executed
4345 yet. If we were to evaluate the watchpoint expression
4346 now, we would get the old value, and therefore no change
4347 would seem to have occurred.
4348
4349 In order to make watchpoints work `right', we really need
4350 to complete the memory write, and then evaluate the
4351 watchpoint expression. We do this by single-stepping the
4352 target.
4353
4354 It may not be necessary to disable the watchpoint to step over
4355 it. For example, the PA can (with some kernel cooperation)
4356 single step over a watchpoint without disabling the watchpoint.
4357
4358 It is far more common to need to disable a watchpoint to step
4359 the inferior over it. If we have non-steppable watchpoints,
4360 we must disable the current watchpoint; it's simplest to
4361 disable all watchpoints.
4362
4363 Any breakpoint at PC must also be stepped over -- if there's
4364 one, it will have already triggered before the watchpoint
4365 triggered, and we either already reported it to the user, or
4366 it didn't cause a stop and we called keep_going. In either
4367 case, if there was a breakpoint at PC, we must be trying to
4368 step past it. */
4369 ecs->event_thread->stepping_over_watchpoint = 1;
4370 keep_going (ecs);
4371 return;
4372 }
4373
4374 ecs->event_thread->stepping_over_breakpoint = 0;
4375 ecs->event_thread->stepping_over_watchpoint = 0;
4376 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4377 ecs->event_thread->control.stop_step = 0;
4378 stop_print_frame = 1;
4379 stopped_by_random_signal = 0;
4380
4381 /* Hide inlined functions starting here, unless we just performed stepi or
4382 nexti. After stepi and nexti, always show the innermost frame (not any
4383 inline function call sites). */
4384 if (ecs->event_thread->control.step_range_end != 1)
4385 {
4386 struct address_space *aspace =
4387 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4388
4389 /* skip_inline_frames is expensive, so we avoid it if we can
4390 determine that the address is one where functions cannot have
4391 been inlined. This improves performance with inferiors that
4392 load a lot of shared libraries, because the solib event
4393 breakpoint is defined as the address of a function (i.e. not
4394 inline). Note that we have to check the previous PC as well
4395 as the current one to catch cases when we have just
4396 single-stepped off a breakpoint prior to reinstating it.
4397 Note that we're assuming that the code we single-step to is
4398 not inline, but that's not definitive: there's nothing
4399 preventing the event breakpoint function from containing
4400 inlined code, and the single-step ending up there. If the
4401 user had set a breakpoint on that inlined code, the missing
4402 skip_inline_frames call would break things. Fortunately
4403 that's an extremely unlikely scenario. */
4404 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4405 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4406 && ecs->event_thread->control.trap_expected
4407 && pc_at_non_inline_function (aspace,
4408 ecs->event_thread->prev_pc,
4409 &ecs->ws)))
4410 {
4411 skip_inline_frames (ecs->ptid);
4412
4413 /* Re-fetch current thread's frame in case that invalidated
4414 the frame cache. */
4415 frame = get_current_frame ();
4416 gdbarch = get_frame_arch (frame);
4417 }
4418 }
4419
4420 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4421 && ecs->event_thread->control.trap_expected
4422 && gdbarch_single_step_through_delay_p (gdbarch)
4423 && currently_stepping (ecs->event_thread))
4424 {
4425 /* We're trying to step off a breakpoint. Turns out that we're
4426 also on an instruction that needs to be stepped multiple
4427 times before it has been fully executed. E.g., architectures
4428 with a delay slot. It needs to be stepped twice, once for
4429 the instruction and once for the delay slot. */
4430 int step_through_delay
4431 = gdbarch_single_step_through_delay (gdbarch, frame);
4432
4433 if (debug_infrun && step_through_delay)
4434 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4435 if (ecs->event_thread->control.step_range_end == 0
4436 && step_through_delay)
4437 {
4438 /* The user issued a continue when stopped at a breakpoint.
4439 Set up for another trap and get out of here. */
4440 ecs->event_thread->stepping_over_breakpoint = 1;
4441 keep_going (ecs);
4442 return;
4443 }
4444 else if (step_through_delay)
4445 {
4446 /* The user issued a step when stopped at a breakpoint.
4447 Maybe we should stop, maybe we should not - the delay
4448 slot *might* correspond to a line of source. In any
4449 case, don't decide that here, just set
4450 ecs->stepping_over_breakpoint, making sure we
4451 single-step again before breakpoints are re-inserted. */
4452 ecs->event_thread->stepping_over_breakpoint = 1;
4453 }
4454 }
4455
4456 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4457 handles this event. */
4458 ecs->event_thread->control.stop_bpstat
4459 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4460 stop_pc, ecs->ptid, &ecs->ws);
4461
4462 /* The following is needed in case a breakpoint condition called a
4463 function. */
4464 stop_print_frame = 1;
4465
4466 /* This is where we handle "moribund" watchpoints. Unlike
4467 software breakpoint traps, hardware watchpoint traps are
4468 always distinguishable from random traps. If no high-level
4469 watchpoint is associated with the reported stop data address
4470 anymore, then the bpstat does not explain the signal ---
4471 simply make sure to ignore it if `stopped_by_watchpoint' is
4472 set. */
4473
4474 if (debug_infrun
4475 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4476 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4477 GDB_SIGNAL_TRAP)
4478 && stopped_by_watchpoint)
4479 fprintf_unfiltered (gdb_stdlog,
4480 "infrun: no user watchpoint explains "
4481 "watchpoint SIGTRAP, ignoring\n");
4482
4483 /* NOTE: cagney/2003-03-29: These checks for a random signal
4484 at one stage in the past included checks for an inferior
4485 function call's call dummy's return breakpoint. The original
4486 comment, that went with the test, read:
4487
4488 ``End of a stack dummy. Some systems (e.g. Sony news) give
4489 another signal besides SIGTRAP, so check here as well as
4490 above.''
4491
4492 If someone ever tries to get call dummys on a
4493 non-executable stack to work (where the target would stop
4494 with something like a SIGSEGV), then those tests might need
4495 to be re-instated. Given, however, that the tests were only
4496 enabled when momentary breakpoints were not being used, I
4497 suspect that it won't be the case.
4498
4499 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4500 be necessary for call dummies on a non-executable stack on
4501 SPARC. */
4502
4503 /* See if the breakpoints module can explain the signal. */
4504 random_signal
4505 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4506 ecs->event_thread->suspend.stop_signal);
4507
4508 /* If not, perhaps stepping/nexting can. */
4509 if (random_signal)
4510 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4511 && currently_stepping (ecs->event_thread));
4512
4513 /* Perhaps the thread hit a single-step breakpoint of _another_
4514 thread. Single-step breakpoints are transparent to the
4515 breakpoints module. */
4516 if (random_signal)
4517 random_signal = !ecs->hit_singlestep_breakpoint;
4518
4519 /* No? Perhaps we got a moribund watchpoint. */
4520 if (random_signal)
4521 random_signal = !stopped_by_watchpoint;
4522
4523 /* For the program's own signals, act according to
4524 the signal handling tables. */
4525
4526 if (random_signal)
4527 {
4528 /* Signal not for debugging purposes. */
4529 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4530 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4531
4532 if (debug_infrun)
4533 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4534 gdb_signal_to_symbol_string (stop_signal));
4535
4536 stopped_by_random_signal = 1;
4537
4538 /* Always stop on signals if we're either just gaining control
4539 of the program, or the user explicitly requested this thread
4540 to remain stopped. */
4541 if (stop_soon != NO_STOP_QUIETLY
4542 || ecs->event_thread->stop_requested
4543 || (!inf->detaching
4544 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4545 {
4546 stop_waiting (ecs);
4547 return;
4548 }
4549
4550 /* Notify observers if the signal has "handle print" set. Note we
4551 returned early above if stopping; normal_stop handles the
4552 printing in that case. */
4553 if (signal_print[ecs->event_thread->suspend.stop_signal])
4554 {
4555 /* The signal table tells us to print about this signal. */
4556 target_terminal_ours_for_output ();
4557 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4558 target_terminal_inferior ();
4559 }
4560
4561 /* Clear the signal if it should not be passed. */
4562 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4563 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4564
4565 if (ecs->event_thread->prev_pc == stop_pc
4566 && ecs->event_thread->control.trap_expected
4567 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4568 {
4569 /* We were just starting a new sequence, attempting to
4570 single-step off of a breakpoint and expecting a SIGTRAP.
4571 Instead this signal arrives. This signal will take us out
4572 of the stepping range so GDB needs to remember to, when
4573 the signal handler returns, resume stepping off that
4574 breakpoint. */
4575 /* To simplify things, "continue" is forced to use the same
4576 code paths as single-step - set a breakpoint at the
4577 signal return address and then, once hit, step off that
4578 breakpoint. */
4579 if (debug_infrun)
4580 fprintf_unfiltered (gdb_stdlog,
4581 "infrun: signal arrived while stepping over "
4582 "breakpoint\n");
4583
4584 insert_hp_step_resume_breakpoint_at_frame (frame);
4585 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4586 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4587 ecs->event_thread->control.trap_expected = 0;
4588
4589 /* If we were nexting/stepping some other thread, switch to
4590 it, so that we don't continue it, losing control. */
4591 if (!switch_back_to_stepped_thread (ecs))
4592 keep_going (ecs);
4593 return;
4594 }
4595
4596 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4597 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4598 || ecs->event_thread->control.step_range_end == 1)
4599 && frame_id_eq (get_stack_frame_id (frame),
4600 ecs->event_thread->control.step_stack_frame_id)
4601 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4602 {
4603 /* The inferior is about to take a signal that will take it
4604 out of the single step range. Set a breakpoint at the
4605 current PC (which is presumably where the signal handler
4606 will eventually return) and then allow the inferior to
4607 run free.
4608
4609 Note that this is only needed for a signal delivered
4610 while in the single-step range. Nested signals aren't a
4611 problem as they eventually all return. */
4612 if (debug_infrun)
4613 fprintf_unfiltered (gdb_stdlog,
4614 "infrun: signal may take us out of "
4615 "single-step range\n");
4616
4617 insert_hp_step_resume_breakpoint_at_frame (frame);
4618 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4619 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4620 ecs->event_thread->control.trap_expected = 0;
4621 keep_going (ecs);
4622 return;
4623 }
4624
4625 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4626 when either there's a nested signal, or when there's a
4627 pending signal enabled just as the signal handler returns
4628 (leaving the inferior at the step-resume-breakpoint without
4629 actually executing it). Either way continue until the
4630 breakpoint is really hit. */
4631
4632 if (!switch_back_to_stepped_thread (ecs))
4633 {
4634 if (debug_infrun)
4635 fprintf_unfiltered (gdb_stdlog,
4636 "infrun: random signal, keep going\n");
4637
4638 keep_going (ecs);
4639 }
4640 return;
4641 }
4642
4643 process_event_stop_test (ecs);
4644 }
4645
4646 /* Come here when we've got some debug event / signal we can explain
4647 (IOW, not a random signal), and test whether it should cause a
4648 stop, or whether we should resume the inferior (transparently).
4649 E.g., could be a breakpoint whose condition evaluates false; we
4650 could be still stepping within the line; etc. */
4651
4652 static void
4653 process_event_stop_test (struct execution_control_state *ecs)
4654 {
4655 struct symtab_and_line stop_pc_sal;
4656 struct frame_info *frame;
4657 struct gdbarch *gdbarch;
4658 CORE_ADDR jmp_buf_pc;
4659 struct bpstat_what what;
4660
4661 /* Handle cases caused by hitting a breakpoint. */
4662
4663 frame = get_current_frame ();
4664 gdbarch = get_frame_arch (frame);
4665
4666 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4667
4668 if (what.call_dummy)
4669 {
4670 stop_stack_dummy = what.call_dummy;
4671 }
4672
4673 /* If we hit an internal event that triggers symbol changes, the
4674 current frame will be invalidated within bpstat_what (e.g., if we
4675 hit an internal solib event). Re-fetch it. */
4676 frame = get_current_frame ();
4677 gdbarch = get_frame_arch (frame);
4678
4679 switch (what.main_action)
4680 {
4681 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4682 /* If we hit the breakpoint at longjmp while stepping, we
4683 install a momentary breakpoint at the target of the
4684 jmp_buf. */
4685
4686 if (debug_infrun)
4687 fprintf_unfiltered (gdb_stdlog,
4688 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4689
4690 ecs->event_thread->stepping_over_breakpoint = 1;
4691
4692 if (what.is_longjmp)
4693 {
4694 struct value *arg_value;
4695
4696 /* If we set the longjmp breakpoint via a SystemTap probe,
4697 then use it to extract the arguments. The destination PC
4698 is the third argument to the probe. */
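/* Probe arguments are referenced by 0-based index, hence index 2
below for the third argument. */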
4699 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4700 if (arg_value)
4701 {
4702 jmp_buf_pc = value_as_address (arg_value);
4703 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4704 }
4705 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4706 || !gdbarch_get_longjmp_target (gdbarch,
4707 frame, &jmp_buf_pc))
4708 {
4709 if (debug_infrun)
4710 fprintf_unfiltered (gdb_stdlog,
4711 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4712 "(!gdbarch_get_longjmp_target)\n");
4713 keep_going (ecs);
4714 return;
4715 }
4716
4717 /* Insert a breakpoint at resume address. */
4718 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4719 }
4720 else
4721 check_exception_resume (ecs, frame);
4722 keep_going (ecs);
4723 return;
4724
4725 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4726 {
4727 struct frame_info *init_frame;
4728
4729 /* There are several cases to consider.
4730
4731 1. The initiating frame no longer exists. In this case we
4732 must stop, because the exception or longjmp has gone too
4733 far.
4734
4735 2. The initiating frame exists, and is the same as the
4736 current frame. We stop, because the exception or longjmp
4737 has been caught.
4738
4739 3. The initiating frame exists and is different from the
4740 current frame. This means the exception or longjmp has
4741 been caught beneath the initiating frame, so keep going.
4742
4743 4. The longjmp breakpoint has been placed just to protect
4744 against stale dummy frames and the user is not interested in
4745 stopping around longjmps. */
4746
4747 if (debug_infrun)
4748 fprintf_unfiltered (gdb_stdlog,
4749 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4750
4751 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4752 != NULL);
4753 delete_exception_resume_breakpoint (ecs->event_thread);
4754
4755 if (what.is_longjmp)
4756 {
4757 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4758
4759 if (!frame_id_p (ecs->event_thread->initiating_frame))
4760 {
4761 /* Case 4. */
4762 keep_going (ecs);
4763 return;
4764 }
4765 }
4766
4767 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4768
4769 if (init_frame)
4770 {
4771 struct frame_id current_id
4772 = get_frame_id (get_current_frame ());
4773 if (frame_id_eq (current_id,
4774 ecs->event_thread->initiating_frame))
4775 {
4776 /* Case 2. Fall through. */
4777 }
4778 else
4779 {
4780 /* Case 3. */
4781 keep_going (ecs);
4782 return;
4783 }
4784 }
4785
4786 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4787 exists. */
4788 delete_step_resume_breakpoint (ecs->event_thread);
4789
4790 end_stepping_range (ecs);
4791 }
4792 return;
4793
4794 case BPSTAT_WHAT_SINGLE:
4795 if (debug_infrun)
4796 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4797 ecs->event_thread->stepping_over_breakpoint = 1;
4798 /* Still need to check other stuff, at least the case where we
4799 are stepping and step out of the right range. */
4800 break;
4801
4802 case BPSTAT_WHAT_STEP_RESUME:
4803 if (debug_infrun)
4804 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4805
4806 delete_step_resume_breakpoint (ecs->event_thread);
4807 if (ecs->event_thread->control.proceed_to_finish
4808 && execution_direction == EXEC_REVERSE)
4809 {
4810 struct thread_info *tp = ecs->event_thread;
4811
4812 /* We are finishing a function in reverse, and just hit the
4813 step-resume breakpoint at the start address of the
4814 function, and we're almost there -- just need to back up
4815 by one more single-step, which should take us back to the
4816 function call. */
4817 tp->control.step_range_start = tp->control.step_range_end = 1;
4818 keep_going (ecs);
4819 return;
4820 }
4821 fill_in_stop_func (gdbarch, ecs);
4822 if (stop_pc == ecs->stop_func_start
4823 && execution_direction == EXEC_REVERSE)
4824 {
4825 /* We are stepping over a function call in reverse, and just
4826 hit the step-resume breakpoint at the start address of
4827 the function. Go back to single-stepping, which should
4828 take us back to the function call. */
4829 ecs->event_thread->stepping_over_breakpoint = 1;
4830 keep_going (ecs);
4831 return;
4832 }
4833 break;
4834
4835 case BPSTAT_WHAT_STOP_NOISY:
4836 if (debug_infrun)
4837 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4838 stop_print_frame = 1;
4839
4840 /* Assume the thread stopped for a breakpoint. We'll still check
4841 whether a/the breakpoint is there when the thread is next
4842 resumed. */
4843 ecs->event_thread->stepping_over_breakpoint = 1;
4844
4845 stop_waiting (ecs);
4846 return;
4847
4848 case BPSTAT_WHAT_STOP_SILENT:
4849 if (debug_infrun)
4850 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4851 stop_print_frame = 0;
4852
4853 /* Assume the thread stopped for a breakpoint. We'll still check
4854 whether a/the breakpoint is there when the thread is next
4855 resumed. */
4856 ecs->event_thread->stepping_over_breakpoint = 1;
4857 stop_waiting (ecs);
4858 return;
4859
4860 case BPSTAT_WHAT_HP_STEP_RESUME:
4861 if (debug_infrun)
4862 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4863
4864 delete_step_resume_breakpoint (ecs->event_thread);
4865 if (ecs->event_thread->step_after_step_resume_breakpoint)
4866 {
4867 /* Back when the step-resume breakpoint was inserted, we
4868 were trying to single-step off a breakpoint. Go back to
4869 doing that. */
4870 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4871 ecs->event_thread->stepping_over_breakpoint = 1;
4872 keep_going (ecs);
4873 return;
4874 }
4875 break;
4876
4877 case BPSTAT_WHAT_KEEP_CHECKING:
4878 break;
4879 }
4880
4881 /* If we stepped a permanent breakpoint and we had a high priority
4882 step-resume breakpoint for the address we stepped, but we didn't
4883 hit it, then we must have stepped into the signal handler. The
4884 step-resume was only necessary to catch the case of _not_
4885 stepping into the handler, so delete it, and fall through to
4886 checking whether the step finished. */
4887 if (ecs->event_thread->stepped_breakpoint)
4888 {
4889 struct breakpoint *sr_bp
4890 = ecs->event_thread->control.step_resume_breakpoint;
4891
4892 if (sr_bp->loc->permanent
4893 && sr_bp->type == bp_hp_step_resume
4894 && sr_bp->loc->address == ecs->event_thread->prev_pc)
4895 {
4896 if (debug_infrun)
4897 fprintf_unfiltered (gdb_stdlog,
4898 "infrun: stepped permanent breakpoint, stopped in "
4899 "handler\n");
4900 delete_step_resume_breakpoint (ecs->event_thread);
4901 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4902 }
4903 }
4904
4905 /* We come here if we hit a breakpoint but should not stop for it.
4906 Possibly we also were stepping and should stop for that. So fall
4907 through and test for stepping. But, if not stepping, do not
4908 stop. */
4909
4910 /* In all-stop mode, if we're currently stepping but have stopped in
4911 some other thread, we need to switch back to the stepped thread. */
4912 if (switch_back_to_stepped_thread (ecs))
4913 return;
4914
4915 if (ecs->event_thread->control.step_resume_breakpoint)
4916 {
4917 if (debug_infrun)
4918 fprintf_unfiltered (gdb_stdlog,
4919 "infrun: step-resume breakpoint is inserted\n");
4920
4921 /* Having a step-resume breakpoint overrides anything
4922 else having to do with stepping commands until
4923 that breakpoint is reached. */
4924 keep_going (ecs);
4925 return;
4926 }
4927
4928 if (ecs->event_thread->control.step_range_end == 0)
4929 {
4930 if (debug_infrun)
4931 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4932 /* Likewise if we aren't even stepping. */
4933 keep_going (ecs);
4934 return;
4935 }
4936
4937 /* Re-fetch current thread's frame in case the code above caused
4938 the frame cache to be re-initialized, making our FRAME variable
4939 a dangling pointer. */
4940 frame = get_current_frame ();
4941 gdbarch = get_frame_arch (frame);
4942 fill_in_stop_func (gdbarch, ecs);
4943
4944 /* If stepping through a line, keep going if still within it.
4945
4946 Note that step_range_end is the address of the first instruction
4947 beyond the step range, and NOT the address of the last instruction
4948 within it!
4949
4950 Note also that during reverse execution, we may be stepping
4951 through a function epilogue and therefore must detect when
4952 the current-frame changes in the middle of a line. */
4953
4954 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4955 && (execution_direction != EXEC_REVERSE
4956 || frame_id_eq (get_frame_id (frame),
4957 ecs->event_thread->control.step_frame_id)))
4958 {
4959 if (debug_infrun)
4960 fprintf_unfiltered
4961 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4962 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4963 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4964
4965 /* Tentatively re-enable range stepping; `resume' disables it if
4966 necessary (e.g., if we're stepping over a breakpoint or we
4967 have software watchpoints). */
4968 ecs->event_thread->control.may_range_step = 1;
4969
4970 /* When stepping backward, stop at beginning of line range
4971 (unless it's the function entry point, in which case
4972 keep going back to the call point). */
4973 if (stop_pc == ecs->event_thread->control.step_range_start
4974 && stop_pc != ecs->stop_func_start
4975 && execution_direction == EXEC_REVERSE)
4976 end_stepping_range (ecs);
4977 else
4978 keep_going (ecs);
4979
4980 return;
4981 }
4982
4983 /* We stepped out of the stepping range. */
4984
4985 /* If we are stepping at the source level and entered the runtime
4986 loader dynamic symbol resolution code...
4987
4988 EXEC_FORWARD: we keep on single stepping until we exit the run
4989 time loader code and reach the callee's address.
4990
4991 EXEC_REVERSE: we've already executed the callee (backward), and
4992 the runtime loader code is handled just like any other
4993 undebuggable function call. Now we need only keep stepping
4994 backward through the trampoline code, and that's handled further
4995 down, so there is nothing for us to do here. */
4996
4997 if (execution_direction != EXEC_REVERSE
4998 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4999 && in_solib_dynsym_resolve_code (stop_pc))
5000 {
5001 CORE_ADDR pc_after_resolver =
5002 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
5003
5004 if (debug_infrun)
5005 fprintf_unfiltered (gdb_stdlog,
5006 "infrun: stepped into dynsym resolve code\n");
5007
5008 if (pc_after_resolver)
5009 {
5010 /* Set up a step-resume breakpoint at the address
5011 indicated by SKIP_SOLIB_RESOLVER. */
5012 struct symtab_and_line sr_sal;
5013
5014 init_sal (&sr_sal);
5015 sr_sal.pc = pc_after_resolver;
5016 sr_sal.pspace = get_frame_program_space (frame);
5017
5018 insert_step_resume_breakpoint_at_sal (gdbarch,
5019 sr_sal, null_frame_id);
5020 }
5021
5022 keep_going (ecs);
5023 return;
5024 }
5025
5026 if (ecs->event_thread->control.step_range_end != 1
5027 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5028 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5029 && get_frame_type (frame) == SIGTRAMP_FRAME)
5030 {
5031 if (debug_infrun)
5032 fprintf_unfiltered (gdb_stdlog,
5033 "infrun: stepped into signal trampoline\n");
5034 /* The inferior, while doing a "step" or "next", has ended up in
5035 a signal trampoline (either by a signal being delivered or by
5036 the signal handler returning). Just single-step until the
5037 inferior leaves the trampoline (either by calling the handler
5038 or returning). */
5039 keep_going (ecs);
5040 return;
5041 }
5042
5043 /* If we're in the return path from a shared library trampoline,
5044 we want to proceed through the trampoline when stepping. */
5045 /* macro/2012-04-25: This needs to come before the subroutine
5046 call check below as on some targets return trampolines look
5047 like subroutine calls (MIPS16 return thunks). */
5048 if (gdbarch_in_solib_return_trampoline (gdbarch,
5049 stop_pc, ecs->stop_func_name)
5050 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5051 {
5052 /* Determine where this trampoline returns. */
5053 CORE_ADDR real_stop_pc;
5054
5055 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5056
5057 if (debug_infrun)
5058 fprintf_unfiltered (gdb_stdlog,
5059 "infrun: stepped into solib return tramp\n");
5060
5061 /* Only proceed through if we know where it's going. */
5062 if (real_stop_pc)
5063 {
5064 /* And put the step-breakpoint there and go until there. */
5065 struct symtab_and_line sr_sal;
5066
5067 init_sal (&sr_sal); /* initialize to zeroes */
5068 sr_sal.pc = real_stop_pc;
5069 sr_sal.section = find_pc_overlay (sr_sal.pc);
5070 sr_sal.pspace = get_frame_program_space (frame);
5071
5072 /* Do not specify what the fp should be when we stop since
5073 on some machines the prologue is where the new fp value
5074 is established. */
5075 insert_step_resume_breakpoint_at_sal (gdbarch,
5076 sr_sal, null_frame_id);
5077
5078 /* Restart without fiddling with the step ranges or
5079 other state. */
5080 keep_going (ecs);
5081 return;
5082 }
5083 }
5084
5085 /* Check for subroutine calls. The check for the current frame
5086 equalling the step ID is not necessary - the check of the
5087 previous frame's ID is sufficient - but it is a common case and
5088 cheaper than checking the previous frame's ID.
5089
5090 NOTE: frame_id_eq will never report two invalid frame IDs as
5091 being equal, so to get into this block, both the current and
5092 previous frame must have valid frame IDs. */
5093 /* The outer_frame_id check is a heuristic to detect stepping
5094 through startup code. If we step over an instruction which
5095 sets the stack pointer from an invalid value to a valid value,
5096 we may detect that as a subroutine call from the mythical
5097 "outermost" function. This could be fixed by marking
5098 outermost frames as !stack_p,code_p,special_p. Then the
5099 initial outermost frame, before sp was valid, would
5100 have code_addr == &_start. See the comment in frame_id_eq
5101 for more. */
5102 if (!frame_id_eq (get_stack_frame_id (frame),
5103 ecs->event_thread->control.step_stack_frame_id)
5104 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
5105 ecs->event_thread->control.step_stack_frame_id)
5106 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
5107 outer_frame_id)
5108 || step_start_function != find_pc_function (stop_pc))))
5109 {
5110 CORE_ADDR real_stop_pc;
5111
5112 if (debug_infrun)
5113 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
5114
5115 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
5116 {
5117 /* I presume that step_over_calls is only 0 when we're
5118 supposed to be stepping at the assembly language level
5119 ("stepi"). Just stop. */
5120 /* And this works the same backward as frontward. MVS */
5121 end_stepping_range (ecs);
5122 return;
5123 }
5124
5125 /* Reverse stepping through solib trampolines. */
5126
5127 if (execution_direction == EXEC_REVERSE
5128 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5129 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5130 || (ecs->stop_func_start == 0
5131 && in_solib_dynsym_resolve_code (stop_pc))))
5132 {
5133 /* Any solib trampoline code can be handled in reverse
5134 by simply continuing to single-step. We have already
5135 executed the solib function (backwards), and a few
5136 steps will take us back through the trampoline to the
5137 caller. */
5138 keep_going (ecs);
5139 return;
5140 }
5141
5142 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5143 {
5144 /* We're doing a "next".
5145
5146 Normal (forward) execution: set a breakpoint at the
5147 callee's return address (the address at which the caller
5148 will resume).
5149
5150 Reverse (backward) execution: set the step-resume
5151 breakpoint at the start of the function that we just
5152 stepped into (backwards), and continue to there. When we
5153 get there, we'll need to single-step back to the caller. */
5154
5155 if (execution_direction == EXEC_REVERSE)
5156 {
5157 /* If we're already at the start of the function, we've either
5158 just stepped backward into a single instruction function,
5159 or stepped back out of a signal handler to the first instruction
5160 of the function. Just keep going, which will single-step back
5161 to the caller. */
5162 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5163 {
5164 struct symtab_and_line sr_sal;
5165
5166 /* Normal function call return (static or dynamic). */
5167 init_sal (&sr_sal);
5168 sr_sal.pc = ecs->stop_func_start;
5169 sr_sal.pspace = get_frame_program_space (frame);
5170 insert_step_resume_breakpoint_at_sal (gdbarch,
5171 sr_sal, null_frame_id);
5172 }
5173 }
5174 else
5175 insert_step_resume_breakpoint_at_caller (frame);
5176
5177 keep_going (ecs);
5178 return;
5179 }
5180
5181 /* If we are in a function call trampoline (a stub between the
5182 calling routine and the real function), locate the real
5183 function. That's what tells us (a) whether we want to step
5184 into it at all, and (b) what prologue we want to run to the
5185 end of, if we do step into it. */
5186 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5187 if (real_stop_pc == 0)
5188 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5189 if (real_stop_pc != 0)
5190 ecs->stop_func_start = real_stop_pc;
5191
5192 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5193 {
5194 struct symtab_and_line sr_sal;
5195
5196 init_sal (&sr_sal);
5197 sr_sal.pc = ecs->stop_func_start;
5198 sr_sal.pspace = get_frame_program_space (frame);
5199
5200 insert_step_resume_breakpoint_at_sal (gdbarch,
5201 sr_sal, null_frame_id);
5202 keep_going (ecs);
5203 return;
5204 }
5205
5206 /* If we have line number information for the function we are
5207 thinking of stepping into and the function isn't on the skip
5208 list, step into it.
5209
5210 If there are several symtabs at that PC (e.g. with include
5211 files), we just want to know whether *any* of them have line
5212 numbers. find_pc_line handles this. */
5213 {
5214 struct symtab_and_line tmp_sal;
5215
5216 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5217 if (tmp_sal.line != 0
5218 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5219 &tmp_sal))
5220 {
5221 if (execution_direction == EXEC_REVERSE)
5222 handle_step_into_function_backward (gdbarch, ecs);
5223 else
5224 handle_step_into_function (gdbarch, ecs);
5225 return;
5226 }
5227 }
5228
5229 /* If we have no line number and the step-stop-if-no-debug is
5230 set, we stop the step so that the user has a chance to switch
5231 to assembly mode. */
5232 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5233 && step_stop_if_no_debug)
5234 {
5235 end_stepping_range (ecs);
5236 return;
5237 }
5238
5239 if (execution_direction == EXEC_REVERSE)
5240 {
5241 /* If we're already at the start of the function, we've either just
5242 stepped backward into a single instruction function without line
5243 number info, or stepped back out of a signal handler to the first
5244 instruction of the function without line number info. Just keep
5245 going, which will single-step back to the caller. */
5246 if (ecs->stop_func_start != stop_pc)
5247 {
5248 /* Set a breakpoint at callee's start address.
5249 From there we can step once and be back in the caller. */
5250 struct symtab_and_line sr_sal;
5251
5252 init_sal (&sr_sal);
5253 sr_sal.pc = ecs->stop_func_start;
5254 sr_sal.pspace = get_frame_program_space (frame);
5255 insert_step_resume_breakpoint_at_sal (gdbarch,
5256 sr_sal, null_frame_id);
5257 }
5258 }
5259 else
5260 /* Set a breakpoint at callee's return address (the address
5261 at which the caller will resume). */
5262 insert_step_resume_breakpoint_at_caller (frame);
5263
5264 keep_going (ecs);
5265 return;
5266 }
5267
5268 /* Reverse stepping through solib trampolines. */
5269
5270 if (execution_direction == EXEC_REVERSE
5271 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5272 {
5273 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5274 || (ecs->stop_func_start == 0
5275 && in_solib_dynsym_resolve_code (stop_pc)))
5276 {
5277 /* Any solib trampoline code can be handled in reverse
5278 by simply continuing to single-step. We have already
5279 executed the solib function (backwards), and a few
5280 steps will take us back through the trampoline to the
5281 caller. */
5282 keep_going (ecs);
5283 return;
5284 }
5285 else if (in_solib_dynsym_resolve_code (stop_pc))
5286 {
5287 /* Stepped backward into the solib dynsym resolver.
5288 Set a breakpoint at its start and continue, then
5289 one more step will take us out. */
5290 struct symtab_and_line sr_sal;
5291
5292 init_sal (&sr_sal);
5293 sr_sal.pc = ecs->stop_func_start;
5294 sr_sal.pspace = get_frame_program_space (frame);
5295 insert_step_resume_breakpoint_at_sal (gdbarch,
5296 sr_sal, null_frame_id);
5297 keep_going (ecs);
5298 return;
5299 }
5300 }
5301
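/* Resolve the source line containing STOP_PC; the line-oriented
   checks below (stepi/nexti, missing line info, inlined calls, and
   new-line detection) all key off of STOP_PC_SAL.  */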
5302 stop_pc_sal = find_pc_line (stop_pc, 0);
5303
5304 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5305 the trampoline processing logic, however, there are some trampolines
5306 that have no names, so we should do trampoline handling first. */
5307 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5308 && ecs->stop_func_name == NULL
5309 && stop_pc_sal.line == 0)
5310 {
5311 if (debug_infrun)
5312 fprintf_unfiltered (gdb_stdlog,
5313 "infrun: stepped into undebuggable function\n");
5314
5315 /* The inferior just stepped into, or returned to, an
5316 undebuggable function (where there is no debugging information
5317 and no line number corresponding to the address where the
5318 inferior stopped). Since we want to skip this kind of code,
5319 we keep going until the inferior returns from this
5320 function - unless the user has asked us not to (via
5321 set step-mode) or we no longer know how to get back
5322 to the call site. */
5323 if (step_stop_if_no_debug
5324 || !frame_id_p (frame_unwind_caller_id (frame)))
5325 {
5326 /* If we have no line number and the step-stop-if-no-debug
5327 is set, we stop the step so that the user has a chance to
5328 switch to assembly mode. */
5329 end_stepping_range (ecs);
5330 return;
5331 }
5332 else
5333 {
5334 /* Set a breakpoint at callee's return address (the address
5335 at which the caller will resume). */
5336 insert_step_resume_breakpoint_at_caller (frame);
5337 keep_going (ecs);
5338 return;
5339 }
5340 }
5341
5342 if (ecs->event_thread->control.step_range_end == 1)
5343 {
5344 /* It is stepi or nexti. We always want to stop stepping after
5345 one instruction. */
5346 if (debug_infrun)
5347 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5348 end_stepping_range (ecs);
5349 return;
5350 }
5351
5352 if (stop_pc_sal.line == 0)
5353 {
5354 /* We have no line number information. That means to stop
5355 stepping (does this always happen right after one instruction,
5356 when we do "s" in a function with no line numbers,
5357 or can this happen as a result of a return or longjmp?). */
5358 if (debug_infrun)
5359 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5360 end_stepping_range (ecs);
5361 return;
5362 }
5363
5364 /* Look for "calls" to inlined functions, part one. If the inline
5365 frame machinery detected some skipped call sites, we have entered
5366 a new inline function. */
5367
5368 if (frame_id_eq (get_frame_id (get_current_frame ()),
5369 ecs->event_thread->control.step_frame_id)
5370 && inline_skipped_frames (ecs->ptid))
5371 {
5372 struct symtab_and_line call_sal;
5373
5374 if (debug_infrun)
5375 fprintf_unfiltered (gdb_stdlog,
5376 "infrun: stepped into inlined function\n");
5377
5378 find_frame_sal (get_current_frame (), &call_sal);
5379
5380 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5381 {
5382 /* For "step", we're going to stop. But if the call site
5383 for this inlined function is on the same source line as
5384 we were previously stepping, go down into the function
5385 first. Otherwise stop at the call site. */
5386
5387 if (call_sal.line == ecs->event_thread->current_line
5388 && call_sal.symtab == ecs->event_thread->current_symtab)
5389 step_into_inline_frame (ecs->ptid);
5390
5391 end_stepping_range (ecs);
5392 return;
5393 }
5394 else
5395 {
5396 /* For "next", we should stop at the call site if it is on a
5397 different source line. Otherwise continue through the
5398 inlined function. */
5399 if (call_sal.line == ecs->event_thread->current_line
5400 && call_sal.symtab == ecs->event_thread->current_symtab)
5401 keep_going (ecs);
5402 else
5403 end_stepping_range (ecs);
5404 return;
5405 }
5406 }
5407
5408 /* Look for "calls" to inlined functions, part two. If we are still
5409 in the same real function we were stepping through, but we have
5410 to go further up to find the exact frame ID, we are stepping
5411 through a more inlined call beyond its call site. */
5412
5413 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5414 && !frame_id_eq (get_frame_id (get_current_frame ()),
5415 ecs->event_thread->control.step_frame_id)
5416 && stepped_in_from (get_current_frame (),
5417 ecs->event_thread->control.step_frame_id))
5418 {
5419 if (debug_infrun)
5420 fprintf_unfiltered (gdb_stdlog,
5421 "infrun: stepping through inlined function\n");
5422
5423 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5424 keep_going (ecs);
5425 else
5426 end_stepping_range (ecs);
5427 return;
5428 }
5429
5430 if ((stop_pc == stop_pc_sal.pc)
5431 && (ecs->event_thread->current_line != stop_pc_sal.line
5432 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5433 {
5434 /* We are at the start of a different line. So stop. Note that
5435 we don't stop if we step into the middle of a different line.
5436 That is said to make things like for (;;) statements work
5437 better. */
5438 if (debug_infrun)
5439 fprintf_unfiltered (gdb_stdlog,
5440 "infrun: stepped to a different line\n");
5441 end_stepping_range (ecs);
5442 return;
5443 }
5444
5445 /* We aren't done stepping.
5446
5447 Optimize by setting the stepping range to the line.
5448 (We might not be in the original line, but if we entered a
5449 new line in mid-statement, we continue stepping. This makes
5450 things like for(;;) statements work better.) */
5451
5452 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5453 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5454 ecs->event_thread->control.may_range_step = 1;
5455 set_step_info (frame, stop_pc_sal);
5456
5457 if (debug_infrun)
5458 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5459 keep_going (ecs);
5460 }
5461
5462 /* In all-stop mode, if we're currently stepping but have stopped in
5463 some other thread, we may need to switch back to the stepped
5464 thread. Returns true if we set the inferior running, false if we left
5465 it stopped (and the event needs further processing). */
5466
5467 static int
5468 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5469 {
5470 if (!non_stop)
5471 {
5472 struct thread_info *tp;
5473 struct thread_info *stepping_thread;
5474 struct thread_info *step_over;
5475
5476 /* If any thread is blocked on some internal breakpoint, and we
5477 simply need to step over that breakpoint to get it going
5478 again, do that first. */
5479
5480 /* However, if we see an event for the stepping thread, then we
5481 know all other threads have been moved past their breakpoints
5482 already. Let the caller check whether the step is finished,
5483 etc., before deciding to move it past a breakpoint. */
5484 if (ecs->event_thread->control.step_range_end != 0)
5485 return 0;
5486
5487 /* Check if the current thread is blocked on an incomplete
5488 step-over, interrupted by a random signal. */
5489 if (ecs->event_thread->control.trap_expected
5490 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5491 {
5492 if (debug_infrun)
5493 {
5494 fprintf_unfiltered (gdb_stdlog,
5495 "infrun: need to finish step-over of [%s]\n",
5496 target_pid_to_str (ecs->event_thread->ptid));
5497 }
5498 keep_going (ecs);
5499 return 1;
5500 }
5501
5502 /* Check if the current thread is blocked by a single-step
5503 breakpoint of another thread. */
5504 if (ecs->hit_singlestep_breakpoint)
5505 {
5506 if (debug_infrun)
5507 {
5508 fprintf_unfiltered (gdb_stdlog,
5509 "infrun: need to step [%s] over single-step "
5510 "breakpoint\n",
5511 target_pid_to_str (ecs->ptid));
5512 }
5513 keep_going (ecs);
5514 return 1;
5515 }
5516
5517 /* Otherwise, we no longer expect a trap in the current thread.
5518 Clear the trap_expected flag before switching back -- this is
5519 what keep_going does as well, if we call it. */
5520 ecs->event_thread->control.trap_expected = 0;
5521
5522 /* Likewise, clear the signal if it should not be passed. */
5523 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5524 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5525
5526 /* If scheduler locking applies even if not stepping, there's no
5527 need to walk over threads. Above we've checked whether the
5528 current thread is stepping. If some other thread not the
5529 event thread is stepping, then it must be that scheduler
5530 locking is not in effect. */
5531 if (schedlock_applies (0))
5532 return 0;
5533
5534 /* Look for the stepping/nexting thread, and check if any thread
5535 other than the stepping thread needs to start a
5536 step-over. Do all step-overs before actually proceeding with
5537 step/next/etc. */
5538 stepping_thread = NULL;
5539 step_over = NULL;
5540 ALL_NON_EXITED_THREADS (tp)
5541 {
5542 /* Ignore threads of processes we're not resuming. */
5543 if (!sched_multi
5544 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5545 continue;
5546
5547 /* When stepping over a breakpoint, we lock all threads
5548 except the one that needs to move past the breakpoint.
5549 If a non-event thread has this set, the "incomplete
5550 step-over" check above should have caught it earlier. */
5551 gdb_assert (!tp->control.trap_expected);
5552
5553 /* Did we find the stepping thread? */
5554 if (tp->control.step_range_end)
5555 {
5556 /* Yep. There should only be one, though. */
5557 gdb_assert (stepping_thread == NULL);
5558
5559 /* The event thread is handled at the top, before we
5560 enter this loop. */
5561 gdb_assert (tp != ecs->event_thread);
5562
5563 /* If some thread other than the event thread is
5564 stepping, then scheduler locking can't be in effect,
5565 otherwise we wouldn't have resumed the current event
5566 thread in the first place. */
5567 gdb_assert (!schedlock_applies (currently_stepping (tp)));
5568
5569 stepping_thread = tp;
5570 }
5571 else if (thread_still_needs_step_over (tp))
5572 {
5573 step_over = tp;
5574
5575 /* At the top we've returned early if the event thread
5576 is stepping. If some thread other than the event
5577 thread is stepping, then scheduler locking can't be
5578 in effect, and we can resume this thread. No need to
5579 keep looking for the stepping thread then. */
5580 break;
5581 }
5582 }
5583
5584 if (step_over != NULL)
5585 {
5586 tp = step_over;
5587 if (debug_infrun)
5588 {
5589 fprintf_unfiltered (gdb_stdlog,
5590 "infrun: need to step-over [%s]\n",
5591 target_pid_to_str (tp->ptid));
5592 }
5593
5594 /* Only the stepping thread should have this set. */
5595 gdb_assert (tp->control.step_range_end == 0);
5596
5597 ecs->ptid = tp->ptid;
5598 ecs->event_thread = tp;
5599 switch_to_thread (ecs->ptid);
5600 keep_going (ecs);
5601 return 1;
5602 }
5603
5604 if (stepping_thread != NULL)
5605 {
5606 struct frame_info *frame;
5607 struct gdbarch *gdbarch;
5608
5609 tp = stepping_thread;
5610
5611 /* If the stepping thread exited, then don't try to switch
5612 back and resume it, which could fail in several different
5613 ways depending on the target. Instead, just keep going.
5614
5615 We can find a stepping dead thread in the thread list in
5616 two cases:
5617
5618 - The target supports thread exit events, and when the
5619 target tries to delete the thread from the thread list,
5620 inferior_ptid pointed at the exiting thread. In such
5621 case, calling delete_thread does not really remove the
5622 thread from the list; instead, the thread is left listed,
5623 with 'exited' state.
5624
5625 - The target's debug interface does not support thread
5626 exit events, and so we have no idea whatsoever if the
5627 previously stepping thread is still alive. For that
5628 reason, we need to synchronously query the target
5629 now. */
5630 if (is_exited (tp->ptid)
5631 || !target_thread_alive (tp->ptid))
5632 {
5633 if (debug_infrun)
5634 fprintf_unfiltered (gdb_stdlog,
5635 "infrun: not switching back to "
5636 "stepped thread, it has vanished\n");
5637
5638 delete_thread (tp->ptid);
5639 keep_going (ecs);
5640 return 1;
5641 }
5642
5643 if (debug_infrun)
5644 fprintf_unfiltered (gdb_stdlog,
5645 "infrun: switching back to stepped thread\n");
5646
5647 ecs->event_thread = tp;
5648 ecs->ptid = tp->ptid;
5649 context_switch (ecs->ptid);
5650
5651 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5652 frame = get_current_frame ();
5653 gdbarch = get_frame_arch (frame);
5654
5655 /* If the PC of the thread we were trying to single-step has
5656 changed, then that thread has trapped or been signaled,
5657 but the event has not been reported to GDB yet. Re-poll
5658 the target looking for this particular thread's event
5659 (i.e. temporarily enable schedlock) by:
5660
5661 - setting a break at the current PC
5662 - resuming that particular thread, only (by setting
5663 trap expected)
5664
5665 This prevents us continuously moving the single-step
5666 breakpoint forward, one instruction at a time,
5667 overstepping. */
5668
5669 if (stop_pc != tp->prev_pc)
5670 {
5671 if (debug_infrun)
5672 fprintf_unfiltered (gdb_stdlog,
5673 "infrun: expected thread advanced also\n");
5674
5675 /* Clear the info of the previous step-over, as it's no
5676 longer valid. It's what keep_going would do too, if
5677 we called it. Must do this before trying to insert
5678 the sss breakpoint, otherwise if we were previously
5679 trying to step over this exact address in another
5680 thread, the breakpoint ends up not installed. */
5681 clear_step_over_info ();
5682
5683 insert_single_step_breakpoint (get_frame_arch (frame),
5684 get_frame_address_space (frame),
5685 stop_pc);
5686 ecs->event_thread->control.trap_expected = 1;
5687
5688 resume (0, GDB_SIGNAL_0);
5689 prepare_to_wait (ecs);
5690 }
5691 else
5692 {
5693 if (debug_infrun)
5694 fprintf_unfiltered (gdb_stdlog,
5695 "infrun: expected thread still "
5696 "hasn't advanced\n");
5697 keep_going (ecs);
5698 }
5699
5700 return 1;
5701 }
5702 }
5703 return 0;
5704 }
5705
5706 /* Is thread TP in the middle of single-stepping? */
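/* A thread counts as stepping if it has an active step range with no
   pending step-resume breakpoint, if it is expected to report a trap
   from an in-progress step-over, if it has just stepped a breakpoint
   instruction, or if bpstat_should_step reports that single-stepping
   is needed (typically because software watchpoints are in use).  */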
5707
5708 static int
5709 currently_stepping (struct thread_info *tp)
5710 {
5711 return ((tp->control.step_range_end
5712 && tp->control.step_resume_breakpoint == NULL)
5713 || tp->control.trap_expected
5714 || tp->stepped_breakpoint
5715 || bpstat_should_step ());
5716 }
5717
5718 /* Inferior has stepped into a subroutine call with source code that
5719 we should not step over. Do step to the first line of code in
5720 it. */
5721
5722 static void
5723 handle_step_into_function (struct gdbarch *gdbarch,
5724 struct execution_control_state *ecs)
5725 {
5726 struct compunit_symtab *cust;
5727 struct symtab_and_line stop_func_sal, sr_sal;
5728
5729 fill_in_stop_func (gdbarch, ecs);
5730
5731 cust = find_pc_compunit_symtab (stop_pc);
5732 if (cust != NULL && compunit_language (cust) != language_asm)
5733 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5734 ecs->stop_func_start);
5735
5736 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5737 /* Use the step_resume_break to step until the end of the prologue,
5738 even if that involves jumps (as it seems to on the vax under
5739 4.2). */
5740 /* If the prologue ends in the middle of a source line, continue to
5741 the end of that source line (if it is still within the function).
5742 Otherwise, just go to end of prologue. */
5743 if (stop_func_sal.end
5744 && stop_func_sal.pc != ecs->stop_func_start
5745 && stop_func_sal.end < ecs->stop_func_end)
5746 ecs->stop_func_start = stop_func_sal.end;
5747
5748 /* Architectures which require breakpoint adjustment might not be able
5749 to place a breakpoint at the computed address. If so, the test
5750 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5751 ecs->stop_func_start to an address at which a breakpoint may be
5752 legitimately placed.
5753
5754 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5755 made, GDB will enter an infinite loop when stepping through
5756 optimized code consisting of VLIW instructions which contain
5757 subinstructions corresponding to different source lines. On
5758 FR-V, it's not permitted to place a breakpoint on any but the
5759 first subinstruction of a VLIW instruction. When a breakpoint is
5760 set, GDB will adjust the breakpoint address to the beginning of
5761 the VLIW instruction. Thus, we need to make the corresponding
5762 adjustment here when computing the stop address. */
5763
5764 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5765 {
5766 ecs->stop_func_start
5767 = gdbarch_adjust_breakpoint_address (gdbarch,
5768 ecs->stop_func_start);
5769 }
5770
5771 if (ecs->stop_func_start == stop_pc)
5772 {
5773 /* We are already there: stop now. */
5774 end_stepping_range (ecs);
5775 return;
5776 }
5777 else
5778 {
5779 /* Put the step-breakpoint there and go until there. */
5780 init_sal (&sr_sal); /* initialize to zeroes */
5781 sr_sal.pc = ecs->stop_func_start;
5782 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5783 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5784
5785 /* Do not specify what the fp should be when we stop since on
5786 some machines the prologue is where the new fp value is
5787 established. */
5788 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5789
5790 /* And make sure stepping stops right away then. */
5791 ecs->event_thread->control.step_range_end
5792 = ecs->event_thread->control.step_range_start;
5793 }
5794 keep_going (ecs);
5795 }
5796
5797 /* Inferior has stepped backward into a subroutine call with source
5798 code that we should not step over. Do step to the beginning of the
5799 last line of code in it. */
5800
5801 static void
5802 handle_step_into_function_backward (struct gdbarch *gdbarch,
5803 struct execution_control_state *ecs)
5804 {
5805 struct compunit_symtab *cust;
5806 struct symtab_and_line stop_func_sal;
5807
5808 fill_in_stop_func (gdbarch, ecs);
5809
5810 cust = find_pc_compunit_symtab (stop_pc);
5811 if (cust != NULL && compunit_language (cust) != language_asm)
5812 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5813 ecs->stop_func_start);
5814
5815 stop_func_sal = find_pc_line (stop_pc, 0);
5816
5817 /* OK, we're just going to keep stepping here. */
5818 if (stop_func_sal.pc == stop_pc)
5819 {
5820 /* We're there already. Just stop stepping now. */
5821 end_stepping_range (ecs);
5822 }
5823 else
5824 {
5825 /* Else just reset the step range and keep going.
5826 No step-resume breakpoint, they don't work for
5827 epilogues, which can have multiple entry paths. */
5828 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5829 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5830 keep_going (ecs);
5831 }
5832 return;
5833 }
5834
5835 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5836 This is used both to step into functions and to skip over code. */
5837
5838 static void
5839 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5840 struct symtab_and_line sr_sal,
5841 struct frame_id sr_id,
5842 enum bptype sr_type)
5843 {
5844 /* There should never be more than one step-resume or longjmp-resume
5845 breakpoint per thread, so we should never be setting a new
5846 step_resume_breakpoint when one is already active. */
5847 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5848 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5849
5850 if (debug_infrun)
5851 fprintf_unfiltered (gdb_stdlog,
5852 "infrun: inserting step-resume breakpoint at %s\n",
5853 paddress (gdbarch, sr_sal.pc));
5854
5855 inferior_thread ()->control.step_resume_breakpoint
5856 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5857 }
5858
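/* Insert a normal (bp_step_resume) step-resume breakpoint at SR_SAL,
   associated with frame SR_ID.  Thin wrapper around
   insert_step_resume_breakpoint_at_sal_1.  */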
5859 void
5860 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5861 struct symtab_and_line sr_sal,
5862 struct frame_id sr_id)
5863 {
5864 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5865 sr_sal, sr_id,
5866 bp_step_resume);
5867 }
5868
5869 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5870 This is used to skip a potential signal handler.
5871
5872 This is called with the interrupted function's frame. The signal
5873 handler, when it returns, will resume the interrupted function at
5874 RETURN_FRAME.pc. */
5875
5876 static void
5877 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5878 {
5879 struct symtab_and_line sr_sal;
5880 struct gdbarch *gdbarch;
5881
5882 gdb_assert (return_frame != NULL);
5883 init_sal (&sr_sal); /* initialize to zeros */
5884
5885 gdbarch = get_frame_arch (return_frame);
5886 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5887 sr_sal.section = find_pc_overlay (sr_sal.pc);
5888 sr_sal.pspace = get_frame_program_space (return_frame);
5889
5890 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5891 get_stack_frame_id (return_frame),
5892 bp_hp_step_resume);
5893 }
5894
5895 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5896 is used to skip a function after stepping into it (for "next" or if
5897 the called function has no debugging information).
5898
5899 The current function has almost always been reached by single
5900 stepping a call or return instruction. NEXT_FRAME belongs to the
5901 current function, and the breakpoint will be set at the caller's
5902 resume address.
5903
5904 This is a separate function rather than reusing
5905 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5906 get_prev_frame, which may stop prematurely (see the implementation
5907 of frame_unwind_caller_id for an example). */
5908
5909 static void
5910 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5911 {
5912 struct symtab_and_line sr_sal;
5913 struct gdbarch *gdbarch;
5914
5915 /* We shouldn't have gotten here if we don't know where the call site
5916 is. */
5917 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5918
5919 init_sal (&sr_sal); /* initialize to zeros */
5920
5921 gdbarch = frame_unwind_caller_arch (next_frame);
5922 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5923 frame_unwind_caller_pc (next_frame));
5924 sr_sal.section = find_pc_overlay (sr_sal.pc);
5925 sr_sal.pspace = frame_unwind_program_space (next_frame);
5926
5927 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5928 frame_unwind_caller_id (next_frame));
5929 }
5930
5931 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5932 new breakpoint at the target of a jmp_buf. The handling of
5933 longjmp-resume uses the same mechanisms used for handling
5934 "step-resume" breakpoints. */
5935
5936 static void
5937 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5938 {
5939 /* There should never be more than one longjmp-resume breakpoint per
5940 thread, so we should never be setting a new
5941 longjmp_resume_breakpoint when one is already active. */
5942 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5943
5944 if (debug_infrun)
5945 fprintf_unfiltered (gdb_stdlog,
5946 "infrun: inserting longjmp-resume breakpoint at %s\n",
5947 paddress (gdbarch, pc));
5948
5949 inferior_thread ()->control.exception_resume_breakpoint =
5950 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5951 }
5952
5953 /* Insert an exception resume breakpoint. TP is the thread throwing
5954 the exception. The block B is the block of the unwinder debug hook
5955 function. FRAME is the frame corresponding to the call to this
5956 function. SYM is the symbol of the function argument holding the
5957 target PC of the exception. */
5958
5959 static void
5960 insert_exception_resume_breakpoint (struct thread_info *tp,
5961 const struct block *b,
5962 struct frame_info *frame,
5963 struct symbol *sym)
5964 {
5965 volatile struct gdb_exception e;
5966
5967 /* We want to ignore errors here. */
5968 TRY_CATCH (e, RETURN_MASK_ERROR)
5969 {
5970 struct symbol *vsym;
5971 struct value *value;
5972 CORE_ADDR handler;
5973 struct breakpoint *bp;
5974
5975 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5976 value = read_var_value (vsym, frame);
5977 /* If the value was optimized out, revert to the old behavior. */
5978 if (! value_optimized_out (value))
5979 {
5980 handler = value_as_address (value);
5981
5982 if (debug_infrun)
5983 fprintf_unfiltered (gdb_stdlog,
5984 "infrun: exception resume at %lx\n",
5985 (unsigned long) handler);
5986
5987 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5988 handler, bp_exception_resume);
5989
5990 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5991 frame = NULL;
5992
5993 bp->thread = tp->num;
5994 inferior_thread ()->control.exception_resume_breakpoint = bp;
5995 }
5996 }
5997 }
5998
5999 /* A helper for check_exception_resume that sets an
6000 exception-breakpoint based on a SystemTap probe. */
6001
6002 static void
6003 insert_exception_resume_from_probe (struct thread_info *tp,
6004 const struct bound_probe *probe,
6005 struct frame_info *frame)
6006 {
6007 struct value *arg_value;
6008 CORE_ADDR handler;
6009 struct breakpoint *bp;
6010
6011 arg_value = probe_safe_evaluate_at_pc (frame, 1);
6012 if (!arg_value)
6013 return;
6014
6015 handler = value_as_address (arg_value);
6016
6017 if (debug_infrun)
6018 fprintf_unfiltered (gdb_stdlog,
6019 "infrun: exception resume at %s\n",
6020 paddress (get_objfile_arch (probe->objfile),
6021 handler));
6022
6023 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6024 handler, bp_exception_resume);
6025 bp->thread = tp->num;
6026 inferior_thread ()->control.exception_resume_breakpoint = bp;
6027 }
6028
6029 /* This is called when an exception has been intercepted. Check to
6030 see whether the exception's destination is of interest, and if so,
6031 set an exception resume breakpoint there. */
6032
6033 static void
6034 check_exception_resume (struct execution_control_state *ecs,
6035 struct frame_info *frame)
6036 {
6037 volatile struct gdb_exception e;
6038 struct bound_probe probe;
6039 struct symbol *func;
6040
6041 /* First see if this exception unwinding breakpoint was set via a
6042 SystemTap probe point. If so, the probe has two arguments: the
6043 CFA and the HANDLER. We ignore the CFA, extract the handler, and
6044 set a breakpoint there. */
6045 probe = find_probe_by_pc (get_frame_pc (frame));
6046 if (probe.probe)
6047 {
6048 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
6049 return;
6050 }
6051
6052 func = get_frame_function (frame);
6053 if (!func)
6054 return;
6055
6056 TRY_CATCH (e, RETURN_MASK_ERROR)
6057 {
6058 const struct block *b;
6059 struct block_iterator iter;
6060 struct symbol *sym;
6061 int argno = 0;
6062
6063 /* The exception breakpoint is a thread-specific breakpoint on
6064 the unwinder's debug hook, declared as:
6065
6066 void _Unwind_DebugHook (void *cfa, void *handler);
6067
6068 The CFA argument indicates the frame to which control is
6069 about to be transferred. HANDLER is the destination PC.
6070
6071 We ignore the CFA and set a temporary breakpoint at HANDLER.
6072 This is not extremely efficient but it avoids issues in gdb
6073 with computing the DWARF CFA, and it also works even in weird
6074 cases such as throwing an exception from inside a signal
6075 handler. */
6076
6077 b = SYMBOL_BLOCK_VALUE (func);
6078 ALL_BLOCK_SYMBOLS (b, iter, sym)
6079 {
6080 if (!SYMBOL_IS_ARGUMENT (sym))
6081 continue;
6082
6083 if (argno == 0)
6084 ++argno;
6085 else
6086 {
6087 insert_exception_resume_breakpoint (ecs->event_thread,
6088 b, frame, sym);
6089 break;
6090 }
6091 }
6092 }
6093 }
6094
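/* Stop waiting for events from the inferior: discard any pending
   step-over state and clear ECS->wait_some_more so that callers stop
   polling the target for this command.  */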
6095 static void
6096 stop_waiting (struct execution_control_state *ecs)
6097 {
6098 if (debug_infrun)
6099 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
6100
6101 clear_step_over_info ();
6102
6103 /* Let callers know we don't want to wait for the inferior anymore. */
6104 ecs->wait_some_more = 0;
6105 }
6106
6107 /* Called when we should continue running the inferior, because the
6108 current event doesn't cause a user visible stop. This does the
6109 resuming part; waiting for the next event is done elsewhere. */
6110
6111 static void
6112 keep_going (struct execution_control_state *ecs)
6113 {
6114 /* Make sure normal_stop is called if we get a QUIT handled before
6115 reaching resume. */
6116 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
6117
6118 /* Save the pc before execution, to compare with pc after stop. */
6119 ecs->event_thread->prev_pc
6120 = regcache_read_pc (get_thread_regcache (ecs->ptid));
6121
6122 if (ecs->event_thread->control.trap_expected
6123 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
6124 {
6125 /* We haven't yet gotten our trap, and either: intercepted a
6126 non-signal event (e.g., a fork); or took a signal which we
6127 are supposed to pass through to the inferior. Simply
6128 continue. */
6129 discard_cleanups (old_cleanups);
6130 resume (currently_stepping (ecs->event_thread),
6131 ecs->event_thread->suspend.stop_signal);
6132 }
6133 else
6134 {
6135 volatile struct gdb_exception e;
6136 struct regcache *regcache = get_current_regcache ();
6137 int remove_bp;
6138 int remove_wps;
6139
6140 /* Either the trap was not expected, but we are continuing
6141 anyway (if we got a signal, the user asked it be passed to
6142 the child)
6143 -- or --
6144 We got our expected trap, but decided we should resume from
6145 it.
6146
6147 We're going to run this baby now!
6148
6149 Note that insert_breakpoints won't try to re-insert
6150 already inserted breakpoints. Therefore, we don't
6151 care if breakpoints were already inserted, or not. */
6152
6153 /* If we need to step over a breakpoint, and we're not using
6154 displaced stepping to do so, insert all breakpoints
6155 (watchpoints, etc.) but the one we're stepping over, step one
6156 instruction, and then re-insert the breakpoint when that step
6157 is finished. */
6158
6159 remove_bp = (ecs->hit_singlestep_breakpoint
6160 || thread_still_needs_step_over (ecs->event_thread));
6161 remove_wps = (ecs->event_thread->stepping_over_watchpoint
6162 && !target_have_steppable_watchpoint);
6163
6164 if (remove_bp && !use_displaced_stepping (get_regcache_arch (regcache)))
6165 {
6166 set_step_over_info (get_regcache_aspace (regcache),
6167 regcache_read_pc (regcache), remove_wps);
6168 }
6169 else if (remove_wps)
6170 set_step_over_info (NULL, 0, remove_wps);
6171 else
6172 clear_step_over_info ();
6173
6174 /* Stop stepping if inserting breakpoints fails. */
6175 TRY_CATCH (e, RETURN_MASK_ERROR)
6176 {
6177 insert_breakpoints ();
6178 }
6179 if (e.reason < 0)
6180 {
6181 exception_print (gdb_stderr, e);
6182 stop_waiting (ecs);
6183 return;
6184 }
6185
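/* Record that this resume is a step-over (past a breakpoint and/or a
   watchpoint-triggering instruction), so the resulting trap is
   recognized as part of the step-over rather than a user stop.  */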
6186 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
6187
6188 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
6189 explicitly specifies that such a signal should be delivered
6190 to the target program). Typically, that would occur when a
6191 user is debugging a target monitor on a simulator: the target
6192 monitor sets a breakpoint; the simulator encounters this
6193 breakpoint and halts the simulation handing control to GDB;
6194 GDB, noting that the stop address doesn't map to any known
6195 breakpoint, returns control back to the simulator; the
6196 simulator then delivers the hardware equivalent of a
6197 GDB_SIGNAL_TRAP to the program being debugged. */
6198 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6199 && !signal_program[ecs->event_thread->suspend.stop_signal])
6200 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6201
6202 discard_cleanups (old_cleanups);
6203 resume (currently_stepping (ecs->event_thread),
6204 ecs->event_thread->suspend.stop_signal);
6205 }
6206
6207 prepare_to_wait (ecs);
6208 }
6209
6210 /* This function normally comes after a resume, before
6211 handle_inferior_event exits. It takes care of any last bits of
6212 housekeeping, and sets the all-important wait_some_more flag. */
6213
6214 static void
6215 prepare_to_wait (struct execution_control_state *ecs)
6216 {
6217 if (debug_infrun)
6218 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6219
6220 /* This is the old end of the while loop. Let everybody know we
6221 want to wait for the inferior some more and get called again
6222 soon. */
6223 ecs->wait_some_more = 1;
6224 }
6225
6226 /* We are done with the step range of a step/next/si/ni command.
6227 Called once for each n of a "step n" operation. */
6228
6229 static void
6230 end_stepping_range (struct execution_control_state *ecs)
6231 {
6232 ecs->event_thread->control.stop_step = 1;
6233 stop_waiting (ecs);
6234 }
6235
6236 /* Several print_*_reason functions to print why the inferior has stopped.
6237 We always print something when the inferior exits, or receives a signal.
6238 The rest of the cases are dealt with later on in normal_stop and
6239 print_it_typical. Ideally there should be a call to one of these
6240 print_*_reason functions from handle_inferior_event each time
6241 stop_waiting is called.
6242
6243 Note that we don't call these directly, instead we delegate that to
6244 the interpreters, through observers. Interpreters then call these
6245 with whatever uiout is right. */
6246
6247 void
6248 print_end_stepping_range_reason (struct ui_out *uiout)
6249 {
6250 /* For CLI-like interpreters, print nothing. */
6251
6252 if (ui_out_is_mi_like_p (uiout))
6253 {
6254 ui_out_field_string (uiout, "reason",
6255 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6256 }
6257 }
6258
6259 void
6260 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6261 {
6262 annotate_signalled ();
6263 if (ui_out_is_mi_like_p (uiout))
6264 ui_out_field_string
6265 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
6266 ui_out_text (uiout, "\nProgram terminated with signal ");
6267 annotate_signal_name ();
6268 ui_out_field_string (uiout, "signal-name",
6269 gdb_signal_to_name (siggnal));
6270 annotate_signal_name_end ();
6271 ui_out_text (uiout, ", ");
6272 annotate_signal_string ();
6273 ui_out_field_string (uiout, "signal-meaning",
6274 gdb_signal_to_string (siggnal));
6275 annotate_signal_string_end ();
6276 ui_out_text (uiout, ".\n");
6277 ui_out_text (uiout, "The program no longer exists.\n");
6278 }
6279
6280 void
6281 print_exited_reason (struct ui_out *uiout, int exitstatus)
6282 {
6283 struct inferior *inf = current_inferior ();
6284 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6285
6286 annotate_exited (exitstatus);
6287 if (exitstatus)
6288 {
6289 if (ui_out_is_mi_like_p (uiout))
6290 ui_out_field_string (uiout, "reason",
6291 async_reason_lookup (EXEC_ASYNC_EXITED));
6292 ui_out_text (uiout, "[Inferior ");
6293 ui_out_text (uiout, plongest (inf->num));
6294 ui_out_text (uiout, " (");
6295 ui_out_text (uiout, pidstr);
6296 ui_out_text (uiout, ") exited with code ");
6297 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6298 ui_out_text (uiout, "]\n");
6299 }
6300 else
6301 {
6302 if (ui_out_is_mi_like_p (uiout))
6303 ui_out_field_string
6304 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6305 ui_out_text (uiout, "[Inferior ");
6306 ui_out_text (uiout, plongest (inf->num));
6307 ui_out_text (uiout, " (");
6308 ui_out_text (uiout, pidstr);
6309 ui_out_text (uiout, ") exited normally]\n");
6310 }
6311 }
6312
6313 void
6314 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6315 {
6316 annotate_signal ();
6317
6318 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
6319 {
6320 struct thread_info *t = inferior_thread ();
6321
6322 ui_out_text (uiout, "\n[");
6323 ui_out_field_string (uiout, "thread-name",
6324 target_pid_to_str (t->ptid));
6325 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6326 ui_out_text (uiout, " stopped");
6327 }
6328 else
6329 {
6330 ui_out_text (uiout, "\nProgram received signal ");
6331 annotate_signal_name ();
6332 if (ui_out_is_mi_like_p (uiout))
6333 ui_out_field_string
6334 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6335 ui_out_field_string (uiout, "signal-name",
6336 gdb_signal_to_name (siggnal));
6337 annotate_signal_name_end ();
6338 ui_out_text (uiout, ", ");
6339 annotate_signal_string ();
6340 ui_out_field_string (uiout, "signal-meaning",
6341 gdb_signal_to_string (siggnal));
6342 annotate_signal_string_end ();
6343 }
6344 ui_out_text (uiout, ".\n");
6345 }
6346
6347 void
6348 print_no_history_reason (struct ui_out *uiout)
6349 {
6350 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6351 }
6352
6353 /* Print current location without a level number, if we have changed
6354 functions or hit a breakpoint. Print source line if we have one.
6355 bpstat_print contains the logic deciding in detail what to print,
6356 based on the event(s) that just occurred. */
6357
6358 void
6359 print_stop_event (struct target_waitstatus *ws)
6360 {
6361 int bpstat_ret;
6362 int source_flag;
6363 int do_frame_printing = 1;
6364 struct thread_info *tp = inferior_thread ();
6365
6366 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6367 switch (bpstat_ret)
6368 {
6369 case PRINT_UNKNOWN:
6370 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6371 should) carry around the function and does (or should) use
6372 that when doing a frame comparison. */
6373 if (tp->control.stop_step
6374 && frame_id_eq (tp->control.step_frame_id,
6375 get_frame_id (get_current_frame ()))
6376 && step_start_function == find_pc_function (stop_pc))
6377 {
6378 /* Finished step, just print source line. */
6379 source_flag = SRC_LINE;
6380 }
6381 else
6382 {
6383 /* Print location and source line. */
6384 source_flag = SRC_AND_LOC;
6385 }
6386 break;
6387 case PRINT_SRC_AND_LOC:
6388 /* Print location and source line. */
6389 source_flag = SRC_AND_LOC;
6390 break;
6391 case PRINT_SRC_ONLY:
6392 source_flag = SRC_LINE;
6393 break;
6394 case PRINT_NOTHING:
6395 /* Something bogus. */
6396 source_flag = SRC_LINE;
6397 do_frame_printing = 0;
6398 break;
6399 default:
6400 internal_error (__FILE__, __LINE__, _("Unknown value."));
6401 }
6402
6403 /* The behavior of this routine with respect to the source
6404 flag is:
6405 SRC_LINE: Print only source line
6406 LOCATION: Print only location
6407 SRC_AND_LOC: Print location and source line. */
6408 if (do_frame_printing)
6409 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6410
6411 /* Display the auto-display expressions. */
6412 do_displays ();
6413 }
6414
6415 /* Here to return control to GDB when the inferior stops for real.
6416 Print appropriate messages, remove breakpoints, give terminal our modes.
6417
6418 STOP_PRINT_FRAME nonzero means print the executing frame
6419 (pc, function, args, file, line number and line text).
6420 BREAKPOINTS_FAILED nonzero means stop was due to error
6421 attempting to insert breakpoints. */
6422
6423 void
6424 normal_stop (void)
6425 {
6426 struct target_waitstatus last;
6427 ptid_t last_ptid;
6428 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6429
6430 get_last_target_status (&last_ptid, &last);
6431
6432 /* If an exception is thrown from this point on, make sure to
6433 propagate GDB's knowledge of the executing state to the
6434 frontend/user running state. A QUIT is an easy exception to see
6435 here, so do this before any filtered output. */
6436 if (!non_stop)
6437 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6438 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6439 && last.kind != TARGET_WAITKIND_EXITED
6440 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6441 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6442
6443 /* As we're presenting a stop, and potentially removing breakpoints,
6444 update the thread list so we can tell whether there are threads
6445 running on the target. With target remote, for example, we can
6446 only learn about new threads when we explicitly update the thread
6447 list. Do this before notifying the interpreters about signal
6448 stops, end of stepping ranges, etc., so that the "new thread"
6449 output is emitted before e.g., "Program received signal FOO",
6450 instead of after. */
6451 update_thread_list ();
6452
6453 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
6454 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
6455
6456 /* As with the notification of thread events, we want to delay
6457 notifying the user that we've switched thread context until
6458 the inferior actually stops.
6459
6460 There's no point in saying anything if the inferior has exited.
6461 Note that SIGNALLED here means "exited with a signal", not
6462 "received a signal".
6463
6464 Also skip saying anything in non-stop mode. In that mode we don't
6465 want GDB to switch threads behind the user's back, to avoid races
6466 where the user is typing a command to apply to thread x, but GDB
6467 switches to thread y before the user finishes entering the command.
6468 So fetch_inferior_event installs a cleanup that restores the current
6469 thread to the one the user had selected right after this event is
6470 handled; we're not really switching, only informing of a stop. */
6472 if (!non_stop
6473 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6474 && target_has_execution
6475 && last.kind != TARGET_WAITKIND_SIGNALLED
6476 && last.kind != TARGET_WAITKIND_EXITED
6477 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6478 {
6479 target_terminal_ours_for_output ();
6480 printf_filtered (_("[Switching to %s]\n"),
6481 target_pid_to_str (inferior_ptid));
6482 annotate_thread_changed ();
6483 previous_inferior_ptid = inferior_ptid;
6484 }
6485
6486 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6487 {
6488 gdb_assert (sync_execution || !target_can_async_p ());
6489
6490 target_terminal_ours_for_output ();
6491 printf_filtered (_("No unwaited-for children left.\n"));
6492 }
6493
6494 /* Note: this depends on the update_thread_list call above. */
6495 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6496 {
6497 if (remove_breakpoints ())
6498 {
6499 target_terminal_ours_for_output ();
6500 printf_filtered (_("Cannot remove breakpoints because "
6501 "program is no longer writable.\nFurther "
6502 "execution is probably impossible.\n"));
6503 }
6504 }
6505
6506 /* If an auto-display called a function and that got a signal,
6507 delete that auto-display to avoid an infinite recursion. */
6508
6509 if (stopped_by_random_signal)
6510 disable_current_display ();
6511
6512 /* Notify observers if we finished a "step"-like command, etc. */
6513 if (target_has_execution
6514 && last.kind != TARGET_WAITKIND_SIGNALLED
6515 && last.kind != TARGET_WAITKIND_EXITED
6516 && inferior_thread ()->control.stop_step)
6517 {
6518 /* But not if in the middle of doing a "step n" operation for
6519 n > 1 */
6520 if (inferior_thread ()->step_multi)
6521 goto done;
6522
6523 observer_notify_end_stepping_range ();
6524 }
6525
6526 target_terminal_ours ();
6527 async_enable_stdin ();
6528
6529 /* Set the current source location. This will also happen if we
6530 display the frame below, but the current SAL will be incorrect
6531 during a user hook-stop function. */
6532 if (has_stack_frames () && !stop_stack_dummy)
6533 set_current_sal_from_frame (get_current_frame ());
6534
6535 /* Let the user/frontend see the threads as stopped, but do nothing
6536 if the thread was running an infcall. We may be e.g., evaluating
6537 a breakpoint condition. In that case, the thread had state
6538 THREAD_RUNNING before the infcall, and shall remain set to
6539 running, all without informing the user/frontend about state
6540 transition changes. If this is actually a call command, then the
6541 thread was originally already stopped, so there's no state to
6542 finish either. */
6543 if (target_has_execution && inferior_thread ()->control.in_infcall)
6544 discard_cleanups (old_chain);
6545 else
6546 do_cleanups (old_chain);
6547
6548 /* Look up the hook_stop and run it (CLI internally handles problem
6549 of stop_command's pre-hook not existing). */
6550 if (stop_command)
6551 catch_errors (hook_stop_stub, stop_command,
6552 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6553
6554 if (!has_stack_frames ())
6555 goto done;
6556
6557 if (last.kind == TARGET_WAITKIND_SIGNALLED
6558 || last.kind == TARGET_WAITKIND_EXITED)
6559 goto done;
6560
6561 /* Select innermost stack frame - i.e., current frame is frame 0,
6562 and current location is based on that.
6563 Don't do this on return from a stack dummy routine,
6564 or if the program has exited. */
6565
6566 if (!stop_stack_dummy)
6567 {
6568 select_frame (get_current_frame ());
6569
6570 /* If --batch-silent is enabled then there's no need to print the current
6571 source location, and trying to do so risks an error message about
6572 missing source files. */
6573 if (stop_print_frame && !batch_silent)
6574 print_stop_event (&last);
6575 }
6576
6577 /* Save the function value return registers, if we care.
6578 We might be about to restore their previous contents. */
6579 if (inferior_thread ()->control.proceed_to_finish
6580 && execution_direction != EXEC_REVERSE)
6581 {
6582 /* This should not be necessary. */
6583 if (stop_registers)
6584 regcache_xfree (stop_registers);
6585
6586 /* NB: The copy goes through to the target picking up the value of
6587 all the registers. */
6588 stop_registers = regcache_dup (get_current_regcache ());
6589 }
6590
6591 if (stop_stack_dummy == STOP_STACK_DUMMY)
6592 {
6593 /* Pop the empty frame that contains the stack dummy.
6594 This also restores inferior state prior to the call
6595 (struct infcall_suspend_state). */
6596 struct frame_info *frame = get_current_frame ();
6597
6598 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6599 frame_pop (frame);
6600 /* frame_pop() calls reinit_frame_cache as the last thing it
6601 does which means there's currently no selected frame. We
6602 don't need to re-establish a selected frame if the dummy call
6603 returns normally, that will be done by
6604 restore_infcall_control_state. However, we do have to handle
6605 the case where the dummy call is returning after being
6606 stopped (e.g. the dummy call previously hit a breakpoint).
6607 We can't know which case we have so just always re-establish
6608 a selected frame here. */
6609 select_frame (get_current_frame ());
6610 }
6611
6612 done:
6613 annotate_stopped ();
6614
6615 /* Suppress the stop observer if we're in the middle of:
6616
6617 - a step n (n > 1), as there are still more steps to be done.
6618
6619 - a "finish" command, as the observer will be called in
6620 finish_command_continuation, so it can include the inferior
6621 function's return value.
6622
6623 - calling an inferior function, as we pretend the inferior didn't
6624 run at all. The return value of the call is handled by the
6625 expression evaluator, through call_function_by_hand. */
6626
6627 if (!target_has_execution
6628 || last.kind == TARGET_WAITKIND_SIGNALLED
6629 || last.kind == TARGET_WAITKIND_EXITED
6630 || last.kind == TARGET_WAITKIND_NO_RESUMED
6631 || (!(inferior_thread ()->step_multi
6632 && inferior_thread ()->control.stop_step)
6633 && !(inferior_thread ()->control.stop_bpstat
6634 && inferior_thread ()->control.proceed_to_finish)
6635 && !inferior_thread ()->control.in_infcall))
6636 {
6637 if (!ptid_equal (inferior_ptid, null_ptid))
6638 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6639 stop_print_frame);
6640 else
6641 observer_notify_normal_stop (NULL, stop_print_frame);
6642 }
6643
6644 if (target_has_execution)
6645 {
6646 if (last.kind != TARGET_WAITKIND_SIGNALLED
6647 && last.kind != TARGET_WAITKIND_EXITED)
6648 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6649 Delete any breakpoint that is to be deleted at the next stop. */
6650 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6651 }
6652
6653 /* Try to get rid of automatically added inferiors that are no
6654 longer needed. Keeping those around slows down things linearly.
6655 Note that this never removes the current inferior. */
6656 prune_inferiors ();
6657 }
6658
6659 static int
6660 hook_stop_stub (void *cmd)
6661 {
6662 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6663 return (0);
6664 }
6665 \f
6666 int
6667 signal_stop_state (int signo)
6668 {
6669 return signal_stop[signo];
6670 }
6671
6672 int
6673 signal_print_state (int signo)
6674 {
6675 return signal_print[signo];
6676 }
6677
6678 int
6679 signal_pass_state (int signo)
6680 {
6681 return signal_program[signo];
6682 }
6683
6684 static void
6685 signal_cache_update (int signo)
6686 {
6687 if (signo == -1)
6688 {
6689 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6690 signal_cache_update (signo);
6691
6692 return;
6693 }
6694
6695 signal_pass[signo] = (signal_stop[signo] == 0
6696 && signal_print[signo] == 0
6697 && signal_program[signo] == 1
6698 && signal_catch[signo] == 0);
6699 }
6700
6701 int
6702 signal_stop_update (int signo, int state)
6703 {
6704 int ret = signal_stop[signo];
6705
6706 signal_stop[signo] = state;
6707 signal_cache_update (signo);
6708 return ret;
6709 }
6710
6711 int
6712 signal_print_update (int signo, int state)
6713 {
6714 int ret = signal_print[signo];
6715
6716 signal_print[signo] = state;
6717 signal_cache_update (signo);
6718 return ret;
6719 }
6720
6721 int
6722 signal_pass_update (int signo, int state)
6723 {
6724 int ret = signal_program[signo];
6725
6726 signal_program[signo] = state;
6727 signal_cache_update (signo);
6728 return ret;
6729 }
6730
6731 /* Update the global 'signal_catch' from INFO and notify the
6732 target. */
6733
6734 void
6735 signal_catch_update (const unsigned int *info)
6736 {
6737 int i;
6738
6739 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6740 signal_catch[i] = info[i] > 0;
6741 signal_cache_update (-1);
6742 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6743 }
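/* A minimal usage sketch of the above, not compiled: a caller keeps a
   per-signal array of catchpoint counts (the "counts" array here is
   hypothetical) and pushes it through signal_catch_update; any nonzero
   entry marks that signal as caught, which also clears its signal_pass
   entry via signal_cache_update.  */
#if 0
{
  static unsigned int counts[GDB_SIGNAL_LAST];

  counts[GDB_SIGNAL_USR1]++;	/* e.g. after "catch signal SIGUSR1" */
  signal_catch_update (counts);
}
#endif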
6744
6745 static void
6746 sig_print_header (void)
6747 {
6748 printf_filtered (_("Signal Stop\tPrint\tPass "
6749 "to program\tDescription\n"));
6750 }
6751
6752 static void
6753 sig_print_info (enum gdb_signal oursig)
6754 {
6755 const char *name = gdb_signal_to_name (oursig);
6756 int name_padding = 13 - strlen (name);
6757
6758 if (name_padding <= 0)
6759 name_padding = 0;
6760
6761 printf_filtered ("%s", name);
6762 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6763 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6764 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6765 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6766 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6767 }
6768
6769 /* Specify how various signals in the inferior should be handled. */
6770
6771 static void
6772 handle_command (char *args, int from_tty)
6773 {
6774 char **argv;
6775 int digits, wordlen;
6776 int sigfirst, signum, siglast;
6777 enum gdb_signal oursig;
6778 int allsigs;
6779 int nsigs;
6780 unsigned char *sigs;
6781 struct cleanup *old_chain;
6782
6783 if (args == NULL)
6784 {
6785 error_no_arg (_("signal to handle"));
6786 }
6787
6788 /* Allocate and zero an array of flags for which signals to handle. */
6789
6790 nsigs = (int) GDB_SIGNAL_LAST;
6791 sigs = (unsigned char *) alloca (nsigs);
6792 memset (sigs, 0, nsigs);
6793
6794 /* Break the command line up into args. */
6795
6796 argv = gdb_buildargv (args);
6797 old_chain = make_cleanup_freeargv (argv);
6798
6799 /* Walk through the args, looking for signal oursigs, signal names, and
6800 actions. Signal numbers and signal names may be interspersed with
6801 actions, with the actions being performed for all signals cumulatively
6802 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
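/* For example (user commands, shown here only as an illustration of
   the syntax above):

   handle SIGUSR1 nostop noprint pass
   handle 1-5 stop print

   Each action is applied to every signal specified so far on the
   command line.  */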
6803
6804 while (*argv != NULL)
6805 {
6806 wordlen = strlen (*argv);
6807 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6808 {;
6809 }
6810 allsigs = 0;
6811 sigfirst = siglast = -1;
6812
6813 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6814 {
6815 /* Apply action to all signals except those used by the
6816 debugger. Silently skip those. */
6817 allsigs = 1;
6818 sigfirst = 0;
6819 siglast = nsigs - 1;
6820 }
6821 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6822 {
6823 SET_SIGS (nsigs, sigs, signal_stop);
6824 SET_SIGS (nsigs, sigs, signal_print);
6825 }
6826 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6827 {
6828 UNSET_SIGS (nsigs, sigs, signal_program);
6829 }
6830 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6831 {
6832 SET_SIGS (nsigs, sigs, signal_print);
6833 }
6834 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6835 {
6836 SET_SIGS (nsigs, sigs, signal_program);
6837 }
6838 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6839 {
6840 UNSET_SIGS (nsigs, sigs, signal_stop);
6841 }
6842 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6843 {
6844 SET_SIGS (nsigs, sigs, signal_program);
6845 }
6846 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6847 {
6848 UNSET_SIGS (nsigs, sigs, signal_print);
6849 UNSET_SIGS (nsigs, sigs, signal_stop);
6850 }
6851 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6852 {
6853 UNSET_SIGS (nsigs, sigs, signal_program);
6854 }
6855 else if (digits > 0)
6856 {
6857 /* It is numeric. The numeric signal refers to our own
6858 internal signal numbering from target.h, not to host/target
6859 signal number. This is a feature; users really should be
6860 using symbolic names anyway, and the common ones like
6861 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6862
6863 sigfirst = siglast = (int)
6864 gdb_signal_from_command (atoi (*argv));
6865 if ((*argv)[digits] == '-')
6866 {
6867 siglast = (int)
6868 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6869 }
6870 if (sigfirst > siglast)
6871 {
6872 /* Bet he didn't figure we'd think of this case... */
6873 signum = sigfirst;
6874 sigfirst = siglast;
6875 siglast = signum;
6876 }
6877 }
6878 else
6879 {
6880 oursig = gdb_signal_from_name (*argv);
6881 if (oursig != GDB_SIGNAL_UNKNOWN)
6882 {
6883 sigfirst = siglast = (int) oursig;
6884 }
6885 else
6886 {
6887 /* Not a number and not a recognized flag word => complain. */
6888 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6889 }
6890 }
6891
6892 /* If any signal numbers or symbol names were found, set flags for
6893 which signals to apply actions to. */
6894
6895 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6896 {
6897 switch ((enum gdb_signal) signum)
6898 {
6899 case GDB_SIGNAL_TRAP:
6900 case GDB_SIGNAL_INT:
6901 if (!allsigs && !sigs[signum])
6902 {
6903 if (query (_("%s is used by the debugger.\n\
6904 Are you sure you want to change it? "),
6905 gdb_signal_to_name ((enum gdb_signal) signum)))
6906 {
6907 sigs[signum] = 1;
6908 }
6909 else
6910 {
6911 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6912 gdb_flush (gdb_stdout);
6913 }
6914 }
6915 break;
6916 case GDB_SIGNAL_0:
6917 case GDB_SIGNAL_DEFAULT:
6918 case GDB_SIGNAL_UNKNOWN:
6919 /* Make sure that "all" doesn't print these. */
6920 break;
6921 default:
6922 sigs[signum] = 1;
6923 break;
6924 }
6925 }
6926
6927 argv++;
6928 }
6929
6930 for (signum = 0; signum < nsigs; signum++)
6931 if (sigs[signum])
6932 {
6933 signal_cache_update (-1);
6934 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6935 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6936
6937 if (from_tty)
6938 {
6939 /* Show the results. */
6940 sig_print_header ();
6941 for (; signum < nsigs; signum++)
6942 if (sigs[signum])
6943 sig_print_info (signum);
6944 }
6945
6946 break;
6947 }
6948
6949 do_cleanups (old_chain);
6950 }
6951
6952 /* Complete the "handle" command. */
6953
6954 static VEC (char_ptr) *
6955 handle_completer (struct cmd_list_element *ignore,
6956 const char *text, const char *word)
6957 {
6958 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6959 static const char * const keywords[] =
6960 {
6961 "all",
6962 "stop",
6963 "ignore",
6964 "print",
6965 "pass",
6966 "nostop",
6967 "noignore",
6968 "noprint",
6969 "nopass",
6970 NULL,
6971 };
6972
6973 vec_signals = signal_completer (ignore, text, word);
6974 vec_keywords = complete_on_enum (keywords, word, word);
6975
6976 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6977 VEC_free (char_ptr, vec_signals);
6978 VEC_free (char_ptr, vec_keywords);
6979 return return_val;
6980 }
6981
6982 static void
6983 xdb_handle_command (char *args, int from_tty)
6984 {
6985 char **argv;
6986 struct cleanup *old_chain;
6987
6988 if (args == NULL)
6989 error_no_arg (_("xdb command"));
6990
6991 /* Break the command line up into args. */
6992
6993 argv = gdb_buildargv (args);
6994 old_chain = make_cleanup_freeargv (argv);
6995 if (argv[1] != (char *) NULL)
6996 {
6997 char *argBuf;
6998 int bufLen;
6999
7000 bufLen = strlen (argv[0]) + 20;
7001 argBuf = (char *) xmalloc (bufLen);
7002 if (argBuf)
7003 {
7004 int validFlag = 1;
7005 enum gdb_signal oursig;
7006
7007 oursig = gdb_signal_from_name (argv[0]);
7008 memset (argBuf, 0, bufLen);
7009 if (strcmp (argv[1], "Q") == 0)
7010 sprintf (argBuf, "%s %s", argv[0], "noprint");
7011 else
7012 {
7013 if (strcmp (argv[1], "s") == 0)
7014 {
7015 if (!signal_stop[oursig])
7016 sprintf (argBuf, "%s %s", argv[0], "stop");
7017 else
7018 sprintf (argBuf, "%s %s", argv[0], "nostop");
7019 }
7020 else if (strcmp (argv[1], "i") == 0)
7021 {
7022 if (!signal_program[oursig])
7023 sprintf (argBuf, "%s %s", argv[0], "pass");
7024 else
7025 sprintf (argBuf, "%s %s", argv[0], "nopass");
7026 }
7027 else if (strcmp (argv[1], "r") == 0)
7028 {
7029 if (!signal_print[oursig])
7030 sprintf (argBuf, "%s %s", argv[0], "print");
7031 else
7032 sprintf (argBuf, "%s %s", argv[0], "noprint");
7033 }
7034 else
7035 validFlag = 0;
7036 }
7037 if (validFlag)
7038 handle_command (argBuf, from_tty);
7039 else
7040 printf_filtered (_("Invalid signal handling flag.\n"));
7041 if (argBuf)
7042 xfree (argBuf);
7043 }
7044 }
7045 do_cleanups (old_chain);
7046 }
7047
7048 enum gdb_signal
7049 gdb_signal_from_command (int num)
7050 {
7051 if (num >= 1 && num <= 15)
7052 return (enum gdb_signal) num;
7053 error (_("Only signals 1-15 are valid as numeric signals.\n\
7054 Use \"info signals\" for a list of symbolic signals."));
7055 }
7056
7057 /* Print current contents of the tables set by the handle command.
7058 It is possible we should just be printing signals actually used
7059 by the current target (but for things to work right when switching
7060 targets, all signals should be in the signal tables). */
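/* For instance, "info signals SIGINT" prints, approximately:

   Signal        Stop	Print	Pass to program	Description
   SIGINT        Yes	Yes	No		Interrupt

   reflecting the defaults established in _initialize_infrun below.  */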
7061
7062 static void
7063 signals_info (char *signum_exp, int from_tty)
7064 {
7065 enum gdb_signal oursig;
7066
7067 sig_print_header ();
7068
7069 if (signum_exp)
7070 {
7071 /* First see if this is a symbol name. */
7072 oursig = gdb_signal_from_name (signum_exp);
7073 if (oursig == GDB_SIGNAL_UNKNOWN)
7074 {
7075 /* No, try numeric. */
7076 oursig =
7077 gdb_signal_from_command (parse_and_eval_long (signum_exp));
7078 }
7079 sig_print_info (oursig);
7080 return;
7081 }
7082
7083 printf_filtered ("\n");
7084 /* These ugly casts brought to you by the native VAX compiler. */
7085 for (oursig = GDB_SIGNAL_FIRST;
7086 (int) oursig < (int) GDB_SIGNAL_LAST;
7087 oursig = (enum gdb_signal) ((int) oursig + 1))
7088 {
7089 QUIT;
7090
7091 if (oursig != GDB_SIGNAL_UNKNOWN
7092 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
7093 sig_print_info (oursig);
7094 }
7095
7096 printf_filtered (_("\nUse the \"handle\" command "
7097 "to change these tables.\n"));
7098 }
7099
7100 /* Check if it makes sense to read $_siginfo from the current thread
7101 at this point. If not, throw an error. */
7102
7103 static void
7104 validate_siginfo_access (void)
7105 {
7106 /* No current inferior, no siginfo. */
7107 if (ptid_equal (inferior_ptid, null_ptid))
7108 error (_("No thread selected."));
7109
7110 /* Don't try to read from a dead thread. */
7111 if (is_exited (inferior_ptid))
7112 error (_("The current thread has terminated."));
7113
7114 /* ... or from a spinning thread. */
7115 if (is_running (inferior_ptid))
7116 error (_("Selected thread is running."));
7117 }
7118
7119 /* The $_siginfo convenience variable is a bit special. We don't know
7120 for sure the type of the value until we actually have a chance to
7121 fetch the data. The type can change depending on gdbarch, so it is
7122 also dependent on which thread you have selected. This is made to work by:
7123
7124 1. making $_siginfo be an internalvar that creates a new value on
7125 access.
7126
7127 2. making the value of $_siginfo be an lval_computed value. */
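/* For example, after a stop caused by a signal, the user can read (or
   even write) the reported siginfo, roughly:

   (gdb) print $_siginfo.si_signo
   (gdb) print $_siginfo._sifields._sigfault.si_addr

   The member names come from the gdbarch-provided siginfo type, so the
   second command only applies to Linux-style targets.  */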
7128
7129 /* This function implements the lval_computed support for reading a
7130 $_siginfo value. */
7131
7132 static void
7133 siginfo_value_read (struct value *v)
7134 {
7135 LONGEST transferred;
7136
7137 validate_siginfo_access ();
7138
7139 transferred =
7140 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
7141 NULL,
7142 value_contents_all_raw (v),
7143 value_offset (v),
7144 TYPE_LENGTH (value_type (v)));
7145
7146 if (transferred != TYPE_LENGTH (value_type (v)))
7147 error (_("Unable to read siginfo"));
7148 }
7149
7150 /* This function implements the lval_computed support for writing a
7151 $_siginfo value. */
7152
7153 static void
7154 siginfo_value_write (struct value *v, struct value *fromval)
7155 {
7156 LONGEST transferred;
7157
7158 validate_siginfo_access ();
7159
7160 transferred = target_write (&current_target,
7161 TARGET_OBJECT_SIGNAL_INFO,
7162 NULL,
7163 value_contents_all_raw (fromval),
7164 value_offset (v),
7165 TYPE_LENGTH (value_type (fromval)));
7166
7167 if (transferred != TYPE_LENGTH (value_type (fromval)))
7168 error (_("Unable to write siginfo"));
7169 }
7170
7171 static const struct lval_funcs siginfo_value_funcs =
7172 {
7173 siginfo_value_read,
7174 siginfo_value_write
7175 };
7176
7177 /* Return a new value with the correct type for the siginfo object of
7178 the current thread using architecture GDBARCH. Return a void value
7179 if there's no object available. */
7180
7181 static struct value *
7182 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7183 void *ignore)
7184 {
7185 if (target_has_stack
7186 && !ptid_equal (inferior_ptid, null_ptid)
7187 && gdbarch_get_siginfo_type_p (gdbarch))
7188 {
7189 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7190
7191 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7192 }
7193
7194 return allocate_value (builtin_type (gdbarch)->builtin_void);
7195 }
7196
7197 \f
7198 /* infcall_suspend_state contains state about the program itself like its
7199 registers and any signal it received when it last stopped.
7200 This state must be restored regardless of how the inferior function call
7201 ends (either successfully, or after it hits a breakpoint or signal)
7202 if the program is to properly continue where it left off. */
7203
7204 struct infcall_suspend_state
7205 {
7206 struct thread_suspend_state thread_suspend;
7207 #if 0 /* Currently unused and empty structures are not valid C. */
7208 struct inferior_suspend_state inferior_suspend;
7209 #endif
7210
7211 /* Other fields: */
7212 CORE_ADDR stop_pc;
7213 struct regcache *registers;
7214
7215 /* Format of SIGINFO_DATA or NULL if it is not present. */
7216 struct gdbarch *siginfo_gdbarch;
7217
7218 /* The inferior's siginfo data; its format depends on SIGINFO_GDBARCH
7219 and its length is TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a
7220 different gdbarch the content would be invalid. */
7221 gdb_byte *siginfo_data;
7222 };
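/* A minimal sketch, not compiled, of how this state is meant to be used
   around an inferior function call; the real callers live in infcall.c
   and are more involved.  */
#if 0
{
  struct infcall_suspend_state *caller_state = save_infcall_suspend_state ();
  struct cleanup *chain
    = make_cleanup_restore_infcall_suspend_state (caller_state);

  /* ... set up the dummy frame and run the called function ... */

  /* Running the cleanup restores (and frees) the saved registers, stop
     signal and siginfo; a caller that wants to keep the new stopped
     state discards the cleanup and the saved state instead.  */
  do_cleanups (chain);
}
#endif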
7223
7224 struct infcall_suspend_state *
7225 save_infcall_suspend_state (void)
7226 {
7227 struct infcall_suspend_state *inf_state;
7228 struct thread_info *tp = inferior_thread ();
7229 #if 0
7230 struct inferior *inf = current_inferior ();
7231 #endif
7232 struct regcache *regcache = get_current_regcache ();
7233 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7234 gdb_byte *siginfo_data = NULL;
7235
7236 if (gdbarch_get_siginfo_type_p (gdbarch))
7237 {
7238 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7239 size_t len = TYPE_LENGTH (type);
7240 struct cleanup *back_to;
7241
7242 siginfo_data = xmalloc (len);
7243 back_to = make_cleanup (xfree, siginfo_data);
7244
7245 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7246 siginfo_data, 0, len) == len)
7247 discard_cleanups (back_to);
7248 else
7249 {
7250 /* Errors ignored. */
7251 do_cleanups (back_to);
7252 siginfo_data = NULL;
7253 }
7254 }
7255
7256 inf_state = XCNEW (struct infcall_suspend_state);
7257
7258 if (siginfo_data)
7259 {
7260 inf_state->siginfo_gdbarch = gdbarch;
7261 inf_state->siginfo_data = siginfo_data;
7262 }
7263
7264 inf_state->thread_suspend = tp->suspend;
7265 #if 0 /* Currently unused and empty structures are not valid C. */
7266 inf_state->inferior_suspend = inf->suspend;
7267 #endif
7268
7269 /* run_inferior_call will not use the signal due to its `proceed' call with
7270 GDB_SIGNAL_0 anyway. */
7271 tp->suspend.stop_signal = GDB_SIGNAL_0;
7272
7273 inf_state->stop_pc = stop_pc;
7274
7275 inf_state->registers = regcache_dup (regcache);
7276
7277 return inf_state;
7278 }
7279
7280 /* Restore inferior session state to INF_STATE. */
7281
7282 void
7283 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7284 {
7285 struct thread_info *tp = inferior_thread ();
7286 #if 0
7287 struct inferior *inf = current_inferior ();
7288 #endif
7289 struct regcache *regcache = get_current_regcache ();
7290 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7291
7292 tp->suspend = inf_state->thread_suspend;
7293 #if 0 /* Currently unused and empty structures are not valid C. */
7294 inf->suspend = inf_state->inferior_suspend;
7295 #endif
7296
7297 stop_pc = inf_state->stop_pc;
7298
7299 if (inf_state->siginfo_gdbarch == gdbarch)
7300 {
7301 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7302
7303 /* Errors ignored. */
7304 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7305 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
7306 }
7307
7308 /* The inferior can be gone if the user types "print exit(0)"
7309 (and perhaps other times). */
7310 if (target_has_execution)
7311 /* NB: The register write goes through to the target. */
7312 regcache_cpy (regcache, inf_state->registers);
7313
7314 discard_infcall_suspend_state (inf_state);
7315 }
7316
7317 static void
7318 do_restore_infcall_suspend_state_cleanup (void *state)
7319 {
7320 restore_infcall_suspend_state (state);
7321 }
7322
7323 struct cleanup *
7324 make_cleanup_restore_infcall_suspend_state
7325 (struct infcall_suspend_state *inf_state)
7326 {
7327 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7328 }
7329
7330 void
7331 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7332 {
7333 regcache_xfree (inf_state->registers);
7334 xfree (inf_state->siginfo_data);
7335 xfree (inf_state);
7336 }
7337
7338 struct regcache *
7339 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7340 {
7341 return inf_state->registers;
7342 }
7343
7344 /* infcall_control_state contains state regarding gdb's control of the
7345 inferior itself like stepping control. It also contains session state like
7346 the user's currently selected frame. */
7347
7348 struct infcall_control_state
7349 {
7350 struct thread_control_state thread_control;
7351 struct inferior_control_state inferior_control;
7352
7353 /* Other fields: */
7354 enum stop_stack_kind stop_stack_dummy;
7355 int stopped_by_random_signal;
7356 int stop_after_trap;
7357
7358 /* ID of the selected frame when the inferior function call was made. */
7359 struct frame_id selected_frame_id;
7360 };
7361
7362 /* Save all of the information associated with the inferior<==>gdb
7363 connection. */
7364
7365 struct infcall_control_state *
7366 save_infcall_control_state (void)
7367 {
7368 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7369 struct thread_info *tp = inferior_thread ();
7370 struct inferior *inf = current_inferior ();
7371
7372 inf_status->thread_control = tp->control;
7373 inf_status->inferior_control = inf->control;
7374
7375 tp->control.step_resume_breakpoint = NULL;
7376 tp->control.exception_resume_breakpoint = NULL;
7377
7378 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7379 chain. If caller's caller is walking the chain, they'll be happier if we
7380 hand them back the original chain when restore_infcall_control_state is
7381 called. */
7382 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7383
7384 /* Other fields: */
7385 inf_status->stop_stack_dummy = stop_stack_dummy;
7386 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7387 inf_status->stop_after_trap = stop_after_trap;
7388
7389 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7390
7391 return inf_status;
7392 }
7393
7394 static int
7395 restore_selected_frame (void *args)
7396 {
7397 struct frame_id *fid = (struct frame_id *) args;
7398 struct frame_info *frame;
7399
7400 frame = frame_find_by_id (*fid);
7401
7402 /* If frame_find_by_id returns NULL, the frame identified by
7403 inf_status->selected_frame_id can no longer be found. */
7404 if (frame == NULL)
7405 {
7406 warning (_("Unable to restore previously selected frame."));
7407 return 0;
7408 }
7409
7410 select_frame (frame);
7411
7412 return (1);
7413 }
7414
7415 /* Restore inferior session state to INF_STATUS. */
7416
7417 void
7418 restore_infcall_control_state (struct infcall_control_state *inf_status)
7419 {
7420 struct thread_info *tp = inferior_thread ();
7421 struct inferior *inf = current_inferior ();
7422
7423 if (tp->control.step_resume_breakpoint)
7424 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7425
7426 if (tp->control.exception_resume_breakpoint)
7427 tp->control.exception_resume_breakpoint->disposition
7428 = disp_del_at_next_stop;
7429
7430 /* Handle the bpstat_copy of the chain. */
7431 bpstat_clear (&tp->control.stop_bpstat);
7432
7433 tp->control = inf_status->thread_control;
7434 inf->control = inf_status->inferior_control;
7435
7436 /* Other fields: */
7437 stop_stack_dummy = inf_status->stop_stack_dummy;
7438 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7439 stop_after_trap = inf_status->stop_after_trap;
7440
7441 if (target_has_stack)
7442 {
7443 /* The point of catch_errors is that if the stack is clobbered,
7444 walking the stack might encounter a garbage pointer and
7445 error() trying to dereference it. */
7446 if (catch_errors
7447 (restore_selected_frame, &inf_status->selected_frame_id,
7448 "Unable to restore previously selected frame:\n",
7449 RETURN_MASK_ERROR) == 0)
7450 /* Error in restoring the selected frame. Select the innermost
7451 frame. */
7452 select_frame (get_current_frame ());
7453 }
7454
7455 xfree (inf_status);
7456 }
7457
7458 static void
7459 do_restore_infcall_control_state_cleanup (void *sts)
7460 {
7461 restore_infcall_control_state (sts);
7462 }
7463
7464 struct cleanup *
7465 make_cleanup_restore_infcall_control_state
7466 (struct infcall_control_state *inf_status)
7467 {
7468 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7469 }
7470
7471 void
7472 discard_infcall_control_state (struct infcall_control_state *inf_status)
7473 {
7474 if (inf_status->thread_control.step_resume_breakpoint)
7475 inf_status->thread_control.step_resume_breakpoint->disposition
7476 = disp_del_at_next_stop;
7477
7478 if (inf_status->thread_control.exception_resume_breakpoint)
7479 inf_status->thread_control.exception_resume_breakpoint->disposition
7480 = disp_del_at_next_stop;
7481
7482 /* See save_infcall_control_state for info on stop_bpstat. */
7483 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7484
7485 xfree (inf_status);
7486 }
7487 \f
7488 /* restore_inferior_ptid() will be used by the cleanup machinery
7489 to restore the inferior_ptid value saved in a call to
7490 save_inferior_ptid(). */
7491
7492 static void
7493 restore_inferior_ptid (void *arg)
7494 {
7495 ptid_t *saved_ptid_ptr = arg;
7496
7497 inferior_ptid = *saved_ptid_ptr;
7498 xfree (arg);
7499 }
7500
7501 /* Save the value of inferior_ptid so that it may be restored by a
7502 later call to do_cleanups(). Returns the struct cleanup pointer
7503 needed for later doing the cleanup. */
7504
7505 struct cleanup *
7506 save_inferior_ptid (void)
7507 {
7508 ptid_t *saved_ptid_ptr;
7509
7510 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7511 *saved_ptid_ptr = inferior_ptid;
7512 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7513 }
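/* A minimal usage sketch, not compiled; OTHER_PTID is hypothetical.  */
#if 0
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = other_ptid;	/* temporarily operate on another thread */
  /* ... read registers or memory in that thread's context ... */
  do_cleanups (old_chain);	/* inferior_ptid is restored here */
}
#endif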
7514
7515 /* See infrun.h. */
7516
7517 void
7518 clear_exit_convenience_vars (void)
7519 {
7520 clear_internalvar (lookup_internalvar ("_exitsignal"));
7521 clear_internalvar (lookup_internalvar ("_exitcode"));
7522 }
7523 \f
7524
7525 /* User interface for reverse debugging:
7526 Set exec-direction / show exec-direction commands
7527 (error unless the target supports reverse execution). */
7528
7529 int execution_direction = EXEC_FORWARD;
7530 static const char exec_forward[] = "forward";
7531 static const char exec_reverse[] = "reverse";
7532 static const char *exec_direction = exec_forward;
7533 static const char *const exec_direction_names[] = {
7534 exec_forward,
7535 exec_reverse,
7536 NULL
7537 };
7538
7539 static void
7540 set_exec_direction_func (char *args, int from_tty,
7541 struct cmd_list_element *cmd)
7542 {
7543 if (target_can_execute_reverse)
7544 {
7545 if (!strcmp (exec_direction, exec_forward))
7546 execution_direction = EXEC_FORWARD;
7547 else if (!strcmp (exec_direction, exec_reverse))
7548 execution_direction = EXEC_REVERSE;
7549 }
7550 else
7551 {
7552 exec_direction = exec_forward;
7553 error (_("Target does not support this operation."));
7554 }
7555 }
7556
7557 static void
7558 show_exec_direction_func (struct ui_file *out, int from_tty,
7559 struct cmd_list_element *cmd, const char *value)
7560 {
7561 switch (execution_direction) {
7562 case EXEC_FORWARD:
7563 fprintf_filtered (out, _("Forward.\n"));
7564 break;
7565 case EXEC_REVERSE:
7566 fprintf_filtered (out, _("Reverse.\n"));
7567 break;
7568 default:
7569 internal_error (__FILE__, __LINE__,
7570 _("bogus execution_direction value: %d"),
7571 (int) execution_direction);
7572 }
7573 }
7574
7575 static void
7576 show_schedule_multiple (struct ui_file *file, int from_tty,
7577 struct cmd_list_element *c, const char *value)
7578 {
7579 fprintf_filtered (file, _("Resuming the execution of threads "
7580 "of all processes is %s.\n"), value);
7581 }
7582
7583 /* Implementation of `siginfo' variable. */
7584
7585 static const struct internalvar_funcs siginfo_funcs =
7586 {
7587 siginfo_make_value,
7588 NULL,
7589 NULL
7590 };
7591
7592 void
7593 _initialize_infrun (void)
7594 {
7595 int i;
7596 int numsigs;
7597 struct cmd_list_element *c;
7598
7599 add_info ("signals", signals_info, _("\
7600 What debugger does when program gets various signals.\n\
7601 Specify a signal as argument to print info on that signal only."));
7602 add_info_alias ("handle", "signals", 0);
7603
7604 c = add_com ("handle", class_run, handle_command, _("\
7605 Specify how to handle signals.\n\
7606 Usage: handle SIGNAL [ACTIONS]\n\
7607 Args are signals and actions to apply to those signals.\n\
7608 If no actions are specified, the current settings for the specified signals\n\
7609 will be displayed instead.\n\
7610 \n\
7611 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7612 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7613 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7614 The special arg \"all\" is recognized to mean all signals except those\n\
7615 used by the debugger, typically SIGTRAP and SIGINT.\n\
7616 \n\
7617 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7618 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7619 Stop means reenter debugger if this signal happens (implies print).\n\
7620 Print means print a message if this signal happens.\n\
7621 Pass means let program see this signal; otherwise program doesn't know.\n\
7622 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7623 Pass and Stop may be combined.\n\
7624 \n\
7625 Multiple signals may be specified. Signal numbers and signal names\n\
7626 may be interspersed with actions, with the actions being performed for\n\
7627 all signals cumulatively specified."));
7628 set_cmd_completer (c, handle_completer);
7629
7630 if (xdb_commands)
7631 {
7632 add_com ("lz", class_info, signals_info, _("\
7633 What debugger does when program gets various signals.\n\
7634 Specify a signal as argument to print info on that signal only."));
7635 add_com ("z", class_run, xdb_handle_command, _("\
7636 Specify how to handle a signal.\n\
7637 Args are signals and actions to apply to those signals.\n\
7638 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7639 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7640 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7641 The special arg \"all\" is recognized to mean all signals except those\n\
7642 used by the debugger, typically SIGTRAP and SIGINT.\n\
7643 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7644 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7645 nopass), \"Q\" (noprint)\n\
7646 Stop means reenter debugger if this signal happens (implies print).\n\
7647 Print means print a message if this signal happens.\n\
7648 Pass means let program see this signal; otherwise program doesn't know.\n\
7649 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7650 Pass and Stop may be combined."));
7651 }
7652
7653 if (!dbx_commands)
7654 stop_command = add_cmd ("stop", class_obscure,
7655 not_just_help_class_command, _("\
7656 There is no `stop' command, but you can set a hook on `stop'.\n\
7657 This allows you to set a list of commands to be run each time execution\n\
7658 of the program stops."), &cmdlist);
7659
7660 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7661 Set inferior debugging."), _("\
7662 Show inferior debugging."), _("\
7663 When non-zero, inferior specific debugging is enabled."),
7664 NULL,
7665 show_debug_infrun,
7666 &setdebuglist, &showdebuglist);
7667
7668 add_setshow_boolean_cmd ("displaced", class_maintenance,
7669 &debug_displaced, _("\
7670 Set displaced stepping debugging."), _("\
7671 Show displaced stepping debugging."), _("\
7672 When non-zero, displaced stepping specific debugging is enabled."),
7673 NULL,
7674 show_debug_displaced,
7675 &setdebuglist, &showdebuglist);
7676
7677 add_setshow_boolean_cmd ("non-stop", no_class,
7678 &non_stop_1, _("\
7679 Set whether gdb controls the inferior in non-stop mode."), _("\
7680 Show whether gdb controls the inferior in non-stop mode."), _("\
7681 When debugging a multi-threaded program and this setting is\n\
7682 off (the default, also called all-stop mode), when one thread stops\n\
7683 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7684 all other threads in the program while you interact with the thread of\n\
7685 interest. When you continue or step a thread, you can allow the other\n\
7686 threads to run, or have them remain stopped, but while you inspect any\n\
7687 thread's state, all threads stop.\n\
7688 \n\
7689 In non-stop mode, when one thread stops, other threads can continue\n\
7690 to run freely. You'll be able to step each thread independently,\n\
7691 leave it stopped or free to run as needed."),
7692 set_non_stop,
7693 show_non_stop,
7694 &setlist,
7695 &showlist);
7696
7697 numsigs = (int) GDB_SIGNAL_LAST;
7698 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7699 signal_print = (unsigned char *)
7700 xmalloc (sizeof (signal_print[0]) * numsigs);
7701 signal_program = (unsigned char *)
7702 xmalloc (sizeof (signal_program[0]) * numsigs);
7703 signal_catch = (unsigned char *)
7704 xmalloc (sizeof (signal_catch[0]) * numsigs);
7705 signal_pass = (unsigned char *)
7706 xmalloc (sizeof (signal_pass[0]) * numsigs);
7707 for (i = 0; i < numsigs; i++)
7708 {
7709 signal_stop[i] = 1;
7710 signal_print[i] = 1;
7711 signal_program[i] = 1;
7712 signal_catch[i] = 0;
7713 }
7714
7715 /* Signals caused by debugger's own actions
7716 should not be given to the program afterwards. */
7717 signal_program[GDB_SIGNAL_TRAP] = 0;
7718 signal_program[GDB_SIGNAL_INT] = 0;
7719
7720 /* Signals that are not errors should not normally enter the debugger. */
7721 signal_stop[GDB_SIGNAL_ALRM] = 0;
7722 signal_print[GDB_SIGNAL_ALRM] = 0;
7723 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7724 signal_print[GDB_SIGNAL_VTALRM] = 0;
7725 signal_stop[GDB_SIGNAL_PROF] = 0;
7726 signal_print[GDB_SIGNAL_PROF] = 0;
7727 signal_stop[GDB_SIGNAL_CHLD] = 0;
7728 signal_print[GDB_SIGNAL_CHLD] = 0;
7729 signal_stop[GDB_SIGNAL_IO] = 0;
7730 signal_print[GDB_SIGNAL_IO] = 0;
7731 signal_stop[GDB_SIGNAL_POLL] = 0;
7732 signal_print[GDB_SIGNAL_POLL] = 0;
7733 signal_stop[GDB_SIGNAL_URG] = 0;
7734 signal_print[GDB_SIGNAL_URG] = 0;
7735 signal_stop[GDB_SIGNAL_WINCH] = 0;
7736 signal_print[GDB_SIGNAL_WINCH] = 0;
7737 signal_stop[GDB_SIGNAL_PRIO] = 0;
7738 signal_print[GDB_SIGNAL_PRIO] = 0;
7739
7740 /* These signals are used internally by user-level thread
7741 implementations. (See signal(5) on Solaris.) Like the above
7742 signals, a healthy program receives and handles them as part of
7743 its normal operation. */
7744 signal_stop[GDB_SIGNAL_LWP] = 0;
7745 signal_print[GDB_SIGNAL_LWP] = 0;
7746 signal_stop[GDB_SIGNAL_WAITING] = 0;
7747 signal_print[GDB_SIGNAL_WAITING] = 0;
7748 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7749 signal_print[GDB_SIGNAL_CANCEL] = 0;
7750
7751 /* Update cached state. */
7752 signal_cache_update (-1);
7753
7754 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7755 &stop_on_solib_events, _("\
7756 Set stopping for shared library events."), _("\
7757 Show stopping for shared library events."), _("\
7758 If nonzero, gdb will give control to the user when the dynamic linker\n\
7759 notifies gdb of shared library events. The most common event of interest\n\
7760 to the user would be loading/unloading of a new library."),
7761 set_stop_on_solib_events,
7762 show_stop_on_solib_events,
7763 &setlist, &showlist);
7764
7765 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7766 follow_fork_mode_kind_names,
7767 &follow_fork_mode_string, _("\
7768 Set debugger response to a program call of fork or vfork."), _("\
7769 Show debugger response to a program call of fork or vfork."), _("\
7770 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7771 parent - the original process is debugged after a fork\n\
7772 child - the new process is debugged after a fork\n\
7773 The unfollowed process will continue to run.\n\
7774 By default, the debugger will follow the parent process."),
7775 NULL,
7776 show_follow_fork_mode_string,
7777 &setlist, &showlist);
7778
7779 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7780 follow_exec_mode_names,
7781 &follow_exec_mode_string, _("\
7782 Set debugger response to a program call of exec."), _("\
7783 Show debugger response to a program call of exec."), _("\
7784 An exec call replaces the program image of a process.\n\
7785 \n\
7786 follow-exec-mode can be:\n\
7787 \n\
7788 new - the debugger creates a new inferior and rebinds the process\n\
7789 to this new inferior. The program the process was running before\n\
7790 the exec call can be restarted afterwards by restarting the original\n\
7791 inferior.\n\
7792 \n\
7793 same - the debugger keeps the process bound to the same inferior.\n\
7794 The new executable image replaces the previous executable loaded in\n\
7795 the inferior. Restarting the inferior after the exec call restarts\n\
7796 the executable the process was running after the exec call.\n\
7797 \n\
7798 By default, the debugger will use the same inferior."),
7799 NULL,
7800 show_follow_exec_mode_string,
7801 &setlist, &showlist);
7802
7803 add_setshow_enum_cmd ("scheduler-locking", class_run,
7804 scheduler_enums, &scheduler_mode, _("\
7805 Set mode for locking scheduler during execution."), _("\
7806 Show mode for locking scheduler during execution."), _("\
7807 off == no locking (threads may preempt at any time)\n\
7808 on == full locking (no thread except the current thread may run)\n\
7809 step == scheduler locked during every single-step operation.\n\
7810 In this mode, no other thread may run during a step command.\n\
7811 Other threads may run while stepping over a function call ('next')."),
7812 set_schedlock_func, /* traps on target vector */
7813 show_scheduler_mode,
7814 &setlist, &showlist);
7815
7816 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7817 Set mode for resuming threads of all processes."), _("\
7818 Show mode for resuming threads of all processes."), _("\
7819 When on, execution commands (such as 'continue' or 'next') resume all\n\
7820 threads of all processes. When off (which is the default), execution\n\
7821 commands only resume the threads of the current process. The set of\n\
7822 threads that are resumed is further refined by the scheduler-locking\n\
7823 mode (see help set scheduler-locking)."),
7824 NULL,
7825 show_schedule_multiple,
7826 &setlist, &showlist);
7827
7828 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7829 Set mode of the step operation."), _("\
7830 Show mode of the step operation."), _("\
7831 When set, doing a step over a function without debug line information\n\
7832 will stop at the first instruction of that function. Otherwise, the\n\
7833 function is skipped and the step command stops at a different source line."),
7834 NULL,
7835 show_step_stop_if_no_debug,
7836 &setlist, &showlist);
7837
7838 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7839 &can_use_displaced_stepping, _("\
7840 Set debugger's willingness to use displaced stepping."), _("\
7841 Show debugger's willingness to use displaced stepping."), _("\
7842 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7843 supported by the target architecture. If off, gdb will not use displaced\n\
7844 stepping to step over breakpoints, even if such is supported by the target\n\
7845 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7846 if the target architecture supports it and non-stop mode is active, but will not\n\
7847 use it in all-stop mode (see help set non-stop)."),
7848 NULL,
7849 show_can_use_displaced_stepping,
7850 &setlist, &showlist);
7851
7852 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7853 &exec_direction, _("Set direction of execution.\n\
7854 Options are 'forward' or 'reverse'."),
7855 _("Show direction of execution (forward/reverse)."),
7856 _("Tells gdb whether to execute forward or backward."),
7857 set_exec_direction_func, show_exec_direction_func,
7858 &setlist, &showlist);
7859
7860 /* Set/show detach-on-fork: user-settable mode. */
7861
7862 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7863 Set whether gdb will detach the child of a fork."), _("\
7864 Show whether gdb will detach the child of a fork."), _("\
7865 Tells gdb whether to detach the child of a fork."),
7866 NULL, NULL, &setlist, &showlist);
7867
7868 /* Set/show disable address space randomization mode. */
7869
7870 add_setshow_boolean_cmd ("disable-randomization", class_support,
7871 &disable_randomization, _("\
7872 Set disabling of debuggee's virtual address space randomization."), _("\
7873 Show disabling of debuggee's virtual address space randomization."), _("\
7874 When this mode is on (which is the default), randomization of the virtual\n\
7875 address space is disabled. Standalone programs run with the randomization\n\
7876 enabled by default on some platforms."),
7877 &set_disable_randomization,
7878 &show_disable_randomization,
7879 &setlist, &showlist);
7880
7881 /* ptid initializations */
7882 inferior_ptid = null_ptid;
7883 target_last_wait_ptid = minus_one_ptid;
7884
7885 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7886 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7887 observer_attach_thread_exit (infrun_thread_thread_exit);
7888 observer_attach_inferior_exit (infrun_inferior_exit);
7889
7890 /* Explicitly create without lookup, since that tries to create a
7891 value with a void typed value, and when we get here, gdbarch
7892 isn't initialized yet. At this point, we're quite sure there
7893 isn't another convenience variable of the same name. */
7894 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7895
7896 add_setshow_boolean_cmd ("observer", no_class,
7897 &observer_mode_1, _("\
7898 Set whether gdb controls the inferior in observer mode."), _("\
7899 Show whether gdb controls the inferior in observer mode."), _("\
7900 In observer mode, GDB can get data from the inferior, but not\n\
7901 affect its execution. Registers and memory may not be changed,\n\
7902 breakpoints may not be set, and the program cannot be interrupted\n\
7903 or signalled."),
7904 set_observer_mode,
7905 show_observer_mode,
7906 &setlist,
7907 &showlist);
7908 }