1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2015 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105 over such functions. */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
296 #define SET_SIGS(nsigs,sigs,flags) \
297 do { \
298 int signum = (nsigs); \
299 while (signum-- > 0) \
300 if ((sigs)[signum]) \
301 (flags)[signum] = 1; \
302 } while (0)
303
304 #define UNSET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 0; \
310 } while (0)
311
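/* Illustrative sketch only -- the example_* function below is
   hypothetical and not part of this file.  It shows how the helper
   macros above are meant to be used: walk a caller-supplied table of
   selected signals and flip the corresponding entries in one of the
   global tables (which are allocated at initialization time).  */

static void
example_stop_on_sigint (void)
{
  int nsigs = (int) GDB_SIGNAL_LAST;
  unsigned char sigs[GDB_SIGNAL_LAST];

  memset (sigs, 0, sizeof (sigs));
  sigs[GDB_SIGNAL_INT] = 1;

  /* Stop on SIGINT, but don't announce it.  */
  SET_SIGS (nsigs, sigs, signal_stop);
  UNSET_SIGS (nsigs, sigs, signal_print);
}
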
312 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
313 this function is to avoid exporting `signal_program'. */
314
315 void
316 update_signals_program_target (void)
317 {
318 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
319 }
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Nonzero if we want to give control to the user when we're notified
330 of shared library events by the dynamic linker. */
331 int stop_on_solib_events;
332
333 /* Enable or disable optional shared library event breakpoints
334 as appropriate when the above flag is changed. */
335
336 static void
337 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
338 {
339 update_solib_breakpoints ();
340 }
341
342 static void
343 show_stop_on_solib_events (struct ui_file *file, int from_tty,
344 struct cmd_list_element *c, const char *value)
345 {
346 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
347 value);
348 }
349
350 /* Nonzero means we are expecting a trace trap
351 and should stop the inferior and return silently when it happens. */
352
353 int stop_after_trap;
354
355 /* Save register contents here when executing a "finish" command or when
356 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
357 Thus this contains the return value from the called function (assuming
358 values are returned in a register). */
359
360 struct regcache *stop_registers;
361
362 /* Nonzero after stop if current stack frame should be printed. */
363
364 static int stop_print_frame;
365
366 /* This is a cached copy of the pid/waitstatus of the last event
367 returned by target_wait()/deprecated_target_wait_hook(). This
368 information is returned by get_last_target_status(). */
369 static ptid_t target_last_wait_ptid;
370 static struct target_waitstatus target_last_waitstatus;
371
372 static void context_switch (ptid_t ptid);
373
374 void init_thread_stepping_state (struct thread_info *tss);
375
376 static const char follow_fork_mode_child[] = "child";
377 static const char follow_fork_mode_parent[] = "parent";
378
379 static const char *const follow_fork_mode_kind_names[] = {
380 follow_fork_mode_child,
381 follow_fork_mode_parent,
382 NULL
383 };
384
385 static const char *follow_fork_mode_string = follow_fork_mode_parent;
386 static void
387 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
388 struct cmd_list_element *c, const char *value)
389 {
390 fprintf_filtered (file,
391 _("Debugger response to a program "
392 "call of fork or vfork is \"%s\".\n"),
393 value);
394 }
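
/* Usage sketch, not code from this file: at the GDB prompt, the
   settings declared above correspond to the following (real) commands;
   the session shown is only illustrative.

     (gdb) set follow-fork-mode child    -- follow the forked child
     (gdb) set detach-on-fork off        -- keep debugging both branches
     (gdb) show follow-fork-mode

   "follow-fork-mode" selects which branch GDB stays attached to after
   a fork or vfork, and "detach-on-fork" controls the `detach_fork'
   flag defined earlier in this file.  */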
395 \f
396
397 /* Handle changes to the inferior list based on the type of fork,
398 which process is being followed, and whether the other process
399 should be detached. On entry inferior_ptid must be the ptid of
400 the fork parent. At return inferior_ptid is the ptid of the
401 followed inferior. */
402
403 static int
404 follow_fork_inferior (int follow_child, int detach_fork)
405 {
406 int has_vforked;
407 ptid_t parent_ptid, child_ptid;
408
409 has_vforked = (inferior_thread ()->pending_follow.kind
410 == TARGET_WAITKIND_VFORKED);
411 parent_ptid = inferior_ptid;
412 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
413
414 if (has_vforked
415 && !non_stop /* Non-stop always resumes both branches. */
416 && (!target_is_async_p () || sync_execution)
417 && !(follow_child || detach_fork || sched_multi))
418 {
419 /* The parent stays blocked inside the vfork syscall until the
420 child execs or exits. If we don't let the child run, then
421 the parent stays blocked. If we're telling the parent to run
422 in the foreground, the user will not be able to ctrl-c to get
423 back the terminal, effectively hanging the debug session. */
424 fprintf_filtered (gdb_stderr, _("\
425 Can not resume the parent process over vfork in the foreground while\n\
426 holding the child stopped. Try \"set detach-on-fork\" or \
427 \"set schedule-multiple\".\n"));
428 /* FIXME output string > 80 columns. */
429 return 1;
430 }
431
432 if (!follow_child)
433 {
434 /* Detach new forked process? */
435 if (detach_fork)
436 {
437 struct cleanup *old_chain;
438
439 /* Before detaching from the child, remove all breakpoints
440 from it. If we forked, then this has already been taken
441 care of by infrun.c. If we vforked however, any
442 breakpoint inserted in the parent is visible in the
443 child, even those added while stopped in a vfork
444 catchpoint. This will remove the breakpoints from the
445 parent also, but they'll be reinserted below. */
446 if (has_vforked)
447 {
448 /* Keep breakpoints list in sync. */
449 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
450 }
451
452 if (info_verbose || debug_infrun)
453 {
454 target_terminal_ours_for_output ();
455 fprintf_filtered (gdb_stdlog,
456 _("Detaching after %s from child %s.\n"),
457 has_vforked ? "vfork" : "fork",
458 target_pid_to_str (child_ptid));
459 }
460 }
461 else
462 {
463 struct inferior *parent_inf, *child_inf;
464 struct cleanup *old_chain;
465
466 /* Add process to GDB's tables. */
467 child_inf = add_inferior (ptid_get_pid (child_ptid));
468
469 parent_inf = current_inferior ();
470 child_inf->attach_flag = parent_inf->attach_flag;
471 copy_terminal_info (child_inf, parent_inf);
472 child_inf->gdbarch = parent_inf->gdbarch;
473 copy_inferior_target_desc_info (child_inf, parent_inf);
474
475 old_chain = save_inferior_ptid ();
476 save_current_program_space ();
477
478 inferior_ptid = child_ptid;
479 add_thread (inferior_ptid);
480 child_inf->symfile_flags = SYMFILE_NO_READ;
481
482 /* If this is a vfork child, then the address-space is
483 shared with the parent. */
484 if (has_vforked)
485 {
486 child_inf->pspace = parent_inf->pspace;
487 child_inf->aspace = parent_inf->aspace;
488
489 /* The parent will be frozen until the child is done
490 with the shared region. Keep track of the
491 parent. */
492 child_inf->vfork_parent = parent_inf;
493 child_inf->pending_detach = 0;
494 parent_inf->vfork_child = child_inf;
495 parent_inf->pending_detach = 0;
496 }
497 else
498 {
499 child_inf->aspace = new_address_space ();
500 child_inf->pspace = add_program_space (child_inf->aspace);
501 child_inf->removable = 1;
502 set_current_program_space (child_inf->pspace);
503 clone_program_space (child_inf->pspace, parent_inf->pspace);
504
505 /* Let the shared library layer (e.g., solib-svr4) learn
506 about this new process, relocate the cloned exec, pull
507 in shared libraries, and install the solib event
508 breakpoint. If a "cloned-VM" event was propagated
509 better throughout the core, this wouldn't be
510 required. */
511 solib_create_inferior_hook (0);
512 }
513
514 do_cleanups (old_chain);
515 }
516
517 if (has_vforked)
518 {
519 struct inferior *parent_inf;
520
521 parent_inf = current_inferior ();
522
523 /* If we detached from the child, then we have to be careful
524 to not insert breakpoints in the parent until the child
525 is done with the shared memory region. However, if we're
526 staying attached to the child, then we can and should
527 insert breakpoints, so that we can debug it. A
528 subsequent child exec or exit is enough to know when the
529 child stops using the parent's address space. */
530 parent_inf->waiting_for_vfork_done = detach_fork;
531 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
532 }
533 }
534 else
535 {
536 /* Follow the child. */
537 struct inferior *parent_inf, *child_inf;
538 struct program_space *parent_pspace;
539
540 if (info_verbose || debug_infrun)
541 {
542 target_terminal_ours_for_output ();
543 fprintf_filtered (gdb_stdlog,
544 _("Attaching after %s %s to child %s.\n"),
545 target_pid_to_str (parent_ptid),
546 has_vforked ? "vfork" : "fork",
547 target_pid_to_str (child_ptid));
548 }
549
550 /* Add the new inferior first, so that the target_detach below
551 doesn't unpush the target. */
552
553 child_inf = add_inferior (ptid_get_pid (child_ptid));
554
555 parent_inf = current_inferior ();
556 child_inf->attach_flag = parent_inf->attach_flag;
557 copy_terminal_info (child_inf, parent_inf);
558 child_inf->gdbarch = parent_inf->gdbarch;
559 copy_inferior_target_desc_info (child_inf, parent_inf);
560
561 parent_pspace = parent_inf->pspace;
562
563 /* If we're vforking, we want to hold on to the parent until the
564 child exits or execs. At child exec or exit time we can
565 remove the old breakpoints from the parent and detach or
566 resume debugging it. Otherwise, detach the parent now; we'll
567 want to reuse its program/address spaces, but we can't set
568 them to the child before removing breakpoints from the
569 parent, otherwise, the breakpoints module could decide to
570 remove breakpoints from the wrong process (since they'd be
571 assigned to the same address space). */
572
573 if (has_vforked)
574 {
575 gdb_assert (child_inf->vfork_parent == NULL);
576 gdb_assert (parent_inf->vfork_child == NULL);
577 child_inf->vfork_parent = parent_inf;
578 child_inf->pending_detach = 0;
579 parent_inf->vfork_child = child_inf;
580 parent_inf->pending_detach = detach_fork;
581 parent_inf->waiting_for_vfork_done = 0;
582 }
583 else if (detach_fork)
584 {
585 if (info_verbose || debug_infrun)
586 {
587 target_terminal_ours_for_output ();
588 fprintf_filtered (gdb_stdlog,
589 _("Detaching after fork from "
590 "child %s.\n"),
591 target_pid_to_str (child_ptid));
592 }
593
594 target_detach (NULL, 0);
595 }
596
597 /* Note that the detach above makes PARENT_INF dangling. */
598
599 /* Add the child thread to the appropriate lists, and switch to
600 this new thread, before cloning the program space, and
601 informing the solib layer about this new process. */
602
603 inferior_ptid = child_ptid;
604 add_thread (inferior_ptid);
605
606 /* If this is a vfork child, then the address-space is shared
607 with the parent. If we detached from the parent, then we can
608 reuse the parent's program/address spaces. */
609 if (has_vforked || detach_fork)
610 {
611 child_inf->pspace = parent_pspace;
612 child_inf->aspace = child_inf->pspace->aspace;
613 }
614 else
615 {
616 child_inf->aspace = new_address_space ();
617 child_inf->pspace = add_program_space (child_inf->aspace);
618 child_inf->removable = 1;
619 child_inf->symfile_flags = SYMFILE_NO_READ;
620 set_current_program_space (child_inf->pspace);
621 clone_program_space (child_inf->pspace, parent_pspace);
622
623 /* Let the shared library layer (e.g., solib-svr4) learn
624 about this new process, relocate the cloned exec, pull in
625 shared libraries, and install the solib event breakpoint.
626 If a "cloned-VM" event was propagated better throughout
627 the core, this wouldn't be required. */
628 solib_create_inferior_hook (0);
629 }
630 }
631
632 return target_follow_fork (follow_child, detach_fork);
633 }
634
635 /* Tell the target to follow the fork we're stopped at. Returns true
636 if the inferior should be resumed; false, if the target for some
637 reason decided it's best not to resume. */
638
639 static int
640 follow_fork (void)
641 {
642 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
643 int should_resume = 1;
644 struct thread_info *tp;
645
646 /* Copy user stepping state to the new inferior thread. FIXME: the
647 followed fork child thread should have a copy of most of the
648 parent thread structure's run control related fields, not just these.
649 Initialized to avoid "may be used uninitialized" warnings from gcc. */
650 struct breakpoint *step_resume_breakpoint = NULL;
651 struct breakpoint *exception_resume_breakpoint = NULL;
652 CORE_ADDR step_range_start = 0;
653 CORE_ADDR step_range_end = 0;
654 struct frame_id step_frame_id = { 0 };
655 struct interp *command_interp = NULL;
656
657 if (!non_stop)
658 {
659 ptid_t wait_ptid;
660 struct target_waitstatus wait_status;
661
662 /* Get the last target status returned by target_wait(). */
663 get_last_target_status (&wait_ptid, &wait_status);
664
665 /* If not stopped at a fork event, then there's nothing else to
666 do. */
667 if (wait_status.kind != TARGET_WAITKIND_FORKED
668 && wait_status.kind != TARGET_WAITKIND_VFORKED)
669 return 1;
670
671 /* Check if we switched over from WAIT_PTID, since the event was
672 reported. */
673 if (!ptid_equal (wait_ptid, minus_one_ptid)
674 && !ptid_equal (inferior_ptid, wait_ptid))
675 {
676 /* We did. Switch back to WAIT_PTID thread, to tell the
677 target to follow it (in either direction). We'll
678 afterwards refuse to resume, and inform the user what
679 happened. */
680 switch_to_thread (wait_ptid);
681 should_resume = 0;
682 }
683 }
684
685 tp = inferior_thread ();
686
687 /* If there were any forks/vforks that were caught and are now to be
688 followed, then do so now. */
689 switch (tp->pending_follow.kind)
690 {
691 case TARGET_WAITKIND_FORKED:
692 case TARGET_WAITKIND_VFORKED:
693 {
694 ptid_t parent, child;
695
696 /* If the user did a next/step, etc, over a fork call,
697 preserve the stepping state in the fork child. */
698 if (follow_child && should_resume)
699 {
700 step_resume_breakpoint = clone_momentary_breakpoint
701 (tp->control.step_resume_breakpoint);
702 step_range_start = tp->control.step_range_start;
703 step_range_end = tp->control.step_range_end;
704 step_frame_id = tp->control.step_frame_id;
705 exception_resume_breakpoint
706 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
707 command_interp = tp->control.command_interp;
708
709 /* For now, delete the parent's sr breakpoint, otherwise,
710 parent/child sr breakpoints are considered duplicates,
711 and the child version will not be installed. Remove
712 this when the breakpoints module becomes aware of
713 inferiors and address spaces. */
714 delete_step_resume_breakpoint (tp);
715 tp->control.step_range_start = 0;
716 tp->control.step_range_end = 0;
717 tp->control.step_frame_id = null_frame_id;
718 delete_exception_resume_breakpoint (tp);
719 tp->control.command_interp = NULL;
720 }
721
722 parent = inferior_ptid;
723 child = tp->pending_follow.value.related_pid;
724
725 /* Set up inferior(s) as specified by the caller, and tell the
726 target to do whatever is necessary to follow either parent
727 or child. */
728 if (follow_fork_inferior (follow_child, detach_fork))
729 {
730 /* Target refused to follow, or there's some other reason
731 we shouldn't resume. */
732 should_resume = 0;
733 }
734 else
735 {
736 /* This pending follow fork event is now handled, one way
737 or another. The previously selected thread may be gone
738 from the lists by now, but if it is still around, we need
739 to clear the pending follow request. */
740 tp = find_thread_ptid (parent);
741 if (tp)
742 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
743
744 /* This makes sure we don't try to apply the "Switched
745 over from WAIT_PID" logic above. */
746 nullify_last_target_wait_ptid ();
747
748 /* If we followed the child, switch to it... */
749 if (follow_child)
750 {
751 switch_to_thread (child);
752
753 /* ... and preserve the stepping state, in case the
754 user was stepping over the fork call. */
755 if (should_resume)
756 {
757 tp = inferior_thread ();
758 tp->control.step_resume_breakpoint
759 = step_resume_breakpoint;
760 tp->control.step_range_start = step_range_start;
761 tp->control.step_range_end = step_range_end;
762 tp->control.step_frame_id = step_frame_id;
763 tp->control.exception_resume_breakpoint
764 = exception_resume_breakpoint;
765 tp->control.command_interp = command_interp;
766 }
767 else
768 {
769 /* If we get here, it was because we're trying to
770 resume from a fork catchpoint, but, the user
771 has switched threads away from the thread that
772 forked. In that case, the resume command
773 issued is most likely not applicable to the
774 child, so just warn, and refuse to resume. */
775 warning (_("Not resuming: switched threads "
776 "before following fork child.\n"));
777 }
778
779 /* Reset breakpoints in the child as appropriate. */
780 follow_inferior_reset_breakpoints ();
781 }
782 else
783 switch_to_thread (parent);
784 }
785 }
786 break;
787 case TARGET_WAITKIND_SPURIOUS:
788 /* Nothing to follow. */
789 break;
790 default:
791 internal_error (__FILE__, __LINE__,
792 "Unexpected pending_follow.kind %d\n",
793 tp->pending_follow.kind);
794 break;
795 }
796
797 return should_resume;
798 }
799
800 static void
801 follow_inferior_reset_breakpoints (void)
802 {
803 struct thread_info *tp = inferior_thread ();
804
805 /* Was there a step_resume breakpoint? (There was if the user
806 did a "next" at the fork() call.) If so, explicitly reset its
807 thread number. Cloned step_resume breakpoints are disabled on
808 creation, so enable it here now that it is associated with the
809 correct thread.
810
811 step_resumes are a form of bp that are made to be per-thread.
812 Since we created the step_resume bp when the parent process
813 was being debugged, and now are switching to the child process,
814 from the breakpoint package's viewpoint, that's a switch of
815 "threads". We must update the bp's notion of which thread
816 it is for, or it'll be ignored when it triggers. */
817
818 if (tp->control.step_resume_breakpoint)
819 {
820 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
821 tp->control.step_resume_breakpoint->loc->enabled = 1;
822 }
823
824 /* Treat exception_resume breakpoints like step_resume breakpoints. */
825 if (tp->control.exception_resume_breakpoint)
826 {
827 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
828 tp->control.exception_resume_breakpoint->loc->enabled = 1;
829 }
830
831 /* Reinsert all breakpoints in the child. The user may have set
832 breakpoints after catching the fork, in which case those
833 were never set in the child, but only in the parent. This makes
834 sure the inserted breakpoints match the breakpoint list. */
835
836 breakpoint_re_set ();
837 insert_breakpoints ();
838 }
839
840 /* The child has exited or execed: resume threads of the parent the
841 user wanted to be executing. */
842
843 static int
844 proceed_after_vfork_done (struct thread_info *thread,
845 void *arg)
846 {
847 int pid = * (int *) arg;
848
849 if (ptid_get_pid (thread->ptid) == pid
850 && is_running (thread->ptid)
851 && !is_executing (thread->ptid)
852 && !thread->stop_requested
853 && thread->suspend.stop_signal == GDB_SIGNAL_0)
854 {
855 if (debug_infrun)
856 fprintf_unfiltered (gdb_stdlog,
857 "infrun: resuming vfork parent thread %s\n",
858 target_pid_to_str (thread->ptid));
859
860 switch_to_thread (thread->ptid);
861 clear_proceed_status (0);
862 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
863 }
864
865 return 0;
866 }
867
868 /* Called whenever we notice an exec or exit event, to handle
869 detaching or resuming a vfork parent. */
870
871 static void
872 handle_vfork_child_exec_or_exit (int exec)
873 {
874 struct inferior *inf = current_inferior ();
875
876 if (inf->vfork_parent)
877 {
878 int resume_parent = -1;
879
880 /* This exec or exit marks the end of the shared memory region
881 between the parent and the child. If the user wanted to
882 detach from the parent, now is the time. */
883
884 if (inf->vfork_parent->pending_detach)
885 {
886 struct thread_info *tp;
887 struct cleanup *old_chain;
888 struct program_space *pspace;
889 struct address_space *aspace;
890
891 /* follow-fork child, detach-on-fork on. */
892
893 inf->vfork_parent->pending_detach = 0;
894
895 if (!exec)
896 {
897 /* If we're handling a child exit, then inferior_ptid
898 points at the inferior's pid, not to a thread. */
899 old_chain = save_inferior_ptid ();
900 save_current_program_space ();
901 save_current_inferior ();
902 }
903 else
904 old_chain = save_current_space_and_thread ();
905
906 /* We're letting go of the parent. */
907 tp = any_live_thread_of_process (inf->vfork_parent->pid);
908 switch_to_thread (tp->ptid);
909
910 /* We're about to detach from the parent, which implicitly
911 removes breakpoints from its address space. There's a
912 catch here: we want to reuse the spaces for the child,
913 but, parent/child are still sharing the pspace at this
914 point, although the exec in reality makes the kernel give
915 the child a fresh set of new pages. The problem here is
916 that the breakpoints module, being unaware of this, would
917 likely choose the child process to write to the parent
918 address space. Swapping the child temporarily away from
919 the spaces has the desired effect. Yes, this is "sort
920 of" a hack. */
921
922 pspace = inf->pspace;
923 aspace = inf->aspace;
924 inf->aspace = NULL;
925 inf->pspace = NULL;
926
927 if (debug_infrun || info_verbose)
928 {
929 target_terminal_ours_for_output ();
930
931 if (exec)
932 {
933 fprintf_filtered (gdb_stdlog,
934 _("Detaching vfork parent process "
935 "%d after child exec.\n"),
936 inf->vfork_parent->pid);
937 }
938 else
939 {
940 fprintf_filtered (gdb_stdlog,
941 _("Detaching vfork parent process "
942 "%d after child exit.\n"),
943 inf->vfork_parent->pid);
944 }
945 }
946
947 target_detach (NULL, 0);
948
949 /* Put it back. */
950 inf->pspace = pspace;
951 inf->aspace = aspace;
952
953 do_cleanups (old_chain);
954 }
955 else if (exec)
956 {
957 /* We're staying attached to the parent, so, really give the
958 child a new address space. */
959 inf->pspace = add_program_space (maybe_new_address_space ());
960 inf->aspace = inf->pspace->aspace;
961 inf->removable = 1;
962 set_current_program_space (inf->pspace);
963
964 resume_parent = inf->vfork_parent->pid;
965
966 /* Break the bonds. */
967 inf->vfork_parent->vfork_child = NULL;
968 }
969 else
970 {
971 struct cleanup *old_chain;
972 struct program_space *pspace;
973
974 /* If this is a vfork child exiting, then the pspace and
975 aspaces were shared with the parent. Since we're
976 reporting the process exit, we'll be mourning all that is
977 found in the address space, and switching to null_ptid,
978 preparing to start a new inferior. But, since we don't
979 want to clobber the parent's address/program spaces, we
980 go ahead and create a new one for this exiting
981 inferior. */
982
983 /* Switch to null_ptid, so that clone_program_space doesn't want
984 to read the selected frame of a dead process. */
985 old_chain = save_inferior_ptid ();
986 inferior_ptid = null_ptid;
987
988 /* This inferior is dead, so avoid giving the breakpoints
989 module the option to write through to it (cloning a
990 program space resets breakpoints). */
991 inf->aspace = NULL;
992 inf->pspace = NULL;
993 pspace = add_program_space (maybe_new_address_space ());
994 set_current_program_space (pspace);
995 inf->removable = 1;
996 inf->symfile_flags = SYMFILE_NO_READ;
997 clone_program_space (pspace, inf->vfork_parent->pspace);
998 inf->pspace = pspace;
999 inf->aspace = pspace->aspace;
1000
1001 /* Put back inferior_ptid. We'll continue mourning this
1002 inferior. */
1003 do_cleanups (old_chain);
1004
1005 resume_parent = inf->vfork_parent->pid;
1006 /* Break the bonds. */
1007 inf->vfork_parent->vfork_child = NULL;
1008 }
1009
1010 inf->vfork_parent = NULL;
1011
1012 gdb_assert (current_program_space == inf->pspace);
1013
1014 if (non_stop && resume_parent != -1)
1015 {
1016 /* If the user wanted the parent to be running, let it go
1017 free now. */
1018 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1019
1020 if (debug_infrun)
1021 fprintf_unfiltered (gdb_stdlog,
1022 "infrun: resuming vfork parent process %d\n",
1023 resume_parent);
1024
1025 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1026
1027 do_cleanups (old_chain);
1028 }
1029 }
1030 }
1031
1032 /* Enum strings for "set|show follow-exec-mode". */
1033
1034 static const char follow_exec_mode_new[] = "new";
1035 static const char follow_exec_mode_same[] = "same";
1036 static const char *const follow_exec_mode_names[] =
1037 {
1038 follow_exec_mode_new,
1039 follow_exec_mode_same,
1040 NULL,
1041 };
1042
1043 static const char *follow_exec_mode_string = follow_exec_mode_same;
1044 static void
1045 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1046 struct cmd_list_element *c, const char *value)
1047 {
1048 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1049 }
1050
1051 /* EXECD_PATHNAME is assumed to be non-NULL. */
1052
1053 static void
1054 follow_exec (ptid_t ptid, char *execd_pathname)
1055 {
1056 struct thread_info *th, *tmp;
1057 struct inferior *inf = current_inferior ();
1058 int pid = ptid_get_pid (ptid);
1059
1060 /* This is an exec event that we actually wish to pay attention to.
1061 Refresh our symbol table to the newly exec'd program, remove any
1062 momentary bp's, etc.
1063
1064 If there are breakpoints, they aren't really inserted now,
1065 since the exec() transformed our inferior into a fresh set
1066 of instructions.
1067
1068 We want to preserve symbolic breakpoints on the list, since
1069 we have hopes that they can be reset after the new a.out's
1070 symbol table is read.
1071
1072 However, any "raw" breakpoints must be removed from the list
1073 (e.g., the solib bp's), since their address is probably invalid
1074 now.
1075
1076 And, we DON'T want to call delete_breakpoints() here, since
1077 that may write the bp's "shadow contents" (the instruction
1078 value that was overwritten with a TRAP instruction). Since
1079 we now have a new a.out, those shadow contents aren't valid. */
1080
1081 mark_breakpoints_out ();
1082
1083 /* The target reports the exec event to the main thread, even if
1084 some other thread does the exec, and even if the main thread was
1085 stopped or already gone. We may still have non-leader threads of
1086 the process on our list. E.g., on targets that don't have thread
1087 exit events (like remote); or on native Linux in non-stop mode if
1088 there were only two threads in the inferior and the non-leader
1089 one is the one that execs (and nothing forces an update of the
1090 thread list up to here). When debugging remotely, it's best to
1091 avoid extra traffic, when possible, so avoid syncing the thread
1092 list with the target, and instead go ahead and delete all threads
1093 of the process but the one that reported the event. Note this must
1094 be done before calling update_breakpoints_after_exec, as
1095 otherwise clearing the threads' resources would reference stale
1096 thread breakpoints -- it may have been one of these threads that
1097 stepped across the exec. We could just clear their stepping
1098 states, but as long as we're iterating, might as well delete
1099 them. Deleting them now rather than at the next user-visible
1100 stop provides a nicer sequence of events for user and MI
1101 notifications. */
1102 ALL_THREADS_SAFE (th, tmp)
1103 if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
1104 delete_thread (th->ptid);
1105
1106 /* We also need to clear any left over stale state for the
1107 leader/event thread. E.g., if there was any step-resume
1108 breakpoint or similar, it's gone now. We cannot truly
1109 step-to-next statement through an exec(). */
1110 th = inferior_thread ();
1111 th->control.step_resume_breakpoint = NULL;
1112 th->control.exception_resume_breakpoint = NULL;
1113 th->control.single_step_breakpoints = NULL;
1114 th->control.step_range_start = 0;
1115 th->control.step_range_end = 0;
1116
1117 /* The user may have had the main thread held stopped in the
1118 previous image (e.g., schedlock on, or non-stop). Release
1119 it now. */
1120 th->stop_requested = 0;
1121
1122 update_breakpoints_after_exec ();
1123
1124 /* What is this a.out's name? */
1125 printf_unfiltered (_("%s is executing new program: %s\n"),
1126 target_pid_to_str (inferior_ptid),
1127 execd_pathname);
1128
1129 /* We've followed the inferior through an exec. Therefore, the
1130 inferior has essentially been killed & reborn. */
1131
1132 gdb_flush (gdb_stdout);
1133
1134 breakpoint_init_inferior (inf_execd);
1135
1136 if (gdb_sysroot && *gdb_sysroot)
1137 {
1138 char *name = alloca (strlen (gdb_sysroot)
1139 + strlen (execd_pathname)
1140 + 1);
1141
1142 strcpy (name, gdb_sysroot);
1143 strcat (name, execd_pathname);
1144 execd_pathname = name;
1145 }
1146
1147 /* Reset the shared library package. This ensures that we get a
1148 shlib event when the child reaches "_start", at which point the
1149 dld will have had a chance to initialize the child. */
1150 /* Also, loading a symbol file below may trigger symbol lookups, and
1151 we don't want those to be satisfied by the libraries of the
1152 previous incarnation of this process. */
1153 no_shared_libraries (NULL, 0);
1154
1155 if (follow_exec_mode_string == follow_exec_mode_new)
1156 {
1157 struct program_space *pspace;
1158
1159 /* The user wants to keep the old inferior and program spaces
1160 around. Create a new fresh one, and switch to it. */
1161
1162 inf = add_inferior (current_inferior ()->pid);
1163 pspace = add_program_space (maybe_new_address_space ());
1164 inf->pspace = pspace;
1165 inf->aspace = pspace->aspace;
1166
1167 exit_inferior_num_silent (current_inferior ()->num);
1168
1169 set_current_inferior (inf);
1170 set_current_program_space (pspace);
1171 }
1172 else
1173 {
1174 /* The old description may no longer be fit for the new image.
1175 E.g., a 64-bit process exec'ed a 32-bit process. Clear the
1176 old description; we'll read a new one below. No need to do
1177 this on "follow-exec-mode new", as the old inferior stays
1178 around (its description is later cleared/refetched on
1179 restart). */
1180 target_clear_description ();
1181 }
1182
1183 gdb_assert (current_program_space == inf->pspace);
1184
1185 /* That a.out is now the one to use. */
1186 exec_file_attach (execd_pathname, 0);
1187
1188 /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a PIE
1189 (Position Independent Executable) main symbol file will only get applied by
1190 solib_create_inferior_hook below. breakpoint_re_set would otherwise fail to
1191 insert the breakpoints with the zero displacement. */
1192
1193 symbol_file_add (execd_pathname,
1194 (inf->symfile_flags
1195 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
1196 NULL, 0);
1197
1198 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
1199 set_initial_language ();
1200
1201 /* If the target can specify a description, read it. Must do this
1202 after flipping to the new executable (because the target supplied
1203 description must be compatible with the executable's
1204 architecture, and the old executable may e.g., be 32-bit, while
1205 the new one 64-bit), and before anything involving memory or
1206 registers. */
1207 target_find_description ();
1208
1209 solib_create_inferior_hook (0);
1210
1211 jit_inferior_created_hook ();
1212
1213 breakpoint_re_set ();
1214
1215 /* Reinsert all breakpoints. (Those which were symbolic have
1216 been reset to the proper address in the new a.out, thanks
1217 to symbol_file_command...). */
1218 insert_breakpoints ();
1219
1220 /* The next resume of this inferior should bring it to the shlib
1221 startup breakpoints. (If the user had also set bp's on
1222 "main" from the old (parent) process, then they'll auto-
1223 matically get reset there in the new process.). */
1224 }
1225
1226 /* Info about an instruction that is being stepped over. */
1227
1228 struct step_over_info
1229 {
1230 /* If we're stepping past a breakpoint, this is the address space
1231 and address of the instruction the breakpoint is set at. We'll
1232 skip inserting all breakpoints here. Valid iff ASPACE is
1233 non-NULL. */
1234 struct address_space *aspace;
1235 CORE_ADDR address;
1236
1237 /* The instruction being stepped over triggers a nonsteppable
1238 watchpoint. If true, we'll skip inserting watchpoints. */
1239 int nonsteppable_watchpoint_p;
1240 };
1241
1242 /* The step-over info of the location that is being stepped over.
1243
1244 Note that with async/breakpoint always-inserted mode, a user might
1245 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1246 being stepped over. As setting a new breakpoint inserts all
1247 breakpoints, we need to make sure the breakpoint being stepped over
1248 isn't inserted then. We do that by only clearing the step-over
1249 info when the step-over is actually finished (or aborted).
1250
1251 Presently GDB can only step over one breakpoint at any given time.
1252 Given that threads which can't run code in the same address space as the
1253 breakpoint can't really miss the breakpoint, GDB could be taught
1254 to step-over at most one breakpoint per address space (so this info
1255 could move to the address space object if/when GDB is extended).
1256 The set of breakpoints being stepped over will normally be much
1257 smaller than the set of all breakpoints, so a flag in the
1258 breakpoint location structure would be wasteful. A separate list
1259 also saves complexity and run-time, as otherwise we'd have to go
1260 through all breakpoint locations clearing their flag whenever we
1261 start a new sequence. Similar considerations weigh against storing
1262 this info in the thread object. Plus, not all step overs actually
1263 have breakpoint locations -- e.g., stepping past a single-step
1264 breakpoint, or stepping to complete a non-continuable
1265 watchpoint. */
1266 static struct step_over_info step_over_info;
1267
1268 /* Record the address of the breakpoint/instruction we're currently
1269 stepping over. */
1270
1271 static void
1272 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1273 int nonsteppable_watchpoint_p)
1274 {
1275 step_over_info.aspace = aspace;
1276 step_over_info.address = address;
1277 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1278 }
1279
1280 /* Called when we're no longer stepping over a breakpoint / an
1281 instruction, so all breakpoints are free to be (re)inserted. */
1282
1283 static void
1284 clear_step_over_info (void)
1285 {
1286 step_over_info.aspace = NULL;
1287 step_over_info.address = 0;
1288 step_over_info.nonsteppable_watchpoint_p = 0;
1289 }
1290
1291 /* See infrun.h. */
1292
1293 int
1294 stepping_past_instruction_at (struct address_space *aspace,
1295 CORE_ADDR address)
1296 {
1297 return (step_over_info.aspace != NULL
1298 && breakpoint_address_match (aspace, address,
1299 step_over_info.aspace,
1300 step_over_info.address));
1301 }
1302
1303 /* See infrun.h. */
1304
1305 int
1306 stepping_past_nonsteppable_watchpoint (void)
1307 {
1308 return step_over_info.nonsteppable_watchpoint_p;
1309 }
1310
1311 /* Returns true if step-over info is valid. */
1312
1313 static int
1314 step_over_info_valid_p (void)
1315 {
1316 return (step_over_info.aspace != NULL
1317 || stepping_past_nonsteppable_watchpoint ());
1318 }
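
/* Illustrative sketch only -- the example_* function below is
   hypothetical and not part of GDB.  It shows roughly how a breakpoint
   insertion path is expected to consult the step-over info above:
   refuse to (re)insert the very location currently being stepped past.
   The real check of this kind lives in breakpoint.c.  */

static int
example_should_insert_location (struct bp_location *bl)
{
  /* Skip the breakpoint being stepped over; it is free to be
     reinserted once clear_step_over_info has been called.  */
  if (stepping_past_instruction_at (bl->pspace->aspace, bl->address))
    return 0;

  return 1;
}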
1319
1320 \f
1321 /* Displaced stepping. */
1322
1323 /* In non-stop debugging mode, we must take special care to manage
1324 breakpoints properly; in particular, the traditional strategy for
1325 stepping a thread past a breakpoint it has hit is unsuitable.
1326 'Displaced stepping' is a tactic for stepping one thread past a
1327 breakpoint it has hit while ensuring that other threads running
1328 concurrently will hit the breakpoint as they should.
1329
1330 The traditional way to step a thread T off a breakpoint in a
1331 multi-threaded program in all-stop mode is as follows:
1332
1333 a0) Initially, all threads are stopped, and breakpoints are not
1334 inserted.
1335 a1) We single-step T, leaving breakpoints uninserted.
1336 a2) We insert breakpoints, and resume all threads.
1337
1338 In non-stop debugging, however, this strategy is unsuitable: we
1339 don't want to have to stop all threads in the system in order to
1340 continue or step T past a breakpoint. Instead, we use displaced
1341 stepping:
1342
1343 n0) Initially, T is stopped, other threads are running, and
1344 breakpoints are inserted.
1345 n1) We copy the instruction "under" the breakpoint to a separate
1346 location, outside the main code stream, making any adjustments
1347 to the instruction, register, and memory state as directed by
1348 T's architecture.
1349 n2) We single-step T over the instruction at its new location.
1350 n3) We adjust the resulting register and memory state as directed
1351 by T's architecture. This includes resetting T's PC to point
1352 back into the main instruction stream.
1353 n4) We resume T.
1354
1355 This approach depends on the following gdbarch methods:
1356
1357 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1358 indicate where to copy the instruction, and how much space must
1359 be reserved there. We use these in step n1.
1360
1361 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1362 address, and makes any necessary adjustments to the instruction,
1363 register contents, and memory. We use this in step n1.
1364
1365 - gdbarch_displaced_step_fixup adjusts registers and memory after
1366 we have successfully single-stepped the instruction, to yield the
1367 same effect the instruction would have had if we had executed it
1368 at its original address. We use this in step n3.
1369
1370 - gdbarch_displaced_step_free_closure provides cleanup.
1371
1372 The gdbarch_displaced_step_copy_insn and
1373 gdbarch_displaced_step_fixup functions must be written so that
1374 copying an instruction with gdbarch_displaced_step_copy_insn,
1375 single-stepping across the copied instruction, and then applying
1376 gdbarch_displaced_step_fixup should have the same effects on the
1377 thread's memory and registers as stepping the instruction in place
1378 would have. Exactly which responsibilities fall to the copy and
1379 which fall to the fixup is up to the author of those functions.
1380
1381 See the comments in gdbarch.sh for details.
1382
1383 Note that displaced stepping and software single-step cannot
1384 currently be used in combination, although with some care I think
1385 they could be made to. Software single-step works by placing
1386 breakpoints on all possible subsequent instructions; if the
1387 displaced instruction is a PC-relative jump, those breakpoints
1388 could fall in very strange places --- on pages that aren't
1389 executable, or at addresses that are not proper instruction
1390 boundaries. (We do generally let other threads run while we wait
1391 to hit the software single-step breakpoint, and they might
1392 encounter such a corrupted instruction.) One way to work around
1393 this would be to have gdbarch_displaced_step_copy_insn fully
1394 simulate the effect of PC-relative instructions (and return NULL)
1395 on architectures that use software single-stepping.
1396
1397 In non-stop mode, we can have independent and simultaneous step
1398 requests, so more than one thread may need to simultaneously step
1399 over a breakpoint. The current implementation assumes there is
1400 only one scratch space per process. In this case, we have to
1401 serialize access to the scratch space. If thread A wants to step
1402 over a breakpoint, but we are currently waiting for some other
1403 thread to complete a displaced step, we leave thread A stopped and
1404 place it in the displaced_step_request_queue. Whenever a displaced
1405 step finishes, we pick the next thread in the queue and start a new
1406 displaced step operation on it. See displaced_step_prepare and
1407 displaced_step_fixup for details. */
1408
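/* Illustrative sketch only -- the example_* functions below are
   hypothetical and not part of this file.  They show the minimal shape
   of the per-architecture hooks named above, assuming the instruction
   can be copied to the scratch area verbatim and only the PC needs
   adjusting afterwards.  Real implementations (e.g., in amd64-tdep.c
   or rs6000-tdep.c) must also rewrite and later fix up PC-relative
   instructions.  */

static struct displaced_step_closure *
example_displaced_step_copy_insn (struct gdbarch *gdbarch,
                                  CORE_ADDR from, CORE_ADDR to,
                                  struct regcache *regs)
{
  size_t len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *buf = xmalloc (len);

  /* Step n1: copy the original instruction into the scratch area.  */
  read_memory (from, buf, len);
  write_memory (to, buf, len);

  /* Whatever we return here is handed back to the fixup and
     free_closure hooks later on.  */
  return (struct displaced_step_closure *) buf;
}

static void
example_displaced_step_fixup (struct gdbarch *gdbarch,
                              struct displaced_step_closure *closure,
                              CORE_ADDR from, CORE_ADDR to,
                              struct regcache *regs)
{
  /* Step n3: the single-step left the PC just past the copy; relocate
     it so it points just past the original instruction instead.  The
     closure itself is released later by the free_closure hook.  */
  CORE_ADDR pc = regcache_read_pc (regs);

  regcache_write_pc (regs, from + (pc - to));
}
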
1409 struct displaced_step_request
1410 {
1411 ptid_t ptid;
1412 struct displaced_step_request *next;
1413 };
1414
1415 /* Per-inferior displaced stepping state. */
1416 struct displaced_step_inferior_state
1417 {
1418 /* Pointer to next in linked list. */
1419 struct displaced_step_inferior_state *next;
1420
1421 /* The process this displaced step state refers to. */
1422 int pid;
1423
1424 /* A queue of pending displaced stepping requests. One entry per
1425 thread that needs to do a displaced step. */
1426 struct displaced_step_request *step_request_queue;
1427
1428 /* If this is not null_ptid, this is the thread carrying out a
1429 displaced single-step in process PID. This thread's state will
1430 require fixing up once it has completed its step. */
1431 ptid_t step_ptid;
1432
1433 /* The architecture the thread had when we stepped it. */
1434 struct gdbarch *step_gdbarch;
1435
1436 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1437 for post-step cleanup. */
1438 struct displaced_step_closure *step_closure;
1439
1440 /* The address of the original instruction, and the copy we
1441 made. */
1442 CORE_ADDR step_original, step_copy;
1443
1444 /* Saved contents of copy area. */
1445 gdb_byte *step_saved_copy;
1446 };
1447
1448 /* The list of states of processes involved in displaced stepping
1449 presently. */
1450 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1451
1452 /* Get the displaced stepping state of process PID. */
1453
1454 static struct displaced_step_inferior_state *
1455 get_displaced_stepping_state (int pid)
1456 {
1457 struct displaced_step_inferior_state *state;
1458
1459 for (state = displaced_step_inferior_states;
1460 state != NULL;
1461 state = state->next)
1462 if (state->pid == pid)
1463 return state;
1464
1465 return NULL;
1466 }
1467
1468 /* Return true if process PID has a thread doing a displaced step. */
1469
1470 static int
1471 displaced_step_in_progress (int pid)
1472 {
1473 struct displaced_step_inferior_state *displaced;
1474
1475 displaced = get_displaced_stepping_state (pid);
1476 if (displaced != NULL && !ptid_equal (displaced->step_ptid, null_ptid))
1477 return 1;
1478
1479 return 0;
1480 }
1481
1482 /* Add a new displaced stepping state for process PID to the displaced
1483 stepping state list, or return a pointer to the existing entry if
1484 one already exists. Never returns NULL. */
1485
1486 static struct displaced_step_inferior_state *
1487 add_displaced_stepping_state (int pid)
1488 {
1489 struct displaced_step_inferior_state *state;
1490
1491 for (state = displaced_step_inferior_states;
1492 state != NULL;
1493 state = state->next)
1494 if (state->pid == pid)
1495 return state;
1496
1497 state = xcalloc (1, sizeof (*state));
1498 state->pid = pid;
1499 state->next = displaced_step_inferior_states;
1500 displaced_step_inferior_states = state;
1501
1502 return state;
1503 }
1504
1505 /* If the inferior is in displaced stepping, and ADDR equals the starting
1506 address of the copy area, return the corresponding displaced_step_closure.
1507 Otherwise, return NULL. */
1508
1509 struct displaced_step_closure*
1510 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1511 {
1512 struct displaced_step_inferior_state *displaced
1513 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1514
1515 /* Check whether ADDR points at the displaced instruction in the copy area. */
1516 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1517 && (displaced->step_copy == addr))
1518 return displaced->step_closure;
1519
1520 return NULL;
1521 }
1522
1523 /* Remove the displaced stepping state of process PID. */
1524
1525 static void
1526 remove_displaced_stepping_state (int pid)
1527 {
1528 struct displaced_step_inferior_state *it, **prev_next_p;
1529
1530 gdb_assert (pid != 0);
1531
1532 it = displaced_step_inferior_states;
1533 prev_next_p = &displaced_step_inferior_states;
1534 while (it)
1535 {
1536 if (it->pid == pid)
1537 {
1538 *prev_next_p = it->next;
1539 xfree (it);
1540 return;
1541 }
1542
1543 prev_next_p = &it->next;
1544 it = *prev_next_p;
1545 }
1546 }
1547
1548 static void
1549 infrun_inferior_exit (struct inferior *inf)
1550 {
1551 remove_displaced_stepping_state (inf->pid);
1552 }
1553
1554 /* If ON, and the architecture supports it, GDB will use displaced
1555 stepping to step over breakpoints. If OFF, or if the architecture
1556 doesn't support it, GDB will instead use the traditional
1557 hold-and-step approach. If AUTO (which is the default), GDB will
1558 decide which technique to use to step over breakpoints depending on
1559 which of all-stop or non-stop mode is active --- displaced stepping
1560 in non-stop mode; hold-and-step in all-stop mode. */
1561
1562 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1563
1564 static void
1565 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1566 struct cmd_list_element *c,
1567 const char *value)
1568 {
1569 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1570 fprintf_filtered (file,
1571 _("Debugger's willingness to use displaced stepping "
1572 "to step over breakpoints is %s (currently %s).\n"),
1573 value, non_stop ? "on" : "off");
1574 else
1575 fprintf_filtered (file,
1576 _("Debugger's willingness to use displaced stepping "
1577 "to step over breakpoints is %s.\n"), value);
1578 }
1579
1580 /* Return non-zero if displaced stepping can/should be used to step
1581 over breakpoints. */
1582
1583 static int
1584 use_displaced_stepping (struct gdbarch *gdbarch)
1585 {
1586 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1587 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1588 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1589 && find_record_target () == NULL);
1590 }
1591
1592 /* Clean out any stray displaced stepping state. */
1593 static void
1594 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1595 {
1596 /* Indicate that there is no cleanup pending. */
1597 displaced->step_ptid = null_ptid;
1598
1599 if (displaced->step_closure)
1600 {
1601 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1602 displaced->step_closure);
1603 displaced->step_closure = NULL;
1604 }
1605 }
1606
1607 static void
1608 displaced_step_clear_cleanup (void *arg)
1609 {
1610 struct displaced_step_inferior_state *state = arg;
1611
1612 displaced_step_clear (state);
1613 }
1614
1615 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1616 void
1617 displaced_step_dump_bytes (struct ui_file *file,
1618 const gdb_byte *buf,
1619 size_t len)
1620 {
1621 int i;
1622
1623 for (i = 0; i < len; i++)
1624 fprintf_unfiltered (file, "%02x ", buf[i]);
1625 fputs_unfiltered ("\n", file);
1626 }
1627
1628 /* Prepare to single-step, using displaced stepping.
1629
1630 Note that we cannot use displaced stepping when we have a signal to
1631 deliver. If we have a signal to deliver and an instruction to step
1632 over, then after the step, there will be no indication from the
1633 target whether the thread entered a signal handler or ignored the
1634 signal and stepped over the instruction successfully --- both cases
1635 result in a simple SIGTRAP. In the first case we mustn't do a
1636 fixup, and in the second case we must --- but we can't tell which.
1637 Comments in the code for 'random signals' in handle_inferior_event
1638 explain how we handle this case instead.
1639
1640 Returns 1 if preparing was successful -- this thread is going to be
1641 stepped now; or 0 if displaced stepping this thread got queued. */
1642 static int
1643 displaced_step_prepare (ptid_t ptid)
1644 {
1645 struct cleanup *old_cleanups, *ignore_cleanups;
1646 struct thread_info *tp = find_thread_ptid (ptid);
1647 struct regcache *regcache = get_thread_regcache (ptid);
1648 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1649 CORE_ADDR original, copy;
1650 ULONGEST len;
1651 struct displaced_step_closure *closure;
1652 struct displaced_step_inferior_state *displaced;
1653 int status;
1654
1655 /* We should never reach this function if the architecture does not
1656 support displaced stepping. */
1657 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1658
1659 /* Disable range stepping while executing in the scratch pad. We
1660 want a single-step even if executing the displaced instruction in
1661 the scratch buffer lands within the stepping range (e.g., a
1662 jump/branch). */
1663 tp->control.may_range_step = 0;
1664
1665 /* We have to displaced step one thread at a time, as we only have
1666 access to a single scratch space per inferior. */
1667
1668 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1669
1670 if (!ptid_equal (displaced->step_ptid, null_ptid))
1671 {
1672 /* Already waiting for a displaced step to finish. Defer this
1673 request and place it in the queue. */
1674 struct displaced_step_request *req, *new_req;
1675
1676 if (debug_displaced)
1677 fprintf_unfiltered (gdb_stdlog,
1678 "displaced: defering step of %s\n",
1679 target_pid_to_str (ptid));
1680
1681 new_req = xmalloc (sizeof (*new_req));
1682 new_req->ptid = ptid;
1683 new_req->next = NULL;
1684
1685 if (displaced->step_request_queue)
1686 {
1687 for (req = displaced->step_request_queue;
1688 req && req->next;
1689 req = req->next)
1690 ;
1691 req->next = new_req;
1692 }
1693 else
1694 displaced->step_request_queue = new_req;
1695
1696 return 0;
1697 }
1698 else
1699 {
1700 if (debug_displaced)
1701 fprintf_unfiltered (gdb_stdlog,
1702 "displaced: stepping %s now\n",
1703 target_pid_to_str (ptid));
1704 }
1705
1706 displaced_step_clear (displaced);
1707
1708 old_cleanups = save_inferior_ptid ();
1709 inferior_ptid = ptid;
1710
1711 original = regcache_read_pc (regcache);
1712
1713 copy = gdbarch_displaced_step_location (gdbarch);
1714 len = gdbarch_max_insn_length (gdbarch);
1715
1716 /* Save the original contents of the copy area. */
1717 displaced->step_saved_copy = xmalloc (len);
1718 ignore_cleanups = make_cleanup (free_current_contents,
1719 &displaced->step_saved_copy);
1720 status = target_read_memory (copy, displaced->step_saved_copy, len);
1721 if (status != 0)
1722 throw_error (MEMORY_ERROR,
1723 _("Error accessing memory address %s (%s) for "
1724 "displaced-stepping scratch space."),
1725 paddress (gdbarch, copy), safe_strerror (status));
1726 if (debug_displaced)
1727 {
1728 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1729 paddress (gdbarch, copy));
1730 displaced_step_dump_bytes (gdb_stdlog,
1731 displaced->step_saved_copy,
1732 len);
1733 }
1734
1735 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1736 original, copy, regcache);
1737
1738 /* We don't support the fully-simulated case at present. */
1739 gdb_assert (closure);
1740
1741 /* Save the information we need to fix things up if the step
1742 succeeds. */
1743 displaced->step_ptid = ptid;
1744 displaced->step_gdbarch = gdbarch;
1745 displaced->step_closure = closure;
1746 displaced->step_original = original;
1747 displaced->step_copy = copy;
1748
1749 make_cleanup (displaced_step_clear_cleanup, displaced);
1750
1751 /* Resume execution at the copy. */
1752 regcache_write_pc (regcache, copy);
1753
1754 discard_cleanups (ignore_cleanups);
1755
1756 do_cleanups (old_cleanups);
1757
1758 if (debug_displaced)
1759 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1760 paddress (gdbarch, copy));
1761
1762 return 1;
1763 }
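
/* Illustrative sketch only, not part of GDB: how a caller reacts to
   displaced_step_prepare's return value, following the contract
   documented above and the pattern used elsewhere in this file.
   The helper name is hypothetical.  */

static void
example_start_displaced_step (ptid_t ptid, struct gdbarch *gdbarch)
{
  struct displaced_step_inferior_state *displaced;

  if (!displaced_step_prepare (ptid))
    {
      /* Another thread owns the scratch pad; this request was
         queued and will be resumed from displaced_step_fixup.  */
      return;
    }

  /* PTID's PC now points at the scratch-pad copy.  Single-step it,
     unless the architecture asks for a plain continue.  */
  displaced = get_displaced_stepping_state (ptid_get_pid (ptid));
  if (gdbarch_displaced_step_hw_singlestep (gdbarch,
                                            displaced->step_closure))
    target_resume (ptid, 1, GDB_SIGNAL_0);
  else
    target_resume (ptid, 0, GDB_SIGNAL_0);
}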
1764
1765 static void
1766 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1767 const gdb_byte *myaddr, int len)
1768 {
1769 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1770
1771 inferior_ptid = ptid;
1772 write_memory (memaddr, myaddr, len);
1773 do_cleanups (ptid_cleanup);
1774 }
1775
1776 /* Restore the contents of the copy area for thread PTID. */
1777
1778 static void
1779 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1780 ptid_t ptid)
1781 {
1782 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1783
1784 write_memory_ptid (ptid, displaced->step_copy,
1785 displaced->step_saved_copy, len);
1786 if (debug_displaced)
1787 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1788 target_pid_to_str (ptid),
1789 paddress (displaced->step_gdbarch,
1790 displaced->step_copy));
1791 }
1792
1793 static void
1794 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1795 {
1796 struct cleanup *old_cleanups;
1797 struct displaced_step_inferior_state *displaced
1798 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1799
1800 /* Was any thread of this process doing a displaced step? */
1801 if (displaced == NULL)
1802 return;
1803
1804 /* Was this event for the thread we displaced? */
1805 if (ptid_equal (displaced->step_ptid, null_ptid)
1806 || ! ptid_equal (displaced->step_ptid, event_ptid))
1807 return;
1808
1809 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1810
1811 displaced_step_restore (displaced, displaced->step_ptid);
1812
1813 /* Fixup may need to read memory/registers. Switch to the thread
1814 that we're fixing up. Also, target_stopped_by_watchpoint checks
1815 the current thread. */
1816 switch_to_thread (event_ptid);
1817
1818 /* Did the instruction complete successfully? */
1819 if (signal == GDB_SIGNAL_TRAP
1820 && !(target_stopped_by_watchpoint ()
1821 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1822 || target_have_steppable_watchpoint)))
1823 {
1824 /* Fix up the resulting state. */
1825 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1826 displaced->step_closure,
1827 displaced->step_original,
1828 displaced->step_copy,
1829 get_thread_regcache (displaced->step_ptid));
1830 }
1831 else
1832 {
1833 /* Since the instruction didn't complete, all we can do is
1834 relocate the PC. */
1835 struct regcache *regcache = get_thread_regcache (event_ptid);
1836 CORE_ADDR pc = regcache_read_pc (regcache);
1837
1838 pc = displaced->step_original + (pc - displaced->step_copy);
1839 regcache_write_pc (regcache, pc);
1840 }
1841
1842 do_cleanups (old_cleanups);
1843
1844 displaced->step_ptid = null_ptid;
1845
1846 /* Are there any pending displaced stepping requests? If so, run
1847 one now. Leave the state object around, since we're likely to
1848 need it again soon. */
1849 while (displaced->step_request_queue)
1850 {
1851 struct displaced_step_request *head;
1852 ptid_t ptid;
1853 struct regcache *regcache;
1854 struct gdbarch *gdbarch;
1855 CORE_ADDR actual_pc;
1856 struct address_space *aspace;
1857
1858 head = displaced->step_request_queue;
1859 ptid = head->ptid;
1860 displaced->step_request_queue = head->next;
1861 xfree (head);
1862
1863 context_switch (ptid);
1864
1865 regcache = get_thread_regcache (ptid);
1866 actual_pc = regcache_read_pc (regcache);
1867 aspace = get_regcache_aspace (regcache);
1868
1869 if (breakpoint_here_p (aspace, actual_pc))
1870 {
1871 if (debug_displaced)
1872 fprintf_unfiltered (gdb_stdlog,
1873 "displaced: stepping queued %s now\n",
1874 target_pid_to_str (ptid));
1875
1876 displaced_step_prepare (ptid);
1877
1878 gdbarch = get_regcache_arch (regcache);
1879
1880 if (debug_displaced)
1881 {
1882 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1883 gdb_byte buf[4];
1884
1885 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1886 paddress (gdbarch, actual_pc));
1887 read_memory (actual_pc, buf, sizeof (buf));
1888 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1889 }
1890
1891 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1892 displaced->step_closure))
1893 target_resume (ptid, 1, GDB_SIGNAL_0);
1894 else
1895 target_resume (ptid, 0, GDB_SIGNAL_0);
1896
1897 /* Done, we're stepping a thread. */
1898 break;
1899 }
1900 else
1901 {
1902 int step;
1903 struct thread_info *tp = inferior_thread ();
1904
1905 /* The breakpoint we were sitting under has since been
1906 removed. */
1907 tp->control.trap_expected = 0;
1908
1909 /* Go back to what we were trying to do. */
1910 step = currently_stepping (tp);
1911
1912 if (debug_displaced)
1913 fprintf_unfiltered (gdb_stdlog,
1914 "displaced: breakpoint is gone: %s, step(%d)\n",
1915 target_pid_to_str (tp->ptid), step);
1916
1917 target_resume (ptid, step, GDB_SIGNAL_0);
1918 tp->suspend.stop_signal = GDB_SIGNAL_0;
1919
1920 /* This request was discarded. See if there's any other
1921 thread waiting for its turn. */
1922 }
1923 }
1924 }
1925
1926 /* Update global variables holding ptids to hold NEW_PTID if they were
1927 holding OLD_PTID. */
1928 static void
1929 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1930 {
1931 struct displaced_step_request *it;
1932 struct displaced_step_inferior_state *displaced;
1933
1934 if (ptid_equal (inferior_ptid, old_ptid))
1935 inferior_ptid = new_ptid;
1936
1937 for (displaced = displaced_step_inferior_states;
1938 displaced;
1939 displaced = displaced->next)
1940 {
1941 if (ptid_equal (displaced->step_ptid, old_ptid))
1942 displaced->step_ptid = new_ptid;
1943
1944 for (it = displaced->step_request_queue; it; it = it->next)
1945 if (ptid_equal (it->ptid, old_ptid))
1946 it->ptid = new_ptid;
1947 }
1948 }
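
/* Illustrative sketch only, not part of GDB: how the observer above
   would be hooked up, assuming the observer_attach_thread_ptid_changed
   hook from observer.h; the registration normally lives in
   _initialize_infrun.  */

static void
example_register_ptid_observer (void)
{
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
}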
1949
1950 \f
1951 /* Resuming. */
1952
1953 /* Things to clean up if we QUIT out of resume (). */
1954 static void
1955 resume_cleanups (void *ignore)
1956 {
1957 if (!ptid_equal (inferior_ptid, null_ptid))
1958 delete_single_step_breakpoints (inferior_thread ());
1959
1960 normal_stop ();
1961 }
1962
1963 static const char schedlock_off[] = "off";
1964 static const char schedlock_on[] = "on";
1965 static const char schedlock_step[] = "step";
1966 static const char *const scheduler_enums[] = {
1967 schedlock_off,
1968 schedlock_on,
1969 schedlock_step,
1970 NULL
1971 };
1972 static const char *scheduler_mode = schedlock_off;
1973 static void
1974 show_scheduler_mode (struct ui_file *file, int from_tty,
1975 struct cmd_list_element *c, const char *value)
1976 {
1977 fprintf_filtered (file,
1978 _("Mode for locking scheduler "
1979 "during execution is \"%s\".\n"),
1980 value);
1981 }
1982
1983 static void
1984 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1985 {
1986 if (!target_can_lock_scheduler)
1987 {
1988 scheduler_mode = schedlock_off;
1989 error (_("Target '%s' cannot support this command."), target_shortname);
1990 }
1991 }
1992
1993 /* True if execution commands resume all threads of all processes by
1994 default; otherwise, resume only threads of the current inferior
1995 process. */
1996 int sched_multi = 0;
1997
1998 /* Try to set up software single-stepping over the specified location.
1999 Return 1 if target_resume() should use hardware single step.
2000
2001 GDBARCH the current gdbarch.
2002 PC the location to step over. */
2003
2004 static int
2005 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2006 {
2007 int hw_step = 1;
2008
2009 if (execution_direction == EXEC_FORWARD
2010 && gdbarch_software_single_step_p (gdbarch)
2011 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
2012 {
2013 hw_step = 0;
2014 }
2015 return hw_step;
2016 }
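
/* Illustrative sketch only, not part of GDB: the intended use of
   maybe_software_singlestep -- a hardware-step request decays into a
   continue once software single-step breakpoints are planted.  The
   helper name is hypothetical.  */

static void
example_resume_with_possible_sss (struct gdbarch *gdbarch,
                                  ptid_t ptid, CORE_ADDR pc)
{
  int hw_step = maybe_software_singlestep (gdbarch, pc);

  /* If software single-step breakpoints were inserted, HW_STEP is 0
     and the thread is simply continued into them.  */
  target_resume (ptid, hw_step, GDB_SIGNAL_0);
}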
2017
2018 /* See infrun.h. */
2019
2020 ptid_t
2021 user_visible_resume_ptid (int step)
2022 {
2023 ptid_t resume_ptid;
2024
2025 if (non_stop)
2026 {
2027 /* With non-stop mode on, threads are always handled
2028 individually. */
2029 resume_ptid = inferior_ptid;
2030 }
2031 else if ((scheduler_mode == schedlock_on)
2032 || (scheduler_mode == schedlock_step && step))
2033 {
2034 /* User-settable 'scheduler' mode requires solo thread
2035 resume. */
2036 resume_ptid = inferior_ptid;
2037 }
2038 else if (!sched_multi && target_supports_multi_process ())
2039 {
2040 /* Resume all threads of the current process (and none of other
2041 processes). */
2042 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2043 }
2044 else
2045 {
2046 /* Resume all threads of all processes. */
2047 resume_ptid = RESUME_ALL;
2048 }
2049
2050 return resume_ptid;
2051 }
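
/* Illustrative sketch only, not part of GDB: marking the
   user-visibly resumed set of threads as running before the actual
   resume, the way resume/proceed below do.  The helper name is
   hypothetical.  */

static void
example_mark_running (int user_step)
{
  ptid_t resume_ptid = user_visible_resume_ptid (user_step);

  /* From the frontend's point of view every thread matched by
     RESUME_PTID is now running, even if GDB internally resumes
     fewer threads (e.g., for a step-over).  */
  set_running (resume_ptid, 1);
}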
2052
2053 /* Wrapper for target_resume, that handles infrun-specific
2054 bookkeeping. */
2055
2056 static void
2057 do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2058 {
2059 struct thread_info *tp = inferior_thread ();
2060
2061 /* Install inferior's terminal modes. */
2062 target_terminal_inferior ();
2063
2064 /* Avoid confusing the next resume, if the next stop/resume
2065 happens to apply to another thread. */
2066 tp->suspend.stop_signal = GDB_SIGNAL_0;
2067
2068 /* Advise target which signals may be handled silently.
2069
2070 If we have removed breakpoints because we are stepping over one
2071 in-line (in any thread), we need to receive all signals to avoid
2072 accidentally skipping a breakpoint during execution of a signal
2073 handler.
2074
2075 Likewise if we're displaced stepping, otherwise a trap for a
2076 breakpoint in a signal handler might be confused with the
2077 displaced step finishing. We don't make the displaced_step_fixup
2078 step distinguish the cases instead, because:
2079
2080 - a backtrace while stopped in the signal handler would show the
2081 scratch pad as frame older than the signal handler, instead of
2082 the real mainline code.
2083
2084 - when the thread is later resumed, the signal handler would
2085 return to the scratch pad area, which would no longer be
2086 valid. */
2087 if (step_over_info_valid_p ()
2088 || displaced_step_in_progress (ptid_get_pid (tp->ptid)))
2089 target_pass_signals (0, NULL);
2090 else
2091 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2092
2093 target_resume (resume_ptid, step, sig);
2094 }
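
/* Illustrative sketch only, not part of GDB: the calling pattern for
   do_target_resume used by resume below -- compute the user-visible
   set of threads, then hand off the actual resumption.  The helper
   name is hypothetical.  */

static void
example_resume_current (int step, enum gdb_signal sig)
{
  ptid_t resume_ptid = user_visible_resume_ptid (step);

  do_target_resume (resume_ptid, step, sig);
}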
2095
2096 /* Resume the inferior, but allow a QUIT. This is useful if the user
2097 wants to interrupt some lengthy single-stepping operation
2098 (for child processes, the SIGINT goes to the inferior, and so
2099 we get a SIGINT random_signal, but for remote debugging and perhaps
2100 other targets, that's not true).
2101
2102 SIG is the signal to give the inferior (zero for none). */
2103 void
2104 resume (enum gdb_signal sig)
2105 {
2106 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
2107 struct regcache *regcache = get_current_regcache ();
2108 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2109 struct thread_info *tp = inferior_thread ();
2110 CORE_ADDR pc = regcache_read_pc (regcache);
2111 struct address_space *aspace = get_regcache_aspace (regcache);
2112 ptid_t resume_ptid;
2113 /* This represents the user's step vs continue request. When
2114 deciding whether "set scheduler-locking step" applies, it's the
2115 user's intention that counts. */
2116 const int user_step = tp->control.stepping_command;
2117 /* This represents what we'll actually request the target to do.
2118 This can decay from a step to a continue, if e.g., we need to
2119 implement single-stepping with breakpoints (software
2120 single-step). */
2121 int step;
2122
2123 tp->stepped_breakpoint = 0;
2124
2125 QUIT;
2126
2127 /* Depends on stepped_breakpoint. */
2128 step = currently_stepping (tp);
2129
2130 if (current_inferior ()->waiting_for_vfork_done)
2131 {
2132 /* Don't try to single-step a vfork parent that is waiting for
2133 the child to get out of the shared memory region (by exec'ing
2134 or exiting). This is particularly important on software
2135 single-step archs, as the child process would trip on the
2136 software single step breakpoint inserted for the parent
2137 process. Since the parent will not actually execute any
2138 instruction until the child is out of the shared region (such
2139 are vfork's semantics), it is safe to simply continue it.
2140 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2141 the parent, and tell it to `keep_going', which automatically
2142 re-sets it to stepping. */
2143 if (debug_infrun)
2144 fprintf_unfiltered (gdb_stdlog,
2145 "infrun: resume : clear step\n");
2146 step = 0;
2147 }
2148
2149 if (debug_infrun)
2150 fprintf_unfiltered (gdb_stdlog,
2151 "infrun: resume (step=%d, signal=%s), "
2152 "trap_expected=%d, current thread [%s] at %s\n",
2153 step, gdb_signal_to_symbol_string (sig),
2154 tp->control.trap_expected,
2155 target_pid_to_str (inferior_ptid),
2156 paddress (gdbarch, pc));
2157
2158 /* Normally, by the time we reach `resume', the breakpoints are either
2159 removed or inserted, as appropriate. The exception is if we're sitting
2160 at a permanent breakpoint; we need to step over it, but permanent
2161 breakpoints can't be removed. So we have to test for it here. */
2162 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2163 {
2164 if (sig != GDB_SIGNAL_0)
2165 {
2166 /* We have a signal to pass to the inferior. The resume
2167 may, or may not take us to the signal handler. If this
2168 is a step, we'll need to stop in the signal handler, if
2169 there's one, (if the target supports stepping into
2170 handlers), or in the next mainline instruction, if
2171 there's no handler. If this is a continue, we need to be
2172 sure to run the handler with all breakpoints inserted.
2173 In all cases, set a breakpoint at the current address
2174 (where the handler returns to), and once that breakpoint
2175 is hit, resume skipping the permanent breakpoint. If
2176 that breakpoint isn't hit, then we've stepped into the
2177 signal handler (or hit some other event). We'll delete
2178 the step-resume breakpoint then. */
2179
2180 if (debug_infrun)
2181 fprintf_unfiltered (gdb_stdlog,
2182 "infrun: resume: skipping permanent breakpoint, "
2183 "deliver signal first\n");
2184
2185 clear_step_over_info ();
2186 tp->control.trap_expected = 0;
2187
2188 if (tp->control.step_resume_breakpoint == NULL)
2189 {
2190 /* Set a "high-priority" step-resume, as we don't want
2191 user breakpoints at PC to trigger (again) when this
2192 hits. */
2193 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2194 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2195
2196 tp->step_after_step_resume_breakpoint = step;
2197 }
2198
2199 insert_breakpoints ();
2200 }
2201 else
2202 {
2203 /* There's no signal to pass, we can go ahead and skip the
2204 permanent breakpoint manually. */
2205 if (debug_infrun)
2206 fprintf_unfiltered (gdb_stdlog,
2207 "infrun: resume: skipping permanent breakpoint\n");
2208 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2209 /* Update pc to reflect the new address from which we will
2210 execute instructions. */
2211 pc = regcache_read_pc (regcache);
2212
2213 if (step)
2214 {
2215 /* We've already advanced the PC, so the stepping part
2216 is done. Now we need to arrange for a trap to be
2217 reported to handle_inferior_event. Set a breakpoint
2218 at the current PC, and run to it. Don't update
2219 prev_pc, because if we end in
2220 switch_back_to_stepped_thread, we want the "expected
2221 thread advanced also" branch to be taken. IOW, we
2222 don't want this thread to step further from PC
2223 (overstep). */
2224 gdb_assert (!step_over_info_valid_p ());
2225 insert_single_step_breakpoint (gdbarch, aspace, pc);
2226 insert_breakpoints ();
2227
2228 resume_ptid = user_visible_resume_ptid (user_step);
2229 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
2230 discard_cleanups (old_cleanups);
2231 return;
2232 }
2233 }
2234 }
2235
2236 /* If we have a breakpoint to step over, make sure to do a single
2237 step only. Same if we have software watchpoints. */
2238 if (tp->control.trap_expected || bpstat_should_step ())
2239 tp->control.may_range_step = 0;
2240
2241 /* If enabled, step over breakpoints by executing a copy of the
2242 instruction at a different address.
2243
2244 We can't use displaced stepping when we have a signal to deliver;
2245 the comments for displaced_step_prepare explain why. The
2246 comments in the handle_inferior event for dealing with 'random
2247 signals' explain what we do instead.
2248
2249 We can't use displaced stepping when we are waiting for a vfork_done
2250 event; displaced stepping breaks the vfork child in the same way a
2251 software single-step breakpoint would. */
2252 if (use_displaced_stepping (gdbarch)
2253 && tp->control.trap_expected
2254 && !step_over_info_valid_p ()
2255 && sig == GDB_SIGNAL_0
2256 && !current_inferior ()->waiting_for_vfork_done)
2257 {
2258 struct displaced_step_inferior_state *displaced;
2259
2260 if (!displaced_step_prepare (inferior_ptid))
2261 {
2262 /* Got placed in displaced stepping queue. Will be resumed
2263 later when all the currently queued displaced stepping
2264 requests finish. The thread is not executing at this
2265 point, and the call to set_executing will be made later.
2266 But we need to call set_running here, since from the
2267 user/frontend's point of view, threads were set running.
2268 Unless we're calling an inferior function, as in that
2269 case we pretend the inferior doesn't run at all. */
2270 if (!tp->control.in_infcall)
2271 set_running (user_visible_resume_ptid (user_step), 1);
2272 discard_cleanups (old_cleanups);
2273 return;
2274 }
2275
2276 /* Update pc to reflect the new address from which we will execute
2277 instructions due to displaced stepping. */
2278 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
2279
2280 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2281 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2282 displaced->step_closure);
2283 }
2284
2285 /* Do we need to do it the hard way, w/temp breakpoints? */
2286 else if (step)
2287 step = maybe_software_singlestep (gdbarch, pc);
2288
2289 /* Currently, our software single-step implementation leads to different
2290 results than hardware single-stepping in one situation: when stepping
2291 into delivering a signal which has an associated signal handler,
2292 hardware single-step will stop at the first instruction of the handler,
2293 while software single-step will simply skip execution of the handler.
2294
2295 For now, this difference in behavior is accepted since there is no
2296 easy way to actually implement single-stepping into a signal handler
2297 without kernel support.
2298
2299 However, there is one scenario where this difference leads to follow-on
2300 problems: if we're stepping off a breakpoint by removing all breakpoints
2301 and then single-stepping. In this case, the software single-step
2302 behavior means that even if there is a *breakpoint* in the signal
2303 handler, GDB still would not stop.
2304
2305 Fortunately, we can at least fix this particular issue. We detect
2306 here the case where we are about to deliver a signal while software
2307 single-stepping with breakpoints removed. In this situation, we
2308 revert the decisions to remove all breakpoints and insert single-
2309 step breakpoints, and instead we install a step-resume breakpoint
2310 at the current address, deliver the signal without stepping, and
2311 once we arrive back at the step-resume breakpoint, actually step
2312 over the breakpoint we originally wanted to step over. */
2313 if (thread_has_single_step_breakpoints_set (tp)
2314 && sig != GDB_SIGNAL_0
2315 && step_over_info_valid_p ())
2316 {
2317 /* If we have nested signals or a pending signal is delivered
2318 immediately after a handler returns, we might already have
2319 a step-resume breakpoint set on the earlier handler. We cannot
2320 set another step-resume breakpoint; just continue on until the
2321 original breakpoint is hit. */
2322 if (tp->control.step_resume_breakpoint == NULL)
2323 {
2324 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2325 tp->step_after_step_resume_breakpoint = 1;
2326 }
2327
2328 delete_single_step_breakpoints (tp);
2329
2330 clear_step_over_info ();
2331 tp->control.trap_expected = 0;
2332
2333 insert_breakpoints ();
2334 }
2335
2336 /* If STEP is set, it's a request to use hardware stepping
2337 facilities. But in that case, we should never
2338 use singlestep breakpoint. */
2339 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2340
2341 /* Decide the set of threads to ask the target to resume. Start
2342 by assuming everything will be resumed, then narrow the set
2343 by applying increasingly restrictive conditions. */
2344 resume_ptid = user_visible_resume_ptid (user_step);
2345
2346 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2347 threads (e.g., we might need to step over a breakpoint), from the
2348 user/frontend's point of view, all threads in RESUME_PTID are now
2349 running. Unless we're calling an inferior function, as in that
2350 case we pretend the inferior doesn't run at all. */
2351 if (!tp->control.in_infcall)
2352 set_running (resume_ptid, 1);
2353
2354 /* Maybe resume a single thread after all. */
2355 if ((step || thread_has_single_step_breakpoints_set (tp))
2356 && tp->control.trap_expected)
2357 {
2358 /* We're allowing a thread to run past a breakpoint it has
2359 hit, by single-stepping the thread with the breakpoint
2360 removed. In which case, we need to single-step only this
2361 thread, and keep others stopped, as they can miss this
2362 breakpoint if allowed to run. */
2363 resume_ptid = inferior_ptid;
2364 }
2365
2366 if (execution_direction != EXEC_REVERSE
2367 && step && breakpoint_inserted_here_p (aspace, pc))
2368 {
2369 /* The only case we currently need to step a breakpoint
2370 instruction is when we have a signal to deliver. See
2371 handle_signal_stop where we handle random signals that could
2372 take us out of the stepping range. Normally, in that
2373 case we end up continuing (instead of stepping) over the
2374 signal handler with a breakpoint at PC, but there are cases
2375 where we should _always_ single-step, even if we have a
2376 step-resume breakpoint, like when a software watchpoint is
2377 set. Assuming single-stepping and delivering a signal at the
2378 same time would takes us to the signal handler, then we could
2379 have removed the breakpoint at PC to step over it. However,
2380 some hardware step targets (like e.g., Mac OS) can't step
2381 into signal handlers, and for those, we need to leave the
2382 breakpoint at PC inserted, as otherwise if the handler
2383 recurses and executes PC again, it'll miss the breakpoint.
2384 So we leave the breakpoint inserted anyway, but we need to
2385 record that we tried to step a breakpoint instruction, so
2386 that adjust_pc_after_break doesn't end up confused. */
2387 gdb_assert (sig != GDB_SIGNAL_0);
2388
2389 tp->stepped_breakpoint = 1;
2390
2391 /* Most targets can step a breakpoint instruction, thus
2392 executing it normally. But if this one cannot, just
2393 continue and we will hit it anyway. */
2394 if (gdbarch_cannot_step_breakpoint (gdbarch))
2395 step = 0;
2396 }
2397
2398 if (debug_displaced
2399 && use_displaced_stepping (gdbarch)
2400 && tp->control.trap_expected
2401 && !step_over_info_valid_p ())
2402 {
2403 struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
2404 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2405 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2406 gdb_byte buf[4];
2407
2408 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2409 paddress (resume_gdbarch, actual_pc));
2410 read_memory (actual_pc, buf, sizeof (buf));
2411 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2412 }
2413
2414 if (tp->control.may_range_step)
2415 {
2416 /* If we're resuming a thread with the PC out of the step
2417 range, then we're doing some nested/finer run control
2418 operation, like stepping the thread out of the dynamic
2419 linker or the displaced stepping scratch pad. We
2420 shouldn't have allowed a range step then. */
2421 gdb_assert (pc_in_thread_step_range (pc, tp));
2422 }
2423
2424 do_target_resume (resume_ptid, step, sig);
2425 discard_cleanups (old_cleanups);
2426 }
2427 \f
2428 /* Proceeding. */
2429
2430 /* Clear out all variables saying what to do when inferior is continued.
2431 First do this, then set the ones you want, then call `proceed'. */
2432
2433 static void
2434 clear_proceed_status_thread (struct thread_info *tp)
2435 {
2436 if (debug_infrun)
2437 fprintf_unfiltered (gdb_stdlog,
2438 "infrun: clear_proceed_status_thread (%s)\n",
2439 target_pid_to_str (tp->ptid));
2440
2441 /* If this signal should not be seen by program, give it zero.
2442 Used for debugging signals. */
2443 if (!signal_pass_state (tp->suspend.stop_signal))
2444 tp->suspend.stop_signal = GDB_SIGNAL_0;
2445
2446 tp->control.trap_expected = 0;
2447 tp->control.step_range_start = 0;
2448 tp->control.step_range_end = 0;
2449 tp->control.may_range_step = 0;
2450 tp->control.step_frame_id = null_frame_id;
2451 tp->control.step_stack_frame_id = null_frame_id;
2452 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2453 tp->control.step_start_function = NULL;
2454 tp->stop_requested = 0;
2455
2456 tp->control.stop_step = 0;
2457
2458 tp->control.proceed_to_finish = 0;
2459
2460 tp->control.command_interp = NULL;
2461 tp->control.stepping_command = 0;
2462
2463 /* Discard any remaining commands or status from previous stop. */
2464 bpstat_clear (&tp->control.stop_bpstat);
2465 }
2466
2467 void
2468 clear_proceed_status (int step)
2469 {
2470 if (!non_stop)
2471 {
2472 struct thread_info *tp;
2473 ptid_t resume_ptid;
2474
2475 resume_ptid = user_visible_resume_ptid (step);
2476
2477 /* In all-stop mode, delete the per-thread status of all threads
2478 we're about to resume, implicitly and explicitly. */
2479 ALL_NON_EXITED_THREADS (tp)
2480 {
2481 if (!ptid_match (tp->ptid, resume_ptid))
2482 continue;
2483 clear_proceed_status_thread (tp);
2484 }
2485 }
2486
2487 if (!ptid_equal (inferior_ptid, null_ptid))
2488 {
2489 struct inferior *inferior;
2490
2491 if (non_stop)
2492 {
2493 /* If in non-stop mode, only delete the per-thread status of
2494 the current thread. */
2495 clear_proceed_status_thread (inferior_thread ());
2496 }
2497
2498 inferior = current_inferior ();
2499 inferior->control.stop_soon = NO_STOP_QUIETLY;
2500 }
2501
2502 stop_after_trap = 0;
2503
2504 clear_step_over_info ();
2505
2506 observer_notify_about_to_proceed ();
2507
2508 if (stop_registers)
2509 {
2510 regcache_xfree (stop_registers);
2511 stop_registers = NULL;
2512 }
2513 }
2514
2515 /* Returns true if TP is still stopped at a breakpoint that needs
2516 stepping-over in order to make progress. If the breakpoint is gone
2517 meanwhile, we can skip the whole step-over dance. */
2518
2519 static int
2520 thread_still_needs_step_over (struct thread_info *tp)
2521 {
2522 if (tp->stepping_over_breakpoint)
2523 {
2524 struct regcache *regcache = get_thread_regcache (tp->ptid);
2525
2526 if (breakpoint_here_p (get_regcache_aspace (regcache),
2527 regcache_read_pc (regcache))
2528 == ordinary_breakpoint_here)
2529 return 1;
2530
2531 tp->stepping_over_breakpoint = 0;
2532 }
2533
2534 return 0;
2535 }
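
/* Illustrative sketch only, not part of GDB: skipping the step-over
   dance when the breakpoint under TP has meanwhile been deleted, as
   described above.  The helper name is hypothetical.  */

static void
example_plan_step_over (struct thread_info *tp)
{
  if (thread_still_needs_step_over (tp))
    /* Still sitting on a breakpoint: arrange to step over it.  */
    tp->control.trap_expected = 1;
  else
    /* The breakpoint is gone; a plain resume is enough.  */
    tp->control.trap_expected = 0;
}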
2536
2537 /* Returns true if scheduler locking applies. STEP indicates whether
2538 we're about to do a step/next-like command to a thread. */
2539
2540 static int
2541 schedlock_applies (struct thread_info *tp)
2542 {
2543 return (scheduler_mode == schedlock_on
2544 || (scheduler_mode == schedlock_step
2545 && tp->control.stepping_command));
2546 }
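
/* Illustrative sketch only, not part of GDB: how scheduler locking
   narrows the set of resumed threads, restating part of
   user_visible_resume_ptid above.  The helper name is
   hypothetical.  */

static ptid_t
example_apply_schedlock (struct thread_info *tp)
{
  if (schedlock_applies (tp))
    return tp->ptid;	/* Resume only this thread.  */

  return RESUME_ALL;	/* Resume everything.  */
}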
2547
2548 /* Look for a thread other than EXCEPT that has previously reported a
2549 breakpoint event, and thus needs a step-over in order to make
2550 progress. Returns NULL if none is found. */
2551
2552 static struct thread_info *
2553 find_thread_needs_step_over (struct thread_info *except)
2554 {
2555 struct thread_info *tp, *current;
2556
2557 /* With non-stop mode on, threads are always handled individually. */
2558 gdb_assert (! non_stop);
2559
2560 current = inferior_thread ();
2561
2562 /* If scheduler locking applies, we can avoid iterating over all
2563 threads. */
2564 if (schedlock_applies (except))
2565 {
2566 if (except != current
2567 && thread_still_needs_step_over (current))
2568 return current;
2569
2570 return NULL;
2571 }
2572
2573 ALL_NON_EXITED_THREADS (tp)
2574 {
2575 /* Ignore the EXCEPT thread. */
2576 if (tp == except)
2577 continue;
2578 /* Ignore threads of processes we're not resuming. */
2579 if (!sched_multi
2580 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2581 continue;
2582
2583 if (thread_still_needs_step_over (tp))
2584 return tp;
2585 }
2586
2587 return NULL;
2588 }
2589
2590 /* Basic routine for continuing the program in various fashions.
2591
2592 ADDR is the address to resume at, or -1 for resume where stopped.
2593 SIGGNAL is the signal to give it, or 0 for none,
2594 or -1 to act according to how it stopped.
2595 (There is no separate STEP argument; whether to single-step is
2596 derived from the stepping state the caller sets on the thread.)
2597 You should probably set various step_... variables
2598 before calling here, if you are stepping.
2599
2600 You should call clear_proceed_status before calling proceed. */
2601
2602 void
2603 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
2604 {
2605 struct regcache *regcache;
2606 struct gdbarch *gdbarch;
2607 struct thread_info *tp;
2608 CORE_ADDR pc;
2609 struct address_space *aspace;
2610
2611 /* If we're stopped at a fork/vfork, follow the branch set by the
2612 "set follow-fork-mode" command; otherwise, we'll just proceed
2613 resuming the current thread. */
2614 if (!follow_fork ())
2615 {
2616 /* The target for some reason decided not to resume. */
2617 normal_stop ();
2618 if (target_can_async_p ())
2619 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2620 return;
2621 }
2622
2623 /* We'll update this if & when we switch to a new thread. */
2624 previous_inferior_ptid = inferior_ptid;
2625
2626 regcache = get_current_regcache ();
2627 gdbarch = get_regcache_arch (regcache);
2628 aspace = get_regcache_aspace (regcache);
2629 pc = regcache_read_pc (regcache);
2630 tp = inferior_thread ();
2631
2632 /* Fill in with reasonable starting values. */
2633 init_thread_stepping_state (tp);
2634
2635 if (addr == (CORE_ADDR) -1)
2636 {
2637 if (pc == stop_pc
2638 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
2639 && execution_direction != EXEC_REVERSE)
2640 /* There is a breakpoint at the address we will resume at,
2641 step one instruction before inserting breakpoints so that
2642 we do not stop right away (and report a second hit at this
2643 breakpoint).
2644
2645 Note, we don't do this in reverse, because we won't
2646 actually be executing the breakpoint insn anyway.
2647 We'll be (un-)executing the previous instruction. */
2648 tp->stepping_over_breakpoint = 1;
2649 else if (gdbarch_single_step_through_delay_p (gdbarch)
2650 && gdbarch_single_step_through_delay (gdbarch,
2651 get_current_frame ()))
2652 /* We stepped onto an instruction that needs to be stepped
2653 again before re-inserting the breakpoint, do so. */
2654 tp->stepping_over_breakpoint = 1;
2655 }
2656 else
2657 {
2658 regcache_write_pc (regcache, addr);
2659 }
2660
2661 if (siggnal != GDB_SIGNAL_DEFAULT)
2662 tp->suspend.stop_signal = siggnal;
2663
2664 /* Record the interpreter that issued the execution command that
2665 caused this thread to resume. If the top level interpreter is
2666 MI/async, and the execution command was a CLI command
2667 (next/step/etc.), we'll want to print stop event output to the MI
2668 console channel (the stepped-to line, etc.), as if the user
2669 entered the execution command on a real GDB console. */
2670 inferior_thread ()->control.command_interp = command_interp ();
2671
2672 if (debug_infrun)
2673 fprintf_unfiltered (gdb_stdlog,
2674 "infrun: proceed (addr=%s, signal=%s)\n",
2675 paddress (gdbarch, addr),
2676 gdb_signal_to_symbol_string (siggnal));
2677
2678 if (non_stop)
2679 /* In non-stop, each thread is handled individually. The context
2680 must already be set to the right thread here. */
2681 ;
2682 else
2683 {
2684 struct thread_info *step_over;
2685
2686 /* In a multi-threaded task we may select another thread and
2687 then continue or step.
2688
2689 But if the old thread was stopped at a breakpoint, it will
2690 immediately cause another breakpoint stop without any
2691 execution (i.e. it will report a breakpoint hit incorrectly).
2692 So we must step over it first.
2693
2694 Look for a thread other than the current (TP) that reported a
2695 breakpoint hit and hasn't been resumed yet since. */
2696 step_over = find_thread_needs_step_over (tp);
2697 if (step_over != NULL)
2698 {
2699 if (debug_infrun)
2700 fprintf_unfiltered (gdb_stdlog,
2701 "infrun: need to step-over [%s] first\n",
2702 target_pid_to_str (step_over->ptid));
2703
2704 /* Store the prev_pc for the stepping thread too, needed by
2705 switch_back_to_stepped_thread. */
2706 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2707 switch_to_thread (step_over->ptid);
2708 tp = step_over;
2709 }
2710 }
2711
2712 /* If we need to step over a breakpoint, and we're not using
2713 displaced stepping to do so, insert all breakpoints (watchpoints,
2714 etc.) but the one we're stepping over, step one instruction, and
2715 then re-insert the breakpoint when that step is finished. */
2716 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2717 {
2718 struct regcache *regcache = get_current_regcache ();
2719
2720 set_step_over_info (get_regcache_aspace (regcache),
2721 regcache_read_pc (regcache), 0);
2722 }
2723 else
2724 clear_step_over_info ();
2725
2726 insert_breakpoints ();
2727
2728 tp->control.trap_expected = tp->stepping_over_breakpoint;
2729
2730 annotate_starting ();
2731
2732 /* Make sure that output from GDB appears before output from the
2733 inferior. */
2734 gdb_flush (gdb_stdout);
2735
2736 /* Refresh prev_pc value just prior to resuming. This used to be
2737 done in stop_waiting; however, setting prev_pc there did not handle
2738 scenarios such as inferior function calls or returning from
2739 a function via the return command. In those cases, the prev_pc
2740 value was not set properly for subsequent commands. The prev_pc value
2741 is used to initialize the starting line number in the ecs. With an
2742 invalid value, the gdb next command ends up stopping at the position
2743 represented by the next line table entry past our start position.
2744 On platforms that generate one line table entry per line, this
2745 is not a problem. However, on the ia64, the compiler generates
2746 extraneous line table entries that do not increase the line number.
2747 When we issue the gdb next command on the ia64 after an inferior call
2748 or a return command, we often end up a few instructions forward, still
2749 within the original line we started.
2750
2751 An attempt was made to refresh the prev_pc at the same time the
2752 execution_control_state is initialized (for instance, just before
2753 waiting for an inferior event). But this approach did not work
2754 because of platforms that use ptrace, where the pc register cannot
2755 be read unless the inferior is stopped. At that point, we are not
2756 guaranteed the inferior is stopped and so the regcache_read_pc() call
2757 can fail. Setting the prev_pc value here ensures the value is updated
2758 correctly when the inferior is stopped. */
2759 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2760
2761 /* Resume inferior. */
2762 resume (tp->suspend.stop_signal);
2763
2764 /* Wait for it to stop (if not standalone)
2765 and in any case decode why it stopped, and act accordingly. */
2766 /* Do this only if we are not using the event loop, or if the target
2767 does not support asynchronous execution. */
2768 if (!target_can_async_p ())
2769 {
2770 wait_for_inferior ();
2771 normal_stop ();
2772 }
2773 }
2774 \f
2775
2776 /* Start remote-debugging of a machine over a serial link. */
2777
2778 void
2779 start_remote (int from_tty)
2780 {
2781 struct inferior *inferior;
2782
2783 inferior = current_inferior ();
2784 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2785
2786 /* Always go on waiting for the target, regardless of the mode. */
2787 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2788 indicate to wait_for_inferior that a target should timeout if
2789 nothing is returned (instead of just blocking). Because of this,
2790 targets expecting an immediate response need to, internally, set
2791 things up so that the target_wait() is forced to eventually
2792 timeout. */
2793 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2794 differentiate to its caller what the state of the target is after
2795 the initial open has been performed. Here we're assuming that
2796 the target has stopped. It should be possible to eventually have
2797 target_open() return to the caller an indication that the target
2798 is currently running and GDB state should be set to the same as
2799 for an async run. */
2800 wait_for_inferior ();
2801
2802 /* Now that the inferior has stopped, do any bookkeeping like
2803 loading shared libraries. We want to do this before normal_stop,
2804 so that the displayed frame is up to date. */
2805 post_create_inferior (&current_target, from_tty);
2806
2807 normal_stop ();
2808 }
2809
2810 /* Initialize static vars when a new inferior begins. */
2811
2812 void
2813 init_wait_for_inferior (void)
2814 {
2815 /* These are meaningless until the first time through wait_for_inferior. */
2816
2817 breakpoint_init_inferior (inf_starting);
2818
2819 clear_proceed_status (0);
2820
2821 target_last_wait_ptid = minus_one_ptid;
2822
2823 previous_inferior_ptid = inferior_ptid;
2824
2825 /* Discard any skipped inlined frames. */
2826 clear_inline_frame_state (minus_one_ptid);
2827 }
2828
2829 \f
2830 /* Data to be passed around while handling an event. This data is
2831 discarded between events. */
2832 struct execution_control_state
2833 {
2834 ptid_t ptid;
2835 /* The thread that got the event, if this was a thread event; NULL
2836 otherwise. */
2837 struct thread_info *event_thread;
2838
2839 struct target_waitstatus ws;
2840 int stop_func_filled_in;
2841 CORE_ADDR stop_func_start;
2842 CORE_ADDR stop_func_end;
2843 const char *stop_func_name;
2844 int wait_some_more;
2845
2846 /* True if the event thread hit the single-step breakpoint of
2847 another thread. Thus the event doesn't cause a stop, the thread
2848 needs to be single-stepped past the single-step breakpoint before
2849 we can switch back to the original stepping thread. */
2850 int hit_singlestep_breakpoint;
2851 };
2852
2853 static void handle_inferior_event (struct execution_control_state *ecs);
2854
2855 static void handle_step_into_function (struct gdbarch *gdbarch,
2856 struct execution_control_state *ecs);
2857 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2858 struct execution_control_state *ecs);
2859 static void handle_signal_stop (struct execution_control_state *ecs);
2860 static void check_exception_resume (struct execution_control_state *,
2861 struct frame_info *);
2862
2863 static void end_stepping_range (struct execution_control_state *ecs);
2864 static void stop_waiting (struct execution_control_state *ecs);
2865 static void prepare_to_wait (struct execution_control_state *ecs);
2866 static void keep_going (struct execution_control_state *ecs);
2867 static void process_event_stop_test (struct execution_control_state *ecs);
2868 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2869
2870 /* Callback for iterate over threads. If the thread is stopped, but
2871 the user/frontend doesn't know about that yet, go through
2872 normal_stop, as if the thread had just stopped now. ARG points at
2873 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2874 ptid_is_pid(PTID) is true, applies to all threads of the process
2875 pointed at by PTID. Otherwise, applies only to the thread pointed at by
2876 PTID. */
2877
2878 static int
2879 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2880 {
2881 ptid_t ptid = * (ptid_t *) arg;
2882
2883 if ((ptid_equal (info->ptid, ptid)
2884 || ptid_equal (minus_one_ptid, ptid)
2885 || (ptid_is_pid (ptid)
2886 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2887 && is_running (info->ptid)
2888 && !is_executing (info->ptid))
2889 {
2890 struct cleanup *old_chain;
2891 struct execution_control_state ecss;
2892 struct execution_control_state *ecs = &ecss;
2893
2894 memset (ecs, 0, sizeof (*ecs));
2895
2896 old_chain = make_cleanup_restore_current_thread ();
2897
2898 overlay_cache_invalid = 1;
2899 /* Flush target cache before starting to handle each event.
2900 Target was running and cache could be stale. This is just a
2901 heuristic. Running threads may modify target memory, but we
2902 don't get any event. */
2903 target_dcache_invalidate ();
2904
2905 /* Go through handle_inferior_event/normal_stop, so we always
2906 have consistent output as if the stop event had been
2907 reported. */
2908 ecs->ptid = info->ptid;
2909 ecs->event_thread = find_thread_ptid (info->ptid);
2910 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2911 ecs->ws.value.sig = GDB_SIGNAL_0;
2912
2913 handle_inferior_event (ecs);
2914
2915 if (!ecs->wait_some_more)
2916 {
2917 struct thread_info *tp;
2918
2919 normal_stop ();
2920
2921 /* Finish off the continuations. */
2922 tp = inferior_thread ();
2923 do_all_intermediate_continuations_thread (tp, 1);
2924 do_all_continuations_thread (tp, 1);
2925 }
2926
2927 do_cleanups (old_chain);
2928 }
2929
2930 return 0;
2931 }
2932
2933 /* This function is attached as a "thread_stop_requested" observer.
2934 Cleanup local state that assumed the PTID was to be resumed, and
2935 report the stop to the frontend. */
2936
2937 static void
2938 infrun_thread_stop_requested (ptid_t ptid)
2939 {
2940 struct displaced_step_inferior_state *displaced;
2941
2942 /* PTID was requested to stop. Remove it from the displaced
2943 stepping queue, so we don't try to resume it automatically. */
2944
2945 for (displaced = displaced_step_inferior_states;
2946 displaced;
2947 displaced = displaced->next)
2948 {
2949 struct displaced_step_request *it, **prev_next_p;
2950
2951 it = displaced->step_request_queue;
2952 prev_next_p = &displaced->step_request_queue;
2953 while (it)
2954 {
2955 if (ptid_match (it->ptid, ptid))
2956 {
2957 *prev_next_p = it->next;
2958 it->next = NULL;
2959 xfree (it);
2960 }
2961 else
2962 {
2963 prev_next_p = &it->next;
2964 }
2965
2966 it = *prev_next_p;
2967 }
2968 }
2969
2970 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2971 }
2972
2973 static void
2974 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2975 {
2976 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2977 nullify_last_target_wait_ptid ();
2978 }
2979
2980 /* Delete the step resume, single-step and longjmp/exception resume
2981 breakpoints of TP. */
2982
2983 static void
2984 delete_thread_infrun_breakpoints (struct thread_info *tp)
2985 {
2986 delete_step_resume_breakpoint (tp);
2987 delete_exception_resume_breakpoint (tp);
2988 delete_single_step_breakpoints (tp);
2989 }
2990
2991 /* If the target still has execution, call FUNC for each thread that
2992 just stopped. In all-stop, that's all the non-exited threads; in
2993 non-stop, that's the current thread, only. */
2994
2995 typedef void (*for_each_just_stopped_thread_callback_func)
2996 (struct thread_info *tp);
2997
2998 static void
2999 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3000 {
3001 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
3002 return;
3003
3004 if (non_stop)
3005 {
3006 /* If in non-stop mode, only the current thread stopped. */
3007 func (inferior_thread ());
3008 }
3009 else
3010 {
3011 struct thread_info *tp;
3012
3013 /* In all-stop mode, all threads have stopped. */
3014 ALL_NON_EXITED_THREADS (tp)
3015 {
3016 func (tp);
3017 }
3018 }
3019 }
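
/* Illustrative sketch only, not part of GDB: using
   for_each_just_stopped_thread with a custom callback, the same way
   the two wrappers below do.  The callback and helper names are
   hypothetical.  */

static void
example_note_stopped_thread (struct thread_info *tp)
{
  fprintf_unfiltered (gdb_stdlog, "infrun: %s just stopped\n",
                      target_pid_to_str (tp->ptid));
}

static void
example_note_just_stopped_threads (void)
{
  for_each_just_stopped_thread (example_note_stopped_thread);
}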
3020
3021 /* Delete the step resume and longjmp/exception resume breakpoints of
3022 the threads that just stopped. */
3023
3024 static void
3025 delete_just_stopped_threads_infrun_breakpoints (void)
3026 {
3027 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3028 }
3029
3030 /* Delete the single-step breakpoints of the threads that just
3031 stopped. */
3032
3033 static void
3034 delete_just_stopped_threads_single_step_breakpoints (void)
3035 {
3036 for_each_just_stopped_thread (delete_single_step_breakpoints);
3037 }
3038
3039 /* A cleanup wrapper. */
3040
3041 static void
3042 delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
3043 {
3044 delete_just_stopped_threads_infrun_breakpoints ();
3045 }
3046
3047 /* Pretty print the results of target_wait, for debugging purposes. */
3048
3049 static void
3050 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3051 const struct target_waitstatus *ws)
3052 {
3053 char *status_string = target_waitstatus_to_string (ws);
3054 struct ui_file *tmp_stream = mem_fileopen ();
3055 char *text;
3056
3057 /* The text is split over several lines because it was getting too long.
3058 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3059 output as a unit; we want only one timestamp printed if debug_timestamp
3060 is set. */
3061
3062 fprintf_unfiltered (tmp_stream,
3063 "infrun: target_wait (%d.%ld.%ld",
3064 ptid_get_pid (waiton_ptid),
3065 ptid_get_lwp (waiton_ptid),
3066 ptid_get_tid (waiton_ptid));
3067 if (ptid_get_pid (waiton_ptid) != -1)
3068 fprintf_unfiltered (tmp_stream,
3069 " [%s]", target_pid_to_str (waiton_ptid));
3070 fprintf_unfiltered (tmp_stream, ", status) =\n");
3071 fprintf_unfiltered (tmp_stream,
3072 "infrun: %d.%ld.%ld [%s],\n",
3073 ptid_get_pid (result_ptid),
3074 ptid_get_lwp (result_ptid),
3075 ptid_get_tid (result_ptid),
3076 target_pid_to_str (result_ptid));
3077 fprintf_unfiltered (tmp_stream,
3078 "infrun: %s\n",
3079 status_string);
3080
3081 text = ui_file_xstrdup (tmp_stream, NULL);
3082
3083 /* This uses %s in part to handle %'s in the text, but also to avoid
3084 a gcc error: the format attribute requires a string literal. */
3085 fprintf_unfiltered (gdb_stdlog, "%s", text);
3086
3087 xfree (status_string);
3088 xfree (text);
3089 ui_file_delete (tmp_stream);
3090 }
3091
3092 /* Prepare and stabilize the inferior for detaching it. E.g.,
3093 detaching while a thread is displaced stepping is a recipe for
3094 crashing it, as nothing would readjust the PC out of the scratch
3095 pad. */
3096
3097 void
3098 prepare_for_detach (void)
3099 {
3100 struct inferior *inf = current_inferior ();
3101 ptid_t pid_ptid = pid_to_ptid (inf->pid);
3102 struct cleanup *old_chain_1;
3103 struct displaced_step_inferior_state *displaced;
3104
3105 displaced = get_displaced_stepping_state (inf->pid);
3106
3107 /* Is any thread of this process displaced stepping? If not,
3108 there's nothing else to do. */
3109 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
3110 return;
3111
3112 if (debug_infrun)
3113 fprintf_unfiltered (gdb_stdlog,
3114 "displaced-stepping in-process while detaching");
3115
3116 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
3117 inf->detaching = 1;
3118
3119 while (!ptid_equal (displaced->step_ptid, null_ptid))
3120 {
3121 struct cleanup *old_chain_2;
3122 struct execution_control_state ecss;
3123 struct execution_control_state *ecs;
3124
3125 ecs = &ecss;
3126 memset (ecs, 0, sizeof (*ecs));
3127
3128 overlay_cache_invalid = 1;
3129 /* Flush target cache before starting to handle each event.
3130 Target was running and cache could be stale. This is just a
3131 heuristic. Running threads may modify target memory, but we
3132 don't get any event. */
3133 target_dcache_invalidate ();
3134
3135 if (deprecated_target_wait_hook)
3136 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
3137 else
3138 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
3139
3140 if (debug_infrun)
3141 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3142
3143 /* If an error happens while handling the event, propagate GDB's
3144 knowledge of the executing state to the frontend/user running
3145 state. */
3146 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
3147 &minus_one_ptid);
3148
3149 /* Now figure out what to do with the result of the wait. */
3150 handle_inferior_event (ecs);
3151
3152 /* No error, don't finish the state yet. */
3153 discard_cleanups (old_chain_2);
3154
3155 /* Breakpoints and watchpoints are not installed on the target
3156 at this point, and signals are passed directly to the
3157 inferior, so this must mean the process is gone. */
3158 if (!ecs->wait_some_more)
3159 {
3160 discard_cleanups (old_chain_1);
3161 error (_("Program exited while detaching"));
3162 }
3163 }
3164
3165 discard_cleanups (old_chain_1);
3166 }
3167
3168 /* Wait for control to return from inferior to debugger.
3169
3170 If inferior gets a signal, we may decide to start it up again
3171 instead of returning. That is why there is a loop in this function.
3172 When this function actually returns it means the inferior
3173 should be left stopped and GDB should read more commands. */
3174
3175 void
3176 wait_for_inferior (void)
3177 {
3178 struct cleanup *old_cleanups;
3179 struct cleanup *thread_state_chain;
3180
3181 if (debug_infrun)
3182 fprintf_unfiltered
3183 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3184
3185 old_cleanups
3186 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3187 NULL);
3188
3189 /* If an error happens while handling the event, propagate GDB's
3190 knowledge of the executing state to the frontend/user running
3191 state. */
3192 thread_state_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3193
3194 while (1)
3195 {
3196 struct execution_control_state ecss;
3197 struct execution_control_state *ecs = &ecss;
3198 ptid_t waiton_ptid = minus_one_ptid;
3199
3200 memset (ecs, 0, sizeof (*ecs));
3201
3202 overlay_cache_invalid = 1;
3203
3204 /* Flush target cache before starting to handle each event.
3205 Target was running and cache could be stale. This is just a
3206 heuristic. Running threads may modify target memory, but we
3207 don't get any event. */
3208 target_dcache_invalidate ();
3209
3210 if (deprecated_target_wait_hook)
3211 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3212 else
3213 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3214
3215 if (debug_infrun)
3216 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3217
3218 /* Now figure out what to do with the result of the wait. */
3219 handle_inferior_event (ecs);
3220
3221 if (!ecs->wait_some_more)
3222 break;
3223 }
3224
3225 /* No error, don't finish the state yet. */
3226 discard_cleanups (thread_state_chain);
3227
3228 do_cleanups (old_cleanups);
3229 }
3230
3231 /* Cleanup that reinstalls the readline callback handler, if the
3232 target is running in the background. If while handling the target
3233 event something triggered a secondary prompt, like e.g., a
3234 pagination prompt, we'll have removed the callback handler (see
3235 gdb_readline_wrapper_line). Need to do this as we go back to the
3236 event loop, ready to process further input. Note this has no
3237 effect if the handler hasn't actually been removed, because calling
3238 rl_callback_handler_install resets the line buffer, thus losing
3239 input. */
3240
3241 static void
3242 reinstall_readline_callback_handler_cleanup (void *arg)
3243 {
3244 if (!interpreter_async)
3245 {
3246 /* We're not going back to the top level event loop yet. Don't
3247 install the readline callback, as it'd prep the terminal,
3248 readline-style (raw, noecho) (e.g., --batch). We'll install
3249 it the next time the prompt is displayed, when we're ready
3250 for input. */
3251 return;
3252 }
3253
3254 if (async_command_editing_p && !sync_execution)
3255 gdb_rl_callback_handler_reinstall ();
3256 }
3257
3258 /* Asynchronous version of wait_for_inferior. It is called by the
3259 event loop whenever a change of state is detected on the file
3260 descriptor corresponding to the target. It can be called more than
3261 once to complete a single execution command. In such cases we need
3262 to keep the state in a global variable ECSS. If it is the last time
3263 that this function is called for a single execution command, then
3264 report to the user that the inferior has stopped, and do the
3265 necessary cleanups. */
3266
3267 void
3268 fetch_inferior_event (void *client_data)
3269 {
3270 struct execution_control_state ecss;
3271 struct execution_control_state *ecs = &ecss;
3272 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
3273 struct cleanup *ts_old_chain;
3274 int was_sync = sync_execution;
3275 int cmd_done = 0;
3276 ptid_t waiton_ptid = minus_one_ptid;
3277
3278 memset (ecs, 0, sizeof (*ecs));
3279
3280 /* End up with readline processing input, if necessary. */
3281 make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);
3282
3283 /* We're handling a live event, so make sure we're doing live
3284 debugging. If we're looking at traceframes while the target is
3285 running, we're going to need to get back to that mode after
3286 handling the event. */
3287 if (non_stop)
3288 {
3289 make_cleanup_restore_current_traceframe ();
3290 set_current_traceframe (-1);
3291 }
3292
3293 if (non_stop)
3294 /* In non-stop mode, the user/frontend should not notice a thread
3295 switch due to internal events. Make sure we revert to the
3296 user-selected thread and frame after handling the event and
3297 running any breakpoint commands. */
3298 make_cleanup_restore_current_thread ();
3299
3300 overlay_cache_invalid = 1;
3301 /* Flush target cache before starting to handle each event. Target
3302 was running and cache could be stale. This is just a heuristic.
3303 Running threads may modify target memory, but we don't get any
3304 event. */
3305 target_dcache_invalidate ();
3306
3307 make_cleanup_restore_integer (&execution_direction);
3308 execution_direction = target_execution_direction ();
3309
3310 if (deprecated_target_wait_hook)
3311 ecs->ptid =
3312 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3313 else
3314 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3315
3316 if (debug_infrun)
3317 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3318
3319 /* If an error happens while handling the event, propagate GDB's
3320 knowledge of the executing state to the frontend/user running
3321 state. */
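/* In all-stop mode, that means all threads (minus_one_ptid); in
non-stop mode, only the thread that reported the event. */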
3322 if (!non_stop)
3323 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3324 else
3325 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3326
3327 /* This cleanup runs before the make_cleanup_restore_current_thread above,
3328 so that it still applies to the thread which has thrown the exception. */
3329 make_bpstat_clear_actions_cleanup ();
3330
3331 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3332
3333 /* Now figure out what to do with the result. */
3334 handle_inferior_event (ecs);
3335
3336 if (!ecs->wait_some_more)
3337 {
3338 struct inferior *inf = find_inferior_ptid (ecs->ptid);
3339
3340 delete_just_stopped_threads_infrun_breakpoints ();
3341
3342 /* We may not find an inferior if this was a process exit. */
3343 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3344 normal_stop ();
3345
3346 if (target_has_execution
3347 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
3348 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3349 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3350 && ecs->event_thread->step_multi
3351 && ecs->event_thread->control.stop_step)
3352 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3353 else
3354 {
3355 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3356 cmd_done = 1;
3357 }
3358 }
3359
3360 /* No error, don't finish the thread states yet. */
3361 discard_cleanups (ts_old_chain);
3362
3363 /* Revert thread and frame. */
3364 do_cleanups (old_chain);
3365
3366 /* If the inferior was in sync execution mode, and now isn't,
3367 restore the prompt (a synchronous execution command has finished,
3368 and we're ready for input). */
3369 if (interpreter_async && was_sync && !sync_execution)
3370 observer_notify_sync_execution_done ();
3371
3372 if (cmd_done
3373 && !was_sync
3374 && exec_done_display_p
3375 && (ptid_equal (inferior_ptid, null_ptid)
3376 || !is_running (inferior_ptid)))
3377 printf_unfiltered (_("completed.\n"));
3378 }
3379
3380 /* Record the frame and location we're currently stepping through. */
3381 void
3382 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3383 {
3384 struct thread_info *tp = inferior_thread ();
3385
3386 tp->control.step_frame_id = get_frame_id (frame);
3387 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3388
3389 tp->current_symtab = sal.symtab;
3390 tp->current_line = sal.line;
3391 }
3392
3393 /* Clear context switchable stepping state. */
3394
3395 void
3396 init_thread_stepping_state (struct thread_info *tss)
3397 {
3398 tss->stepped_breakpoint = 0;
3399 tss->stepping_over_breakpoint = 0;
3400 tss->stepping_over_watchpoint = 0;
3401 tss->step_after_step_resume_breakpoint = 0;
3402 }
3403
3404 /* Set the cached copy of the last ptid/waitstatus. */
3405
3406 static void
3407 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3408 {
3409 target_last_wait_ptid = ptid;
3410 target_last_waitstatus = status;
3411 }
3412
3413 /* Return the cached copy of the last ptid/waitstatus returned by
3414 target_wait()/deprecated_target_wait_hook(). The data is actually
3415 cached by handle_inferior_event(), which gets called immediately
3416 after target_wait()/deprecated_target_wait_hook(). */
3417
3418 void
3419 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3420 {
3421 *ptidp = target_last_wait_ptid;
3422 *status = target_last_waitstatus;
3423 }
3424
3425 void
3426 nullify_last_target_wait_ptid (void)
3427 {
3428 target_last_wait_ptid = minus_one_ptid;
3429 }
3430
3431 /* Switch thread contexts. */
3432
3433 static void
3434 context_switch (ptid_t ptid)
3435 {
3436 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3437 {
3438 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3439 target_pid_to_str (inferior_ptid));
3440 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3441 target_pid_to_str (ptid));
3442 }
3443
3444 switch_to_thread (ptid);
3445 }
3446
3447 static void
3448 adjust_pc_after_break (struct execution_control_state *ecs)
3449 {
3450 struct regcache *regcache;
3451 struct gdbarch *gdbarch;
3452 struct address_space *aspace;
3453 CORE_ADDR breakpoint_pc, decr_pc;
3454
3455 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3456 we aren't, just return.
3457
3458 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3459 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3460 implemented by software breakpoints should be handled through the normal
3461 breakpoint layer.
3462
3463 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3464 different signals (SIGILL or SIGEMT for instance), but it is less
3465 clear where the PC is pointing afterwards. It may not match
3466 gdbarch_decr_pc_after_break. I don't know any specific target that
3467 generates these signals at breakpoints (the code has been in GDB since at
3468 least 1992) so I cannot guess how to handle them here.
3469
3470 In earlier versions of GDB, a target with
3471 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3472 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3473 target with both of these set in GDB history, and it seems unlikely to be
3474 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3475
3476 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3477 return;
3478
3479 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3480 return;
3481
3482 /* In reverse execution, when a breakpoint is hit, the instruction
3483 under it has already been de-executed. The reported PC always
3484 points at the breakpoint address, so adjusting it further would
3485 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3486 architecture:
3487
3488 B1 0x08000000 : INSN1
3489 B2 0x08000001 : INSN2
3490 0x08000002 : INSN3
3491 PC -> 0x08000003 : INSN4
3492
3493 Say you're stopped at 0x08000003 as above. Reverse continuing
3494 from that point should hit B2 as below. Reading the PC when the
3495 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3496 been de-executed already.
3497
3498 B1 0x08000000 : INSN1
3499 B2 PC -> 0x08000001 : INSN2
3500 0x08000002 : INSN3
3501 0x08000003 : INSN4
3502
3503 We can't apply the same logic as for forward execution, because
3504 we would wrongly adjust the PC to 0x08000000, since there's a
3505 breakpoint at PC - 1. We'd then report a hit on B1, although
3506 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3507 behaviour. */
3508 if (execution_direction == EXEC_REVERSE)
3509 return;
3510
3511 /* If the target can tell whether the thread hit a SW breakpoint,
3512 trust it. Targets that can tell also adjust the PC
3513 themselves. */
3514 if (target_supports_stopped_by_sw_breakpoint ())
3515 return;
3516
3517 /* Note that relying on whether a breakpoint is planted in memory to
3518 determine this can fail. E.g., the breakpoint could have been
3519 removed since. Or the thread could have been told to step an
3520 instruction the size of a breakpoint instruction, and only
3521 _after_ was a breakpoint inserted at its address. */
3522
3523 /* If this target does not decrement the PC after breakpoints, then
3524 we have nothing to do. */
3525 regcache = get_thread_regcache (ecs->ptid);
3526 gdbarch = get_regcache_arch (regcache);
3527
3528 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3529 if (decr_pc == 0)
3530 return;
3531
3532 aspace = get_regcache_aspace (regcache);
3533
3534 /* Find the location where (if we've hit a breakpoint) the
3535 breakpoint would be. */
3536 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
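/* For example, on x86 the breakpoint instruction (int3, 0xcc) is one
byte long and the reported PC points just past it, so
gdbarch_decr_pc_after_break is 1 and the breakpoint address is PC - 1. */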
3537
3538 /* If the target can't tell whether a software breakpoint triggered,
3539 fall back to figuring it out based on breakpoints we think were
3540 inserted in the target, and on whether the thread was stepped or
3541 continued. */
3542
3543 /* Check whether there actually is a software breakpoint inserted at
3544 that location.
3545
3546 If in non-stop mode, a race condition is possible where we've
3547 removed a breakpoint, but stop events for that breakpoint were
3548 already queued and arrive later. To suppress those spurious
3549 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3550 and retire them after a number of stop events are reported. Note
3551 this is a heuristic and can thus get confused. The real fix is
3552 to get the "stopped by SW BP and needs adjustment" info out of
3553 the target/kernel (and thus never reach here; see above). */
3554 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3555 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3556 {
3557 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3558
3559 if (record_full_is_used ())
3560 record_full_gdb_operation_disable_set ();
3561
3562 /* When using hardware single-step, a SIGTRAP is reported for both
3563 a completed single-step and a software breakpoint. Need to
3564 differentiate between the two, as the latter needs adjusting
3565 but the former does not.
3566
3567 The SIGTRAP can be due to a completed hardware single-step only if
3568 - we didn't insert software single-step breakpoints
3569 - this thread is currently being stepped
3570
3571 If either of these conditions does not hold, we must have stopped due
3572 to hitting a software breakpoint, and have to back up to the
3573 breakpoint address.
3574
3575 As a special case, we could have hardware single-stepped a
3576 software breakpoint. In this case (prev_pc == breakpoint_pc),
3577 we also need to back up to the breakpoint address. */
3578
3579 if (thread_has_single_step_breakpoints_set (ecs->event_thread)
3580 || !currently_stepping (ecs->event_thread)
3581 || (ecs->event_thread->stepped_breakpoint
3582 && ecs->event_thread->prev_pc == breakpoint_pc))
3583 regcache_write_pc (regcache, breakpoint_pc);
3584
3585 do_cleanups (old_cleanups);
3586 }
3587 }
3588
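/* Return non-zero if FRAME was stepped into from the frame whose ID
is STEP_FRAME_ID, i.e., if STEP_FRAME_ID is found among FRAME's
callers with only inline frames in between. */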
3589 static int
3590 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3591 {
3592 for (frame = get_prev_frame (frame);
3593 frame != NULL;
3594 frame = get_prev_frame (frame))
3595 {
3596 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3597 return 1;
3598 if (get_frame_type (frame) != INLINE_FRAME)
3599 break;
3600 }
3601
3602 return 0;
3603 }
3604
3605 /* Auxiliary function that handles syscall entry/return events.
3606 It returns 1 if the inferior should keep going (and GDB
3607 should ignore the event), or 0 if the event deserves to be
3608 processed. */
3609
3610 static int
3611 handle_syscall_event (struct execution_control_state *ecs)
3612 {
3613 struct regcache *regcache;
3614 int syscall_number;
3615
3616 if (!ptid_equal (ecs->ptid, inferior_ptid))
3617 context_switch (ecs->ptid);
3618
3619 regcache = get_thread_regcache (ecs->ptid);
3620 syscall_number = ecs->ws.value.syscall_number;
3621 stop_pc = regcache_read_pc (regcache);
3622
3623 if (catch_syscall_enabled () > 0
3624 && catching_syscall_number (syscall_number) > 0)
3625 {
3626 if (debug_infrun)
3627 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3628 syscall_number);
3629
3630 ecs->event_thread->control.stop_bpstat
3631 = bpstat_stop_status (get_regcache_aspace (regcache),
3632 stop_pc, ecs->ptid, &ecs->ws);
3633
3634 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3635 {
3636 /* Catchpoint hit. */
3637 return 0;
3638 }
3639 }
3640
3641 /* If no catchpoint triggered for this, then keep going. */
3642 keep_going (ecs);
3643 return 1;
3644 }
3645
3646 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3647
3648 static void
3649 fill_in_stop_func (struct gdbarch *gdbarch,
3650 struct execution_control_state *ecs)
3651 {
3652 if (!ecs->stop_func_filled_in)
3653 {
3654 /* Don't care about return value; stop_func_start and stop_func_name
3655 will both be 0 if it doesn't work. */
3656 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3657 &ecs->stop_func_start, &ecs->stop_func_end);
3658 ecs->stop_func_start
3659 += gdbarch_deprecated_function_start_offset (gdbarch);
3660
3661 if (gdbarch_skip_entrypoint_p (gdbarch))
3662 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3663 ecs->stop_func_start);
3664
3665 ecs->stop_func_filled_in = 1;
3666 }
3667 }
3668
3669
3670 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3671
3672 static enum stop_kind
3673 get_inferior_stop_soon (ptid_t ptid)
3674 {
3675 struct inferior *inf = find_inferior_ptid (ptid);
3676
3677 gdb_assert (inf != NULL);
3678 return inf->control.stop_soon;
3679 }
3680
3681 /* Given an execution control state that has been freshly filled in by
3682 an event from the inferior, figure out what it means and take
3683 appropriate action.
3684
3685 The alternatives are:
3686
3687 1) stop_waiting and return; to really stop and return to the
3688 debugger.
3689
3690 2) keep_going and return; to wait for the next event (set
3691 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3692 once). */
3693
3694 static void
3695 handle_inferior_event (struct execution_control_state *ecs)
3696 {
3697 enum stop_kind stop_soon;
3698
3699 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3700 {
3701 /* We had an event in the inferior, but we are not interested in
3702 handling it at this level. The lower layers have already
3703 done what needs to be done, if anything.
3704
3705 One of the possible circumstances for this is when the
3706 inferior produces output for the console. The inferior has
3707 not stopped, and we are ignoring the event. Another possible
3708 circumstance is any event which the lower level knows will be
3709 reported multiple times without an intervening resume. */
3710 if (debug_infrun)
3711 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3712 prepare_to_wait (ecs);
3713 return;
3714 }
3715
3716 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3717 && target_can_async_p () && !sync_execution)
3718 {
3719 /* There were no unwaited-for children left in the target, but,
3720 we're not synchronously waiting for events either. Just
3721 ignore. Otherwise, if we were running a synchronous
3722 execution command, we need to cancel it and give the user
3723 back the terminal. */
3724 if (debug_infrun)
3725 fprintf_unfiltered (gdb_stdlog,
3726 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3727 prepare_to_wait (ecs);
3728 return;
3729 }
3730
3731 /* Cache the last pid/waitstatus. */
3732 set_last_target_status (ecs->ptid, ecs->ws);
3733
3734 /* Always clear state belonging to the previous time we stopped. */
3735 stop_stack_dummy = STOP_NONE;
3736
3737 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3738 {
3739 /* No unwaited-for children left. IOW, all resumed children
3740 have exited. */
3741 if (debug_infrun)
3742 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3743
3744 stop_print_frame = 0;
3745 stop_waiting (ecs);
3746 return;
3747 }
3748
3749 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3750 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3751 {
3752 ecs->event_thread = find_thread_ptid (ecs->ptid);
3753 /* If it's a new thread, add it to the thread database. */
3754 if (ecs->event_thread == NULL)
3755 ecs->event_thread = add_thread (ecs->ptid);
3756
3757 /* Disable range stepping. If the next step request could use a
3758 range, this will end up re-enabled then. */
3759 ecs->event_thread->control.may_range_step = 0;
3760 }
3761
3762 /* Dependent on valid ECS->EVENT_THREAD. */
3763 adjust_pc_after_break (ecs);
3764
3765 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3766 reinit_frame_cache ();
3767
3768 breakpoint_retire_moribund ();
3769
3770 /* First, distinguish signals caused by the debugger from signals
3771 that have to do with the program's own actions. Note that
3772 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3773 on the operating system version. Here we detect when a SIGILL or
3774 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3775 something similar for SIGSEGV, since a SIGSEGV will be generated
3776 when we're trying to execute a breakpoint instruction on a
3777 non-executable stack. This happens for call dummy breakpoints
3778 for architectures like SPARC that place call dummies on the
3779 stack. */
3780 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3781 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3782 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3783 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3784 {
3785 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3786
3787 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3788 regcache_read_pc (regcache)))
3789 {
3790 if (debug_infrun)
3791 fprintf_unfiltered (gdb_stdlog,
3792 "infrun: Treating signal as SIGTRAP\n");
3793 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3794 }
3795 }
3796
3797 /* Mark the non-executing threads accordingly. In all-stop, all
3798 threads of all processes are stopped when we get any event
3799 reported. In non-stop mode, only the event thread stops. If
3800 we're handling a process exit in non-stop mode, there's nothing
3801 to do, as threads of the dead process are gone, and threads of
3802 any other process were left running. */
3803 if (!non_stop)
3804 set_executing (minus_one_ptid, 0);
3805 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3806 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3807 set_executing (ecs->ptid, 0);
3808
3809 switch (ecs->ws.kind)
3810 {
3811 case TARGET_WAITKIND_LOADED:
3812 if (debug_infrun)
3813 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3814 if (!ptid_equal (ecs->ptid, inferior_ptid))
3815 context_switch (ecs->ptid);
3816 /* Ignore gracefully during startup of the inferior, as it might
3817 be the shell which has just loaded some objects; otherwise,
3818 add the symbols for the newly loaded objects. Also ignore at
3819 the beginning of an attach or remote session; we will query
3820 the full list of libraries once the connection is
3821 established. */
3822
3823 stop_soon = get_inferior_stop_soon (ecs->ptid);
3824 if (stop_soon == NO_STOP_QUIETLY)
3825 {
3826 struct regcache *regcache;
3827
3828 regcache = get_thread_regcache (ecs->ptid);
3829
3830 handle_solib_event ();
3831
3832 ecs->event_thread->control.stop_bpstat
3833 = bpstat_stop_status (get_regcache_aspace (regcache),
3834 stop_pc, ecs->ptid, &ecs->ws);
3835
3836 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3837 {
3838 /* A catchpoint triggered. */
3839 process_event_stop_test (ecs);
3840 return;
3841 }
3842
3843 /* If requested, stop when the dynamic linker notifies
3844 gdb of events. This allows the user to get control
3845 and place breakpoints in initializer routines for
3846 dynamically loaded objects (among other things). */
3847 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3848 if (stop_on_solib_events)
3849 {
3850 /* Make sure we print "Stopped due to solib-event" in
3851 normal_stop. */
3852 stop_print_frame = 1;
3853
3854 stop_waiting (ecs);
3855 return;
3856 }
3857 }
3858
3859 /* If we are skipping through a shell, or through shared library
3860 loading that we aren't interested in, resume the program. If
3861 we're running the program normally, also resume. */
3862 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3863 {
3864 /* Loading of shared libraries might have changed breakpoint
3865 addresses. Make sure new breakpoints are inserted. */
3866 if (stop_soon == NO_STOP_QUIETLY)
3867 insert_breakpoints ();
3868 resume (GDB_SIGNAL_0);
3869 prepare_to_wait (ecs);
3870 return;
3871 }
3872
3873 /* But stop if we're attaching or setting up a remote
3874 connection. */
3875 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3876 || stop_soon == STOP_QUIETLY_REMOTE)
3877 {
3878 if (debug_infrun)
3879 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3880 stop_waiting (ecs);
3881 return;
3882 }
3883
3884 internal_error (__FILE__, __LINE__,
3885 _("unhandled stop_soon: %d"), (int) stop_soon);
3886
3887 case TARGET_WAITKIND_SPURIOUS:
3888 if (debug_infrun)
3889 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3890 if (!ptid_equal (ecs->ptid, inferior_ptid))
3891 context_switch (ecs->ptid);
3892 resume (GDB_SIGNAL_0);
3893 prepare_to_wait (ecs);
3894 return;
3895
3896 case TARGET_WAITKIND_EXITED:
3897 case TARGET_WAITKIND_SIGNALLED:
3898 if (debug_infrun)
3899 {
3900 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3901 fprintf_unfiltered (gdb_stdlog,
3902 "infrun: TARGET_WAITKIND_EXITED\n");
3903 else
3904 fprintf_unfiltered (gdb_stdlog,
3905 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3906 }
3907
3908 inferior_ptid = ecs->ptid;
3909 set_current_inferior (find_inferior_ptid (ecs->ptid));
3910 set_current_program_space (current_inferior ()->pspace);
3911 handle_vfork_child_exec_or_exit (0);
3912 target_terminal_ours (); /* Must do this before mourn anyway. */
3913
3914 /* Clear any previous state of the exit convenience variables. */
3915 clear_exit_convenience_vars ();
3916
3917 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3918 {
3919 /* Record the exit code in the convenience variable $_exitcode, so
3920 that the user can inspect this again later. */
3921 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3922 (LONGEST) ecs->ws.value.integer);
3923
3924 /* Also record this in the inferior itself. */
3925 current_inferior ()->has_exit_code = 1;
3926 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3927
3928 /* Support the --return-child-result option. */
3929 return_child_result_value = ecs->ws.value.integer;
3930
3931 observer_notify_exited (ecs->ws.value.integer);
3932 }
3933 else
3934 {
3935 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3936 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3937
3938 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3939 {
3940 /* Set the value of the internal variable $_exitsignal,
3941 which holds the signal uncaught by the inferior. */
3942 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3943 gdbarch_gdb_signal_to_target (gdbarch,
3944 ecs->ws.value.sig));
3945 }
3946 else
3947 {
3948 /* We don't have access to the target's method used for
3949 converting between signal numbers (GDB's internal
3950 representation <-> target's representation).
3951 Therefore, we cannot do a good job at displaying this
3952 information to the user. It's better to just warn
3953 her about it (if infrun debugging is enabled), and
3954 give up. */
3955 if (debug_infrun)
3956 fprintf_filtered (gdb_stdlog, _("\
3957 Cannot fill $_exitsignal with the correct signal number.\n"));
3958 }
3959
3960 observer_notify_signal_exited (ecs->ws.value.sig);
3961 }
3962
3963 gdb_flush (gdb_stdout);
3964 target_mourn_inferior ();
3965 stop_print_frame = 0;
3966 stop_waiting (ecs);
3967 return;
3968
3969 /* The following are the only cases in which we keep going;
3970 the above cases end in a continue or goto. */
3971 case TARGET_WAITKIND_FORKED:
3972 case TARGET_WAITKIND_VFORKED:
3973 if (debug_infrun)
3974 {
3975 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3976 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3977 else
3978 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3979 }
3980
3981 /* Check whether the inferior is displaced stepping. */
3982 {
3983 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3984 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3985 struct displaced_step_inferior_state *displaced
3986 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3987
3988 /* If displaced stepping is in use for this inferior, and thread
3989 ecs->ptid is the one doing the displaced step. */
3990 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3991 {
3992 struct inferior *parent_inf
3993 = find_inferior_ptid (ecs->ptid);
3994 struct regcache *child_regcache;
3995 CORE_ADDR parent_pc;
3996
3997 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3998 indicating that the displaced stepping of syscall instruction
3999 has been done. Perform cleanup for parent process here. Note
4000 that this operation also cleans up the child process for vfork,
4001 because their pages are shared. */
4002 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
4003
4004 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
4005 {
4006 /* Restore scratch pad for child process. */
4007 displaced_step_restore (displaced, ecs->ws.value.related_pid);
4008 }
4009
4010 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
4011 the child's PC is also within the scratchpad. Set the child's PC
4012 to the parent's PC value, which has already been fixed up.
4013 FIXME: we use the parent's aspace here, although we're touching
4014 the child, because the child hasn't been added to the inferior
4015 list yet at this point. */
4016
4017 child_regcache
4018 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
4019 gdbarch,
4020 parent_inf->aspace);
4021 /* Read PC value of parent process. */
4022 parent_pc = regcache_read_pc (regcache);
4023
4024 if (debug_displaced)
4025 fprintf_unfiltered (gdb_stdlog,
4026 "displaced: write child pc from %s to %s\n",
4027 paddress (gdbarch,
4028 regcache_read_pc (child_regcache)),
4029 paddress (gdbarch, parent_pc));
4030
4031 regcache_write_pc (child_regcache, parent_pc);
4032 }
4033 }
4034
4035 if (!ptid_equal (ecs->ptid, inferior_ptid))
4036 context_switch (ecs->ptid);
4037
4038 /* Immediately detach breakpoints from the child before there's
4039 any chance of letting the user delete breakpoints from the
4040 breakpoint lists. If we don't do this early, it's easy to
4041 leave left-over traps in the child, viz.: "break foo; catch
4042 fork; c; <fork>; del; c; <child calls foo>". We only follow
4043 the fork on the last `continue', and by that time the
4044 breakpoint at "foo" is long gone from the breakpoint table.
4045 If we vforked, then we don't need to unpatch here, since both
4046 parent and child are sharing the same memory pages; we'll
4047 need to unpatch at follow/detach time instead to be certain
4048 that new breakpoints added between catchpoint hit time and
4049 vfork follow are detached. */
4050 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
4051 {
4052 /* This won't actually modify the breakpoint list, but will
4053 physically remove the breakpoints from the child. */
4054 detach_breakpoints (ecs->ws.value.related_pid);
4055 }
4056
4057 delete_just_stopped_threads_single_step_breakpoints ();
4058
4059 /* In case the event is caught by a catchpoint, remember that
4060 the event is to be followed at the next resume of the thread,
4061 and not immediately. */
4062 ecs->event_thread->pending_follow = ecs->ws;
4063
4064 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4065
4066 ecs->event_thread->control.stop_bpstat
4067 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4068 stop_pc, ecs->ptid, &ecs->ws);
4069
4070 /* If no catchpoint triggered for this, then keep going. Note
4071 that we're interested in knowing whether the bpstat actually causes a
4072 stop, not just if it may explain the signal. Software
4073 watchpoints, for example, always appear in the bpstat. */
4074 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4075 {
4076 ptid_t parent;
4077 ptid_t child;
4078 int should_resume;
4079 int follow_child
4080 = (follow_fork_mode_string == follow_fork_mode_child);
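/* I.e., "set follow-fork-mode child" is in effect. */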
4081
4082 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4083
4084 should_resume = follow_fork ();
4085
4086 parent = ecs->ptid;
4087 child = ecs->ws.value.related_pid;
4088
4089 /* In non-stop mode, also resume the other branch. */
4090 if (non_stop && !detach_fork)
4091 {
4092 if (follow_child)
4093 switch_to_thread (parent);
4094 else
4095 switch_to_thread (child);
4096
4097 ecs->event_thread = inferior_thread ();
4098 ecs->ptid = inferior_ptid;
4099 keep_going (ecs);
4100 }
4101
4102 if (follow_child)
4103 switch_to_thread (child);
4104 else
4105 switch_to_thread (parent);
4106
4107 ecs->event_thread = inferior_thread ();
4108 ecs->ptid = inferior_ptid;
4109
4110 if (should_resume)
4111 keep_going (ecs);
4112 else
4113 stop_waiting (ecs);
4114 return;
4115 }
4116 process_event_stop_test (ecs);
4117 return;
4118
4119 case TARGET_WAITKIND_VFORK_DONE:
4120 /* Done with the shared memory region. Re-insert breakpoints in
4121 the parent, and keep going. */
4122
4123 if (debug_infrun)
4124 fprintf_unfiltered (gdb_stdlog,
4125 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
4126
4127 if (!ptid_equal (ecs->ptid, inferior_ptid))
4128 context_switch (ecs->ptid);
4129
4130 current_inferior ()->waiting_for_vfork_done = 0;
4131 current_inferior ()->pspace->breakpoints_not_allowed = 0;
4132 /* This also takes care of reinserting breakpoints in the
4133 previously locked inferior. */
4134 keep_going (ecs);
4135 return;
4136
4137 case TARGET_WAITKIND_EXECD:
4138 if (debug_infrun)
4139 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
4140
4141 if (!ptid_equal (ecs->ptid, inferior_ptid))
4142 context_switch (ecs->ptid);
4143
4144 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4145
4146 /* Do whatever is necessary to the parent branch of the vfork. */
4147 handle_vfork_child_exec_or_exit (1);
4148
4149 /* This causes the eventpoints and symbol table to be reset.
4150 Must do this now, before trying to determine whether to
4151 stop. */
4152 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
4153
4154 ecs->event_thread->control.stop_bpstat
4155 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4156 stop_pc, ecs->ptid, &ecs->ws);
4157
4158 /* Note that this may be referenced from inside
4159 bpstat_stop_status above, through inferior_has_execd. */
4160 xfree (ecs->ws.value.execd_pathname);
4161 ecs->ws.value.execd_pathname = NULL;
4162
4163 /* If no catchpoint triggered for this, then keep going. */
4164 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4165 {
4166 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4167 keep_going (ecs);
4168 return;
4169 }
4170 process_event_stop_test (ecs);
4171 return;
4172
4173 /* Be careful not to try to gather much state about a thread
4174 that's in a syscall. It's frequently a losing proposition. */
4175 case TARGET_WAITKIND_SYSCALL_ENTRY:
4176 if (debug_infrun)
4177 fprintf_unfiltered (gdb_stdlog,
4178 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
4179 /* Get the current syscall number. */
4180 if (handle_syscall_event (ecs) == 0)
4181 process_event_stop_test (ecs);
4182 return;
4183
4184 /* Before examining the threads further, step this thread to
4185 get it entirely out of the syscall. (We get notice of the
4186 event when the thread is just on the verge of exiting a
4187 syscall. Stepping one instruction seems to get it back
4188 into user code.) */
4189 case TARGET_WAITKIND_SYSCALL_RETURN:
4190 if (debug_infrun)
4191 fprintf_unfiltered (gdb_stdlog,
4192 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
4193 if (handle_syscall_event (ecs) == 0)
4194 process_event_stop_test (ecs);
4195 return;
4196
4197 case TARGET_WAITKIND_STOPPED:
4198 if (debug_infrun)
4199 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4200 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4201 handle_signal_stop (ecs);
4202 return;
4203
4204 case TARGET_WAITKIND_NO_HISTORY:
4205 if (debug_infrun)
4206 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4207 /* Reverse execution: target ran out of history info. */
4208
4209 delete_just_stopped_threads_single_step_breakpoints ();
4210 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4211 observer_notify_no_history ();
4212 stop_waiting (ecs);
4213 return;
4214 }
4215 }
4216
4217 /* Come here when the program has stopped with a signal. */
4218
4219 static void
4220 handle_signal_stop (struct execution_control_state *ecs)
4221 {
4222 struct frame_info *frame;
4223 struct gdbarch *gdbarch;
4224 int stopped_by_watchpoint;
4225 enum stop_kind stop_soon;
4226 int random_signal;
4227
4228 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4229
4230 /* Do we need to clean up the state of a thread that has
4231 completed a displaced single-step? (Doing so usually affects
4232 the PC, so do it here, before we set stop_pc.) */
4233 displaced_step_fixup (ecs->ptid,
4234 ecs->event_thread->suspend.stop_signal);
4235
4236 /* If we either finished a single-step or hit a breakpoint, but
4237 the user wanted this thread to be stopped, pretend we got a
4238 SIG0 (generic unsignaled stop). */
4239 if (ecs->event_thread->stop_requested
4240 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4241 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4242
4243 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4244
4245 if (debug_infrun)
4246 {
4247 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4248 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4249 struct cleanup *old_chain = save_inferior_ptid ();
4250
4251 inferior_ptid = ecs->ptid;
4252
4253 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4254 paddress (gdbarch, stop_pc));
4255 if (target_stopped_by_watchpoint ())
4256 {
4257 CORE_ADDR addr;
4258
4259 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4260
4261 if (target_stopped_data_address (&current_target, &addr))
4262 fprintf_unfiltered (gdb_stdlog,
4263 "infrun: stopped data address = %s\n",
4264 paddress (gdbarch, addr));
4265 else
4266 fprintf_unfiltered (gdb_stdlog,
4267 "infrun: (no data address available)\n");
4268 }
4269
4270 do_cleanups (old_chain);
4271 }
4272
4273 /* This originates from start_remote(), start_inferior() and
4274 shared library hook functions. */
4275 stop_soon = get_inferior_stop_soon (ecs->ptid);
4276 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4277 {
4278 if (!ptid_equal (ecs->ptid, inferior_ptid))
4279 context_switch (ecs->ptid);
4280 if (debug_infrun)
4281 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4282 stop_print_frame = 1;
4283 stop_waiting (ecs);
4284 return;
4285 }
4286
4287 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4288 && stop_after_trap)
4289 {
4290 if (!ptid_equal (ecs->ptid, inferior_ptid))
4291 context_switch (ecs->ptid);
4292 if (debug_infrun)
4293 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4294 stop_print_frame = 0;
4295 stop_waiting (ecs);
4296 return;
4297 }
4298
4299 /* This originates from attach_command(). We need to overwrite
4300 the stop_signal here, because some kernels don't ignore a
4301 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4302 See more comments in inferior.h. On the other hand, if we
4303 get a non-SIGSTOP, report it to the user - assume the backend
4304 will handle the SIGSTOP if it should show up later.
4305
4306 Also consider that the attach is complete when we see a
4307 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4308 target extended-remote report it instead of a SIGSTOP
4309 (e.g. gdbserver). We already rely on SIGTRAP being our
4310 signal, so this is no exception.
4311
4312 Also consider that the attach is complete when we see a
4313 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4314 the target to stop all threads of the inferior, in case the
4315 low level attach operation doesn't stop them implicitly. If
4316 they weren't stopped implicitly, then the stub will report a
4317 GDB_SIGNAL_0, meaning: stopped for no particular reason
4318 other than GDB's request. */
4319 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4320 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4321 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4322 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4323 {
4324 stop_print_frame = 1;
4325 stop_waiting (ecs);
4326 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4327 return;
4328 }
4329
4330 /* See if something interesting happened to the non-current thread. If
4331 so, then switch to that thread. */
4332 if (!ptid_equal (ecs->ptid, inferior_ptid))
4333 {
4334 if (debug_infrun)
4335 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4336
4337 context_switch (ecs->ptid);
4338
4339 if (deprecated_context_hook)
4340 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4341 }
4342
4343 /* At this point, get hold of the now-current thread's frame. */
4344 frame = get_current_frame ();
4345 gdbarch = get_frame_arch (frame);
4346
4347 /* Pull the single step breakpoints out of the target. */
4348 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4349 {
4350 struct regcache *regcache;
4351 struct address_space *aspace;
4352 CORE_ADDR pc;
4353
4354 regcache = get_thread_regcache (ecs->ptid);
4355 aspace = get_regcache_aspace (regcache);
4356 pc = regcache_read_pc (regcache);
4357
4358 /* However, before doing so, if this single-step breakpoint was
4359 actually for another thread, set this thread up for moving
4360 past it. */
4361 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4362 aspace, pc))
4363 {
4364 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4365 {
4366 if (debug_infrun)
4367 {
4368 fprintf_unfiltered (gdb_stdlog,
4369 "infrun: [%s] hit another thread's "
4370 "single-step breakpoint\n",
4371 target_pid_to_str (ecs->ptid));
4372 }
4373 ecs->hit_singlestep_breakpoint = 1;
4374 }
4375 }
4376 else
4377 {
4378 if (debug_infrun)
4379 {
4380 fprintf_unfiltered (gdb_stdlog,
4381 "infrun: [%s] hit its "
4382 "single-step breakpoint\n",
4383 target_pid_to_str (ecs->ptid));
4384 }
4385 }
4386 }
4387 delete_just_stopped_threads_single_step_breakpoints ();
4388
4389 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4390 && ecs->event_thread->control.trap_expected
4391 && ecs->event_thread->stepping_over_watchpoint)
4392 stopped_by_watchpoint = 0;
4393 else
4394 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4395
4396 /* If necessary, step over this watchpoint. We'll be back to display
4397 it in a moment. */
4398 if (stopped_by_watchpoint
4399 && (target_have_steppable_watchpoint
4400 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4401 {
4402 /* At this point, we are stopped at an instruction which has
4403 attempted to write to a piece of memory under control of
4404 a watchpoint. The instruction hasn't actually executed
4405 yet. If we were to evaluate the watchpoint expression
4406 now, we would get the old value, and therefore no change
4407 would seem to have occurred.
4408
4409 In order to make watchpoints work `right', we really need
4410 to complete the memory write, and then evaluate the
4411 watchpoint expression. We do this by single-stepping the
4412 target.
4413
4414 It may not be necessary to disable the watchpoint to step over
4415 it. For example, the PA can (with some kernel cooperation)
4416 single step over a watchpoint without disabling the watchpoint.
4417
4418 It is far more common to need to disable a watchpoint to step
4419 the inferior over it. If we have non-steppable watchpoints,
4420 we must disable the current watchpoint; it's simplest to
4421 disable all watchpoints.
4422
4423 Any breakpoint at PC must also be stepped over -- if there's
4424 one, it will have already triggered before the watchpoint
4425 triggered, and we either already reported it to the user, or
4426 it didn't cause a stop and we called keep_going. In either
4427 case, if there was a breakpoint at PC, we must be trying to
4428 step past it. */
4429 ecs->event_thread->stepping_over_watchpoint = 1;
4430 keep_going (ecs);
4431 return;
4432 }
4433
4434 ecs->event_thread->stepping_over_breakpoint = 0;
4435 ecs->event_thread->stepping_over_watchpoint = 0;
4436 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4437 ecs->event_thread->control.stop_step = 0;
4438 stop_print_frame = 1;
4439 stopped_by_random_signal = 0;
4440
4441 /* Hide inlined functions starting here, unless we just performed stepi or
4442 nexti. After stepi and nexti, always show the innermost frame (not any
4443 inline function call sites). */
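/* (A step_range_end of 1 means the last command was a stepi/nexti.) */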
4444 if (ecs->event_thread->control.step_range_end != 1)
4445 {
4446 struct address_space *aspace =
4447 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4448
4449 /* skip_inline_frames is expensive, so we avoid it if we can
4450 determine that the address is one where functions cannot have
4451 been inlined. This improves performance with inferiors that
4452 load a lot of shared libraries, because the solib event
4453 breakpoint is defined as the address of a function (i.e. not
4454 inline). Note that we have to check the previous PC as well
4455 as the current one to catch cases when we have just
4456 single-stepped off a breakpoint prior to reinstating it.
4457 Note that we're assuming that the code we single-step to is
4458 not inline, but that's not definitive: there's nothing
4459 preventing the event breakpoint function from containing
4460 inlined code, and the single-step ending up there. If the
4461 user had set a breakpoint on that inlined code, the missing
4462 skip_inline_frames call would break things. Fortunately
4463 that's an extremely unlikely scenario. */
4464 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4465 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4466 && ecs->event_thread->control.trap_expected
4467 && pc_at_non_inline_function (aspace,
4468 ecs->event_thread->prev_pc,
4469 &ecs->ws)))
4470 {
4471 skip_inline_frames (ecs->ptid);
4472
4473 /* Re-fetch current thread's frame in case that invalidated
4474 the frame cache. */
4475 frame = get_current_frame ();
4476 gdbarch = get_frame_arch (frame);
4477 }
4478 }
4479
4480 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4481 && ecs->event_thread->control.trap_expected
4482 && gdbarch_single_step_through_delay_p (gdbarch)
4483 && currently_stepping (ecs->event_thread))
4484 {
4485 /* We're trying to step off a breakpoint. Turns out that we're
4486 also on an instruction that needs to be stepped multiple
4487 times before it has fully executed. E.g., architectures
4488 with a delay slot. It needs to be stepped twice, once for
4489 the instruction and once for the delay slot. */
4490 int step_through_delay
4491 = gdbarch_single_step_through_delay (gdbarch, frame);
4492
4493 if (debug_infrun && step_through_delay)
4494 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4495 if (ecs->event_thread->control.step_range_end == 0
4496 && step_through_delay)
4497 {
4498 /* The user issued a continue when stopped at a breakpoint.
4499 Set up for another trap and get out of here. */
4500 ecs->event_thread->stepping_over_breakpoint = 1;
4501 keep_going (ecs);
4502 return;
4503 }
4504 else if (step_through_delay)
4505 {
4506 /* The user issued a step when stopped at a breakpoint.
4507 Maybe we should stop, maybe we should not - the delay
4508 slot *might* correspond to a line of source. In any
4509 case, don't decide that here, just set
4510 ecs->stepping_over_breakpoint, making sure we
4511 single-step again before breakpoints are re-inserted. */
4512 ecs->event_thread->stepping_over_breakpoint = 1;
4513 }
4514 }
4515
4516 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4517 handles this event. */
4518 ecs->event_thread->control.stop_bpstat
4519 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4520 stop_pc, ecs->ptid, &ecs->ws);
4521
4522 /* The following is in case a breakpoint condition called a
4523 function. */
4524 stop_print_frame = 1;
4525
4526 /* This is where we handle "moribund" watchpoints. Unlike
4527 software breakpoint traps, hardware watchpoint traps are
4528 always distinguishable from random traps. If no high-level
4529 watchpoint is associated with the reported stop data address
4530 anymore, then the bpstat does not explain the signal ---
4531 simply make sure to ignore it if `stopped_by_watchpoint' is
4532 set. */
4533
4534 if (debug_infrun
4535 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4536 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4537 GDB_SIGNAL_TRAP)
4538 && stopped_by_watchpoint)
4539 fprintf_unfiltered (gdb_stdlog,
4540 "infrun: no user watchpoint explains "
4541 "watchpoint SIGTRAP, ignoring\n");
4542
4543 /* NOTE: cagney/2003-03-29: These checks for a random signal
4544 at one stage in the past included checks for an inferior
4545 function call's call dummy's return breakpoint. The original
4546 comment, that went with the test, read:
4547
4548 ``End of a stack dummy. Some systems (e.g. Sony news) give
4549 another signal besides SIGTRAP, so check here as well as
4550 above.''
4551
4552 If someone ever tries to get call dummies on a
4553 non-executable stack to work (where the target would stop
4554 with something like a SIGSEGV), then those tests might need
4555 to be re-instated. Given, however, that the tests were only
4556 enabled when momentary breakpoints were not being used, I
4557 suspect that it won't be the case.
4558
4559 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4560 be necessary for call dummies on a non-executable stack on
4561 SPARC. */
4562
4563 /* See if the breakpoints module can explain the signal. */
4564 random_signal
4565 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4566 ecs->event_thread->suspend.stop_signal);
4567
4568 /* Maybe this was a trap for a software breakpoint that has since
4569 been removed. */
4570 if (random_signal && target_stopped_by_sw_breakpoint ())
4571 {
4572 if (program_breakpoint_here_p (gdbarch, stop_pc))
4573 {
4574 struct regcache *regcache;
4575 int decr_pc;
4576
4577 /* Re-adjust PC to what the program would see if GDB was not
4578 debugging it. */
4579 regcache = get_thread_regcache (ecs->event_thread->ptid);
4580 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4581 if (decr_pc != 0)
4582 {
4583 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
4584
4585 if (record_full_is_used ())
4586 record_full_gdb_operation_disable_set ();
4587
4588 regcache_write_pc (regcache, stop_pc + decr_pc);
4589
4590 do_cleanups (old_cleanups);
4591 }
4592 }
4593 else
4594 {
4595 /* A delayed software breakpoint event. Ignore the trap. */
4596 if (debug_infrun)
4597 fprintf_unfiltered (gdb_stdlog,
4598 "infrun: delayed software breakpoint "
4599 "trap, ignoring\n");
4600 random_signal = 0;
4601 }
4602 }
4603
4604 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
4605 has since been removed. */
4606 if (random_signal && target_stopped_by_hw_breakpoint ())
4607 {
4608 /* A delayed hardware breakpoint event. Ignore the trap. */
4609 if (debug_infrun)
4610 fprintf_unfiltered (gdb_stdlog,
4611 "infrun: delayed hardware breakpoint/watchpoint "
4612 "trap, ignoring\n");
4613 random_signal = 0;
4614 }
4615
4616 /* If not, perhaps stepping/nexting can. */
4617 if (random_signal)
4618 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4619 && currently_stepping (ecs->event_thread));
4620
4621 /* Perhaps the thread hit a single-step breakpoint of _another_
4622 thread. Single-step breakpoints are transparent to the
4623 breakpoints module. */
4624 if (random_signal)
4625 random_signal = !ecs->hit_singlestep_breakpoint;
4626
4627 /* No? Perhaps we got a moribund watchpoint. */
4628 if (random_signal)
4629 random_signal = !stopped_by_watchpoint;
4630
4631 /* For the program's own signals, act according to
4632 the signal handling tables. */
4633
4634 if (random_signal)
4635 {
4636 /* Signal not for debugging purposes. */
4637 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4638 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4639
4640 if (debug_infrun)
4641 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4642 gdb_signal_to_symbol_string (stop_signal));
4643
4644 stopped_by_random_signal = 1;
4645
4646 /* Always stop on signals if we're either just gaining control
4647 of the program, or the user explicitly requested this thread
4648 to remain stopped. */
4649 if (stop_soon != NO_STOP_QUIETLY
4650 || ecs->event_thread->stop_requested
4651 || (!inf->detaching
4652 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4653 {
4654 stop_waiting (ecs);
4655 return;
4656 }
4657
4658 /* Notify observers the signal has "handle print" set. Note we
4659 returned early above if stopping; normal_stop handles the
4660 printing in that case. */
4661 if (signal_print[ecs->event_thread->suspend.stop_signal])
4662 {
4663 /* The signal table tells us to print about this signal. */
4664 target_terminal_ours_for_output ();
4665 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4666 target_terminal_inferior ();
4667 }
4668
4669 /* Clear the signal if it should not be passed. */
4670 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4671 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4672
4673 if (ecs->event_thread->prev_pc == stop_pc
4674 && ecs->event_thread->control.trap_expected
4675 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4676 {
4677 /* We were just starting a new sequence, attempting to
4678 single-step off of a breakpoint and expecting a SIGTRAP.
4679 Instead this signal arrives. This signal will take us out
4680 of the stepping range, so GDB needs to remember, when the
4681 signal handler returns, to resume stepping off that
4682 breakpoint. */
4683 /* To simplify things, "continue" is forced to use the same
4684 code paths as single-step - set a breakpoint at the
4685 signal return address and then, once hit, step off that
4686 breakpoint. */
4687 if (debug_infrun)
4688 fprintf_unfiltered (gdb_stdlog,
4689 "infrun: signal arrived while stepping over "
4690 "breakpoint\n");
4691
4692 insert_hp_step_resume_breakpoint_at_frame (frame);
4693 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4694 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4695 ecs->event_thread->control.trap_expected = 0;
4696
4697 /* If we were nexting/stepping some other thread, switch to
4698 it, so that we don't continue it, losing control. */
4699 if (!switch_back_to_stepped_thread (ecs))
4700 keep_going (ecs);
4701 return;
4702 }
4703
4704 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4705 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4706 || ecs->event_thread->control.step_range_end == 1)
4707 && frame_id_eq (get_stack_frame_id (frame),
4708 ecs->event_thread->control.step_stack_frame_id)
4709 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4710 {
4711 /* The inferior is about to take a signal that will take it
4712 out of the single step range. Set a breakpoint at the
4713 current PC (which is presumably where the signal handler
4714 will eventually return) and then allow the inferior to
4715 run free.
4716
4717 Note that this is only needed for a signal delivered
4718 while in the single-step range. Nested signals aren't a
4719 problem as they eventually all return. */
4720 if (debug_infrun)
4721 fprintf_unfiltered (gdb_stdlog,
4722 "infrun: signal may take us out of "
4723 "single-step range\n");
4724
4725 insert_hp_step_resume_breakpoint_at_frame (frame);
4726 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4727 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4728 ecs->event_thread->control.trap_expected = 0;
4729 keep_going (ecs);
4730 return;
4731 }
4732
4733 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4734 when either there's a nested signal, or when there's a
4735 pending signal enabled just as the signal handler returns
4736 (leaving the inferior at the step-resume-breakpoint without
4737 actually executing it). Either way continue until the
4738 breakpoint is really hit. */
4739
4740 if (!switch_back_to_stepped_thread (ecs))
4741 {
4742 if (debug_infrun)
4743 fprintf_unfiltered (gdb_stdlog,
4744 "infrun: random signal, keep going\n");
4745
4746 keep_going (ecs);
4747 }
4748 return;
4749 }
4750
4751 process_event_stop_test (ecs);
4752 }
4753
4754 /* Come here when we've got some debug event / signal we can explain
4755 (IOW, not a random signal), and test whether it should cause a
4756 stop, or whether we should resume the inferior (transparently).
4757 E.g., could be a breakpoint whose condition evaluates false; we
4758 could be still stepping within the line; etc. */
4759
4760 static void
4761 process_event_stop_test (struct execution_control_state *ecs)
4762 {
4763 struct symtab_and_line stop_pc_sal;
4764 struct frame_info *frame;
4765 struct gdbarch *gdbarch;
4766 CORE_ADDR jmp_buf_pc;
4767 struct bpstat_what what;
4768
4769 /* Handle cases caused by hitting a breakpoint. */
4770
4771 frame = get_current_frame ();
4772 gdbarch = get_frame_arch (frame);
4773
4774 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4775
4776 if (what.call_dummy)
4777 {
4778 stop_stack_dummy = what.call_dummy;
4779 }
4780
4781 /* If we hit an internal event that triggers symbol changes, the
4782 current frame will be invalidated within bpstat_what (e.g., if we
4783 hit an internal solib event). Re-fetch it. */
4784 frame = get_current_frame ();
4785 gdbarch = get_frame_arch (frame);
4786
4787 switch (what.main_action)
4788 {
4789 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4790 /* If we hit the breakpoint at longjmp while stepping, we
4791 install a momentary breakpoint at the target of the
4792 jmp_buf. */
4793
4794 if (debug_infrun)
4795 fprintf_unfiltered (gdb_stdlog,
4796 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4797
4798 ecs->event_thread->stepping_over_breakpoint = 1;
4799
4800 if (what.is_longjmp)
4801 {
4802 struct value *arg_value;
4803
4804 /* If we set the longjmp breakpoint via a SystemTap probe,
4805 then use it to extract the arguments. The destination PC
4806 is the third argument (index 2, zero-based) to the probe. */
4807 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4808 if (arg_value)
4809 {
4810 jmp_buf_pc = value_as_address (arg_value);
4811 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4812 }
4813 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4814 || !gdbarch_get_longjmp_target (gdbarch,
4815 frame, &jmp_buf_pc))
4816 {
4817 if (debug_infrun)
4818 fprintf_unfiltered (gdb_stdlog,
4819 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4820 "(!gdbarch_get_longjmp_target)\n");
4821 keep_going (ecs);
4822 return;
4823 }
4824
4825 /* Insert a breakpoint at resume address. */
4826 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4827 }
4828 else
4829 check_exception_resume (ecs, frame);
4830 keep_going (ecs);
4831 return;
4832
4833 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4834 {
4835 struct frame_info *init_frame;
4836
4837 /* There are several cases to consider.
4838
4839 1. The initiating frame no longer exists. In this case we
4840 must stop, because the exception or longjmp has gone too
4841 far.
4842
4843 2. The initiating frame exists, and is the same as the
4844 current frame. We stop, because the exception or longjmp
4845 has been caught.
4846
4847 3. The initiating frame exists and is different from the
4848 current frame. This means the exception or longjmp has
4849 been caught beneath the initiating frame, so keep going.
4850
4851 4. longjmp breakpoint has been placed just to protect
4852 against stale dummy frames and user is not interested in
4853 stopping around longjmps. */
4854
4855 if (debug_infrun)
4856 fprintf_unfiltered (gdb_stdlog,
4857 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4858
4859 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4860 != NULL);
4861 delete_exception_resume_breakpoint (ecs->event_thread);
4862
4863 if (what.is_longjmp)
4864 {
4865 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4866
4867 if (!frame_id_p (ecs->event_thread->initiating_frame))
4868 {
4869 /* Case 4. */
4870 keep_going (ecs);
4871 return;
4872 }
4873 }
4874
4875 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4876
4877 if (init_frame)
4878 {
4879 struct frame_id current_id
4880 = get_frame_id (get_current_frame ());
4881 if (frame_id_eq (current_id,
4882 ecs->event_thread->initiating_frame))
4883 {
4884 /* Case 2. Fall through. */
4885 }
4886 else
4887 {
4888 /* Case 3. */
4889 keep_going (ecs);
4890 return;
4891 }
4892 }
4893
4894 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4895 exists. */
4896 delete_step_resume_breakpoint (ecs->event_thread);
4897
4898 end_stepping_range (ecs);
4899 }
4900 return;
4901
4902 case BPSTAT_WHAT_SINGLE:
4903 if (debug_infrun)
4904 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4905 ecs->event_thread->stepping_over_breakpoint = 1;
4906 /* Still need to check other stuff, at least the case where we
4907 are stepping and have stepped out of the step range. */
4908 break;
4909
4910 case BPSTAT_WHAT_STEP_RESUME:
4911 if (debug_infrun)
4912 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4913
4914 delete_step_resume_breakpoint (ecs->event_thread);
4915 if (ecs->event_thread->control.proceed_to_finish
4916 && execution_direction == EXEC_REVERSE)
4917 {
4918 struct thread_info *tp = ecs->event_thread;
4919
4920 /* We are finishing a function in reverse, and just hit the
4921 step-resume breakpoint at the start address of the
4922 function, and we're almost there -- just need to back up
4923 by one more single-step, which should take us back to the
4924 function call. */
4925 tp->control.step_range_start = tp->control.step_range_end = 1;
4926 keep_going (ecs);
4927 return;
4928 }
4929 fill_in_stop_func (gdbarch, ecs);
4930 if (stop_pc == ecs->stop_func_start
4931 && execution_direction == EXEC_REVERSE)
4932 {
4933 /* We are stepping over a function call in reverse, and just
4934 hit the step-resume breakpoint at the start address of
4935 the function. Go back to single-stepping, which should
4936 take us back to the function call. */
4937 ecs->event_thread->stepping_over_breakpoint = 1;
4938 keep_going (ecs);
4939 return;
4940 }
4941 break;
4942
4943 case BPSTAT_WHAT_STOP_NOISY:
4944 if (debug_infrun)
4945 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4946 stop_print_frame = 1;
4947
4948 /* Assume the thread stopped for a breakpoint. We'll still check
4949 whether a/the breakpoint is there when the thread is next
4950 resumed. */
4951 ecs->event_thread->stepping_over_breakpoint = 1;
4952
4953 stop_waiting (ecs);
4954 return;
4955
4956 case BPSTAT_WHAT_STOP_SILENT:
4957 if (debug_infrun)
4958 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4959 stop_print_frame = 0;
4960
4961 /* Assume the thread stopped for a breakpoint. We'll still check
4962 whether a/the breakpoint is there when the thread is next
4963 resumed. */
4964 ecs->event_thread->stepping_over_breakpoint = 1;
4965 stop_waiting (ecs);
4966 return;
4967
4968 case BPSTAT_WHAT_HP_STEP_RESUME:
4969 if (debug_infrun)
4970 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4971
4972 delete_step_resume_breakpoint (ecs->event_thread);
4973 if (ecs->event_thread->step_after_step_resume_breakpoint)
4974 {
4975 /* Back when the step-resume breakpoint was inserted, we
4976 were trying to single-step off a breakpoint. Go back to
4977 doing that. */
4978 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4979 ecs->event_thread->stepping_over_breakpoint = 1;
4980 keep_going (ecs);
4981 return;
4982 }
4983 break;
4984
4985 case BPSTAT_WHAT_KEEP_CHECKING:
4986 break;
4987 }
4988
4989 /* If we stepped a permanent breakpoint and we had a high priority
4990 step-resume breakpoint for the address we stepped, but we didn't
4991 hit it, then we must have stepped into the signal handler. The
4992 step-resume was only necessary to catch the case of _not_
4993 stepping into the handler, so delete it, and fall through to
4994 checking whether the step finished. */
4995 if (ecs->event_thread->stepped_breakpoint)
4996 {
4997 struct breakpoint *sr_bp
4998 = ecs->event_thread->control.step_resume_breakpoint;
4999
5000 if (sr_bp != NULL
5001 && sr_bp->loc->permanent
5002 && sr_bp->type == bp_hp_step_resume
5003 && sr_bp->loc->address == ecs->event_thread->prev_pc)
5004 {
5005 if (debug_infrun)
5006 fprintf_unfiltered (gdb_stdlog,
5007 "infrun: stepped permanent breakpoint, stopped in "
5008 "handler\n");
5009 delete_step_resume_breakpoint (ecs->event_thread);
5010 ecs->event_thread->step_after_step_resume_breakpoint = 0;
5011 }
5012 }
5013
5014 /* We come here if we hit a breakpoint but should not stop for it.
5015 Possibly we were also stepping and should stop for that. So fall
5016 through and test for stepping. But, if not stepping, do not
5017 stop. */
5018
5019 /* In all-stop mode, if we're currently stepping but have stopped in
5020 some other thread, we need to switch back to the stepped thread. */
5021 if (switch_back_to_stepped_thread (ecs))
5022 return;
5023
5024 if (ecs->event_thread->control.step_resume_breakpoint)
5025 {
5026 if (debug_infrun)
5027 fprintf_unfiltered (gdb_stdlog,
5028 "infrun: step-resume breakpoint is inserted\n");
5029
5030 /* Having a step-resume breakpoint overrides anything
5031 else having to do with stepping commands until
5032 that breakpoint is reached. */
5033 keep_going (ecs);
5034 return;
5035 }
5036
5037 if (ecs->event_thread->control.step_range_end == 0)
5038 {
5039 if (debug_infrun)
5040 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
5041 /* Likewise if we aren't even stepping. */
5042 keep_going (ecs);
5043 return;
5044 }
5045
5046 /* Re-fetch current thread's frame in case the code above caused
5047 the frame cache to be re-initialized, making our FRAME variable
5048 a dangling pointer. */
5049 frame = get_current_frame ();
5050 gdbarch = get_frame_arch (frame);
5051 fill_in_stop_func (gdbarch, ecs);
5052
5053 /* If stepping through a line, keep going if still within it.
5054
5055 Note that step_range_end is the address of the first instruction
5056 beyond the step range, and NOT the address of the last instruction
5057 within it!
5058
5059 Note also that during reverse execution, we may be stepping
5060 through a function epilogue and therefore must detect when
5061 the current-frame changes in the middle of a line. */
5062
5063 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
5064 && (execution_direction != EXEC_REVERSE
5065 || frame_id_eq (get_frame_id (frame),
5066 ecs->event_thread->control.step_frame_id)))
5067 {
5068 if (debug_infrun)
5069 fprintf_unfiltered
5070 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
5071 paddress (gdbarch, ecs->event_thread->control.step_range_start),
5072 paddress (gdbarch, ecs->event_thread->control.step_range_end));
5073
5074 /* Tentatively re-enable range stepping; `resume' disables it if
5075 necessary (e.g., if we're stepping over a breakpoint or we
5076 have software watchpoints). */
5077 ecs->event_thread->control.may_range_step = 1;
5078
5079 /* When stepping backward, stop at beginning of line range
5080 (unless it's the function entry point, in which case
5081 keep going back to the call point). */
5082 if (stop_pc == ecs->event_thread->control.step_range_start
5083 && stop_pc != ecs->stop_func_start
5084 && execution_direction == EXEC_REVERSE)
5085 end_stepping_range (ecs);
5086 else
5087 keep_going (ecs);
5088
5089 return;
5090 }
5091
5092 /* We stepped out of the stepping range. */
5093
5094 /* If we are stepping at the source level and entered the runtime
5095 loader dynamic symbol resolution code...
5096
5097 EXEC_FORWARD: we keep on single stepping until we exit the run
5098 time loader code and reach the callee's address.
5099
5100 EXEC_REVERSE: we've already executed the callee (backward), and
5101 the runtime loader code is handled just like any other
5102 undebuggable function call. Now we need only keep stepping
5103 backward through the trampoline code, and that's handled further
5104 down, so there is nothing for us to do here. */
5105
5106 if (execution_direction != EXEC_REVERSE
5107 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5108 && in_solib_dynsym_resolve_code (stop_pc))
5109 {
5110 CORE_ADDR pc_after_resolver =
5111 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
5112
5113 if (debug_infrun)
5114 fprintf_unfiltered (gdb_stdlog,
5115 "infrun: stepped into dynsym resolve code\n");
5116
5117 if (pc_after_resolver)
5118 {
5119 /* Set up a step-resume breakpoint at the address
5120 indicated by SKIP_SOLIB_RESOLVER. */
5121 struct symtab_and_line sr_sal;
5122
5123 init_sal (&sr_sal);
5124 sr_sal.pc = pc_after_resolver;
5125 sr_sal.pspace = get_frame_program_space (frame);
5126
5127 insert_step_resume_breakpoint_at_sal (gdbarch,
5128 sr_sal, null_frame_id);
5129 }
5130
5131 keep_going (ecs);
5132 return;
5133 }
5134
5135 if (ecs->event_thread->control.step_range_end != 1
5136 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5137 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5138 && get_frame_type (frame) == SIGTRAMP_FRAME)
5139 {
5140 if (debug_infrun)
5141 fprintf_unfiltered (gdb_stdlog,
5142 "infrun: stepped into signal trampoline\n");
5143 /* The inferior, while doing a "step" or "next", has ended up in
5144 a signal trampoline (either by a signal being delivered or by
5145 the signal handler returning). Just single-step until the
5146 inferior leaves the trampoline (either by calling the handler
5147 or returning). */
5148 keep_going (ecs);
5149 return;
5150 }
5151
5152 /* If we're in the return path from a shared library trampoline,
5153 we want to proceed through the trampoline when stepping. */
5154 /* macro/2012-04-25: This needs to come before the subroutine
5155 call check below as on some targets return trampolines look
5156 like subroutine calls (MIPS16 return thunks). */
5157 if (gdbarch_in_solib_return_trampoline (gdbarch,
5158 stop_pc, ecs->stop_func_name)
5159 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5160 {
5161 /* Determine where this trampoline returns. */
5162 CORE_ADDR real_stop_pc;
5163
5164 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5165
5166 if (debug_infrun)
5167 fprintf_unfiltered (gdb_stdlog,
5168 "infrun: stepped into solib return tramp\n");
5169
5170 /* Only proceed through if we know where it's going. */
5171 if (real_stop_pc)
5172 {
5173 /* And put the step-resume breakpoint there and run until we reach it. */
5174 struct symtab_and_line sr_sal;
5175
5176 init_sal (&sr_sal); /* initialize to zeroes */
5177 sr_sal.pc = real_stop_pc;
5178 sr_sal.section = find_pc_overlay (sr_sal.pc);
5179 sr_sal.pspace = get_frame_program_space (frame);
5180
5181 /* Do not specify what the fp should be when we stop since
5182 on some machines the prologue is where the new fp value
5183 is established. */
5184 insert_step_resume_breakpoint_at_sal (gdbarch,
5185 sr_sal, null_frame_id);
5186
5187 /* Restart without fiddling with the step ranges or
5188 other state. */
5189 keep_going (ecs);
5190 return;
5191 }
5192 }
5193
5194 /* Check for subroutine calls. Checking whether the current frame
5195 equals the step frame ID is not strictly necessary - checking the
5196 previous (caller) frame's ID would be sufficient - but it is a
5197 common case and cheaper than checking the previous frame's ID.
5198
5199 NOTE: frame_id_eq will never report two invalid frame IDs as
5200 being equal, so to get into this block, both the current and
5201 previous frame must have valid frame IDs. */
5202 /* The outer_frame_id check is a heuristic to detect stepping
5203 through startup code. If we step over an instruction which
5204 sets the stack pointer from an invalid value to a valid value,
5205 we may detect that as a subroutine call from the mythical
5206 "outermost" function. This could be fixed by marking
5207 outermost frames as !stack_p,code_p,special_p. Then the
5208 initial outermost frame, before sp was valid, would
5209 have code_addr == &_start. See the comment in frame_id_eq
5210 for more. */
5211 if (!frame_id_eq (get_stack_frame_id (frame),
5212 ecs->event_thread->control.step_stack_frame_id)
5213 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
5214 ecs->event_thread->control.step_stack_frame_id)
5215 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
5216 outer_frame_id)
5217 || (ecs->event_thread->control.step_start_function
5218 != find_pc_function (stop_pc)))))
5219 {
5220 CORE_ADDR real_stop_pc;
5221
5222 if (debug_infrun)
5223 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
5224
5225 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
5226 {
5227 /* I presume that step_over_calls is only 0 when we're
5228 supposed to be stepping at the assembly language level
5229 ("stepi"). Just stop. */
5230 /* And this works the same backward as frontward. MVS */
5231 end_stepping_range (ecs);
5232 return;
5233 }
5234
5235 /* Reverse stepping through solib trampolines. */
5236
5237 if (execution_direction == EXEC_REVERSE
5238 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5239 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5240 || (ecs->stop_func_start == 0
5241 && in_solib_dynsym_resolve_code (stop_pc))))
5242 {
5243 /* Any solib trampoline code can be handled in reverse
5244 by simply continuing to single-step. We have already
5245 executed the solib function (backwards), and a few
5246 steps will take us back through the trampoline to the
5247 caller. */
5248 keep_going (ecs);
5249 return;
5250 }
5251
5252 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5253 {
5254 /* We're doing a "next".
5255
5256 Normal (forward) execution: set a breakpoint at the
5257 callee's return address (the address at which the caller
5258 will resume).
5259
5260 Reverse (backward) execution: set the step-resume
5261 breakpoint at the start of the function that we just
5262 stepped into (backwards), and continue to there. When we
5263 get there, we'll need to single-step back to the caller. */
5264
5265 if (execution_direction == EXEC_REVERSE)
5266 {
5267 /* If we're already at the start of the function, we've either
5268 just stepped backward into a single instruction function,
5269 or stepped back out of a signal handler to the first instruction
5270 of the function. Just keep going, which will single-step back
5271 to the caller. */
5272 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5273 {
5274 struct symtab_and_line sr_sal;
5275
5276 /* Normal function call return (static or dynamic). */
5277 init_sal (&sr_sal);
5278 sr_sal.pc = ecs->stop_func_start;
5279 sr_sal.pspace = get_frame_program_space (frame);
5280 insert_step_resume_breakpoint_at_sal (gdbarch,
5281 sr_sal, null_frame_id);
5282 }
5283 }
5284 else
5285 insert_step_resume_breakpoint_at_caller (frame);
5286
5287 keep_going (ecs);
5288 return;
5289 }
5290
5291 /* If we are in a function call trampoline (a stub between the
5292 calling routine and the real function), locate the real
5293 function. That's what tells us (a) whether we want to step
5294 into it at all, and (b) what prologue we want to run to the
5295 end of, if we do step into it. */
5296 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5297 if (real_stop_pc == 0)
5298 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5299 if (real_stop_pc != 0)
5300 ecs->stop_func_start = real_stop_pc;
5301
5302 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5303 {
5304 struct symtab_and_line sr_sal;
5305
5306 init_sal (&sr_sal);
5307 sr_sal.pc = ecs->stop_func_start;
5308 sr_sal.pspace = get_frame_program_space (frame);
5309
5310 insert_step_resume_breakpoint_at_sal (gdbarch,
5311 sr_sal, null_frame_id);
5312 keep_going (ecs);
5313 return;
5314 }
5315
5316 /* If we have line number information for the function we are
5317 thinking of stepping into and the function isn't on the skip
5318 list, step into it.
5319
5320 If there are several symtabs at that PC (e.g. with include
5321 files), we just want to know whether *any* of them have line
5322 numbers. find_pc_line handles this. */
5323 {
5324 struct symtab_and_line tmp_sal;
5325
5326 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5327 if (tmp_sal.line != 0
5328 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5329 &tmp_sal))
5330 {
5331 if (execution_direction == EXEC_REVERSE)
5332 handle_step_into_function_backward (gdbarch, ecs);
5333 else
5334 handle_step_into_function (gdbarch, ecs);
5335 return;
5336 }
5337 }
5338
5339 /* If we have no line number and the step-stop-if-no-debug is
5340 set, we stop the step so that the user has a chance to switch
5341 to assembly mode. */
5342 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5343 && step_stop_if_no_debug)
5344 {
5345 end_stepping_range (ecs);
5346 return;
5347 }
5348
5349 if (execution_direction == EXEC_REVERSE)
5350 {
5351 /* If we're already at the start of the function, we've either just
5352 stepped backward into a single instruction function without line
5353 number info, or stepped back out of a signal handler to the first
5354 instruction of the function without line number info. Just keep
5355 going, which will single-step back to the caller. */
5356 if (ecs->stop_func_start != stop_pc)
5357 {
5358 /* Set a breakpoint at callee's start address.
5359 From there we can step once and be back in the caller. */
5360 struct symtab_and_line sr_sal;
5361
5362 init_sal (&sr_sal);
5363 sr_sal.pc = ecs->stop_func_start;
5364 sr_sal.pspace = get_frame_program_space (frame);
5365 insert_step_resume_breakpoint_at_sal (gdbarch,
5366 sr_sal, null_frame_id);
5367 }
5368 }
5369 else
5370 /* Set a breakpoint at callee's return address (the address
5371 at which the caller will resume). */
5372 insert_step_resume_breakpoint_at_caller (frame);
5373
5374 keep_going (ecs);
5375 return;
5376 }
5377
5378 /* Reverse stepping through solib trampolines. */
5379
5380 if (execution_direction == EXEC_REVERSE
5381 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5382 {
5383 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5384 || (ecs->stop_func_start == 0
5385 && in_solib_dynsym_resolve_code (stop_pc)))
5386 {
5387 /* Any solib trampoline code can be handled in reverse
5388 by simply continuing to single-step. We have already
5389 executed the solib function (backwards), and a few
5390 steps will take us back through the trampoline to the
5391 caller. */
5392 keep_going (ecs);
5393 return;
5394 }
5395 else if (in_solib_dynsym_resolve_code (stop_pc))
5396 {
5397 /* Stepped backward into the solib dynsym resolver.
5398 Set a breakpoint at its start and continue, then
5399 one more step will take us out. */
5400 struct symtab_and_line sr_sal;
5401
5402 init_sal (&sr_sal);
5403 sr_sal.pc = ecs->stop_func_start;
5404 sr_sal.pspace = get_frame_program_space (frame);
5405 insert_step_resume_breakpoint_at_sal (gdbarch,
5406 sr_sal, null_frame_id);
5407 keep_going (ecs);
5408 return;
5409 }
5410 }
5411
5412 stop_pc_sal = find_pc_line (stop_pc, 0);
5413
5414 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5415 the trampoline processing logic; however, there are some trampolines
5416 that have no names, so we should do trampoline handling first. */
5417 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5418 && ecs->stop_func_name == NULL
5419 && stop_pc_sal.line == 0)
5420 {
5421 if (debug_infrun)
5422 fprintf_unfiltered (gdb_stdlog,
5423 "infrun: stepped into undebuggable function\n");
5424
5425 /* The inferior just stepped into, or returned to, an
5426 undebuggable function (where there is no debugging information
5427 and no line number corresponding to the address where the
5428 inferior stopped). Since we want to skip this kind of code,
5429 we keep going until the inferior returns from this
5430 function - unless the user has asked us not to (via
5431 set step-mode) or we no longer know how to get back
5432 to the call site. */
5433 if (step_stop_if_no_debug
5434 || !frame_id_p (frame_unwind_caller_id (frame)))
5435 {
5436 /* If we have no line number and the step-stop-if-no-debug
5437 is set, we stop the step so that the user has a chance to
5438 switch to assembly mode. */
5439 end_stepping_range (ecs);
5440 return;
5441 }
5442 else
5443 {
5444 /* Set a breakpoint at callee's return address (the address
5445 at which the caller will resume). */
5446 insert_step_resume_breakpoint_at_caller (frame);
5447 keep_going (ecs);
5448 return;
5449 }
5450 }
5451
5452 if (ecs->event_thread->control.step_range_end == 1)
5453 {
5454 /* It is stepi or nexti. We always want to stop stepping after
5455 one instruction. */
5456 if (debug_infrun)
5457 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5458 end_stepping_range (ecs);
5459 return;
5460 }
5461
5462 if (stop_pc_sal.line == 0)
5463 {
5464 /* We have no line number information. That means to stop
5465 stepping (does this always happen right after one instruction,
5466 when we do "s" in a function with no line numbers,
5467 or can this happen as a result of a return or longjmp?). */
5468 if (debug_infrun)
5469 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5470 end_stepping_range (ecs);
5471 return;
5472 }
5473
5474 /* Look for "calls" to inlined functions, part one. If the inline
5475 frame machinery detected some skipped call sites, we have entered
5476 a new inline function. */
5477
5478 if (frame_id_eq (get_frame_id (get_current_frame ()),
5479 ecs->event_thread->control.step_frame_id)
5480 && inline_skipped_frames (ecs->ptid))
5481 {
5482 struct symtab_and_line call_sal;
5483
5484 if (debug_infrun)
5485 fprintf_unfiltered (gdb_stdlog,
5486 "infrun: stepped into inlined function\n");
5487
5488 find_frame_sal (get_current_frame (), &call_sal);
5489
5490 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5491 {
5492 /* For "step", we're going to stop. But if the call site
5493 for this inlined function is on the same source line as
5494 we were previously stepping, go down into the function
5495 first. Otherwise stop at the call site. */
5496
5497 if (call_sal.line == ecs->event_thread->current_line
5498 && call_sal.symtab == ecs->event_thread->current_symtab)
5499 step_into_inline_frame (ecs->ptid);
5500
5501 end_stepping_range (ecs);
5502 return;
5503 }
5504 else
5505 {
5506 /* For "next", we should stop at the call site if it is on a
5507 different source line. Otherwise continue through the
5508 inlined function. */
5509 if (call_sal.line == ecs->event_thread->current_line
5510 && call_sal.symtab == ecs->event_thread->current_symtab)
5511 keep_going (ecs);
5512 else
5513 end_stepping_range (ecs);
5514 return;
5515 }
5516 }
5517
5518 /* Look for "calls" to inlined functions, part two. If we are still
5519 in the same real function we were stepping through, but we have
5520 to go further up to find the exact frame ID, we are stepping
5521 through a more inlined call beyond its call site. */
5522
5523 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5524 && !frame_id_eq (get_frame_id (get_current_frame ()),
5525 ecs->event_thread->control.step_frame_id)
5526 && stepped_in_from (get_current_frame (),
5527 ecs->event_thread->control.step_frame_id))
5528 {
5529 if (debug_infrun)
5530 fprintf_unfiltered (gdb_stdlog,
5531 "infrun: stepping through inlined function\n");
5532
5533 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5534 keep_going (ecs);
5535 else
5536 end_stepping_range (ecs);
5537 return;
5538 }
5539
5540 if ((stop_pc == stop_pc_sal.pc)
5541 && (ecs->event_thread->current_line != stop_pc_sal.line
5542 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5543 {
5544 /* We are at the start of a different line. So stop. Note that
5545 we don't stop if we step into the middle of a different line.
5546 That is said to make things like for (;;) statements work
5547 better. */
5548 if (debug_infrun)
5549 fprintf_unfiltered (gdb_stdlog,
5550 "infrun: stepped to a different line\n");
5551 end_stepping_range (ecs);
5552 return;
5553 }
5554
5555 /* We aren't done stepping.
5556
5557 Optimize by setting the stepping range to the line.
5558 (We might not be in the original line, but if we entered a
5559 new line in mid-statement, we continue stepping. This makes
5560 things like for(;;) statements work better.) */
5561
5562 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5563 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5564 ecs->event_thread->control.may_range_step = 1;
5565 set_step_info (frame, stop_pc_sal);
5566
5567 if (debug_infrun)
5568 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5569 keep_going (ecs);
5570 }
5571
5572 /* In all-stop mode, if we're currently stepping but have stopped in
5573 some other thread, we may need to switch back to the stepped
5574 thread. Returns true if we set the inferior running, false if we left
5575 it stopped (and the event needs further processing). */
5576
5577 static int
5578 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5579 {
5580 if (!non_stop)
5581 {
5582 struct thread_info *tp;
5583 struct thread_info *stepping_thread;
5584 struct thread_info *step_over;
5585
5586 /* If any thread is blocked on some internal breakpoint, and we
5587 simply need to step over that breakpoint to get it going
5588 again, do that first. */
5589
5590 /* However, if we see an event for the stepping thread, then we
5591 know all other threads have been moved past their breakpoints
5592 already. Let the caller check whether the step is finished,
5593 etc., before deciding to move it past a breakpoint. */
5594 if (ecs->event_thread->control.step_range_end != 0)
5595 return 0;
5596
5597 /* Check if the current thread is blocked on an incomplete
5598 step-over, interrupted by a random signal. */
5599 if (ecs->event_thread->control.trap_expected
5600 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5601 {
5602 if (debug_infrun)
5603 {
5604 fprintf_unfiltered (gdb_stdlog,
5605 "infrun: need to finish step-over of [%s]\n",
5606 target_pid_to_str (ecs->event_thread->ptid));
5607 }
5608 keep_going (ecs);
5609 return 1;
5610 }
5611
5612 /* Check if the current thread is blocked by a single-step
5613 breakpoint of another thread. */
5614 if (ecs->hit_singlestep_breakpoint)
5615 {
5616 if (debug_infrun)
5617 {
5618 fprintf_unfiltered (gdb_stdlog,
5619 "infrun: need to step [%s] over single-step "
5620 "breakpoint\n",
5621 target_pid_to_str (ecs->ptid));
5622 }
5623 keep_going (ecs);
5624 return 1;
5625 }
5626
5627 /* Otherwise, we no longer expect a trap in the current thread.
5628 Clear the trap_expected flag before switching back -- this is
5629 what keep_going does as well, if we call it. */
5630 ecs->event_thread->control.trap_expected = 0;
5631
5632 /* Likewise, clear the signal if it should not be passed. */
5633 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5634 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5635
5636 /* If scheduler locking applies even if not stepping, there's no
5637 need to walk over threads. Above we've checked whether the
5638 current thread is stepping. If some thread other than the
5639 event thread is stepping, then it must be that scheduler
5640 locking is not in effect. */
5641 if (schedlock_applies (ecs->event_thread))
5642 return 0;
5643
5644 /* Look for the stepping/nexting thread, and check if any
5645 thread other than the stepping thread needs to start a
5646 step-over. Do all step-overs before actually proceeding with
5647 step/next/etc. */
5648 stepping_thread = NULL;
5649 step_over = NULL;
5650 ALL_NON_EXITED_THREADS (tp)
5651 {
5652 /* Ignore threads of processes we're not resuming. */
5653 if (!sched_multi
5654 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5655 continue;
5656
5657 /* When stepping over a breakpoint, we lock all threads
5658 except the one that needs to move past the breakpoint.
5659 If a non-event thread has this set, the "incomplete
5660 step-over" check above should have caught it earlier. */
5661 gdb_assert (!tp->control.trap_expected);
5662
5663 /* Did we find the stepping thread? */
5664 if (tp->control.step_range_end)
5665 {
5666 /* Yep. There should be only one, though. */
5667 gdb_assert (stepping_thread == NULL);
5668
5669 /* The event thread is handled at the top, before we
5670 enter this loop. */
5671 gdb_assert (tp != ecs->event_thread);
5672
5673 /* If some thread other than the event thread is
5674 stepping, then scheduler locking can't be in effect,
5675 otherwise we wouldn't have resumed the current event
5676 thread in the first place. */
5677 gdb_assert (!schedlock_applies (tp));
5678
5679 stepping_thread = tp;
5680 }
5681 else if (thread_still_needs_step_over (tp))
5682 {
5683 step_over = tp;
5684
5685 /* At the top we've returned early if the event thread
5686 is stepping. If some thread other than the event
5687 thread is stepping, then scheduler locking can't be
5688 in effect, and we can resume this thread. No need to
5689 keep looking for the stepping thread then. */
5690 break;
5691 }
5692 }
5693
5694 if (step_over != NULL)
5695 {
5696 tp = step_over;
5697 if (debug_infrun)
5698 {
5699 fprintf_unfiltered (gdb_stdlog,
5700 "infrun: need to step-over [%s]\n",
5701 target_pid_to_str (tp->ptid));
5702 }
5703
5704 /* Only the stepping thread should have this set. */
5705 gdb_assert (tp->control.step_range_end == 0);
5706
5707 ecs->ptid = tp->ptid;
5708 ecs->event_thread = tp;
5709 switch_to_thread (ecs->ptid);
5710 keep_going (ecs);
5711 return 1;
5712 }
5713
5714 if (stepping_thread != NULL)
5715 {
5716 struct frame_info *frame;
5717 struct gdbarch *gdbarch;
5718
5719 tp = stepping_thread;
5720
5721 /* If the stepping thread exited, then don't try to switch
5722 back and resume it, which could fail in several different
5723 ways depending on the target. Instead, just keep going.
5724
5725 We can find a stepping dead thread in the thread list in
5726 two cases:
5727
5728 - The target supports thread exit events, and when the
5729 target tries to delete the thread from the thread list,
5730 inferior_ptid pointed at the exiting thread. In such
5731 case, calling delete_thread does not really remove the
5732 thread from the list; instead, the thread is left listed,
5733 with 'exited' state.
5734
5735 - The target's debug interface does not support thread
5736 exit events, and so we have no idea whatsoever if the
5737 previously stepping thread is still alive. For that
5738 reason, we need to synchronously query the target
5739 now. */
5740 if (is_exited (tp->ptid)
5741 || !target_thread_alive (tp->ptid))
5742 {
5743 if (debug_infrun)
5744 fprintf_unfiltered (gdb_stdlog,
5745 "infrun: not switching back to "
5746 "stepped thread, it has vanished\n");
5747
5748 delete_thread (tp->ptid);
5749 keep_going (ecs);
5750 return 1;
5751 }
5752
5753 if (debug_infrun)
5754 fprintf_unfiltered (gdb_stdlog,
5755 "infrun: switching back to stepped thread\n");
5756
5757 ecs->event_thread = tp;
5758 ecs->ptid = tp->ptid;
5759 context_switch (ecs->ptid);
5760
5761 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5762 frame = get_current_frame ();
5763 gdbarch = get_frame_arch (frame);
5764
5765 /* If the PC of the thread we were trying to single-step has
5766 changed, then that thread has trapped or been signaled,
5767 but the event has not been reported to GDB yet. Re-poll
5768 the target looking for this particular thread's event
5769 (i.e. temporarily enable schedlock) by:
5770
5771 - setting a break at the current PC
5772 - resuming that particular thread, only (by setting
5773 trap expected)
5774
5775 This prevents us continuously moving the single-step
5776 breakpoint forward, one instruction at a time,
5777 overstepping. */
5778
5779 if (stop_pc != tp->prev_pc)
5780 {
5781 ptid_t resume_ptid;
5782
5783 if (debug_infrun)
5784 fprintf_unfiltered (gdb_stdlog,
5785 "infrun: expected thread advanced also\n");
5786
5787 /* Clear the info of the previous step-over, as it's no
5788 longer valid. It's what keep_going would do too, if
5789 we called it. Must do this before trying to insert
5790 the sss breakpoint, otherwise if we were previously
5791 trying to step over this exact address in another
5792 thread, the breakpoint ends up not installed. */
5793 clear_step_over_info ();
5794
5795 insert_single_step_breakpoint (get_frame_arch (frame),
5796 get_frame_address_space (frame),
5797 stop_pc);
5798
5799 resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
5800 do_target_resume (resume_ptid,
5801 currently_stepping (tp), GDB_SIGNAL_0);
5802 prepare_to_wait (ecs);
5803 }
5804 else
5805 {
5806 if (debug_infrun)
5807 fprintf_unfiltered (gdb_stdlog,
5808 "infrun: expected thread still "
5809 "hasn't advanced\n");
5810 keep_going (ecs);
5811 }
5812
5813 return 1;
5814 }
5815 }
5816 return 0;
5817 }
5818
5819 /* Is thread TP in the middle of single-stepping? */
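/* That is: it has an active step range with no step-resume breakpoint
   planted, it is finishing a single-step past a breakpoint or
   watchpoint (trap_expected), it just stepped an instruction that had
   a breakpoint at it (stepped_breakpoint), or software watchpoints
   force single-stepping (bpstat_should_step).  */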
5820
5821 static int
5822 currently_stepping (struct thread_info *tp)
5823 {
5824 return ((tp->control.step_range_end
5825 && tp->control.step_resume_breakpoint == NULL)
5826 || tp->control.trap_expected
5827 || tp->stepped_breakpoint
5828 || bpstat_should_step ());
5829 }
5830
5831 /* Inferior has stepped into a subroutine call with source code that
5832 we should not step over. Step to the first line of code in
5833 it. */
5834
5835 static void
5836 handle_step_into_function (struct gdbarch *gdbarch,
5837 struct execution_control_state *ecs)
5838 {
5839 struct compunit_symtab *cust;
5840 struct symtab_and_line stop_func_sal, sr_sal;
5841
5842 fill_in_stop_func (gdbarch, ecs);
5843
5844 cust = find_pc_compunit_symtab (stop_pc);
5845 if (cust != NULL && compunit_language (cust) != language_asm)
5846 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5847 ecs->stop_func_start);
5848
5849 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5850 /* Use the step_resume_break to step until the end of the prologue,
5851 even if that involves jumps (as it seems to on the vax under
5852 4.2). */
5853 /* If the prologue ends in the middle of a source line, continue to
5854 the end of that source line (if it is still within the function).
5855 Otherwise, just go to end of prologue. */
5856 if (stop_func_sal.end
5857 && stop_func_sal.pc != ecs->stop_func_start
5858 && stop_func_sal.end < ecs->stop_func_end)
5859 ecs->stop_func_start = stop_func_sal.end;
5860
5861 /* Architectures which require breakpoint adjustment might not be able
5862 to place a breakpoint at the computed address. If so, the test
5863 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5864 ecs->stop_func_start to an address at which a breakpoint may be
5865 legitimately placed.
5866
5867 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5868 made, GDB will enter an infinite loop when stepping through
5869 optimized code consisting of VLIW instructions which contain
5870 subinstructions corresponding to different source lines. On
5871 FR-V, it's not permitted to place a breakpoint on any but the
5872 first subinstruction of a VLIW instruction. When a breakpoint is
5873 set, GDB will adjust the breakpoint address to the beginning of
5874 the VLIW instruction. Thus, we need to make the corresponding
5875 adjustment here when computing the stop address. */
5876
5877 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5878 {
5879 ecs->stop_func_start
5880 = gdbarch_adjust_breakpoint_address (gdbarch,
5881 ecs->stop_func_start);
5882 }
5883
5884 if (ecs->stop_func_start == stop_pc)
5885 {
5886 /* We are already there: stop now. */
5887 end_stepping_range (ecs);
5888 return;
5889 }
5890 else
5891 {
5892 /* Put the step-resume breakpoint there and run until we reach it. */
5893 init_sal (&sr_sal); /* initialize to zeroes */
5894 sr_sal.pc = ecs->stop_func_start;
5895 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5896 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5897
5898 /* Do not specify what the fp should be when we stop since on
5899 some machines the prologue is where the new fp value is
5900 established. */
5901 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5902
5903 /* And make sure stepping stops right away then. */
5904 ecs->event_thread->control.step_range_end
5905 = ecs->event_thread->control.step_range_start;
5906 }
5907 keep_going (ecs);
5908 }
5909
5910 /* Inferior has stepped backward into a subroutine call with source
5911 code that we should not step over. Step to the beginning of the
5912 last line of code in it. */
5913
5914 static void
5915 handle_step_into_function_backward (struct gdbarch *gdbarch,
5916 struct execution_control_state *ecs)
5917 {
5918 struct compunit_symtab *cust;
5919 struct symtab_and_line stop_func_sal;
5920
5921 fill_in_stop_func (gdbarch, ecs);
5922
5923 cust = find_pc_compunit_symtab (stop_pc);
5924 if (cust != NULL && compunit_language (cust) != language_asm)
5925 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5926 ecs->stop_func_start);
5927
5928 stop_func_sal = find_pc_line (stop_pc, 0);
5929
5930 /* OK, we're just going to keep stepping here. */
5931 if (stop_func_sal.pc == stop_pc)
5932 {
5933 /* We're there already. Just stop stepping now. */
5934 end_stepping_range (ecs);
5935 }
5936 else
5937 {
5938 /* Else just reset the step range and keep going.
5939 No step-resume breakpoint; they don't work for
5940 epilogues, which can have multiple entry paths. */
5941 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5942 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5943 keep_going (ecs);
5944 }
5945 return;
5946 }
5947
5948 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5949 This is used both to step over functions and to skip over code. */
5950
5951 static void
5952 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5953 struct symtab_and_line sr_sal,
5954 struct frame_id sr_id,
5955 enum bptype sr_type)
5956 {
5957 /* There should never be more than one step-resume or longjmp-resume
5958 breakpoint per thread, so we should never be setting a new
5959 step_resume_breakpoint when one is already active. */
5960 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5961 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5962
5963 if (debug_infrun)
5964 fprintf_unfiltered (gdb_stdlog,
5965 "infrun: inserting step-resume breakpoint at %s\n",
5966 paddress (gdbarch, sr_sal.pc));
5967
5968 inferior_thread ()->control.step_resume_breakpoint
5969 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5970 }
5971
5972 void
5973 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5974 struct symtab_and_line sr_sal,
5975 struct frame_id sr_id)
5976 {
5977 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5978 sr_sal, sr_id,
5979 bp_step_resume);
5980 }
5981
5982 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5983 This is used to skip a potential signal handler.
5984
5985 This is called with the interrupted function's frame. The signal
5986 handler, when it returns, will resume the interrupted function at
5987 RETURN_FRAME.pc. */
5988
5989 static void
5990 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5991 {
5992 struct symtab_and_line sr_sal;
5993 struct gdbarch *gdbarch;
5994
5995 gdb_assert (return_frame != NULL);
5996 init_sal (&sr_sal); /* initialize to zeros */
5997
5998 gdbarch = get_frame_arch (return_frame);
5999 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
6000 sr_sal.section = find_pc_overlay (sr_sal.pc);
6001 sr_sal.pspace = get_frame_program_space (return_frame);
6002
6003 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
6004 get_stack_frame_id (return_frame),
6005 bp_hp_step_resume);
6006 }
6007
6008 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
6009 is used to skip a function after stepping into it (for "next" or if
6010 the called function has no debugging information).
6011
6012 The current function has almost always been reached by single
6013 stepping a call or return instruction. NEXT_FRAME belongs to the
6014 current function, and the breakpoint will be set at the caller's
6015 resume address.
6016
6017 This is a separate function rather than reusing
6018 insert_hp_step_resume_breakpoint_at_frame in order to avoid
6019 get_prev_frame, which may stop prematurely (see the implementation
6020 of frame_unwind_caller_id for an example). */
6021
6022 static void
6023 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
6024 {
6025 struct symtab_and_line sr_sal;
6026 struct gdbarch *gdbarch;
6027
6028 /* We shouldn't have gotten here if we don't know where the call site
6029 is. */
6030 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
6031
6032 init_sal (&sr_sal); /* initialize to zeros */
6033
6034 gdbarch = frame_unwind_caller_arch (next_frame);
6035 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
6036 frame_unwind_caller_pc (next_frame));
6037 sr_sal.section = find_pc_overlay (sr_sal.pc);
6038 sr_sal.pspace = frame_unwind_program_space (next_frame);
6039
6040 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
6041 frame_unwind_caller_id (next_frame));
6042 }
6043
6044 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
6045 new breakpoint at the target of a jmp_buf. The handling of
6046 longjmp-resume uses the same mechanisms used for handling
6047 "step-resume" breakpoints. */
6048
6049 static void
6050 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
6051 {
6052 /* There should never be more than one longjmp-resume breakpoint per
6053 thread, so we should never be setting a new
6054 longjmp_resume_breakpoint when one is already active. */
6055 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
6056
6057 if (debug_infrun)
6058 fprintf_unfiltered (gdb_stdlog,
6059 "infrun: inserting longjmp-resume breakpoint at %s\n",
6060 paddress (gdbarch, pc));
6061
6062 inferior_thread ()->control.exception_resume_breakpoint =
6063 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
6064 }
6065
6066 /* Insert an exception resume breakpoint. TP is the thread throwing
6067 the exception. The block B is the block of the unwinder debug hook
6068 function. FRAME is the frame corresponding to the call to this
6069 function. SYM is the symbol of the function argument holding the
6070 target PC of the exception. */
6071
6072 static void
6073 insert_exception_resume_breakpoint (struct thread_info *tp,
6074 const struct block *b,
6075 struct frame_info *frame,
6076 struct symbol *sym)
6077 {
6078 TRY
6079 {
6080 struct symbol *vsym;
6081 struct value *value;
6082 CORE_ADDR handler;
6083 struct breakpoint *bp;
6084
6085 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
6086 value = read_var_value (vsym, frame);
6087 /* If the value was optimized out, revert to the old behavior. */
6088 if (! value_optimized_out (value))
6089 {
6090 handler = value_as_address (value);
6091
6092 if (debug_infrun)
6093 fprintf_unfiltered (gdb_stdlog,
6094 "infrun: exception resume at %lx\n",
6095 (unsigned long) handler);
6096
6097 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6098 handler, bp_exception_resume);
6099
6100 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
6101 frame = NULL;
6102
6103 bp->thread = tp->num;
6104 inferior_thread ()->control.exception_resume_breakpoint = bp;
6105 }
6106 }
6107 CATCH (e, RETURN_MASK_ERROR)
6108 {
6109 /* We want to ignore errors here. */
6110 }
6111 END_CATCH
6112 }
6113
6114 /* A helper for check_exception_resume that sets an
6115 exception-resume breakpoint based on a SystemTap probe. */
6116
6117 static void
6118 insert_exception_resume_from_probe (struct thread_info *tp,
6119 const struct bound_probe *probe,
6120 struct frame_info *frame)
6121 {
6122 struct value *arg_value;
6123 CORE_ADDR handler;
6124 struct breakpoint *bp;
6125
6126 arg_value = probe_safe_evaluate_at_pc (frame, 1);
6127 if (!arg_value)
6128 return;
6129
6130 handler = value_as_address (arg_value);
6131
6132 if (debug_infrun)
6133 fprintf_unfiltered (gdb_stdlog,
6134 "infrun: exception resume at %s\n",
6135 paddress (get_objfile_arch (probe->objfile),
6136 handler));
6137
6138 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6139 handler, bp_exception_resume);
6140 bp->thread = tp->num;
6141 inferior_thread ()->control.exception_resume_breakpoint = bp;
6142 }
6143
6144 /* This is called when an exception has been intercepted. Check to
6145 see whether the exception's destination is of interest, and if so,
6146 set an exception resume breakpoint there. */
6147
6148 static void
6149 check_exception_resume (struct execution_control_state *ecs,
6150 struct frame_info *frame)
6151 {
6152 struct bound_probe probe;
6153 struct symbol *func;
6154
6155 /* First see if this exception unwinding breakpoint was set via a
6156 SystemTap probe point. If so, the probe has two arguments: the
6157 CFA and the HANDLER. We ignore the CFA, extract the handler, and
6158 set a breakpoint there. */
6159 probe = find_probe_by_pc (get_frame_pc (frame));
6160 if (probe.probe)
6161 {
6162 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
6163 return;
6164 }
6165
6166 func = get_frame_function (frame);
6167 if (!func)
6168 return;
6169
6170 TRY
6171 {
6172 const struct block *b;
6173 struct block_iterator iter;
6174 struct symbol *sym;
6175 int argno = 0;
6176
6177 /* The exception breakpoint is a thread-specific breakpoint on
6178 the unwinder's debug hook, declared as:
6179
6180 void _Unwind_DebugHook (void *cfa, void *handler);
6181
6182 The CFA argument indicates the frame to which control is
6183 about to be transferred. HANDLER is the destination PC.
6184
6185 We ignore the CFA and set a temporary breakpoint at HANDLER.
6186 This is not extremely efficient but it avoids issues in gdb
6187 with computing the DWARF CFA, and it also works even in weird
6188 cases such as throwing an exception from inside a signal
6189 handler. */
6190
6191 b = SYMBOL_BLOCK_VALUE (func);
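/* Walk the hook's arguments: skip the first one (the CFA) and plant
   the breakpoint using the second one (HANDLER).  */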
6192 ALL_BLOCK_SYMBOLS (b, iter, sym)
6193 {
6194 if (!SYMBOL_IS_ARGUMENT (sym))
6195 continue;
6196
6197 if (argno == 0)
6198 ++argno;
6199 else
6200 {
6201 insert_exception_resume_breakpoint (ecs->event_thread,
6202 b, frame, sym);
6203 break;
6204 }
6205 }
6206 }
6207 CATCH (e, RETURN_MASK_ERROR)
6208 {
6209 }
6210 END_CATCH
6211 }
6212
6213 static void
6214 stop_waiting (struct execution_control_state *ecs)
6215 {
6216 if (debug_infrun)
6217 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
6218
6219 clear_step_over_info ();
6220
6221 /* Let callers know we don't want to wait for the inferior anymore. */
6222 ecs->wait_some_more = 0;
6223 }
6224
6225 /* Called when we should continue running the inferior, because the
6226 current event doesn't cause a user visible stop. This does the
6227 resuming part; waiting for the next event is done elsewhere. */
6228
6229 static void
6230 keep_going (struct execution_control_state *ecs)
6231 {
6232 /* Make sure normal_stop is called if we get a QUIT handled before
6233 reaching resume. */
6234 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
6235
6236 /* Save the pc before execution, to compare with pc after stop. */
6237 ecs->event_thread->prev_pc
6238 = regcache_read_pc (get_thread_regcache (ecs->ptid));
6239
6240 if (ecs->event_thread->control.trap_expected
6241 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
6242 {
6243 /* We haven't yet gotten our trap, and either: intercepted a
6244 non-signal event (e.g., a fork); or took a signal which we
6245 are supposed to pass through to the inferior. Simply
6246 continue. */
6247 discard_cleanups (old_cleanups);
6248 resume (ecs->event_thread->suspend.stop_signal);
6249 }
6250 else
6251 {
6252 struct regcache *regcache = get_current_regcache ();
6253 int remove_bp;
6254 int remove_wps;
6255
6256 /* Either the trap was not expected, but we are continuing
6257 anyway (if we got a signal, the user asked it be passed to
6258 the child)
6259 -- or --
6260 We got our expected trap, but decided we should resume from
6261 it.
6262
6263 We're going to run this baby now!
6264
6265 Note that insert_breakpoints won't try to re-insert
6266 already inserted breakpoints. Therefore, we don't
6267 care if breakpoints were already inserted, or not. */
6268
6269 /* If we need to step over a breakpoint, and we're not using
6270 displaced stepping to do so, insert all breakpoints
6271 (watchpoints, etc.) but the one we're stepping over, step one
6272 instruction, and then re-insert the breakpoint when that step
6273 is finished. */
6274
6275 remove_bp = (ecs->hit_singlestep_breakpoint
6276 || thread_still_needs_step_over (ecs->event_thread));
6277 remove_wps = (ecs->event_thread->stepping_over_watchpoint
6278 && !target_have_steppable_watchpoint);
6279
6280 /* We can't use displaced stepping if we need to step past a
6281 watchpoint. The instruction copied to the scratch pad would
6282 still trigger the watchpoint. */
6283 if (remove_bp
6284 && (remove_wps
6285 || !use_displaced_stepping (get_regcache_arch (regcache))))
6286 {
6287 set_step_over_info (get_regcache_aspace (regcache),
6288 regcache_read_pc (regcache), remove_wps);
6289 }
6290 else if (remove_wps)
6291 set_step_over_info (NULL, 0, remove_wps);
6292 else
6293 clear_step_over_info ();
6294
6295 /* Stop stepping if inserting breakpoints fails. */
6296 TRY
6297 {
6298 insert_breakpoints ();
6299 }
6300 CATCH (e, RETURN_MASK_ERROR)
6301 {
6302 exception_print (gdb_stderr, e);
6303 stop_waiting (ecs);
6304 discard_cleanups (old_cleanups);
6305 return;
6306 }
6307 END_CATCH
6308
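/* Expect the next trap from this thread to be the completion of the
   step-over, if we are stepping past a breakpoint or watchpoint;
   otherwise no trap is expected.  */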
6309 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
6310
6311 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
6312 explicitly specifies that such a signal should be delivered
6313 to the target program). Typically, that would occur when a
6314 user is debugging a target monitor on a simulator: the target
6315 monitor sets a breakpoint; the simulator encounters this
6316 breakpoint and halts the simulation handing control to GDB;
6317 GDB, noting that the stop address doesn't map to any known
6318 breakpoint, returns control back to the simulator; the
6319 simulator then delivers the hardware equivalent of a
6320 GDB_SIGNAL_TRAP to the program being debugged. */
6321 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6322 && !signal_program[ecs->event_thread->suspend.stop_signal])
6323 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6324
6325 discard_cleanups (old_cleanups);
6326 resume (ecs->event_thread->suspend.stop_signal);
6327 }
6328
6329 prepare_to_wait (ecs);
6330 }
6331
6332 /* This function normally comes after a resume, before
6333 handle_inferior_event exits. It takes care of any last bits of
6334 housekeeping, and sets the all-important wait_some_more flag. */
6335
6336 static void
6337 prepare_to_wait (struct execution_control_state *ecs)
6338 {
6339 if (debug_infrun)
6340 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6341
6342 /* This is the old end of the while loop. Let everybody know we
6343 want to wait for the inferior some more and get called again
6344 soon. */
6345 ecs->wait_some_more = 1;
6346 }
6347
6348 /* We are done with the step range of a step/next/si/ni command.
6349 Called once for each n of a "step n" operation. */
6350
6351 static void
6352 end_stepping_range (struct execution_control_state *ecs)
6353 {
6354 ecs->event_thread->control.stop_step = 1;
6355 stop_waiting (ecs);
6356 }
6357
6358 /* Several print_*_reason functions to print why the inferior has stopped.
6359 We always print something when the inferior exits, or receives a signal.
6360 The rest of the cases are dealt with later on in normal_stop and
6361 print_it_typical. Ideally there should be a call to one of these
6362 print_*_reason functions from handle_inferior_event each time
6363 stop_waiting is called.
6364
6365 Note that we don't call these directly, instead we delegate that to
6366 the interpreters, through observers. Interpreters then call these
6367 with whatever uiout is right. */
6368
6369 void
6370 print_end_stepping_range_reason (struct ui_out *uiout)
6371 {
6372 /* For CLI-like interpreters, print nothing. */
6373
6374 if (ui_out_is_mi_like_p (uiout))
6375 {
6376 ui_out_field_string (uiout, "reason",
6377 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6378 }
6379 }
6380
6381 void
6382 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6383 {
6384 annotate_signalled ();
6385 if (ui_out_is_mi_like_p (uiout))
6386 ui_out_field_string
6387 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
6388 ui_out_text (uiout, "\nProgram terminated with signal ");
6389 annotate_signal_name ();
6390 ui_out_field_string (uiout, "signal-name",
6391 gdb_signal_to_name (siggnal));
6392 annotate_signal_name_end ();
6393 ui_out_text (uiout, ", ");
6394 annotate_signal_string ();
6395 ui_out_field_string (uiout, "signal-meaning",
6396 gdb_signal_to_string (siggnal));
6397 annotate_signal_string_end ();
6398 ui_out_text (uiout, ".\n");
6399 ui_out_text (uiout, "The program no longer exists.\n");
6400 }
6401
6402 void
6403 print_exited_reason (struct ui_out *uiout, int exitstatus)
6404 {
6405 struct inferior *inf = current_inferior ();
6406 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6407
6408 annotate_exited (exitstatus);
6409 if (exitstatus)
6410 {
6411 if (ui_out_is_mi_like_p (uiout))
6412 ui_out_field_string (uiout, "reason",
6413 async_reason_lookup (EXEC_ASYNC_EXITED));
6414 ui_out_text (uiout, "[Inferior ");
6415 ui_out_text (uiout, plongest (inf->num));
6416 ui_out_text (uiout, " (");
6417 ui_out_text (uiout, pidstr);
6418 ui_out_text (uiout, ") exited with code ");
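/* The exit code is printed in octal, with a leading zero ("0%o").  */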
6419 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6420 ui_out_text (uiout, "]\n");
6421 }
6422 else
6423 {
6424 if (ui_out_is_mi_like_p (uiout))
6425 ui_out_field_string
6426 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6427 ui_out_text (uiout, "[Inferior ");
6428 ui_out_text (uiout, plongest (inf->num));
6429 ui_out_text (uiout, " (");
6430 ui_out_text (uiout, pidstr);
6431 ui_out_text (uiout, ") exited normally]\n");
6432 }
6433 }
6434
6435 void
6436 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6437 {
6438 annotate_signal ();
6439
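/* No signal to report and a CLI-like interpreter: just announce
   which thread stopped.  */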
6440 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
6441 {
6442 struct thread_info *t = inferior_thread ();
6443
6444 ui_out_text (uiout, "\n[");
6445 ui_out_field_string (uiout, "thread-name",
6446 target_pid_to_str (t->ptid));
6447 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6448 ui_out_text (uiout, " stopped");
6449 }
6450 else
6451 {
6452 ui_out_text (uiout, "\nProgram received signal ");
6453 annotate_signal_name ();
6454 if (ui_out_is_mi_like_p (uiout))
6455 ui_out_field_string
6456 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6457 ui_out_field_string (uiout, "signal-name",
6458 gdb_signal_to_name (siggnal));
6459 annotate_signal_name_end ();
6460 ui_out_text (uiout, ", ");
6461 annotate_signal_string ();
6462 ui_out_field_string (uiout, "signal-meaning",
6463 gdb_signal_to_string (siggnal));
6464 annotate_signal_string_end ();
6465 }
6466 ui_out_text (uiout, ".\n");
6467 }
6468
6469 void
6470 print_no_history_reason (struct ui_out *uiout)
6471 {
6472 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6473 }
6474
6475 /* Print current location without a level number, if we have changed
6476 functions or hit a breakpoint. Print source line if we have one.
6477 bpstat_print contains the logic deciding in detail what to print,
6478 based on the event(s) that just occurred. */
6479
6480 void
6481 print_stop_event (struct target_waitstatus *ws)
6482 {
6483 int bpstat_ret;
6484 int source_flag;
6485 int do_frame_printing = 1;
6486 struct thread_info *tp = inferior_thread ();
6487
6488 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6489 switch (bpstat_ret)
6490 {
6491 case PRINT_UNKNOWN:
6492 /* FIXME: cagney/2002-12-01: A frame ID does (or should) carry
6493 around the function, and frame comparisons do (or should) use
6494 it, which would make the explicit function check below redundant.  */
6495 if (tp->control.stop_step
6496 && frame_id_eq (tp->control.step_frame_id,
6497 get_frame_id (get_current_frame ()))
6498 && tp->control.step_start_function == find_pc_function (stop_pc))
6499 {
6500 /* Finished step, just print source line. */
6501 source_flag = SRC_LINE;
6502 }
6503 else
6504 {
6505 /* Print location and source line. */
6506 source_flag = SRC_AND_LOC;
6507 }
6508 break;
6509 case PRINT_SRC_AND_LOC:
6510 /* Print location and source line. */
6511 source_flag = SRC_AND_LOC;
6512 break;
6513 case PRINT_SRC_ONLY:
6514 source_flag = SRC_LINE;
6515 break;
6516 case PRINT_NOTHING:
6517 /* Something bogus. */
6518 source_flag = SRC_LINE;
6519 do_frame_printing = 0;
6520 break;
6521 default:
6522 internal_error (__FILE__, __LINE__, _("Unknown value."));
6523 }
6524
6525 /* The behavior of this routine with respect to the source
6526 flag is:
6527 SRC_LINE: Print only source line
6528 LOCATION: Print only location
6529 SRC_AND_LOC: Print location and source line. */
6530 if (do_frame_printing)
6531 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6532
6533 /* Display the auto-display expressions. */
6534 do_displays ();
6535 }
6536
6537 /* Here to return control to GDB when the inferior stops for real.
6538 Print appropriate messages, remove breakpoints, give terminal our modes.
6539
6540 The global STOP_PRINT_FRAME, when nonzero, means print the
6541 executing frame (pc, function, args, file, line number and line
6542 text).  */
6544
6545 void
6546 normal_stop (void)
6547 {
6548 struct target_waitstatus last;
6549 ptid_t last_ptid;
6550 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6551
6552 get_last_target_status (&last_ptid, &last);
6553
6554 /* If an exception is thrown from this point on, make sure to
6555 propagate GDB's knowledge of the executing state to the
6556 frontend/user running state. A QUIT is an easy exception to see
6557 here, so do this before any filtered output. */
6558 if (!non_stop)
6559 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6560 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6561 && last.kind != TARGET_WAITKIND_EXITED
6562 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6563 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6564
6565 /* As we're presenting a stop, and potentially removing breakpoints,
6566 update the thread list so we can tell whether there are threads
6567 running on the target. With target remote, for example, we can
6568 only learn about new threads when we explicitly update the thread
6569 list. Do this before notifying the interpreters about signal
6570 stops, end of stepping ranges, etc., so that the "new thread"
6571 output is emitted before e.g., "Program received signal FOO",
6572 instead of after. */
6573 update_thread_list ();
6574
6575 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
6576 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
6577
6578 /* As with the notification of thread events, we want to delay
6579 notifying the user that we've switched thread context until
6580 the inferior actually stops.
6581
6582 There's no point in saying anything if the inferior has exited.
6583 Note that SIGNALLED here means "exited with a signal", not
6584 "received a signal".
6585
6586 Also skip saying anything in non-stop mode. In that mode, as we
6587 don't want GDB to switch threads behind the user's back, to avoid
6588 races where the user is typing a command to apply to thread x,
6589 but GDB switches to thread y before the user finishes entering
6590 the command, fetch_inferior_event installs a cleanup to restore
6591 the current thread back to the thread the user had selected right
6592 after this event is handled, so we're not really switching, only
6593 informing of a stop. */
6594 if (!non_stop
6595 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6596 && target_has_execution
6597 && last.kind != TARGET_WAITKIND_SIGNALLED
6598 && last.kind != TARGET_WAITKIND_EXITED
6599 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6600 {
6601 target_terminal_ours_for_output ();
6602 printf_filtered (_("[Switching to %s]\n"),
6603 target_pid_to_str (inferior_ptid));
6604 annotate_thread_changed ();
6605 previous_inferior_ptid = inferior_ptid;
6606 }
6607
6608 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6609 {
6610 gdb_assert (sync_execution || !target_can_async_p ());
6611
6612 target_terminal_ours_for_output ();
6613 printf_filtered (_("No unwaited-for children left.\n"));
6614 }
6615
6616 /* Note: this depends on the update_thread_list call above. */
6617 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6618 {
6619 if (remove_breakpoints ())
6620 {
6621 target_terminal_ours_for_output ();
6622 printf_filtered (_("Cannot remove breakpoints because "
6623 "program is no longer writable.\nFurther "
6624 "execution is probably impossible.\n"));
6625 }
6626 }
6627
6628 /* If an auto-display called a function and that got a signal,
6629 delete that auto-display to avoid an infinite recursion. */
6630
6631 if (stopped_by_random_signal)
6632 disable_current_display ();
6633
6634 /* Notify observers if we finished a "step"-like command, etc. */
6635 if (target_has_execution
6636 && last.kind != TARGET_WAITKIND_SIGNALLED
6637 && last.kind != TARGET_WAITKIND_EXITED
6638 && inferior_thread ()->control.stop_step)
6639 {
6640 /* But not if in the middle of doing a "step n" operation for
6641 n > 1 */
6642 if (inferior_thread ()->step_multi)
6643 goto done;
6644
6645 observer_notify_end_stepping_range ();
6646 }
6647
6648 target_terminal_ours ();
6649 async_enable_stdin ();
6650
6651 /* Set the current source location. This will also happen if we
6652 display the frame below, but the current SAL will be incorrect
6653 during a user hook-stop function. */
6654 if (has_stack_frames () && !stop_stack_dummy)
6655 set_current_sal_from_frame (get_current_frame ());
6656
6657 /* Let the user/frontend see the threads as stopped, but do nothing
6658 if the thread was running an infcall. We may be e.g., evaluating
6659 a breakpoint condition. In that case, the thread had state
6660 THREAD_RUNNING before the infcall, and shall remain set to
6661 running, all without informing the user/frontend about state
6662 transition changes. If this is actually a call command, then the
6663 thread was originally already stopped, so there's no state to
6664 finish either. */
6665 if (target_has_execution && inferior_thread ()->control.in_infcall)
6666 discard_cleanups (old_chain);
6667 else
6668 do_cleanups (old_chain);
6669
6670 /* Look up the hook_stop and run it (CLI internally handles problem
6671 of stop_command's pre-hook not existing). */
6672 if (stop_command)
6673 catch_errors (hook_stop_stub, stop_command,
6674 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6675
6676 if (!has_stack_frames ())
6677 goto done;
6678
6679 if (last.kind == TARGET_WAITKIND_SIGNALLED
6680 || last.kind == TARGET_WAITKIND_EXITED)
6681 goto done;
6682
6683 /* Select innermost stack frame - i.e., current frame is frame 0,
6684 and current location is based on that.
6685 Don't do this on return from a stack dummy routine,
6686 or if the program has exited. */
6687
6688 if (!stop_stack_dummy)
6689 {
6690 select_frame (get_current_frame ());
6691
6692 /* If --batch-silent is enabled then there's no need to print the
6693 current source location, and trying to print it risks causing an
6694 error message about missing source files.  */
6695 if (stop_print_frame && !batch_silent)
6696 print_stop_event (&last);
6697 }
6698
6699 /* Save the function value return registers, if we care.
6700 We might be about to restore their previous contents. */
6701 if (inferior_thread ()->control.proceed_to_finish
6702 && execution_direction != EXEC_REVERSE)
6703 {
6704 /* This should not be necessary. */
6705 if (stop_registers)
6706 regcache_xfree (stop_registers);
6707
6708 /* NB: The copy goes through to the target picking up the value of
6709 all the registers. */
6710 stop_registers = regcache_dup (get_current_regcache ());
6711 }
6712
6713 if (stop_stack_dummy == STOP_STACK_DUMMY)
6714 {
6715 /* Pop the empty frame that contains the stack dummy.
6716 This also restores inferior state prior to the call
6717 (struct infcall_suspend_state). */
6718 struct frame_info *frame = get_current_frame ();
6719
6720 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6721 frame_pop (frame);
6722 /* frame_pop() calls reinit_frame_cache as the last thing it
6723 does which means there's currently no selected frame. We
6724 don't need to re-establish a selected frame if the dummy call
6725 returns normally, that will be done by
6726 restore_infcall_control_state. However, we do have to handle
6727 the case where the dummy call is returning after being
6728 stopped (e.g. the dummy call previously hit a breakpoint).
6729 We can't know which case we have so just always re-establish
6730 a selected frame here. */
6731 select_frame (get_current_frame ());
6732 }
6733
6734 done:
6735 annotate_stopped ();
6736
6737 /* Suppress the stop observer if we're in the middle of:
6738
6739 - a step n (n > 1), as there are still more steps to be done.
6740
6741 - a "finish" command, as the observer will be called in
6742 finish_command_continuation, so it can include the inferior
6743 function's return value.
6744
6745 - calling an inferior function, as we pretend the inferior didn't
6746 run at all. The return value of the call is handled by the
6747 expression evaluator, through call_function_by_hand. */
6748
6749 if (!target_has_execution
6750 || last.kind == TARGET_WAITKIND_SIGNALLED
6751 || last.kind == TARGET_WAITKIND_EXITED
6752 || last.kind == TARGET_WAITKIND_NO_RESUMED
6753 || (!(inferior_thread ()->step_multi
6754 && inferior_thread ()->control.stop_step)
6755 && !(inferior_thread ()->control.stop_bpstat
6756 && inferior_thread ()->control.proceed_to_finish)
6757 && !inferior_thread ()->control.in_infcall))
6758 {
6759 if (!ptid_equal (inferior_ptid, null_ptid))
6760 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6761 stop_print_frame);
6762 else
6763 observer_notify_normal_stop (NULL, stop_print_frame);
6764 }
6765
6766 if (target_has_execution)
6767 {
6768 if (last.kind != TARGET_WAITKIND_SIGNALLED
6769 && last.kind != TARGET_WAITKIND_EXITED)
6770 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6771 Delete any breakpoint that is to be deleted at the next stop. */
6772 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6773 }
6774
6775 /* Try to get rid of automatically added inferiors that are no
6776 longer needed. Keeping those around slows down things linearly.
6777 Note that this never removes the current inferior. */
6778 prune_inferiors ();
6779 }
6780
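/* Helper for catch_errors: run the commands hooked onto the "stop"
   pseudo-command.  CMD is stop_command's command list element.
   Always returns 0.  */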
6781 static int
6782 hook_stop_stub (void *cmd)
6783 {
6784 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6785 return (0);
6786 }
6787 \f
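/* Return nonzero if GDB is currently set to stop when the inferior
   receives signal SIGNO.  */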
6788 int
6789 signal_stop_state (int signo)
6790 {
6791 return signal_stop[signo];
6792 }
6793
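/* Return nonzero if GDB is currently set to print a message when the
   inferior receives signal SIGNO.  */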
6794 int
6795 signal_print_state (int signo)
6796 {
6797 return signal_print[signo];
6798 }
6799
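/* Return nonzero if signal SIGNO is currently set to be passed
   (delivered) to the inferior.  */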
6800 int
6801 signal_pass_state (int signo)
6802 {
6803 return signal_program[signo];
6804 }
6805
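/* Recompute the cached SIGNAL_PASS entry for SIGNO from the stop,
   print, program and catch tables; with SIGNO == -1, refresh the cache
   for every signal.  A signal can be passed straight to the inferior
   only if it neither stops, prints, nor is caught, and is set to be
   delivered to the program.  */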
6806 static void
6807 signal_cache_update (int signo)
6808 {
6809 if (signo == -1)
6810 {
6811 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6812 signal_cache_update (signo);
6813
6814 return;
6815 }
6816
6817 signal_pass[signo] = (signal_stop[signo] == 0
6818 && signal_print[signo] == 0
6819 && signal_program[signo] == 1
6820 && signal_catch[signo] == 0);
6821 }
6822
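/* Set whether GDB stops on SIGNO to STATE, refresh the signal cache,
   and return the previous setting.  */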
6823 int
6824 signal_stop_update (int signo, int state)
6825 {
6826 int ret = signal_stop[signo];
6827
6828 signal_stop[signo] = state;
6829 signal_cache_update (signo);
6830 return ret;
6831 }
6832
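/* Likewise for whether receipt of SIGNO is announced.  */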
6833 int
6834 signal_print_update (int signo, int state)
6835 {
6836 int ret = signal_print[signo];
6837
6838 signal_print[signo] = state;
6839 signal_cache_update (signo);
6840 return ret;
6841 }
6842
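/* Likewise for whether SIGNO is passed to the program.  */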
6843 int
6844 signal_pass_update (int signo, int state)
6845 {
6846 int ret = signal_program[signo];
6847
6848 signal_program[signo] = state;
6849 signal_cache_update (signo);
6850 return ret;
6851 }
6852
6853 /* Update the global 'signal_catch' from INFO and notify the
6854 target. */
6855
6856 void
6857 signal_catch_update (const unsigned int *info)
6858 {
6859 int i;
6860
6861 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6862 signal_catch[i] = info[i] > 0;
6863 signal_cache_update (-1);
6864 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6865 }
6866
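/* Print the column headings of the signal table displayed by the
   "info signals" and "handle" commands.  */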
6867 static void
6868 sig_print_header (void)
6869 {
6870 printf_filtered (_("Signal Stop\tPrint\tPass "
6871 "to program\tDescription\n"));
6872 }
6873
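/* Print one row of the signal table: OURSIG's name, its current
   stop/print/pass settings, and its description.  */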
6874 static void
6875 sig_print_info (enum gdb_signal oursig)
6876 {
6877 const char *name = gdb_signal_to_name (oursig);
6878 int name_padding = 13 - strlen (name);
6879
6880 if (name_padding <= 0)
6881 name_padding = 0;
6882
6883 printf_filtered ("%s", name);
6884 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6885 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6886 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6887 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6888 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6889 }
6890
6891 /* Specify how various signals in the inferior should be handled. */
6892
6893 static void
6894 handle_command (char *args, int from_tty)
6895 {
6896 char **argv;
6897 int digits, wordlen;
6898 int sigfirst, signum, siglast;
6899 enum gdb_signal oursig;
6900 int allsigs;
6901 int nsigs;
6902 unsigned char *sigs;
6903 struct cleanup *old_chain;
6904
6905 if (args == NULL)
6906 {
6907 error_no_arg (_("signal to handle"));
6908 }
6909
6910 /* Allocate and zero an array of flags for which signals to handle. */
6911
6912 nsigs = (int) GDB_SIGNAL_LAST;
6913 sigs = (unsigned char *) alloca (nsigs);
6914 memset (sigs, 0, nsigs);
6915
6916 /* Break the command line up into args. */
6917
6918 argv = gdb_buildargv (args);
6919 old_chain = make_cleanup_freeargv (argv);
6920
6921 /* Walk through the args, looking for signal oursigs, signal names, and
6922 actions. Signal numbers and signal names may be interspersed with
6923 actions, with the actions being performed for all signals cumulatively
6924 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6925
6926 while (*argv != NULL)
6927 {
6928 wordlen = strlen (*argv);
6929 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6930 {;
6931 }
6932 allsigs = 0;
6933 sigfirst = siglast = -1;
6934
6935 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6936 {
6937 /* Apply action to all signals except those used by the
6938 debugger. Silently skip those. */
6939 allsigs = 1;
6940 sigfirst = 0;
6941 siglast = nsigs - 1;
6942 }
6943 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6944 {
6945 SET_SIGS (nsigs, sigs, signal_stop);
6946 SET_SIGS (nsigs, sigs, signal_print);
6947 }
6948 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6949 {
6950 UNSET_SIGS (nsigs, sigs, signal_program);
6951 }
6952 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6953 {
6954 SET_SIGS (nsigs, sigs, signal_print);
6955 }
6956 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6957 {
6958 SET_SIGS (nsigs, sigs, signal_program);
6959 }
6960 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6961 {
6962 UNSET_SIGS (nsigs, sigs, signal_stop);
6963 }
6964 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6965 {
6966 SET_SIGS (nsigs, sigs, signal_program);
6967 }
6968 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6969 {
6970 UNSET_SIGS (nsigs, sigs, signal_print);
6971 UNSET_SIGS (nsigs, sigs, signal_stop);
6972 }
6973 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6974 {
6975 UNSET_SIGS (nsigs, sigs, signal_program);
6976 }
6977 else if (digits > 0)
6978 {
6979 /* It is numeric. The numeric signal refers to our own
6980 internal signal numbering from target.h, not to host/target
6981 signal number. This is a feature; users really should be
6982 using symbolic names anyway, and the common ones like
6983 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6984
6985 sigfirst = siglast = (int)
6986 gdb_signal_from_command (atoi (*argv));
6987 if ((*argv)[digits] == '-')
6988 {
6989 siglast = (int)
6990 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6991 }
6992 if (sigfirst > siglast)
6993 {
6994 /* Bet he didn't figure we'd think of this case... */
6995 signum = sigfirst;
6996 sigfirst = siglast;
6997 siglast = signum;
6998 }
6999 }
7000 else
7001 {
7002 oursig = gdb_signal_from_name (*argv);
7003 if (oursig != GDB_SIGNAL_UNKNOWN)
7004 {
7005 sigfirst = siglast = (int) oursig;
7006 }
7007 else
7008 {
7009 /* Not a number and not a recognized flag word => complain. */
7010 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
7011 }
7012 }
7013
7014 /* If any signal numbers or symbol names were found, set flags for
7015 which signals to apply actions to. */
7016
7017 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
7018 {
7019 switch ((enum gdb_signal) signum)
7020 {
7021 case GDB_SIGNAL_TRAP:
7022 case GDB_SIGNAL_INT:
7023 if (!allsigs && !sigs[signum])
7024 {
7025 if (query (_("%s is used by the debugger.\n\
7026 Are you sure you want to change it? "),
7027 gdb_signal_to_name ((enum gdb_signal) signum)))
7028 {
7029 sigs[signum] = 1;
7030 }
7031 else
7032 {
7033 printf_unfiltered (_("Not confirmed, unchanged.\n"));
7034 gdb_flush (gdb_stdout);
7035 }
7036 }
7037 break;
7038 case GDB_SIGNAL_0:
7039 case GDB_SIGNAL_DEFAULT:
7040 case GDB_SIGNAL_UNKNOWN:
7041 /* Make sure that "all" doesn't print these. */
7042 break;
7043 default:
7044 sigs[signum] = 1;
7045 break;
7046 }
7047 }
7048
7049 argv++;
7050 }
7051
7052 for (signum = 0; signum < nsigs; signum++)
7053 if (sigs[signum])
7054 {
7055 signal_cache_update (-1);
7056 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
7057 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
7058
7059 if (from_tty)
7060 {
7061 /* Show the results. */
7062 sig_print_header ();
7063 for (; signum < nsigs; signum++)
7064 if (sigs[signum])
7065 sig_print_info (signum);
7066 }
7067
7068 break;
7069 }
7070
7071 do_cleanups (old_chain);
7072 }
7073
7074 /* Complete the "handle" command. */
7075
7076 static VEC (char_ptr) *
7077 handle_completer (struct cmd_list_element *ignore,
7078 const char *text, const char *word)
7079 {
7080 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
7081 static const char * const keywords[] =
7082 {
7083 "all",
7084 "stop",
7085 "ignore",
7086 "print",
7087 "pass",
7088 "nostop",
7089 "noignore",
7090 "noprint",
7091 "nopass",
7092 NULL,
7093 };
7094
7095 vec_signals = signal_completer (ignore, text, word);
7096 vec_keywords = complete_on_enum (keywords, word, word);
7097
7098 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
7099 VEC_free (char_ptr, vec_signals);
7100 VEC_free (char_ptr, vec_keywords);
7101 return return_val;
7102 }
7103
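/* Implement the xdb-compatible "z" command: translate xdb's one-letter
   flags ("s", "i", "r", "Q") into the equivalent "handle" actions and
   delegate to handle_command.  */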
7104 static void
7105 xdb_handle_command (char *args, int from_tty)
7106 {
7107 char **argv;
7108 struct cleanup *old_chain;
7109
7110 if (args == NULL)
7111 error_no_arg (_("xdb command"));
7112
7113 /* Break the command line up into args. */
7114
7115 argv = gdb_buildargv (args);
7116 old_chain = make_cleanup_freeargv (argv);
7117 if (argv[1] != (char *) NULL)
7118 {
7119 char *argBuf;
7120 int bufLen;
7121
7122 bufLen = strlen (argv[0]) + 20;
7123 argBuf = (char *) xmalloc (bufLen);
7124 if (argBuf)
7125 {
7126 int validFlag = 1;
7127 enum gdb_signal oursig;
7128
7129 oursig = gdb_signal_from_name (argv[0]);
7130 memset (argBuf, 0, bufLen);
7131 if (strcmp (argv[1], "Q") == 0)
7132 sprintf (argBuf, "%s %s", argv[0], "noprint");
7133 else
7134 {
7135 if (strcmp (argv[1], "s") == 0)
7136 {
7137 if (!signal_stop[oursig])
7138 sprintf (argBuf, "%s %s", argv[0], "stop");
7139 else
7140 sprintf (argBuf, "%s %s", argv[0], "nostop");
7141 }
7142 else if (strcmp (argv[1], "i") == 0)
7143 {
7144 if (!signal_program[oursig])
7145 sprintf (argBuf, "%s %s", argv[0], "pass");
7146 else
7147 sprintf (argBuf, "%s %s", argv[0], "nopass");
7148 }
7149 else if (strcmp (argv[1], "r") == 0)
7150 {
7151 if (!signal_print[oursig])
7152 sprintf (argBuf, "%s %s", argv[0], "print");
7153 else
7154 sprintf (argBuf, "%s %s", argv[0], "noprint");
7155 }
7156 else
7157 validFlag = 0;
7158 }
7159 if (validFlag)
7160 handle_command (argBuf, from_tty);
7161 else
7162 printf_filtered (_("Invalid signal handling flag.\n"));
7163 if (argBuf)
7164 xfree (argBuf);
7165 }
7166 }
7167 do_cleanups (old_chain);
7168 }
7169
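/* Convert a numeric signal argument of the "handle" and "info signals"
   commands into a gdb_signal.  Only the numbers 1-15 are accepted, for
   compatibility with old versions of GDB; anything else is an error.  */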
7170 enum gdb_signal
7171 gdb_signal_from_command (int num)
7172 {
7173 if (num >= 1 && num <= 15)
7174 return (enum gdb_signal) num;
7175 error (_("Only signals 1-15 are valid as numeric signals.\n\
7176 Use \"info signals\" for a list of symbolic signals."));
7177 }
7178
7179 /* Print current contents of the tables set by the handle command.
7180 It is possible we should just be printing signals actually used
7181 by the current target (but for things to work right when switching
7182 targets, all signals should be in the signal tables). */
7183
7184 static void
7185 signals_info (char *signum_exp, int from_tty)
7186 {
7187 enum gdb_signal oursig;
7188
7189 sig_print_header ();
7190
7191 if (signum_exp)
7192 {
7193 /* First see if this is a symbol name. */
7194 oursig = gdb_signal_from_name (signum_exp);
7195 if (oursig == GDB_SIGNAL_UNKNOWN)
7196 {
7197 /* No, try numeric. */
7198 oursig =
7199 gdb_signal_from_command (parse_and_eval_long (signum_exp));
7200 }
7201 sig_print_info (oursig);
7202 return;
7203 }
7204
7205 printf_filtered ("\n");
7206 /* These ugly casts brought to you by the native VAX compiler. */
7207 for (oursig = GDB_SIGNAL_FIRST;
7208 (int) oursig < (int) GDB_SIGNAL_LAST;
7209 oursig = (enum gdb_signal) ((int) oursig + 1))
7210 {
7211 QUIT;
7212
7213 if (oursig != GDB_SIGNAL_UNKNOWN
7214 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
7215 sig_print_info (oursig);
7216 }
7217
7218 printf_filtered (_("\nUse the \"handle\" command "
7219 "to change these tables.\n"));
7220 }
7221
7222 /* Check if it makes sense to read $_siginfo from the current thread
7223 at this point. If not, throw an error. */
7224
7225 static void
7226 validate_siginfo_access (void)
7227 {
7228 /* No current inferior, no siginfo. */
7229 if (ptid_equal (inferior_ptid, null_ptid))
7230 error (_("No thread selected."));
7231
7232 /* Don't try to read from a dead thread. */
7233 if (is_exited (inferior_ptid))
7234 error (_("The current thread has terminated"));
7235
7236 /* ... or from a spinning thread. */
7237 if (is_running (inferior_ptid))
7238 error (_("Selected thread is running."));
7239 }
7240
7241 /* The $_siginfo convenience variable is a bit special. We don't know
7242 for sure the type of the value until we actually have a chance to
7243 fetch the data. The type can change depending on gdbarch, so it is
7244 also dependent on which thread you have selected.  We handle this by:
7245
7246 1. making $_siginfo be an internalvar that creates a new value on
7247 access.
7248
7249 2. making the value of $_siginfo be an lval_computed value. */
7250
7251 /* This function implements the lval_computed support for reading a
7252 $_siginfo value. */
7253
7254 static void
7255 siginfo_value_read (struct value *v)
7256 {
7257 LONGEST transferred;
7258
7259 validate_siginfo_access ();
7260
7261 transferred =
7262 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
7263 NULL,
7264 value_contents_all_raw (v),
7265 value_offset (v),
7266 TYPE_LENGTH (value_type (v)));
7267
7268 if (transferred != TYPE_LENGTH (value_type (v)))
7269 error (_("Unable to read siginfo"));
7270 }
7271
7272 /* This function implements the lval_computed support for writing a
7273 $_siginfo value. */
7274
7275 static void
7276 siginfo_value_write (struct value *v, struct value *fromval)
7277 {
7278 LONGEST transferred;
7279
7280 validate_siginfo_access ();
7281
7282 transferred = target_write (&current_target,
7283 TARGET_OBJECT_SIGNAL_INFO,
7284 NULL,
7285 value_contents_all_raw (fromval),
7286 value_offset (v),
7287 TYPE_LENGTH (value_type (fromval)));
7288
7289 if (transferred != TYPE_LENGTH (value_type (fromval)))
7290 error (_("Unable to write siginfo"));
7291 }
7292
7293 static const struct lval_funcs siginfo_value_funcs =
7294 {
7295 siginfo_value_read,
7296 siginfo_value_write
7297 };
7298
7299 /* Return a new value with the correct type for the siginfo object of
7300 the current thread using architecture GDBARCH. Return a void value
7301 if there's no object available. */
7302
7303 static struct value *
7304 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7305 void *ignore)
7306 {
7307 if (target_has_stack
7308 && !ptid_equal (inferior_ptid, null_ptid)
7309 && gdbarch_get_siginfo_type_p (gdbarch))
7310 {
7311 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7312
7313 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7314 }
7315
7316 return allocate_value (builtin_type (gdbarch)->builtin_void);
7317 }
7318
7319 \f
7320 /* infcall_suspend_state contains state about the program itself like its
7321 registers and any signal it received when it last stopped.
7322 This state must be restored regardless of how the inferior function call
7323 ends (either successfully, or after it hits a breakpoint or signal)
7324 if the program is to properly continue where it left off. */
7325
7326 struct infcall_suspend_state
7327 {
7328 struct thread_suspend_state thread_suspend;
7329 #if 0 /* Currently unused and empty structures are not valid C. */
7330 struct inferior_suspend_state inferior_suspend;
7331 #endif
7332
7333 /* Other fields: */
7334 CORE_ADDR stop_pc;
7335 struct regcache *registers;
7336
7337 /* Format of SIGINFO_DATA or NULL if it is not present. */
7338 struct gdbarch *siginfo_gdbarch;
7339
7340 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
7341 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
7342 content would be invalid. */
7343 gdb_byte *siginfo_data;
7344 };
7345
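/* Snapshot the current thread's suspend state (registers, pending stop
   signal and, when the architecture describes it, the siginfo data)
   ahead of an inferior function call.  The caller owns the returned
   state.  */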
7346 struct infcall_suspend_state *
7347 save_infcall_suspend_state (void)
7348 {
7349 struct infcall_suspend_state *inf_state;
7350 struct thread_info *tp = inferior_thread ();
7351 #if 0
7352 struct inferior *inf = current_inferior ();
7353 #endif
7354 struct regcache *regcache = get_current_regcache ();
7355 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7356 gdb_byte *siginfo_data = NULL;
7357
7358 if (gdbarch_get_siginfo_type_p (gdbarch))
7359 {
7360 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7361 size_t len = TYPE_LENGTH (type);
7362 struct cleanup *back_to;
7363
7364 siginfo_data = xmalloc (len);
7365 back_to = make_cleanup (xfree, siginfo_data);
7366
7367 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7368 siginfo_data, 0, len) == len)
7369 discard_cleanups (back_to);
7370 else
7371 {
7372 /* Errors ignored. */
7373 do_cleanups (back_to);
7374 siginfo_data = NULL;
7375 }
7376 }
7377
7378 inf_state = XCNEW (struct infcall_suspend_state);
7379
7380 if (siginfo_data)
7381 {
7382 inf_state->siginfo_gdbarch = gdbarch;
7383 inf_state->siginfo_data = siginfo_data;
7384 }
7385
7386 inf_state->thread_suspend = tp->suspend;
7387 #if 0 /* Currently unused and empty structures are not valid C. */
7388 inf_state->inferior_suspend = inf->suspend;
7389 #endif
7390
7391 /* run_inferior_call will not use the signal due to its `proceed' call with
7392 GDB_SIGNAL_0 anyway. */
7393 tp->suspend.stop_signal = GDB_SIGNAL_0;
7394
7395 inf_state->stop_pc = stop_pc;
7396
7397 inf_state->registers = regcache_dup (regcache);
7398
7399 return inf_state;
7400 }
7401
7402 /* Restore inferior session state to INF_STATE. */
7403
7404 void
7405 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7406 {
7407 struct thread_info *tp = inferior_thread ();
7408 #if 0
7409 struct inferior *inf = current_inferior ();
7410 #endif
7411 struct regcache *regcache = get_current_regcache ();
7412 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7413
7414 tp->suspend = inf_state->thread_suspend;
7415 #if 0 /* Currently unused and empty structures are not valid C. */
7416 inf->suspend = inf_state->inferior_suspend;
7417 #endif
7418
7419 stop_pc = inf_state->stop_pc;
7420
7421 if (inf_state->siginfo_gdbarch == gdbarch)
7422 {
7423 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7424
7425 /* Errors ignored. */
7426 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7427 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
7428 }
7429
7430 /* The inferior can be gone if the user types "print exit(0)"
7431 (and perhaps other times). */
7432 if (target_has_execution)
7433 /* NB: The register write goes through to the target. */
7434 regcache_cpy (regcache, inf_state->registers);
7435
7436 discard_infcall_suspend_state (inf_state);
7437 }
7438
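/* Cleanup wrapper around restore_infcall_suspend_state.  */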
7439 static void
7440 do_restore_infcall_suspend_state_cleanup (void *state)
7441 {
7442 restore_infcall_suspend_state (state);
7443 }
7444
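/* Arrange for INF_STATE to be restored (and then freed) when the
   returned cleanup is run.  */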
7445 struct cleanup *
7446 make_cleanup_restore_infcall_suspend_state
7447 (struct infcall_suspend_state *inf_state)
7448 {
7449 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7450 }
7451
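/* Free INF_STATE without restoring anything from it.  */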
7452 void
7453 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7454 {
7455 regcache_xfree (inf_state->registers);
7456 xfree (inf_state->siginfo_data);
7457 xfree (inf_state);
7458 }
7459
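/* Return the register cache saved in INF_STATE.  */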
7460 struct regcache *
7461 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7462 {
7463 return inf_state->registers;
7464 }
7465
7466 /* infcall_control_state contains state regarding gdb's control of the
7467 inferior itself like stepping control. It also contains session state like
7468 the user's currently selected frame. */
7469
7470 struct infcall_control_state
7471 {
7472 struct thread_control_state thread_control;
7473 struct inferior_control_state inferior_control;
7474
7475 /* Other fields: */
7476 enum stop_stack_kind stop_stack_dummy;
7477 int stopped_by_random_signal;
7478 int stop_after_trap;
7479
7480 /* ID of the selected frame when the inferior function call was made.  */
7481 struct frame_id selected_frame_id;
7482 };
7483
7484 /* Save all of the information associated with the inferior<==>gdb
7485 connection. */
7486
7487 struct infcall_control_state *
7488 save_infcall_control_state (void)
7489 {
7490 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7491 struct thread_info *tp = inferior_thread ();
7492 struct inferior *inf = current_inferior ();
7493
7494 inf_status->thread_control = tp->control;
7495 inf_status->inferior_control = inf->control;
7496
7497 tp->control.step_resume_breakpoint = NULL;
7498 tp->control.exception_resume_breakpoint = NULL;
7499
7500 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7501 chain. If caller's caller is walking the chain, they'll be happier if we
7502 hand them back the original chain when restore_infcall_control_state is
7503 called. */
7504 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7505
7506 /* Other fields: */
7507 inf_status->stop_stack_dummy = stop_stack_dummy;
7508 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7509 inf_status->stop_after_trap = stop_after_trap;
7510
7511 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7512
7513 return inf_status;
7514 }
7515
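/* Helper for catch_errors: re-select the frame whose ID is pointed to
   by ARGS.  Warns and returns 0 if the frame can no longer be found;
   returns 1 on success.  */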
7516 static int
7517 restore_selected_frame (void *args)
7518 {
7519 struct frame_id *fid = (struct frame_id *) args;
7520 struct frame_info *frame;
7521
7522 frame = frame_find_by_id (*fid);
7523
7524 /* If the frame saved in inf_status->selected_frame_id can no longer
7525 be found, the previously selected frame has gone away.  */
7526 if (frame == NULL)
7527 {
7528 warning (_("Unable to restore previously selected frame."));
7529 return 0;
7530 }
7531
7532 select_frame (frame);
7533
7534 return (1);
7535 }
7536
7537 /* Restore inferior session state to INF_STATUS. */
7538
7539 void
7540 restore_infcall_control_state (struct infcall_control_state *inf_status)
7541 {
7542 struct thread_info *tp = inferior_thread ();
7543 struct inferior *inf = current_inferior ();
7544
7545 if (tp->control.step_resume_breakpoint)
7546 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7547
7548 if (tp->control.exception_resume_breakpoint)
7549 tp->control.exception_resume_breakpoint->disposition
7550 = disp_del_at_next_stop;
7551
7552 /* Handle the bpstat_copy of the chain. */
7553 bpstat_clear (&tp->control.stop_bpstat);
7554
7555 tp->control = inf_status->thread_control;
7556 inf->control = inf_status->inferior_control;
7557
7558 /* Other fields: */
7559 stop_stack_dummy = inf_status->stop_stack_dummy;
7560 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7561 stop_after_trap = inf_status->stop_after_trap;
7562
7563 if (target_has_stack)
7564 {
7565 /* The point of catch_errors is that if the stack is clobbered,
7566 walking the stack might encounter a garbage pointer and
7567 error() trying to dereference it. */
7568 if (catch_errors
7569 (restore_selected_frame, &inf_status->selected_frame_id,
7570 "Unable to restore previously selected frame:\n",
7571 RETURN_MASK_ERROR) == 0)
7572 /* Error in restoring the selected frame. Select the innermost
7573 frame. */
7574 select_frame (get_current_frame ());
7575 }
7576
7577 xfree (inf_status);
7578 }
7579
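/* Cleanup wrapper around restore_infcall_control_state.  */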
7580 static void
7581 do_restore_infcall_control_state_cleanup (void *sts)
7582 {
7583 restore_infcall_control_state (sts);
7584 }
7585
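/* Arrange for INF_STATUS to be restored (and then freed) when the
   returned cleanup is run.  */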
7586 struct cleanup *
7587 make_cleanup_restore_infcall_control_state
7588 (struct infcall_control_state *inf_status)
7589 {
7590 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7591 }
7592
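/* Throw INF_STATUS away without restoring it, marking any saved step
   or exception resume breakpoints for deletion at the next stop.  */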
7593 void
7594 discard_infcall_control_state (struct infcall_control_state *inf_status)
7595 {
7596 if (inf_status->thread_control.step_resume_breakpoint)
7597 inf_status->thread_control.step_resume_breakpoint->disposition
7598 = disp_del_at_next_stop;
7599
7600 if (inf_status->thread_control.exception_resume_breakpoint)
7601 inf_status->thread_control.exception_resume_breakpoint->disposition
7602 = disp_del_at_next_stop;
7603
7604 /* See save_infcall_control_state for info on stop_bpstat. */
7605 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7606
7607 xfree (inf_status);
7608 }
7609 \f
7610 /* restore_inferior_ptid() will be used by the cleanup machinery
7611 to restore the inferior_ptid value saved in a call to
7612 save_inferior_ptid(). */
7613
7614 static void
7615 restore_inferior_ptid (void *arg)
7616 {
7617 ptid_t *saved_ptid_ptr = arg;
7618
7619 inferior_ptid = *saved_ptid_ptr;
7620 xfree (arg);
7621 }
7622
7623 /* Save the value of inferior_ptid so that it may be restored by a
7624 later call to do_cleanups(). Returns the struct cleanup pointer
7625 needed for later doing the cleanup. */
7626
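/* For example (an illustrative sketch; OTHER_PTID stands for whatever
   ptid the caller wants to switch to temporarily):

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = OTHER_PTID;
     ... do the work that needs the other ptid ...
     do_cleanups (old_chain);   - inferior_ptid is restored here.  */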
7627 struct cleanup *
7628 save_inferior_ptid (void)
7629 {
7630 ptid_t *saved_ptid_ptr;
7631
7632 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7633 *saved_ptid_ptr = inferior_ptid;
7634 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7635 }
7636
7637 /* See infrun.h. */
7638
7639 void
7640 clear_exit_convenience_vars (void)
7641 {
7642 clear_internalvar (lookup_internalvar ("_exitsignal"));
7643 clear_internalvar (lookup_internalvar ("_exitcode"));
7644 }
7645 \f
7646
7647 /* User interface for reverse debugging:
7648 Set exec-direction / show exec-direction commands
7649 (returns error unless target implements to_set_exec_direction method). */
7650
7651 int execution_direction = EXEC_FORWARD;
7652 static const char exec_forward[] = "forward";
7653 static const char exec_reverse[] = "reverse";
7654 static const char *exec_direction = exec_forward;
7655 static const char *const exec_direction_names[] = {
7656 exec_forward,
7657 exec_reverse,
7658 NULL
7659 };
7660
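/* The "set exec-direction" callback: update execution_direction from
   the user's choice, or error out (resetting to forward) if the target
   cannot execute in reverse.  */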
7661 static void
7662 set_exec_direction_func (char *args, int from_tty,
7663 struct cmd_list_element *cmd)
7664 {
7665 if (target_can_execute_reverse)
7666 {
7667 if (!strcmp (exec_direction, exec_forward))
7668 execution_direction = EXEC_FORWARD;
7669 else if (!strcmp (exec_direction, exec_reverse))
7670 execution_direction = EXEC_REVERSE;
7671 }
7672 else
7673 {
7674 exec_direction = exec_forward;
7675 error (_("Target does not support this operation."));
7676 }
7677 }
7678
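/* The "show exec-direction" callback.  */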
7679 static void
7680 show_exec_direction_func (struct ui_file *out, int from_tty,
7681 struct cmd_list_element *cmd, const char *value)
7682 {
7683 switch (execution_direction) {
7684 case EXEC_FORWARD:
7685 fprintf_filtered (out, _("Forward.\n"));
7686 break;
7687 case EXEC_REVERSE:
7688 fprintf_filtered (out, _("Reverse.\n"));
7689 break;
7690 default:
7691 internal_error (__FILE__, __LINE__,
7692 _("bogus execution_direction value: %d"),
7693 (int) execution_direction);
7694 }
7695 }
7696
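/* The "show schedule-multiple" callback.  */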
7697 static void
7698 show_schedule_multiple (struct ui_file *file, int from_tty,
7699 struct cmd_list_element *c, const char *value)
7700 {
7701 fprintf_filtered (file, _("Resuming the execution of threads "
7702 "of all processes is %s.\n"), value);
7703 }
7704
7705 /* Implementation of `siginfo' variable. */
7706
7707 static const struct internalvar_funcs siginfo_funcs =
7708 {
7709 siginfo_make_value,
7710 NULL,
7711 NULL
7712 };
7713
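/* Module initializer: register infrun's commands and settings, set up
   the default signal handling tables, attach infrun's observers, and
   create the $_siginfo convenience variable.  */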
7714 void
7715 _initialize_infrun (void)
7716 {
7717 int i;
7718 int numsigs;
7719 struct cmd_list_element *c;
7720
7721 add_info ("signals", signals_info, _("\
7722 What debugger does when program gets various signals.\n\
7723 Specify a signal as argument to print info on that signal only."));
7724 add_info_alias ("handle", "signals", 0);
7725
7726 c = add_com ("handle", class_run, handle_command, _("\
7727 Specify how to handle signals.\n\
7728 Usage: handle SIGNAL [ACTIONS]\n\
7729 Args are signals and actions to apply to those signals.\n\
7730 If no actions are specified, the current settings for the specified signals\n\
7731 will be displayed instead.\n\
7732 \n\
7733 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7734 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7735 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7736 The special arg \"all\" is recognized to mean all signals except those\n\
7737 used by the debugger, typically SIGTRAP and SIGINT.\n\
7738 \n\
7739 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7740 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7741 Stop means reenter debugger if this signal happens (implies print).\n\
7742 Print means print a message if this signal happens.\n\
7743 Pass means let program see this signal; otherwise program doesn't know.\n\
7744 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7745 Pass and Stop may be combined.\n\
7746 \n\
7747 Multiple signals may be specified. Signal numbers and signal names\n\
7748 may be interspersed with actions, with the actions being performed for\n\
7749 all signals cumulatively specified."));
7750 set_cmd_completer (c, handle_completer);
7751
7752 if (xdb_commands)
7753 {
7754 add_com ("lz", class_info, signals_info, _("\
7755 What debugger does when program gets various signals.\n\
7756 Specify a signal as argument to print info on that signal only."));
7757 add_com ("z", class_run, xdb_handle_command, _("\
7758 Specify how to handle a signal.\n\
7759 Args are signals and actions to apply to those signals.\n\
7760 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7761 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7762 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7763 The special arg \"all\" is recognized to mean all signals except those\n\
7764 used by the debugger, typically SIGTRAP and SIGINT.\n\
7765 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7766 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7767 nopass), \"Q\" (noprint)\n\
7768 Stop means reenter debugger if this signal happens (implies print).\n\
7769 Print means print a message if this signal happens.\n\
7770 Pass means let program see this signal; otherwise program doesn't know.\n\
7771 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7772 Pass and Stop may be combined."));
7773 }
7774
7775 if (!dbx_commands)
7776 stop_command = add_cmd ("stop", class_obscure,
7777 not_just_help_class_command, _("\
7778 There is no `stop' command, but you can set a hook on `stop'.\n\
7779 This allows you to set a list of commands to be run each time execution\n\
7780 of the program stops."), &cmdlist);
7781
7782 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7783 Set inferior debugging."), _("\
7784 Show inferior debugging."), _("\
7785 When non-zero, inferior specific debugging is enabled."),
7786 NULL,
7787 show_debug_infrun,
7788 &setdebuglist, &showdebuglist);
7789
7790 add_setshow_boolean_cmd ("displaced", class_maintenance,
7791 &debug_displaced, _("\
7792 Set displaced stepping debugging."), _("\
7793 Show displaced stepping debugging."), _("\
7794 When non-zero, displaced stepping specific debugging is enabled."),
7795 NULL,
7796 show_debug_displaced,
7797 &setdebuglist, &showdebuglist);
7798
7799 add_setshow_boolean_cmd ("non-stop", no_class,
7800 &non_stop_1, _("\
7801 Set whether gdb controls the inferior in non-stop mode."), _("\
7802 Show whether gdb controls the inferior in non-stop mode."), _("\
7803 When debugging a multi-threaded program and this setting is\n\
7804 off (the default, also called all-stop mode), when one thread stops\n\
7805 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7806 all other threads in the program while you interact with the thread of\n\
7807 interest. When you continue or step a thread, you can allow the other\n\
7808 threads to run, or have them remain stopped, but while you inspect any\n\
7809 thread's state, all threads stop.\n\
7810 \n\
7811 In non-stop mode, when one thread stops, other threads can continue\n\
7812 to run freely. You'll be able to step each thread independently,\n\
7813 leave it stopped or free to run as needed."),
7814 set_non_stop,
7815 show_non_stop,
7816 &setlist,
7817 &showlist);
7818
7819 numsigs = (int) GDB_SIGNAL_LAST;
7820 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7821 signal_print = (unsigned char *)
7822 xmalloc (sizeof (signal_print[0]) * numsigs);
7823 signal_program = (unsigned char *)
7824 xmalloc (sizeof (signal_program[0]) * numsigs);
7825 signal_catch = (unsigned char *)
7826 xmalloc (sizeof (signal_catch[0]) * numsigs);
7827 signal_pass = (unsigned char *)
7828 xmalloc (sizeof (signal_pass[0]) * numsigs);
7829 for (i = 0; i < numsigs; i++)
7830 {
7831 signal_stop[i] = 1;
7832 signal_print[i] = 1;
7833 signal_program[i] = 1;
7834 signal_catch[i] = 0;
7835 }
7836
7837 /* Signals caused by debugger's own actions
7838 should not be given to the program afterwards. */
7839 signal_program[GDB_SIGNAL_TRAP] = 0;
7840 signal_program[GDB_SIGNAL_INT] = 0;
7841
7842 /* Signals that are not errors should not normally enter the debugger. */
7843 signal_stop[GDB_SIGNAL_ALRM] = 0;
7844 signal_print[GDB_SIGNAL_ALRM] = 0;
7845 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7846 signal_print[GDB_SIGNAL_VTALRM] = 0;
7847 signal_stop[GDB_SIGNAL_PROF] = 0;
7848 signal_print[GDB_SIGNAL_PROF] = 0;
7849 signal_stop[GDB_SIGNAL_CHLD] = 0;
7850 signal_print[GDB_SIGNAL_CHLD] = 0;
7851 signal_stop[GDB_SIGNAL_IO] = 0;
7852 signal_print[GDB_SIGNAL_IO] = 0;
7853 signal_stop[GDB_SIGNAL_POLL] = 0;
7854 signal_print[GDB_SIGNAL_POLL] = 0;
7855 signal_stop[GDB_SIGNAL_URG] = 0;
7856 signal_print[GDB_SIGNAL_URG] = 0;
7857 signal_stop[GDB_SIGNAL_WINCH] = 0;
7858 signal_print[GDB_SIGNAL_WINCH] = 0;
7859 signal_stop[GDB_SIGNAL_PRIO] = 0;
7860 signal_print[GDB_SIGNAL_PRIO] = 0;
7861
7862 /* These signals are used internally by user-level thread
7863 implementations. (See signal(5) on Solaris.) Like the above
7864 signals, a healthy program receives and handles them as part of
7865 its normal operation. */
7866 signal_stop[GDB_SIGNAL_LWP] = 0;
7867 signal_print[GDB_SIGNAL_LWP] = 0;
7868 signal_stop[GDB_SIGNAL_WAITING] = 0;
7869 signal_print[GDB_SIGNAL_WAITING] = 0;
7870 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7871 signal_print[GDB_SIGNAL_CANCEL] = 0;
7872
7873 /* Update cached state. */
7874 signal_cache_update (-1);
7875
7876 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7877 &stop_on_solib_events, _("\
7878 Set stopping for shared library events."), _("\
7879 Show stopping for shared library events."), _("\
7880 If nonzero, gdb will give control to the user when the dynamic linker\n\
7881 notifies gdb of shared library events. The most common event of interest\n\
7882 to the user would be loading/unloading of a new library."),
7883 set_stop_on_solib_events,
7884 show_stop_on_solib_events,
7885 &setlist, &showlist);
7886
7887 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7888 follow_fork_mode_kind_names,
7889 &follow_fork_mode_string, _("\
7890 Set debugger response to a program call of fork or vfork."), _("\
7891 Show debugger response to a program call of fork or vfork."), _("\
7892 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7893 parent - the original process is debugged after a fork\n\
7894 child - the new process is debugged after a fork\n\
7895 The unfollowed process will continue to run.\n\
7896 By default, the debugger will follow the parent process."),
7897 NULL,
7898 show_follow_fork_mode_string,
7899 &setlist, &showlist);
7900
7901 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7902 follow_exec_mode_names,
7903 &follow_exec_mode_string, _("\
7904 Set debugger response to a program call of exec."), _("\
7905 Show debugger response to a program call of exec."), _("\
7906 An exec call replaces the program image of a process.\n\
7907 \n\
7908 follow-exec-mode can be:\n\
7909 \n\
7910 new - the debugger creates a new inferior and rebinds the process\n\
7911 to this new inferior. The program the process was running before\n\
7912 the exec call can be restarted afterwards by restarting the original\n\
7913 inferior.\n\
7914 \n\
7915 same - the debugger keeps the process bound to the same inferior.\n\
7916 The new executable image replaces the previous executable loaded in\n\
7917 the inferior. Restarting the inferior after the exec call restarts\n\
7918 the executable the process was running after the exec call.\n\
7919 \n\
7920 By default, the debugger will use the same inferior."),
7921 NULL,
7922 show_follow_exec_mode_string,
7923 &setlist, &showlist);
7924
7925 add_setshow_enum_cmd ("scheduler-locking", class_run,
7926 scheduler_enums, &scheduler_mode, _("\
7927 Set mode for locking scheduler during execution."), _("\
7928 Show mode for locking scheduler during execution."), _("\
7929 off == no locking (threads may preempt at any time)\n\
7930 on == full locking (no thread except the current thread may run)\n\
7931 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
7932 In this mode, other threads may run during other commands."),
7933 set_schedlock_func, /* traps on target vector */
7934 show_scheduler_mode,
7935 &setlist, &showlist);
7936
7937 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7938 Set mode for resuming threads of all processes."), _("\
7939 Show mode for resuming threads of all processes."), _("\
7940 When on, execution commands (such as 'continue' or 'next') resume all\n\
7941 threads of all processes. When off (which is the default), execution\n\
7942 commands only resume the threads of the current process. The set of\n\
7943 threads that are resumed is further refined by the scheduler-locking\n\
7944 mode (see help set scheduler-locking)."),
7945 NULL,
7946 show_schedule_multiple,
7947 &setlist, &showlist);
7948
7949 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7950 Set mode of the step operation."), _("\
7951 Show mode of the step operation."), _("\
7952 When set, doing a step over a function without debug line information\n\
7953 will stop at the first instruction of that function. Otherwise, the\n\
7954 function is skipped and the step command stops at a different source line."),
7955 NULL,
7956 show_step_stop_if_no_debug,
7957 &setlist, &showlist);
7958
7959 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7960 &can_use_displaced_stepping, _("\
7961 Set debugger's willingness to use displaced stepping."), _("\
7962 Show debugger's willingness to use displaced stepping."), _("\
7963 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7964 supported by the target architecture. If off, gdb will not use displaced\n\
7965 stepping to step over breakpoints, even if such is supported by the target\n\
7966 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7967 if the target architecture supports it and non-stop mode is active, but will not\n\
7968 use it in all-stop mode (see help set non-stop)."),
7969 NULL,
7970 show_can_use_displaced_stepping,
7971 &setlist, &showlist);
7972
7973 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7974 &exec_direction, _("Set direction of execution.\n\
7975 Options are 'forward' or 'reverse'."),
7976 _("Show direction of execution (forward/reverse)."),
7977 _("Tells gdb whether to execute forward or backward."),
7978 set_exec_direction_func, show_exec_direction_func,
7979 &setlist, &showlist);
7980
7981 /* Set/show detach-on-fork: user-settable mode. */
7982
7983 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7984 Set whether gdb will detach the child of a fork."), _("\
7985 Show whether gdb will detach the child of a fork."), _("\
7986 Tells gdb whether to detach the child of a fork."),
7987 NULL, NULL, &setlist, &showlist);
7988
7989 /* Set/show disable address space randomization mode. */
7990
7991 add_setshow_boolean_cmd ("disable-randomization", class_support,
7992 &disable_randomization, _("\
7993 Set disabling of debuggee's virtual address space randomization."), _("\
7994 Show disabling of debuggee's virtual address space randomization."), _("\
7995 When this mode is on (which is the default), randomization of the virtual\n\
7996 address space is disabled. Standalone programs run with the randomization\n\
7997 enabled by default on some platforms."),
7998 &set_disable_randomization,
7999 &show_disable_randomization,
8000 &setlist, &showlist);
8001
8002 /* ptid initializations */
8003 inferior_ptid = null_ptid;
8004 target_last_wait_ptid = minus_one_ptid;
8005
8006 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
8007 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
8008 observer_attach_thread_exit (infrun_thread_thread_exit);
8009 observer_attach_inferior_exit (infrun_inferior_exit);
8010
8011 /* Explicitly create without lookup, since that tries to create a
8012 value with a void typed value, and when we get here, gdbarch
8013 isn't initialized yet. At this point, we're quite sure there
8014 isn't another convenience variable of the same name. */
8015 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
8016
8017 add_setshow_boolean_cmd ("observer", no_class,
8018 &observer_mode_1, _("\
8019 Set whether gdb controls the inferior in observer mode."), _("\
8020 Show whether gdb controls the inferior in observer mode."), _("\
8021 In observer mode, GDB can get data from the inferior, but not\n\
8022 affect its execution. Registers and memory may not be changed,\n\
8023 breakpoints may not be set, and the program cannot be interrupted\n\
8024 or signalled."),
8025 set_observer_mode,
8026 show_observer_mode,
8027 &setlist,
8028 &showlist);
8029 }