Fix gdb.base/sigstep.exp with displaced stepping on software single-step targets
gdb/infrun.c (from deliverable/binutils-gdb.git)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2015 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105 over such functions. */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
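/* A worked example of the expression above (restating it, not a
   separate specification): with may_insert_breakpoints,
   may_insert_tracepoints and may_stop all cleared,
   may_insert_fast_tracepoints set, and non_stop on, NEWVAL evaluates
   to 1 and observer mode is reported as "on"; flipping any one of
   those back turns it "off" again.  */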
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
296 #define SET_SIGS(nsigs,sigs,flags) \
297 do { \
298 int signum = (nsigs); \
299 while (signum-- > 0) \
300 if ((sigs)[signum]) \
301 (flags)[signum] = 1; \
302 } while (0)
303
304 #define UNSET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 0; \
310 } while (0)
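/* A minimal usage sketch for the two helpers above (the SIGS array
   here is hypothetical): mark the signals of interest in a scratch
   array, then apply one action to all of them at once.

     unsigned char sigs[GDB_SIGNAL_LAST] = { 0 };

     sigs[GDB_SIGNAL_INT] = 1;
     sigs[GDB_SIGNAL_QUIT] = 1;
     SET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_stop);
     SET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_print);
     UNSET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_program);  */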
311
312 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
313 this function is to avoid exporting `signal_program'. */
314
315 void
316 update_signals_program_target (void)
317 {
318 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
319 }
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
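/* E.g., target_resume (RESUME_ALL, 0, GDB_SIGNAL_0) resumes every
   thread, without stepping and without delivering a signal.  */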
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Nonzero if we want to give control to the user when we're notified
330 of shared library events by the dynamic linker. */
331 int stop_on_solib_events;
332
333 /* Enable or disable optional shared library event breakpoints
334 as appropriate when the above flag is changed. */
335
336 static void
337 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
338 {
339 update_solib_breakpoints ();
340 }
341
342 static void
343 show_stop_on_solib_events (struct ui_file *file, int from_tty,
344 struct cmd_list_element *c, const char *value)
345 {
346 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
347 value);
348 }
349
350 /* Nonzero means expecting a trace trap
351 and that we should stop the inferior and return silently when it happens. */
352
353 int stop_after_trap;
354
355 /* Save register contents here when executing a "finish" command or when
356 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
357 Thus this contains the return value from the called function (assuming
358 values are returned in a register). */
359
360 struct regcache *stop_registers;
361
362 /* Nonzero after stop if current stack frame should be printed. */
363
364 static int stop_print_frame;
365
366 /* This is a cached copy of the pid/waitstatus of the last event
367 returned by target_wait()/deprecated_target_wait_hook(). This
368 information is returned by get_last_target_status(). */
369 static ptid_t target_last_wait_ptid;
370 static struct target_waitstatus target_last_waitstatus;
371
372 static void context_switch (ptid_t ptid);
373
374 void init_thread_stepping_state (struct thread_info *tss);
375
376 static const char follow_fork_mode_child[] = "child";
377 static const char follow_fork_mode_parent[] = "parent";
378
379 static const char *const follow_fork_mode_kind_names[] = {
380 follow_fork_mode_child,
381 follow_fork_mode_parent,
382 NULL
383 };
384
385 static const char *follow_fork_mode_string = follow_fork_mode_parent;
386 static void
387 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
388 struct cmd_list_element *c, const char *value)
389 {
390 fprintf_filtered (file,
391 _("Debugger response to a program "
392 "call of fork or vfork is \"%s\".\n"),
393 value);
394 }
395 \f
396
397 /* Handle changes to the inferior list based on the type of fork,
398 which process is being followed, and whether the other process
399 should be detached. On entry inferior_ptid must be the ptid of
400 the fork parent. At return inferior_ptid is the ptid of the
401 followed inferior. */
402
403 static int
404 follow_fork_inferior (int follow_child, int detach_fork)
405 {
406 int has_vforked;
407 ptid_t parent_ptid, child_ptid;
408
409 has_vforked = (inferior_thread ()->pending_follow.kind
410 == TARGET_WAITKIND_VFORKED);
411 parent_ptid = inferior_ptid;
412 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
413
414 if (has_vforked
415 && !non_stop /* Non-stop always resumes both branches. */
416 && (!target_is_async_p () || sync_execution)
417 && !(follow_child || detach_fork || sched_multi))
418 {
419 /* The parent stays blocked inside the vfork syscall until the
420 child execs or exits. If we don't let the child run, then
421 the parent stays blocked. If we're telling the parent to run
422 in the foreground, the user will not be able to ctrl-c to get
423 back the terminal, effectively hanging the debug session. */
424 fprintf_filtered (gdb_stderr, _("\
425 Can not resume the parent process over vfork in the foreground while\n\
426 holding the child stopped. Try \"set detach-on-fork\" or \
427 \"set schedule-multiple\".\n"));
428 /* FIXME output string > 80 columns. */
429 return 1;
430 }
431
432 if (!follow_child)
433 {
434 /* Detach new forked process? */
435 if (detach_fork)
436 {
437 struct cleanup *old_chain;
438
439 /* Before detaching from the child, remove all breakpoints
440 from it. If we forked, then this has already been taken
441 care of by infrun.c. If we vforked however, any
442 breakpoint inserted in the parent is visible in the
443 child, even those added while stopped in a vfork
444 catchpoint. This will remove the breakpoints from the
445 parent also, but they'll be reinserted below. */
446 if (has_vforked)
447 {
448 /* Keep breakpoints list in sync. */
449 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
450 }
451
452 if (info_verbose || debug_infrun)
453 {
454 target_terminal_ours_for_output ();
455 fprintf_filtered (gdb_stdlog,
456 _("Detaching after %s from child %s.\n"),
457 has_vforked ? "vfork" : "fork",
458 target_pid_to_str (child_ptid));
459 }
460 }
461 else
462 {
463 struct inferior *parent_inf, *child_inf;
464 struct cleanup *old_chain;
465
466 /* Add process to GDB's tables. */
467 child_inf = add_inferior (ptid_get_pid (child_ptid));
468
469 parent_inf = current_inferior ();
470 child_inf->attach_flag = parent_inf->attach_flag;
471 copy_terminal_info (child_inf, parent_inf);
472 child_inf->gdbarch = parent_inf->gdbarch;
473 copy_inferior_target_desc_info (child_inf, parent_inf);
474
475 old_chain = save_inferior_ptid ();
476 save_current_program_space ();
477
478 inferior_ptid = child_ptid;
479 add_thread (inferior_ptid);
480 child_inf->symfile_flags = SYMFILE_NO_READ;
481
482 /* If this is a vfork child, then the address-space is
483 shared with the parent. */
484 if (has_vforked)
485 {
486 child_inf->pspace = parent_inf->pspace;
487 child_inf->aspace = parent_inf->aspace;
488
489 /* The parent will be frozen until the child is done
490 with the shared region. Keep track of the
491 parent. */
492 child_inf->vfork_parent = parent_inf;
493 child_inf->pending_detach = 0;
494 parent_inf->vfork_child = child_inf;
495 parent_inf->pending_detach = 0;
496 }
497 else
498 {
499 child_inf->aspace = new_address_space ();
500 child_inf->pspace = add_program_space (child_inf->aspace);
501 child_inf->removable = 1;
502 set_current_program_space (child_inf->pspace);
503 clone_program_space (child_inf->pspace, parent_inf->pspace);
504
505 /* Let the shared library layer (e.g., solib-svr4) learn
506 about this new process, relocate the cloned exec, pull
507 in shared libraries, and install the solib event
508 breakpoint. If a "cloned-VM" event was propagated
509 better throughout the core, this wouldn't be
510 required. */
511 solib_create_inferior_hook (0);
512 }
513
514 do_cleanups (old_chain);
515 }
516
517 if (has_vforked)
518 {
519 struct inferior *parent_inf;
520
521 parent_inf = current_inferior ();
522
523 /* If we detached from the child, then we have to be careful
524 to not insert breakpoints in the parent until the child
525 is done with the shared memory region. However, if we're
526 staying attached to the child, then we can and should
527 insert breakpoints, so that we can debug it. A
528 subsequent child exec or exit is enough to know when the
529 child stops using the parent's address space. */
530 parent_inf->waiting_for_vfork_done = detach_fork;
531 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
532 }
533 }
534 else
535 {
536 /* Follow the child. */
537 struct inferior *parent_inf, *child_inf;
538 struct program_space *parent_pspace;
539
540 if (info_verbose || debug_infrun)
541 {
542 target_terminal_ours_for_output ();
543 fprintf_filtered (gdb_stdlog,
544 _("Attaching after %s %s to child %s.\n"),
545 target_pid_to_str (parent_ptid),
546 has_vforked ? "vfork" : "fork",
547 target_pid_to_str (child_ptid));
548 }
549
550 /* Add the new inferior first, so that the target_detach below
551 doesn't unpush the target. */
552
553 child_inf = add_inferior (ptid_get_pid (child_ptid));
554
555 parent_inf = current_inferior ();
556 child_inf->attach_flag = parent_inf->attach_flag;
557 copy_terminal_info (child_inf, parent_inf);
558 child_inf->gdbarch = parent_inf->gdbarch;
559 copy_inferior_target_desc_info (child_inf, parent_inf);
560
561 parent_pspace = parent_inf->pspace;
562
563 /* If we're vforking, we want to hold on to the parent until the
564 child exits or execs. At child exec or exit time we can
565 remove the old breakpoints from the parent and detach or
566 resume debugging it. Otherwise, detach the parent now; we'll
567 want to reuse its program/address spaces, but we can't set
568 them to the child before removing breakpoints from the
569 parent; otherwise the breakpoints module could decide to
570 remove breakpoints from the wrong process (since they'd be
571 assigned to the same address space). */
572
573 if (has_vforked)
574 {
575 gdb_assert (child_inf->vfork_parent == NULL);
576 gdb_assert (parent_inf->vfork_child == NULL);
577 child_inf->vfork_parent = parent_inf;
578 child_inf->pending_detach = 0;
579 parent_inf->vfork_child = child_inf;
580 parent_inf->pending_detach = detach_fork;
581 parent_inf->waiting_for_vfork_done = 0;
582 }
583 else if (detach_fork)
584 {
585 if (info_verbose || debug_infrun)
586 {
587 target_terminal_ours_for_output ();
588 fprintf_filtered (gdb_stdlog,
589 _("Detaching after fork from "
590 "child %s.\n"),
591 target_pid_to_str (child_ptid));
592 }
593
594 target_detach (NULL, 0);
595 }
596
597 /* Note that the detach above makes PARENT_INF dangling. */
598
599 /* Add the child thread to the appropriate lists, and switch to
600 this new thread, before cloning the program space, and
601 informing the solib layer about this new process. */
602
603 inferior_ptid = child_ptid;
604 add_thread (inferior_ptid);
605
606 /* If this is a vfork child, then the address-space is shared
607 with the parent. If we detached from the parent, then we can
608 reuse the parent's program/address spaces. */
609 if (has_vforked || detach_fork)
610 {
611 child_inf->pspace = parent_pspace;
612 child_inf->aspace = child_inf->pspace->aspace;
613 }
614 else
615 {
616 child_inf->aspace = new_address_space ();
617 child_inf->pspace = add_program_space (child_inf->aspace);
618 child_inf->removable = 1;
619 child_inf->symfile_flags = SYMFILE_NO_READ;
620 set_current_program_space (child_inf->pspace);
621 clone_program_space (child_inf->pspace, parent_pspace);
622
623 /* Let the shared library layer (e.g., solib-svr4) learn
624 about this new process, relocate the cloned exec, pull in
625 shared libraries, and install the solib event breakpoint.
626 If a "cloned-VM" event was propagated better throughout
627 the core, this wouldn't be required. */
628 solib_create_inferior_hook (0);
629 }
630 }
631
632 return target_follow_fork (follow_child, detach_fork);
633 }
634
635 /* Tell the target to follow the fork we're stopped at. Returns true
636 if the inferior should be resumed; false, if the target for some
637 reason decided it's best not to resume. */
638
639 static int
640 follow_fork (void)
641 {
642 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
643 int should_resume = 1;
644 struct thread_info *tp;
645
646 /* Copy user stepping state to the new inferior thread. FIXME: the
647 followed fork child thread should have a copy of most of the
648 parent thread structure's run control related fields, not just these.
649 Initialized to avoid "may be used uninitialized" warnings from gcc. */
650 struct breakpoint *step_resume_breakpoint = NULL;
651 struct breakpoint *exception_resume_breakpoint = NULL;
652 CORE_ADDR step_range_start = 0;
653 CORE_ADDR step_range_end = 0;
654 struct frame_id step_frame_id = { 0 };
655 struct interp *command_interp = NULL;
656
657 if (!non_stop)
658 {
659 ptid_t wait_ptid;
660 struct target_waitstatus wait_status;
661
662 /* Get the last target status returned by target_wait(). */
663 get_last_target_status (&wait_ptid, &wait_status);
664
665 /* If not stopped at a fork event, then there's nothing else to
666 do. */
667 if (wait_status.kind != TARGET_WAITKIND_FORKED
668 && wait_status.kind != TARGET_WAITKIND_VFORKED)
669 return 1;
670
671 /* Check if we switched over from WAIT_PTID, since the event was
672 reported. */
673 if (!ptid_equal (wait_ptid, minus_one_ptid)
674 && !ptid_equal (inferior_ptid, wait_ptid))
675 {
676 /* We did. Switch back to WAIT_PTID thread, to tell the
677 target to follow it (in either direction). We'll
678 afterwards refuse to resume, and inform the user what
679 happened. */
680 switch_to_thread (wait_ptid);
681 should_resume = 0;
682 }
683 }
684
685 tp = inferior_thread ();
686
687 /* If there were any forks/vforks that were caught and are now to be
688 followed, then do so now. */
689 switch (tp->pending_follow.kind)
690 {
691 case TARGET_WAITKIND_FORKED:
692 case TARGET_WAITKIND_VFORKED:
693 {
694 ptid_t parent, child;
695
696 /* If the user did a next/step, etc, over a fork call,
697 preserve the stepping state in the fork child. */
698 if (follow_child && should_resume)
699 {
700 step_resume_breakpoint = clone_momentary_breakpoint
701 (tp->control.step_resume_breakpoint);
702 step_range_start = tp->control.step_range_start;
703 step_range_end = tp->control.step_range_end;
704 step_frame_id = tp->control.step_frame_id;
705 exception_resume_breakpoint
706 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
707 command_interp = tp->control.command_interp;
708
709 /* For now, delete the parent's sr breakpoint, otherwise,
710 parent/child sr breakpoints are considered duplicates,
711 and the child version will not be installed. Remove
712 this when the breakpoints module becomes aware of
713 inferiors and address spaces. */
714 delete_step_resume_breakpoint (tp);
715 tp->control.step_range_start = 0;
716 tp->control.step_range_end = 0;
717 tp->control.step_frame_id = null_frame_id;
718 delete_exception_resume_breakpoint (tp);
719 tp->control.command_interp = NULL;
720 }
721
722 parent = inferior_ptid;
723 child = tp->pending_follow.value.related_pid;
724
725 /* Set up inferior(s) as specified by the caller, and tell the
726 target to do whatever is necessary to follow either parent
727 or child. */
728 if (follow_fork_inferior (follow_child, detach_fork))
729 {
730 /* Target refused to follow, or there's some other reason
731 we shouldn't resume. */
732 should_resume = 0;
733 }
734 else
735 {
736 /* This pending follow fork event is now handled, one way
737 or another. The previously selected thread may be gone
738 from the lists by now, but if it is still around, we need
739 to clear the pending follow request. */
740 tp = find_thread_ptid (parent);
741 if (tp)
742 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
743
744 /* This makes sure we don't try to apply the "Switched
745 over from WAIT_PTID" logic above. */
746 nullify_last_target_wait_ptid ();
747
748 /* If we followed the child, switch to it... */
749 if (follow_child)
750 {
751 switch_to_thread (child);
752
753 /* ... and preserve the stepping state, in case the
754 user was stepping over the fork call. */
755 if (should_resume)
756 {
757 tp = inferior_thread ();
758 tp->control.step_resume_breakpoint
759 = step_resume_breakpoint;
760 tp->control.step_range_start = step_range_start;
761 tp->control.step_range_end = step_range_end;
762 tp->control.step_frame_id = step_frame_id;
763 tp->control.exception_resume_breakpoint
764 = exception_resume_breakpoint;
765 tp->control.command_interp = command_interp;
766 }
767 else
768 {
769 /* If we get here, it was because we're trying to
770 resume from a fork catchpoint, but, the user
771 has switched threads away from the thread that
772 forked. In that case, the resume command
773 issued is most likely not applicable to the
774 child, so just warn, and refuse to resume. */
775 warning (_("Not resuming: switched threads "
776 "before following fork child.\n"));
777 }
778
779 /* Reset breakpoints in the child as appropriate. */
780 follow_inferior_reset_breakpoints ();
781 }
782 else
783 switch_to_thread (parent);
784 }
785 }
786 break;
787 case TARGET_WAITKIND_SPURIOUS:
788 /* Nothing to follow. */
789 break;
790 default:
791 internal_error (__FILE__, __LINE__,
792 "Unexpected pending_follow.kind %d\n",
793 tp->pending_follow.kind);
794 break;
795 }
796
797 return should_resume;
798 }
799
800 static void
801 follow_inferior_reset_breakpoints (void)
802 {
803 struct thread_info *tp = inferior_thread ();
804
805 /* Was there a step_resume breakpoint? (There was if the user
806 did a "next" at the fork() call.) If so, explicitly reset its
807 thread number. Cloned step_resume breakpoints are disabled on
808 creation, so enable it here now that it is associated with the
809 correct thread.
810
811 step_resumes are a form of bp that are made to be per-thread.
812 Since we created the step_resume bp when the parent process
813 was being debugged, and now are switching to the child process,
814 from the breakpoint package's viewpoint, that's a switch of
815 "threads". We must update the bp's notion of which thread
816 it is for, or it'll be ignored when it triggers. */
817
818 if (tp->control.step_resume_breakpoint)
819 {
820 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
821 tp->control.step_resume_breakpoint->loc->enabled = 1;
822 }
823
824 /* Treat exception_resume breakpoints like step_resume breakpoints. */
825 if (tp->control.exception_resume_breakpoint)
826 {
827 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
828 tp->control.exception_resume_breakpoint->loc->enabled = 1;
829 }
830
831 /* Reinsert all breakpoints in the child. The user may have set
832 breakpoints after catching the fork, in which case those
833 were never set in the child, but only in the parent. This makes
834 sure the inserted breakpoints match the breakpoint list. */
835
836 breakpoint_re_set ();
837 insert_breakpoints ();
838 }
839
840 /* The child has exited or execed: resume threads of the parent the
841 user wanted to be executing. */
842
843 static int
844 proceed_after_vfork_done (struct thread_info *thread,
845 void *arg)
846 {
847 int pid = * (int *) arg;
848
849 if (ptid_get_pid (thread->ptid) == pid
850 && is_running (thread->ptid)
851 && !is_executing (thread->ptid)
852 && !thread->stop_requested
853 && thread->suspend.stop_signal == GDB_SIGNAL_0)
854 {
855 if (debug_infrun)
856 fprintf_unfiltered (gdb_stdlog,
857 "infrun: resuming vfork parent thread %s\n",
858 target_pid_to_str (thread->ptid));
859
860 switch_to_thread (thread->ptid);
861 clear_proceed_status (0);
862 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
863 }
864
865 return 0;
866 }
867
868 /* Called whenever we notice an exec or exit event, to handle
869 detaching or resuming a vfork parent. */
870
871 static void
872 handle_vfork_child_exec_or_exit (int exec)
873 {
874 struct inferior *inf = current_inferior ();
875
876 if (inf->vfork_parent)
877 {
878 int resume_parent = -1;
879
880 /* This exec or exit marks the end of the shared memory region
881 between the parent and the child. If the user wanted to
882 detach from the parent, now is the time. */
883
884 if (inf->vfork_parent->pending_detach)
885 {
886 struct thread_info *tp;
887 struct cleanup *old_chain;
888 struct program_space *pspace;
889 struct address_space *aspace;
890
891 /* follow-fork child, detach-on-fork on. */
892
893 inf->vfork_parent->pending_detach = 0;
894
895 if (!exec)
896 {
897 /* If we're handling a child exit, then inferior_ptid
898 points at the inferior's pid, not at a thread. */
899 old_chain = save_inferior_ptid ();
900 save_current_program_space ();
901 save_current_inferior ();
902 }
903 else
904 old_chain = save_current_space_and_thread ();
905
906 /* We're letting go of the parent. */
907 tp = any_live_thread_of_process (inf->vfork_parent->pid);
908 switch_to_thread (tp->ptid);
909
910 /* We're about to detach from the parent, which implicitly
911 removes breakpoints from its address space. There's a
912 catch here: we want to reuse the spaces for the child,
913 but, parent/child are still sharing the pspace at this
914 point, although the exec in reality makes the kernel give
915 the child a fresh set of new pages. The problem here is
916 that the breakpoints module, being unaware of this, would
917 likely choose the child process to write to the parent
918 address space. Swapping the child temporarily away from
919 the spaces has the desired effect. Yes, this is "sort
920 of" a hack. */
921
922 pspace = inf->pspace;
923 aspace = inf->aspace;
924 inf->aspace = NULL;
925 inf->pspace = NULL;
926
927 if (debug_infrun || info_verbose)
928 {
929 target_terminal_ours_for_output ();
930
931 if (exec)
932 {
933 fprintf_filtered (gdb_stdlog,
934 _("Detaching vfork parent process "
935 "%d after child exec.\n"),
936 inf->vfork_parent->pid);
937 }
938 else
939 {
940 fprintf_filtered (gdb_stdlog,
941 _("Detaching vfork parent process "
942 "%d after child exit.\n"),
943 inf->vfork_parent->pid);
944 }
945 }
946
947 target_detach (NULL, 0);
948
949 /* Put it back. */
950 inf->pspace = pspace;
951 inf->aspace = aspace;
952
953 do_cleanups (old_chain);
954 }
955 else if (exec)
956 {
957 /* We're staying attached to the parent, so, really give the
958 child a new address space. */
959 inf->pspace = add_program_space (maybe_new_address_space ());
960 inf->aspace = inf->pspace->aspace;
961 inf->removable = 1;
962 set_current_program_space (inf->pspace);
963
964 resume_parent = inf->vfork_parent->pid;
965
966 /* Break the bonds. */
967 inf->vfork_parent->vfork_child = NULL;
968 }
969 else
970 {
971 struct cleanup *old_chain;
972 struct program_space *pspace;
973
974 /* If this is a vfork child exiting, then the pspace and
975 aspaces were shared with the parent. Since we're
976 reporting the process exit, we'll be mourning all that is
977 found in the address space, and switching to null_ptid,
978 preparing to start a new inferior. But, since we don't
979 want to clobber the parent's address/program spaces, we
980 go ahead and create a new one for this exiting
981 inferior. */
982
983 /* Switch to null_ptid, so that clone_program_space doesn't want
984 to read the selected frame of a dead process. */
985 old_chain = save_inferior_ptid ();
986 inferior_ptid = null_ptid;
987
988 /* This inferior is dead, so avoid giving the breakpoints
989 module the option to write through to it (cloning a
990 program space resets breakpoints). */
991 inf->aspace = NULL;
992 inf->pspace = NULL;
993 pspace = add_program_space (maybe_new_address_space ());
994 set_current_program_space (pspace);
995 inf->removable = 1;
996 inf->symfile_flags = SYMFILE_NO_READ;
997 clone_program_space (pspace, inf->vfork_parent->pspace);
998 inf->pspace = pspace;
999 inf->aspace = pspace->aspace;
1000
1001 /* Put back inferior_ptid. We'll continue mourning this
1002 inferior. */
1003 do_cleanups (old_chain);
1004
1005 resume_parent = inf->vfork_parent->pid;
1006 /* Break the bonds. */
1007 inf->vfork_parent->vfork_child = NULL;
1008 }
1009
1010 inf->vfork_parent = NULL;
1011
1012 gdb_assert (current_program_space == inf->pspace);
1013
1014 if (non_stop && resume_parent != -1)
1015 {
1016 /* If the user wanted the parent to be running, let it go
1017 free now. */
1018 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1019
1020 if (debug_infrun)
1021 fprintf_unfiltered (gdb_stdlog,
1022 "infrun: resuming vfork parent process %d\n",
1023 resume_parent);
1024
1025 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1026
1027 do_cleanups (old_chain);
1028 }
1029 }
1030 }
1031
1032 /* Enum strings for "set|show follow-exec-mode". */
1033
1034 static const char follow_exec_mode_new[] = "new";
1035 static const char follow_exec_mode_same[] = "same";
1036 static const char *const follow_exec_mode_names[] =
1037 {
1038 follow_exec_mode_new,
1039 follow_exec_mode_same,
1040 NULL,
1041 };
1042
1043 static const char *follow_exec_mode_string = follow_exec_mode_same;
1044 static void
1045 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1046 struct cmd_list_element *c, const char *value)
1047 {
1048 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1049 }
1050
1051 /* EXECD_PATHNAME is assumed to be non-NULL. */
1052
1053 static void
1054 follow_exec (ptid_t ptid, char *execd_pathname)
1055 {
1056 struct thread_info *th, *tmp;
1057 struct inferior *inf = current_inferior ();
1058 int pid = ptid_get_pid (ptid);
1059
1060 /* This is an exec event that we actually wish to pay attention to.
1061 Refresh our symbol table to the newly exec'd program, remove any
1062 momentary bp's, etc.
1063
1064 If there are breakpoints, they aren't really inserted now,
1065 since the exec() transformed our inferior into a fresh set
1066 of instructions.
1067
1068 We want to preserve symbolic breakpoints on the list, since
1069 we have hopes that they can be reset after the new a.out's
1070 symbol table is read.
1071
1072 However, any "raw" breakpoints must be removed from the list
1073 (e.g., the solib bp's), since their address is probably invalid
1074 now.
1075
1076 And, we DON'T want to call delete_breakpoints() here, since
1077 that may write the bp's "shadow contents" (the instruction
1078 value that was overwritten with a TRAP instruction). Since
1079 we now have a new a.out, those shadow contents aren't valid. */
1080
1081 mark_breakpoints_out ();
1082
1083 /* The target reports the exec event to the main thread, even if
1084 some other thread does the exec, and even if the main thread was
1085 stopped or already gone. We may still have non-leader threads of
1086 the process on our list. E.g., on targets that don't have thread
1087 exit events (like remote); or on native Linux in non-stop mode if
1088 there were only two threads in the inferior and the non-leader
1089 one is the one that execs (and nothing forces an update of the
1090 thread list up to here). When debugging remotely, it's best to
1091 avoid extra traffic, when possible, so avoid syncing the thread
1092 list with the target, and instead go ahead and delete all threads
1093 of the process but the one that reported the event. Note this must
1094 be done before calling update_breakpoints_after_exec, as
1095 otherwise clearing the threads' resources would reference stale
1096 thread breakpoints -- it may have been one of these threads that
1097 stepped across the exec. We could just clear their stepping
1098 states, but as long as we're iterating, might as well delete
1099 them. Deleting them now rather than at the next user-visible
1100 stop provides a nicer sequence of events for user and MI
1101 notifications. */
1102 ALL_THREADS_SAFE (th, tmp)
1103 if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
1104 delete_thread (th->ptid);
1105
1106 /* We also need to clear any left over stale state for the
1107 leader/event thread. E.g., if there was any step-resume
1108 breakpoint or similar, it's gone now. We cannot truly
1109 step-to-next statement through an exec(). */
1110 th = inferior_thread ();
1111 th->control.step_resume_breakpoint = NULL;
1112 th->control.exception_resume_breakpoint = NULL;
1113 th->control.single_step_breakpoints = NULL;
1114 th->control.step_range_start = 0;
1115 th->control.step_range_end = 0;
1116
1117 /* The user may have had the main thread held stopped in the
1118 previous image (e.g., schedlock on, or non-stop). Release
1119 it now. */
1120 th->stop_requested = 0;
1121
1122 update_breakpoints_after_exec ();
1123
1124 /* What is this a.out's name? */
1125 printf_unfiltered (_("%s is executing new program: %s\n"),
1126 target_pid_to_str (inferior_ptid),
1127 execd_pathname);
1128
1129 /* We've followed the inferior through an exec. Therefore, the
1130 inferior has essentially been killed & reborn. */
1131
1132 gdb_flush (gdb_stdout);
1133
1134 breakpoint_init_inferior (inf_execd);
1135
1136 if (gdb_sysroot && *gdb_sysroot)
1137 {
1138 char *name = alloca (strlen (gdb_sysroot)
1139 + strlen (execd_pathname)
1140 + 1);
1141
1142 strcpy (name, gdb_sysroot);
1143 strcat (name, execd_pathname);
1144 execd_pathname = name;
1145 }
1146
1147 /* Reset the shared library package. This ensures that we get a
1148 shlib event when the child reaches "_start", at which point the
1149 dld will have had a chance to initialize the child. */
1150 /* Also, loading a symbol file below may trigger symbol lookups, and
1151 we don't want those to be satisfied by the libraries of the
1152 previous incarnation of this process. */
1153 no_shared_libraries (NULL, 0);
1154
1155 if (follow_exec_mode_string == follow_exec_mode_new)
1156 {
1157 struct program_space *pspace;
1158
1159 /* The user wants to keep the old inferior and program spaces
1160 around. Create a new fresh one, and switch to it. */
1161
1162 inf = add_inferior (current_inferior ()->pid);
1163 pspace = add_program_space (maybe_new_address_space ());
1164 inf->pspace = pspace;
1165 inf->aspace = pspace->aspace;
1166
1167 exit_inferior_num_silent (current_inferior ()->num);
1168
1169 set_current_inferior (inf);
1170 set_current_program_space (pspace);
1171 }
1172 else
1173 {
1174 /* The old description may no longer be fit for the new image.
1175 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1176 old description; we'll read a new one below. No need to do
1177 this on "follow-exec-mode new", as the old inferior stays
1178 around (its description is later cleared/refetched on
1179 restart). */
1180 target_clear_description ();
1181 }
1182
1183 gdb_assert (current_program_space == inf->pspace);
1184
1185 /* That a.out is now the one to use. */
1186 exec_file_attach (execd_pathname, 0);
1187
1188 /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a PIE
1189 (Position Independent Executable) main symbol file will only get applied by
1190 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
1191 the breakpoints with the zero displacement. */
1192
1193 symbol_file_add (execd_pathname,
1194 (inf->symfile_flags
1195 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
1196 NULL, 0);
1197
1198 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
1199 set_initial_language ();
1200
1201 /* If the target can specify a description, read it. Must do this
1202 after flipping to the new executable (because the target supplied
1203 description must be compatible with the executable's
1204 architecture, and the old executable may e.g., be 32-bit, while
1205 the new one 64-bit), and before anything involving memory or
1206 registers. */
1207 target_find_description ();
1208
1209 solib_create_inferior_hook (0);
1210
1211 jit_inferior_created_hook ();
1212
1213 breakpoint_re_set ();
1214
1215 /* Reinsert all breakpoints. (Those which were symbolic have
1216 been reset to the proper address in the new a.out, thanks
1217 to symbol_file_command...). */
1218 insert_breakpoints ();
1219
1220 /* The next resume of this inferior should bring it to the shlib
1221 startup breakpoints. (If the user had also set bp's on
1222 "main" from the old (parent) process, then they'll auto-
1223 matically get reset there in the new process.). */
1224 }
1225
1226 /* Info about an instruction that is being stepped over. */
1227
1228 struct step_over_info
1229 {
1230 /* If we're stepping past a breakpoint, this is the address space
1231 and address of the instruction the breakpoint is set at. We'll
1232 skip inserting all breakpoints here. Valid iff ASPACE is
1233 non-NULL. */
1234 struct address_space *aspace;
1235 CORE_ADDR address;
1236
1237 /* The instruction being stepped over triggers a nonsteppable
1238 watchpoint. If true, we'll skip inserting watchpoints. */
1239 int nonsteppable_watchpoint_p;
1240 };
1241
1242 /* The step-over info of the location that is being stepped over.
1243
1244 Note that with async/breakpoint always-inserted mode, a user might
1245 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1246 being stepped over. As setting a new breakpoint inserts all
1247 breakpoints, we need to make sure the breakpoint being stepped over
1248 isn't inserted then. We do that by only clearing the step-over
1249 info when the step-over is actually finished (or aborted).
1250
1251 Presently GDB can only step over one breakpoint at any given time.
1252 Given threads that can't run code in the same address space as the
1253 breakpoint's can't really miss the breakpoint, GDB could be taught
1254 to step-over at most one breakpoint per address space (so this info
1255 could move to the address space object if/when GDB is extended).
1256 The set of breakpoints being stepped over will normally be much
1257 smaller than the set of all breakpoints, so a flag in the
1258 breakpoint location structure would be wasteful. A separate list
1259 also saves complexity and run-time, as otherwise we'd have to go
1260 through all breakpoint locations clearing their flag whenever we
1261 start a new sequence. Similar considerations weigh against storing
1262 this info in the thread object. Plus, not all step overs actually
1263 have breakpoint locations -- e.g., stepping past a single-step
1264 breakpoint, or stepping to complete a non-continuable
1265 watchpoint. */
1266 static struct step_over_info step_over_info;
1267
1268 /* Record the address of the breakpoint/instruction we're currently
1269 stepping over. */
1270
1271 static void
1272 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1273 int nonsteppable_watchpoint_p)
1274 {
1275 step_over_info.aspace = aspace;
1276 step_over_info.address = address;
1277 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1278 }
1279
1280 /* Called when we're no longer stepping over a breakpoint / an
1281 instruction, so all breakpoints are free to be (re)inserted. */
1282
1283 static void
1284 clear_step_over_info (void)
1285 {
1286 step_over_info.aspace = NULL;
1287 step_over_info.address = 0;
1288 step_over_info.nonsteppable_watchpoint_p = 0;
1289 }
1290
1291 /* See infrun.h. */
1292
1293 int
1294 stepping_past_instruction_at (struct address_space *aspace,
1295 CORE_ADDR address)
1296 {
1297 return (step_over_info.aspace != NULL
1298 && breakpoint_address_match (aspace, address,
1299 step_over_info.aspace,
1300 step_over_info.address));
1301 }
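/* The expected consumer of the above is the breakpoint insertion
   path: before inserting a location, breakpoint.c can ask whether
   that address is the one currently being stepped past and, if so,
   leave the location uninserted until the step-over finishes.
   Illustrative sketch (BL stands for a struct bp_location):

     if (stepping_past_instruction_at (bl->pspace->aspace, bl->address))
       return 0;  */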
1302
1303 /* See infrun.h. */
1304
1305 int
1306 stepping_past_nonsteppable_watchpoint (void)
1307 {
1308 return step_over_info.nonsteppable_watchpoint_p;
1309 }
1310
1311 /* Returns true if step-over info is valid. */
1312
1313 static int
1314 step_over_info_valid_p (void)
1315 {
1316 return (step_over_info.aspace != NULL
1317 || stepping_past_nonsteppable_watchpoint ());
1318 }
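/* Putting the routines above together, an in-line step over a
   breakpoint at PC in ASPACE has roughly this shape (minimal sketch;
   the real sequence is driven by resume and handle_inferior_event
   further below):

     set_step_over_info (aspace, pc, 0);
     insert_breakpoints ();          (the breakpoint at PC stays out)
     ... single-step the thread ...
     clear_step_over_info ();
     insert_breakpoints ();          (the breakpoint at PC goes back in)  */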
1319
1320 \f
1321 /* Displaced stepping. */
1322
1323 /* In non-stop debugging mode, we must take special care to manage
1324 breakpoints properly; in particular, the traditional strategy for
1325 stepping a thread past a breakpoint it has hit is unsuitable.
1326 'Displaced stepping' is a tactic for stepping one thread past a
1327 breakpoint it has hit while ensuring that other threads running
1328 concurrently will hit the breakpoint as they should.
1329
1330 The traditional way to step a thread T off a breakpoint in a
1331 multi-threaded program in all-stop mode is as follows:
1332
1333 a0) Initially, all threads are stopped, and breakpoints are not
1334 inserted.
1335 a1) We single-step T, leaving breakpoints uninserted.
1336 a2) We insert breakpoints, and resume all threads.
1337
1338 In non-stop debugging, however, this strategy is unsuitable: we
1339 don't want to have to stop all threads in the system in order to
1340 continue or step T past a breakpoint. Instead, we use displaced
1341 stepping:
1342
1343 n0) Initially, T is stopped, other threads are running, and
1344 breakpoints are inserted.
1345 n1) We copy the instruction "under" the breakpoint to a separate
1346 location, outside the main code stream, making any adjustments
1347 to the instruction, register, and memory state as directed by
1348 T's architecture.
1349 n2) We single-step T over the instruction at its new location.
1350 n3) We adjust the resulting register and memory state as directed
1351 by T's architecture. This includes resetting T's PC to point
1352 back into the main instruction stream.
1353 n4) We resume T.
1354
1355 This approach depends on the following gdbarch methods:
1356
1357 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1358 indicate where to copy the instruction, and how much space must
1359 be reserved there. We use these in step n1.
1360
1361 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1362 address, and makes any necessary adjustments to the instruction,
1363 register contents, and memory. We use this in step n1.
1364
1365 - gdbarch_displaced_step_fixup adjusts registers and memory after
1366 we have successfully single-stepped the instruction, to yield the
1367 same effect the instruction would have had if we had executed it
1368 at its original address. We use this in step n3.
1369
1370 - gdbarch_displaced_step_free_closure provides cleanup.
1371
1372 The gdbarch_displaced_step_copy_insn and
1373 gdbarch_displaced_step_fixup functions must be written so that
1374 copying an instruction with gdbarch_displaced_step_copy_insn,
1375 single-stepping across the copied instruction, and then applying
1376 gdbarch_displaced_step_fixup should have the same effects on the
1377 thread's memory and registers as stepping the instruction in place
1378 would have. Exactly which responsibilities fall to the copy and
1379 which fall to the fixup is up to the author of those functions.
1380
1381 See the comments in gdbarch.sh for details.
1382
1383 Note that displaced stepping and software single-step cannot
1384 currently be used in combination, although with some care I think
1385 they could be made to. Software single-step works by placing
1386 breakpoints on all possible subsequent instructions; if the
1387 displaced instruction is a PC-relative jump, those breakpoints
1388 could fall in very strange places --- on pages that aren't
1389 executable, or at addresses that are not proper instruction
1390 boundaries. (We do generally let other threads run while we wait
1391 to hit the software single-step breakpoint, and they might
1392 encounter such a corrupted instruction.) One way to work around
1393 this would be to have gdbarch_displaced_step_copy_insn fully
1394 simulate the effect of PC-relative instructions (and return NULL)
1395 on architectures that use software single-stepping.
1396
1397 In non-stop mode, we can have independent and simultaneous step
1398 requests, so more than one thread may need to simultaneously step
1399 over a breakpoint. The current implementation assumes there is
1400 only one scratch space per process. In this case, we have to
1401 serialize access to the scratch space. If thread A wants to step
1402 over a breakpoint, but we are currently waiting for some other
1403 thread to complete a displaced step, we leave thread A stopped and
1404 place it in the displaced_step_request_queue. Whenever a displaced
1405 step finishes, we pick the next thread in the queue and start a new
1406 displaced step operation on it. See displaced_step_prepare and
1407 displaced_step_fixup for details. */
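/* In terms of the gdbarch hooks named above, a single displaced step
   of thread T boils down to roughly the following (heavily abridged
   sketch; displaced_step_prepare and displaced_step_fixup below are
   the real implementations):

     copy = gdbarch_displaced_step_location (gdbarch);
     len = gdbarch_max_insn_length (gdbarch);
     ... save the LEN bytes currently at COPY ...
     closure = gdbarch_displaced_step_copy_insn (gdbarch, pc, copy,
                                                 regcache);
     regcache_write_pc (regcache, copy);
     ... hardware single-step T and wait for it to stop ...
     gdbarch_displaced_step_fixup (gdbarch, closure, pc, copy, regcache);
     gdbarch_displaced_step_free_closure (gdbarch, closure);
     ... restore the saved bytes at COPY and resume T normally ...  */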
1408
1409 struct displaced_step_request
1410 {
1411 ptid_t ptid;
1412 struct displaced_step_request *next;
1413 };
1414
1415 /* Per-inferior displaced stepping state. */
1416 struct displaced_step_inferior_state
1417 {
1418 /* Pointer to next in linked list. */
1419 struct displaced_step_inferior_state *next;
1420
1421 /* The process this displaced step state refers to. */
1422 int pid;
1423
1424 /* A queue of pending displaced stepping requests. One entry per
1425 thread that needs to do a displaced step. */
1426 struct displaced_step_request *step_request_queue;
1427
1428 /* If this is not null_ptid, this is the thread carrying out a
1429 displaced single-step in process PID. This thread's state will
1430 require fixing up once it has completed its step. */
1431 ptid_t step_ptid;
1432
1433 /* The architecture the thread had when we stepped it. */
1434 struct gdbarch *step_gdbarch;
1435
1436 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1437 for post-step cleanup. */
1438 struct displaced_step_closure *step_closure;
1439
1440 /* The address of the original instruction, and the copy we
1441 made. */
1442 CORE_ADDR step_original, step_copy;
1443
1444 /* Saved contents of copy area. */
1445 gdb_byte *step_saved_copy;
1446 };
1447
1448 /* The list of states of processes involved in displaced stepping
1449 presently. */
1450 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1451
1452 /* Get the displaced stepping state of process PID. */
1453
1454 static struct displaced_step_inferior_state *
1455 get_displaced_stepping_state (int pid)
1456 {
1457 struct displaced_step_inferior_state *state;
1458
1459 for (state = displaced_step_inferior_states;
1460 state != NULL;
1461 state = state->next)
1462 if (state->pid == pid)
1463 return state;
1464
1465 return NULL;
1466 }
1467
1468 /* Return true if process PID has a thread doing a displaced step. */
1469
1470 static int
1471 displaced_step_in_progress (int pid)
1472 {
1473 struct displaced_step_inferior_state *displaced;
1474
1475 displaced = get_displaced_stepping_state (pid);
1476 if (displaced != NULL && !ptid_equal (displaced->step_ptid, null_ptid))
1477 return 1;
1478
1479 return 0;
1480 }
1481
1482 /* Add a new displaced stepping state for process PID to the displaced
1483 stepping state list, or return a pointer to an already existing
1484 entry if one already exists. Never returns NULL. */
1485
1486 static struct displaced_step_inferior_state *
1487 add_displaced_stepping_state (int pid)
1488 {
1489 struct displaced_step_inferior_state *state;
1490
1491 for (state = displaced_step_inferior_states;
1492 state != NULL;
1493 state = state->next)
1494 if (state->pid == pid)
1495 return state;
1496
1497 state = xcalloc (1, sizeof (*state));
1498 state->pid = pid;
1499 state->next = displaced_step_inferior_states;
1500 displaced_step_inferior_states = state;
1501
1502 return state;
1503 }
1504
1505 /* If the inferior is displaced stepping, and ADDR equals the starting address
1506 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1507 return NULL. */
1508
1509 struct displaced_step_closure*
1510 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1511 {
1512 struct displaced_step_inferior_state *displaced
1513 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1514
1515 /* Used, e.g., when checking the mode of the displaced instruction in the copy area. */
1516 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1517 && (displaced->step_copy == addr))
1518 return displaced->step_closure;
1519
1520 return NULL;
1521 }
1522
1523 /* Remove the displaced stepping state of process PID. */
1524
1525 static void
1526 remove_displaced_stepping_state (int pid)
1527 {
1528 struct displaced_step_inferior_state *it, **prev_next_p;
1529
1530 gdb_assert (pid != 0);
1531
1532 it = displaced_step_inferior_states;
1533 prev_next_p = &displaced_step_inferior_states;
1534 while (it)
1535 {
1536 if (it->pid == pid)
1537 {
1538 *prev_next_p = it->next;
1539 xfree (it);
1540 return;
1541 }
1542
1543 prev_next_p = &it->next;
1544 it = *prev_next_p;
1545 }
1546 }
1547
1548 static void
1549 infrun_inferior_exit (struct inferior *inf)
1550 {
1551 remove_displaced_stepping_state (inf->pid);
1552 }
1553
1554 /* If ON, and the architecture supports it, GDB will use displaced
1555 stepping to step over breakpoints. If OFF, or if the architecture
1556 doesn't support it, GDB will instead use the traditional
1557 hold-and-step approach. If AUTO (which is the default), GDB will
1558 decide which technique to use to step over breakpoints depending on
1559 which of all-stop or non-stop mode is active --- displaced stepping
1560 in non-stop mode; hold-and-step in all-stop mode. */
1561
1562 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1563
1564 static void
1565 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1566 struct cmd_list_element *c,
1567 const char *value)
1568 {
1569 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1570 fprintf_filtered (file,
1571 _("Debugger's willingness to use displaced stepping "
1572 "to step over breakpoints is %s (currently %s).\n"),
1573 value, non_stop ? "on" : "off");
1574 else
1575 fprintf_filtered (file,
1576 _("Debugger's willingness to use displaced stepping "
1577 "to step over breakpoints is %s.\n"), value);
1578 }
1579
1580 /* Return non-zero if displaced stepping can/should be used to step
1581 over breakpoints. */
1582
1583 static int
1584 use_displaced_stepping (struct gdbarch *gdbarch)
1585 {
1586 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1587 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1588 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1589 && find_record_target () == NULL);
1590 }
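/* For example: with the default "set displaced-stepping auto" the
   expression above yields true only in non-stop mode; "set
   displaced-stepping on" enables it in all-stop mode too; and in
   either case it is vetoed if the architecture provides no
   displaced-step support or a record target is in use.  (Restating
   the expression above, not a separate specification.)  */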
1591
1592 /* Clean out any stray displaced stepping state. */
1593 static void
1594 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1595 {
1596 /* Indicate that there is no cleanup pending. */
1597 displaced->step_ptid = null_ptid;
1598
1599 if (displaced->step_closure)
1600 {
1601 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1602 displaced->step_closure);
1603 displaced->step_closure = NULL;
1604 }
1605 }
1606
1607 static void
1608 displaced_step_clear_cleanup (void *arg)
1609 {
1610 struct displaced_step_inferior_state *state = arg;
1611
1612 displaced_step_clear (state);
1613 }
1614
1615 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1616 void
1617 displaced_step_dump_bytes (struct ui_file *file,
1618 const gdb_byte *buf,
1619 size_t len)
1620 {
1621 int i;
1622
1623 for (i = 0; i < len; i++)
1624 fprintf_unfiltered (file, "%02x ", buf[i]);
1625 fputs_unfiltered ("\n", file);
1626 }
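/* For example, dumping the three bytes { 0x55, 0x48, 0x89 } (a
   hypothetical buffer) produces the single line "55 48 89 ": each
   byte as two lowercase hex digits followed by a space.  */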
1627
1628 /* Prepare to single-step, using displaced stepping.
1629
1630 Note that we cannot use displaced stepping when we have a signal to
1631 deliver. If we have a signal to deliver and an instruction to step
1632 over, then after the step, there will be no indication from the
1633 target whether the thread entered a signal handler or ignored the
1634 signal and stepped over the instruction successfully --- both cases
1635 result in a simple SIGTRAP. In the first case we mustn't do a
1636 fixup, and in the second case we must --- but we can't tell which.
1637 Comments in the code for 'random signals' in handle_inferior_event
1638 explain how we handle this case instead.
1639
1640 Returns 1 if preparing was successful -- this thread is going to be
1641 stepped now; or 0 if displaced stepping this thread got queued. */
1642 static int
1643 displaced_step_prepare (ptid_t ptid)
1644 {
1645 struct cleanup *old_cleanups, *ignore_cleanups;
1646 struct thread_info *tp = find_thread_ptid (ptid);
1647 struct regcache *regcache = get_thread_regcache (ptid);
1648 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1649 CORE_ADDR original, copy;
1650 ULONGEST len;
1651 struct displaced_step_closure *closure;
1652 struct displaced_step_inferior_state *displaced;
1653 int status;
1654
1655 /* We should never reach this function if the architecture does not
1656 support displaced stepping. */
1657 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1658
1659 /* Disable range stepping while executing in the scratch pad. We
1660 want a single-step even if executing the displaced instruction in
1661 the scratch buffer lands within the stepping range (e.g., a
1662 jump/branch). */
1663 tp->control.may_range_step = 0;
1664
1665 /* We have to displaced step one thread at a time, as we only have
1666 access to a single scratch space per inferior. */
1667
1668 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1669
1670 if (!ptid_equal (displaced->step_ptid, null_ptid))
1671 {
1672 /* Already waiting for a displaced step to finish. Defer this
1673	 request and place it in the queue.  */
1674 struct displaced_step_request *req, *new_req;
1675
1676 if (debug_displaced)
1677 fprintf_unfiltered (gdb_stdlog,
1678 "displaced: defering step of %s\n",
1679 target_pid_to_str (ptid));
1680
1681 new_req = xmalloc (sizeof (*new_req));
1682 new_req->ptid = ptid;
1683 new_req->next = NULL;
1684
1685 if (displaced->step_request_queue)
1686 {
1687 for (req = displaced->step_request_queue;
1688 req && req->next;
1689 req = req->next)
1690 ;
1691 req->next = new_req;
1692 }
1693 else
1694 displaced->step_request_queue = new_req;
1695
1696 return 0;
1697 }
1698 else
1699 {
1700 if (debug_displaced)
1701 fprintf_unfiltered (gdb_stdlog,
1702 "displaced: stepping %s now\n",
1703 target_pid_to_str (ptid));
1704 }
1705
1706 displaced_step_clear (displaced);
1707
1708 old_cleanups = save_inferior_ptid ();
1709 inferior_ptid = ptid;
1710
1711 original = regcache_read_pc (regcache);
1712
1713 copy = gdbarch_displaced_step_location (gdbarch);
1714 len = gdbarch_max_insn_length (gdbarch);
1715
1716 /* Save the original contents of the copy area. */
1717 displaced->step_saved_copy = xmalloc (len);
1718 ignore_cleanups = make_cleanup (free_current_contents,
1719 &displaced->step_saved_copy);
1720 status = target_read_memory (copy, displaced->step_saved_copy, len);
1721 if (status != 0)
1722 throw_error (MEMORY_ERROR,
1723 _("Error accessing memory address %s (%s) for "
1724 "displaced-stepping scratch space."),
1725 paddress (gdbarch, copy), safe_strerror (status));
1726 if (debug_displaced)
1727 {
1728 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1729 paddress (gdbarch, copy));
1730 displaced_step_dump_bytes (gdb_stdlog,
1731 displaced->step_saved_copy,
1732 len);
1733 };
1734
1735 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1736 original, copy, regcache);
1737
1738 /* We don't support the fully-simulated case at present. */
1739 gdb_assert (closure);
1740
1741 /* Save the information we need to fix things up if the step
1742 succeeds. */
1743 displaced->step_ptid = ptid;
1744 displaced->step_gdbarch = gdbarch;
1745 displaced->step_closure = closure;
1746 displaced->step_original = original;
1747 displaced->step_copy = copy;
1748
1749 make_cleanup (displaced_step_clear_cleanup, displaced);
1750
1751 /* Resume execution at the copy. */
1752 regcache_write_pc (regcache, copy);
1753
1754 discard_cleanups (ignore_cleanups);
1755
1756 do_cleanups (old_cleanups);
1757
1758 if (debug_displaced)
1759 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1760 paddress (gdbarch, copy));
1761
1762 return 1;
1763 }
1764
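/* Write LEN bytes starting at MYADDR to inferior memory at MEMADDR,
   temporarily switching inferior_ptid to PTID so the write goes to
   that thread's inferior.  */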
1765 static void
1766 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1767 const gdb_byte *myaddr, int len)
1768 {
1769 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1770
1771 inferior_ptid = ptid;
1772 write_memory (memaddr, myaddr, len);
1773 do_cleanups (ptid_cleanup);
1774 }
1775
1776 /* Restore the contents of the copy area for thread PTID. */
1777
1778 static void
1779 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1780 ptid_t ptid)
1781 {
1782 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1783
1784 write_memory_ptid (ptid, displaced->step_copy,
1785 displaced->step_saved_copy, len);
1786 if (debug_displaced)
1787 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1788 target_pid_to_str (ptid),
1789 paddress (displaced->step_gdbarch,
1790 displaced->step_copy));
1791 }
1792
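/* Called when a displaced step of EVENT_PTID finishes and the thread
   stops with SIGNAL.  Restore the original contents of the scratch
   pad, fix up the thread's state (or just relocate its PC if the
   stepped instruction didn't complete), and then resume the next
   queued displaced-stepping request, if any.  */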
1793 static void
1794 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1795 {
1796 struct cleanup *old_cleanups;
1797 struct displaced_step_inferior_state *displaced
1798 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1799
1800 /* Was any thread of this process doing a displaced step? */
1801 if (displaced == NULL)
1802 return;
1803
1804 /* Was this event for the pid we displaced? */
1805 if (ptid_equal (displaced->step_ptid, null_ptid)
1806 || ! ptid_equal (displaced->step_ptid, event_ptid))
1807 return;
1808
1809 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1810
1811 displaced_step_restore (displaced, displaced->step_ptid);
1812
1813 /* Did the instruction complete successfully? */
1814 if (signal == GDB_SIGNAL_TRAP)
1815 {
1816 /* Fixup may need to read memory/registers. Switch to the
1817 thread that we're fixing up. */
1818 switch_to_thread (event_ptid);
1819
1820 /* Fix up the resulting state. */
1821 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1822 displaced->step_closure,
1823 displaced->step_original,
1824 displaced->step_copy,
1825 get_thread_regcache (displaced->step_ptid));
1826 }
1827 else
1828 {
1829 /* Since the instruction didn't complete, all we can do is
1830 relocate the PC. */
1831 struct regcache *regcache = get_thread_regcache (event_ptid);
1832 CORE_ADDR pc = regcache_read_pc (regcache);
1833
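/* The PC is somewhere within the scratch pad; map it back to the
   equivalent mainline address by adding its offset from the start
   of the copy area to the original instruction's address.  */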
1834 pc = displaced->step_original + (pc - displaced->step_copy);
1835 regcache_write_pc (regcache, pc);
1836 }
1837
1838 do_cleanups (old_cleanups);
1839
1840 displaced->step_ptid = null_ptid;
1841
1842 /* Are there any pending displaced stepping requests? If so, run
1843 one now. Leave the state object around, since we're likely to
1844 need it again soon. */
1845 while (displaced->step_request_queue)
1846 {
1847 struct displaced_step_request *head;
1848 ptid_t ptid;
1849 struct regcache *regcache;
1850 struct gdbarch *gdbarch;
1851 CORE_ADDR actual_pc;
1852 struct address_space *aspace;
1853
1854 head = displaced->step_request_queue;
1855 ptid = head->ptid;
1856 displaced->step_request_queue = head->next;
1857 xfree (head);
1858
1859 context_switch (ptid);
1860
1861 regcache = get_thread_regcache (ptid);
1862 actual_pc = regcache_read_pc (regcache);
1863 aspace = get_regcache_aspace (regcache);
1864
1865 if (breakpoint_here_p (aspace, actual_pc))
1866 {
1867 if (debug_displaced)
1868 fprintf_unfiltered (gdb_stdlog,
1869 "displaced: stepping queued %s now\n",
1870 target_pid_to_str (ptid));
1871
1872 displaced_step_prepare (ptid);
1873
1874 gdbarch = get_regcache_arch (regcache);
1875
1876 if (debug_displaced)
1877 {
1878 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1879 gdb_byte buf[4];
1880
1881 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1882 paddress (gdbarch, actual_pc));
1883 read_memory (actual_pc, buf, sizeof (buf));
1884 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1885 }
1886
1887 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1888 displaced->step_closure))
1889 target_resume (ptid, 1, GDB_SIGNAL_0);
1890 else
1891 target_resume (ptid, 0, GDB_SIGNAL_0);
1892
1893 /* Done, we're stepping a thread. */
1894 break;
1895 }
1896 else
1897 {
1898 int step;
1899 struct thread_info *tp = inferior_thread ();
1900
1901 /* The breakpoint we were sitting under has since been
1902 removed. */
1903 tp->control.trap_expected = 0;
1904
1905 /* Go back to what we were trying to do. */
1906 step = currently_stepping (tp);
1907
1908 if (debug_displaced)
1909 fprintf_unfiltered (gdb_stdlog,
1910 "displaced: breakpoint is gone: %s, step(%d)\n",
1911 target_pid_to_str (tp->ptid), step);
1912
1913 target_resume (ptid, step, GDB_SIGNAL_0);
1914 tp->suspend.stop_signal = GDB_SIGNAL_0;
1915
1916 /* This request was discarded. See if there's any other
1917 thread waiting for its turn. */
1918 }
1919 }
1920 }
1921
1922 /* Update global variables holding ptids to hold NEW_PTID if they were
1923 holding OLD_PTID. */
1924 static void
1925 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1926 {
1927 struct displaced_step_request *it;
1928 struct displaced_step_inferior_state *displaced;
1929
1930 if (ptid_equal (inferior_ptid, old_ptid))
1931 inferior_ptid = new_ptid;
1932
1933 for (displaced = displaced_step_inferior_states;
1934 displaced;
1935 displaced = displaced->next)
1936 {
1937 if (ptid_equal (displaced->step_ptid, old_ptid))
1938 displaced->step_ptid = new_ptid;
1939
1940 for (it = displaced->step_request_queue; it; it = it->next)
1941 if (ptid_equal (it->ptid, old_ptid))
1942 it->ptid = new_ptid;
1943 }
1944 }
1945
1946 \f
1947 /* Resuming. */
1948
1949 /* Things to clean up if we QUIT out of resume (). */
1950 static void
1951 resume_cleanups (void *ignore)
1952 {
1953 if (!ptid_equal (inferior_ptid, null_ptid))
1954 delete_single_step_breakpoints (inferior_thread ());
1955
1956 normal_stop ();
1957 }
1958
1959 static const char schedlock_off[] = "off";
1960 static const char schedlock_on[] = "on";
1961 static const char schedlock_step[] = "step";
1962 static const char *const scheduler_enums[] = {
1963 schedlock_off,
1964 schedlock_on,
1965 schedlock_step,
1966 NULL
1967 };
1968 static const char *scheduler_mode = schedlock_off;
1969 static void
1970 show_scheduler_mode (struct ui_file *file, int from_tty,
1971 struct cmd_list_element *c, const char *value)
1972 {
1973 fprintf_filtered (file,
1974 _("Mode for locking scheduler "
1975 "during execution is \"%s\".\n"),
1976 value);
1977 }
1978
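/* "set scheduler-locking" setter.  If the target cannot lock the
   scheduler, revert the mode to "off" and report an error.  */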
1979 static void
1980 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1981 {
1982 if (!target_can_lock_scheduler)
1983 {
1984 scheduler_mode = schedlock_off;
1985 error (_("Target '%s' cannot support this command."), target_shortname);
1986 }
1987 }
1988
1989 /* True if execution commands resume all threads of all processes by
1990 default; otherwise, resume only threads of the current inferior
1991 process. */
1992 int sched_multi = 0;
1993
1994 /* Try to setup for software single stepping over the specified location.
1995 Return 1 if target_resume() should use hardware single step.
1996
1997 GDBARCH the current gdbarch.
1998 PC the location to step over. */
1999
2000 static int
2001 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2002 {
2003 int hw_step = 1;
2004
2005 if (execution_direction == EXEC_FORWARD
2006 && gdbarch_software_single_step_p (gdbarch)
2007 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
2008 {
2009 hw_step = 0;
2010 }
2011 return hw_step;
2012 }
2013
2014 /* See infrun.h. */
2015
2016 ptid_t
2017 user_visible_resume_ptid (int step)
2018 {
2019 ptid_t resume_ptid;
2020
2021 if (non_stop)
2022 {
2023 /* With non-stop mode on, threads are always handled
2024 individually. */
2025 resume_ptid = inferior_ptid;
2026 }
2027 else if ((scheduler_mode == schedlock_on)
2028 || (scheduler_mode == schedlock_step && step))
2029 {
2030 /* User-settable 'scheduler' mode requires solo thread
2031 resume. */
2032 resume_ptid = inferior_ptid;
2033 }
2034 else if (!sched_multi && target_supports_multi_process ())
2035 {
2036 /* Resume all threads of the current process (and none of the other
2037 processes). */
2038 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2039 }
2040 else
2041 {
2042 /* Resume all threads of all processes. */
2043 resume_ptid = RESUME_ALL;
2044 }
2045
2046 return resume_ptid;
2047 }
2048
2049 /* Wrapper for target_resume, that handles infrun-specific
2050 bookkeeping. */
2051
2052 static void
2053 do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2054 {
2055 struct thread_info *tp = inferior_thread ();
2056
2057 /* Install inferior's terminal modes. */
2058 target_terminal_inferior ();
2059
2060 /* Avoid confusing the next resume, if the next stop/resume
2061 happens to apply to another thread. */
2062 tp->suspend.stop_signal = GDB_SIGNAL_0;
2063
2064 /* Advise target which signals may be handled silently.
2065
2066 If we have removed breakpoints because we are stepping over one
2067 in-line (in any thread), we need to receive all signals to avoid
2068 accidentally skipping a breakpoint during execution of a signal
2069 handler.
2070
2071 Likewise if we're displaced stepping, otherwise a trap for a
2072 breakpoint in a signal handler might be confused with the
2073 displaced step finishing. We don't make the displaced_step_fixup
2074 step distinguish the cases instead, because:
2075
2076 - a backtrace while stopped in the signal handler would show the
2077 scratch pad as frame older than the signal handler, instead of
2078 the real mainline code.
2079
2080 - when the thread is later resumed, the signal handler would
2081 return to the scratch pad area, which would no longer be
2082 valid. */
2083 if (step_over_info_valid_p ()
2084 || displaced_step_in_progress (ptid_get_pid (tp->ptid)))
2085 target_pass_signals (0, NULL);
2086 else
2087 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2088
2089 target_resume (resume_ptid, step, sig);
2090 }
2091
2092 /* Resume the inferior, but allow a QUIT. This is useful if the user
2093 wants to interrupt some lengthy single-stepping operation
2094 (for child processes, the SIGINT goes to the inferior, and so
2095 we get a SIGINT random_signal, but for remote debugging and perhaps
2096 other targets, that's not true).
2097
2098 SIG is the signal to give the inferior (zero for none). */
2099 void
2100 resume (enum gdb_signal sig)
2101 {
2102 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
2103 struct regcache *regcache = get_current_regcache ();
2104 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2105 struct thread_info *tp = inferior_thread ();
2106 CORE_ADDR pc = regcache_read_pc (regcache);
2107 struct address_space *aspace = get_regcache_aspace (regcache);
2108 ptid_t resume_ptid;
2109 /* This represents the user's step vs continue request. When
2110 deciding whether "set scheduler-locking step" applies, it's the
2111 user's intention that counts. */
2112 const int user_step = tp->control.stepping_command;
2113 /* This represents what we'll actually request the target to do.
2114 This can decay from a step to a continue, if e.g., we need to
2115 implement single-stepping with breakpoints (software
2116 single-step). */
2117 int step;
2118
2119 tp->stepped_breakpoint = 0;
2120
2121 QUIT;
2122
2123 /* Depends on stepped_breakpoint. */
2124 step = currently_stepping (tp);
2125
2126 if (current_inferior ()->waiting_for_vfork_done)
2127 {
2128 /* Don't try to single-step a vfork parent that is waiting for
2129 the child to get out of the shared memory region (by exec'ing
2130 or exiting). This is particularly important on software
2131 single-step archs, as the child process would trip on the
2132 software single step breakpoint inserted for the parent
2133 process. Since the parent will not actually execute any
2134 instruction until the child is out of the shared region (such
2135 are vfork's semantics), it is safe to simply continue it.
2136 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2137 the parent, and tell it to `keep_going', which automatically
2138 re-sets it stepping. */
2139 if (debug_infrun)
2140 fprintf_unfiltered (gdb_stdlog,
2141 "infrun: resume : clear step\n");
2142 step = 0;
2143 }
2144
2145 if (debug_infrun)
2146 fprintf_unfiltered (gdb_stdlog,
2147 "infrun: resume (step=%d, signal=%s), "
2148 "trap_expected=%d, current thread [%s] at %s\n",
2149 step, gdb_signal_to_symbol_string (sig),
2150 tp->control.trap_expected,
2151 target_pid_to_str (inferior_ptid),
2152 paddress (gdbarch, pc));
2153
2154 /* Normally, by the time we reach `resume', the breakpoints are either
2155 removed or inserted, as appropriate. The exception is if we're sitting
2156 at a permanent breakpoint; we need to step over it, but permanent
2157 breakpoints can't be removed. So we have to test for it here. */
2158 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2159 {
2160 if (sig != GDB_SIGNAL_0)
2161 {
2162 /* We have a signal to pass to the inferior. The resume
2163 may, or may not take us to the signal handler. If this
2164 is a step, we'll need to stop in the signal handler, if
2165 there's one, (if the target supports stepping into
2166 handlers), or in the next mainline instruction, if
2167 there's no handler. If this is a continue, we need to be
2168 sure to run the handler with all breakpoints inserted.
2169 In all cases, set a breakpoint at the current address
2170 (where the handler returns to), and once that breakpoint
2171 is hit, resume skipping the permanent breakpoint. If
2172 that breakpoint isn't hit, then we've stepped into the
2173 signal handler (or hit some other event). We'll delete
2174 the step-resume breakpoint then. */
2175
2176 if (debug_infrun)
2177 fprintf_unfiltered (gdb_stdlog,
2178 "infrun: resume: skipping permanent breakpoint, "
2179 "deliver signal first\n");
2180
2181 clear_step_over_info ();
2182 tp->control.trap_expected = 0;
2183
2184 if (tp->control.step_resume_breakpoint == NULL)
2185 {
2186 /* Set a "high-priority" step-resume, as we don't want
2187 user breakpoints at PC to trigger (again) when this
2188 hits. */
2189 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2190 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2191
2192 tp->step_after_step_resume_breakpoint = step;
2193 }
2194
2195 insert_breakpoints ();
2196 }
2197 else
2198 {
2199 /* There's no signal to pass, we can go ahead and skip the
2200 permanent breakpoint manually. */
2201 if (debug_infrun)
2202 fprintf_unfiltered (gdb_stdlog,
2203 "infrun: resume: skipping permanent breakpoint\n");
2204 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2205 /* Update pc to reflect the new address from which we will
2206 execute instructions. */
2207 pc = regcache_read_pc (regcache);
2208
2209 if (step)
2210 {
2211 /* We've already advanced the PC, so the stepping part
2212 is done. Now we need to arrange for a trap to be
2213 reported to handle_inferior_event. Set a breakpoint
2214 at the current PC, and run to it. Don't update
2215 prev_pc, because if we end in
2216 switch_back_to_stepped_thread, we want the "expected
2217 thread advanced also" branch to be taken. IOW, we
2218 don't want this thread to step further from PC
2219 (overstep). */
2220 gdb_assert (!step_over_info_valid_p ());
2221 insert_single_step_breakpoint (gdbarch, aspace, pc);
2222 insert_breakpoints ();
2223
2224 resume_ptid = user_visible_resume_ptid (user_step);
2225 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
2226 discard_cleanups (old_cleanups);
2227 return;
2228 }
2229 }
2230 }
2231
2232 /* If we have a breakpoint to step over, make sure to do a single
2233 step only. Same if we have software watchpoints. */
2234 if (tp->control.trap_expected || bpstat_should_step ())
2235 tp->control.may_range_step = 0;
2236
2237 /* If enabled, step over breakpoints by executing a copy of the
2238 instruction at a different address.
2239
2240 We can't use displaced stepping when we have a signal to deliver;
2241 the comments for displaced_step_prepare explain why. The
2242 comments in the handle_inferior event for dealing with 'random
2243 signals' explain what we do instead.
2244
2245 We can't use displaced stepping when we are waiting for vfork_done
2246 event; displaced stepping would break the vfork child just as a
2247 software single-step breakpoint would.  */
2248 if (use_displaced_stepping (gdbarch)
2249 && tp->control.trap_expected
2250 && sig == GDB_SIGNAL_0
2251 && !current_inferior ()->waiting_for_vfork_done)
2252 {
2253 struct displaced_step_inferior_state *displaced;
2254
2255 if (!displaced_step_prepare (inferior_ptid))
2256 {
2257 /* Got placed in displaced stepping queue. Will be resumed
2258 later when all the currently queued displaced stepping
2259 requests finish. The thread is not executing at this
2260 point, and the call to set_executing will be made later.
2261 But we need to call set_running here, since from the
2262 user/frontend's point of view, threads were set running.
2263 Unless we're calling an inferior function, as in that
2264 case we pretend the inferior doesn't run at all. */
2265 if (!tp->control.in_infcall)
2266 set_running (user_visible_resume_ptid (user_step), 1);
2267 discard_cleanups (old_cleanups);
2268 return;
2269 }
2270
2271 /* Update pc to reflect the new address from which we will execute
2272 instructions due to displaced stepping. */
2273 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
2274
2275 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2276 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2277 displaced->step_closure);
2278 }
2279
2280 /* Do we need to do it the hard way, w/temp breakpoints? */
2281 else if (step)
2282 step = maybe_software_singlestep (gdbarch, pc);
2283
2284 /* Currently, our software single-step implementation leads to different
2285 results than hardware single-stepping in one situation: when stepping
2286 into delivering a signal which has an associated signal handler,
2287 hardware single-step will stop at the first instruction of the handler,
2288 while software single-step will simply skip execution of the handler.
2289
2290 For now, this difference in behavior is accepted since there is no
2291 easy way to actually implement single-stepping into a signal handler
2292 without kernel support.
2293
2294 However, there is one scenario where this difference leads to follow-on
2295 problems: if we're stepping off a breakpoint by removing all breakpoints
2296 and then single-stepping. In this case, the software single-step
2297 behavior means that even if there is a *breakpoint* in the signal
2298 handler, GDB still would not stop.
2299
2300 Fortunately, we can at least fix this particular issue. We detect
2301 here the case where we are about to deliver a signal while software
2302 single-stepping with breakpoints removed. In this situation, we
2303 revert the decisions to remove all breakpoints and insert single-
2304 step breakpoints, and instead we install a step-resume breakpoint
2305 at the current address, deliver the signal without stepping, and
2306 once we arrive back at the step-resume breakpoint, actually step
2307 over the breakpoint we originally wanted to step over. */
2308 if (thread_has_single_step_breakpoints_set (tp)
2309 && sig != GDB_SIGNAL_0
2310 && step_over_info_valid_p ())
2311 {
2312 /* If we have nested signals or a pending signal is delivered
2313 immediately after a handler returns, we might already have
2314 a step-resume breakpoint set on the earlier handler. We cannot
2315 set another step-resume breakpoint; just continue on until the
2316 original breakpoint is hit. */
2317 if (tp->control.step_resume_breakpoint == NULL)
2318 {
2319 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2320 tp->step_after_step_resume_breakpoint = 1;
2321 }
2322
2323 delete_single_step_breakpoints (tp);
2324
2325 clear_step_over_info ();
2326 tp->control.trap_expected = 0;
2327
2328 insert_breakpoints ();
2329 }
2330
2331 /* If STEP is set, it's a request to use hardware stepping
2332 facilities. But in that case, we should never
2333 use singlestep breakpoint. */
2334 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2335
2336 /* Decide the set of threads to ask the target to resume. Start
2337 by assuming everything will be resumed, then narrow the set
2338 by applying increasingly restrictive conditions.  */
2339 resume_ptid = user_visible_resume_ptid (user_step);
2340
2341 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
2342 (e.g., we might need to step over a breakpoint), from the
2343 user/frontend's point of view, all threads in RESUME_PTID are now
2344 running. Unless we're calling an inferior function, as in that
2345 case we pretend the inferior doesn't run at all.  */
2346 if (!tp->control.in_infcall)
2347 set_running (resume_ptid, 1);
2348
2349 /* Maybe resume a single thread after all. */
2350 if ((step || thread_has_single_step_breakpoints_set (tp))
2351 && tp->control.trap_expected)
2352 {
2353 /* We're allowing a thread to run past a breakpoint it has
2354 hit, by single-stepping the thread with the breakpoint
2355 removed. In which case, we need to single-step only this
2356 thread, and keep others stopped, as they can miss this
2357 breakpoint if allowed to run. */
2358 resume_ptid = inferior_ptid;
2359 }
2360
2361 if (execution_direction != EXEC_REVERSE
2362 && step && breakpoint_inserted_here_p (aspace, pc))
2363 {
2364 /* The only case we currently need to step a breakpoint
2365 instruction is when we have a signal to deliver. See
2366 handle_signal_stop where we handle random signals that could
2367 take us out of the stepping range.  Normally, in that
2368 case we end up continuing (instead of stepping) over the
2369 signal handler with a breakpoint at PC, but there are cases
2370 where we should _always_ single-step, even if we have a
2371 step-resume breakpoint, like when a software watchpoint is
2372 set. Assuming single-stepping and delivering a signal at the
2373 same time would take us to the signal handler, then we could
2374 have removed the breakpoint at PC to step over it. However,
2375 some hardware step targets (like e.g., Mac OS) can't step
2376 into signal handlers, and for those, we need to leave the
2377 breakpoint at PC inserted, as otherwise if the handler
2378 recurses and executes PC again, it'll miss the breakpoint.
2379 So we leave the breakpoint inserted anyway, but we need to
2380 record that we tried to step a breakpoint instruction, so
2381 that adjust_pc_after_break doesn't end up confused. */
2382 gdb_assert (sig != GDB_SIGNAL_0);
2383
2384 tp->stepped_breakpoint = 1;
2385
2386 /* Most targets can step a breakpoint instruction, thus
2387 executing it normally. But if this one cannot, just
2388 continue and we will hit it anyway. */
2389 if (gdbarch_cannot_step_breakpoint (gdbarch))
2390 step = 0;
2391 }
2392
2393 if (debug_displaced
2394 && use_displaced_stepping (gdbarch)
2395 && tp->control.trap_expected)
2396 {
2397 struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
2398 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2399 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2400 gdb_byte buf[4];
2401
2402 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2403 paddress (resume_gdbarch, actual_pc));
2404 read_memory (actual_pc, buf, sizeof (buf));
2405 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2406 }
2407
2408 if (tp->control.may_range_step)
2409 {
2410 /* If we're resuming a thread with the PC out of the step
2411 range, then we're doing some nested/finer run control
2412 operation, like stepping the thread out of the dynamic
2413 linker or the displaced stepping scratch pad. We
2414 shouldn't have allowed a range step then. */
2415 gdb_assert (pc_in_thread_step_range (pc, tp));
2416 }
2417
2418 do_target_resume (resume_ptid, step, sig);
2419 discard_cleanups (old_cleanups);
2420 }
2421 \f
2422 /* Proceeding. */
2423
2424 /* Clear out all variables saying what to do when inferior is continued.
2425 First do this, then set the ones you want, then call `proceed'. */
2426
2427 static void
2428 clear_proceed_status_thread (struct thread_info *tp)
2429 {
2430 if (debug_infrun)
2431 fprintf_unfiltered (gdb_stdlog,
2432 "infrun: clear_proceed_status_thread (%s)\n",
2433 target_pid_to_str (tp->ptid));
2434
2435 /* If this signal should not be seen by program, give it zero.
2436 Used for debugging signals. */
2437 if (!signal_pass_state (tp->suspend.stop_signal))
2438 tp->suspend.stop_signal = GDB_SIGNAL_0;
2439
2440 tp->control.trap_expected = 0;
2441 tp->control.step_range_start = 0;
2442 tp->control.step_range_end = 0;
2443 tp->control.may_range_step = 0;
2444 tp->control.step_frame_id = null_frame_id;
2445 tp->control.step_stack_frame_id = null_frame_id;
2446 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2447 tp->control.step_start_function = NULL;
2448 tp->stop_requested = 0;
2449
2450 tp->control.stop_step = 0;
2451
2452 tp->control.proceed_to_finish = 0;
2453
2454 tp->control.command_interp = NULL;
2455 tp->control.stepping_command = 0;
2456
2457 /* Discard any remaining commands or status from previous stop. */
2458 bpstat_clear (&tp->control.stop_bpstat);
2459 }
2460
2461 void
2462 clear_proceed_status (int step)
2463 {
2464 if (!non_stop)
2465 {
2466 struct thread_info *tp;
2467 ptid_t resume_ptid;
2468
2469 resume_ptid = user_visible_resume_ptid (step);
2470
2471 /* In all-stop mode, delete the per-thread status of all threads
2472 we're about to resume, implicitly and explicitly. */
2473 ALL_NON_EXITED_THREADS (tp)
2474 {
2475 if (!ptid_match (tp->ptid, resume_ptid))
2476 continue;
2477 clear_proceed_status_thread (tp);
2478 }
2479 }
2480
2481 if (!ptid_equal (inferior_ptid, null_ptid))
2482 {
2483 struct inferior *inferior;
2484
2485 if (non_stop)
2486 {
2487 /* If in non-stop mode, only delete the per-thread status of
2488 the current thread. */
2489 clear_proceed_status_thread (inferior_thread ());
2490 }
2491
2492 inferior = current_inferior ();
2493 inferior->control.stop_soon = NO_STOP_QUIETLY;
2494 }
2495
2496 stop_after_trap = 0;
2497
2498 clear_step_over_info ();
2499
2500 observer_notify_about_to_proceed ();
2501
2502 if (stop_registers)
2503 {
2504 regcache_xfree (stop_registers);
2505 stop_registers = NULL;
2506 }
2507 }
2508
2509 /* Returns true if TP is still stopped at a breakpoint that needs
2510 stepping-over in order to make progress. If the breakpoint is gone
2511 meanwhile, we can skip the whole step-over dance. */
2512
2513 static int
2514 thread_still_needs_step_over (struct thread_info *tp)
2515 {
2516 if (tp->stepping_over_breakpoint)
2517 {
2518 struct regcache *regcache = get_thread_regcache (tp->ptid);
2519
2520 if (breakpoint_here_p (get_regcache_aspace (regcache),
2521 regcache_read_pc (regcache))
2522 == ordinary_breakpoint_here)
2523 return 1;
2524
2525 tp->stepping_over_breakpoint = 0;
2526 }
2527
2528 return 0;
2529 }
2530
2531 /* Returns true if scheduler locking applies to TP: either scheduler
2532 locking is "on", or it is "step" and TP is running a stepping command.  */
2533
2534 static int
2535 schedlock_applies (struct thread_info *tp)
2536 {
2537 return (scheduler_mode == schedlock_on
2538 || (scheduler_mode == schedlock_step
2539 && tp->control.stepping_command));
2540 }
2541
2542 /* Look for a thread other than EXCEPT that has previously reported a
2543 breakpoint event, and thus needs a step-over in order to make
2544 progress.  Returns NULL if none is found.  */
2545
2546 static struct thread_info *
2547 find_thread_needs_step_over (struct thread_info *except)
2548 {
2549 struct thread_info *tp, *current;
2550
2551 /* With non-stop mode on, threads are always handled individually. */
2552 gdb_assert (! non_stop);
2553
2554 current = inferior_thread ();
2555
2556 /* If scheduler locking applies, we can avoid iterating over all
2557 threads. */
2558 if (schedlock_applies (except))
2559 {
2560 if (except != current
2561 && thread_still_needs_step_over (current))
2562 return current;
2563
2564 return NULL;
2565 }
2566
2567 ALL_NON_EXITED_THREADS (tp)
2568 {
2569 /* Ignore the EXCEPT thread. */
2570 if (tp == except)
2571 continue;
2572 /* Ignore threads of processes we're not resuming. */
2573 if (!sched_multi
2574 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2575 continue;
2576
2577 if (thread_still_needs_step_over (tp))
2578 return tp;
2579 }
2580
2581 return NULL;
2582 }
2583
2584 /* Basic routine for continuing the program in various fashions.
2585
2586 ADDR is the address to resume at, or -1 for resume where stopped.
2587 SIGGNAL is the signal to give it, or 0 for none,
2588 or -1 for act according to how it stopped.
2591 You should probably set various step_... variables
2592 before calling here, if you are stepping.
2593
2594 You should call clear_proceed_status before calling proceed. */
2595
2596 void
2597 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
2598 {
2599 struct regcache *regcache;
2600 struct gdbarch *gdbarch;
2601 struct thread_info *tp;
2602 CORE_ADDR pc;
2603 struct address_space *aspace;
2604
2605 /* If we're stopped at a fork/vfork, follow the branch set by the
2606 "set follow-fork-mode" command; otherwise, we'll just proceed
2607 resuming the current thread. */
2608 if (!follow_fork ())
2609 {
2610 /* The target for some reason decided not to resume. */
2611 normal_stop ();
2612 if (target_can_async_p ())
2613 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2614 return;
2615 }
2616
2617 /* We'll update this if & when we switch to a new thread. */
2618 previous_inferior_ptid = inferior_ptid;
2619
2620 regcache = get_current_regcache ();
2621 gdbarch = get_regcache_arch (regcache);
2622 aspace = get_regcache_aspace (regcache);
2623 pc = regcache_read_pc (regcache);
2624 tp = inferior_thread ();
2625
2626 /* Fill in with reasonable starting values. */
2627 init_thread_stepping_state (tp);
2628
2629 if (addr == (CORE_ADDR) -1)
2630 {
2631 if (pc == stop_pc
2632 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
2633 && execution_direction != EXEC_REVERSE)
2634 /* There is a breakpoint at the address we will resume at,
2635 step one instruction before inserting breakpoints so that
2636 we do not stop right away (and report a second hit at this
2637 breakpoint).
2638
2639 Note, we don't do this in reverse, because we won't
2640 actually be executing the breakpoint insn anyway.
2641 We'll be (un-)executing the previous instruction. */
2642 tp->stepping_over_breakpoint = 1;
2643 else if (gdbarch_single_step_through_delay_p (gdbarch)
2644 && gdbarch_single_step_through_delay (gdbarch,
2645 get_current_frame ()))
2646 /* We stepped onto an instruction that needs to be stepped
2647 again before re-inserting the breakpoint, do so. */
2648 tp->stepping_over_breakpoint = 1;
2649 }
2650 else
2651 {
2652 regcache_write_pc (regcache, addr);
2653 }
2654
2655 if (siggnal != GDB_SIGNAL_DEFAULT)
2656 tp->suspend.stop_signal = siggnal;
2657
2658 /* Record the interpreter that issued the execution command that
2659 caused this thread to resume. If the top level interpreter is
2660 MI/async, and the execution command was a CLI command
2661 (next/step/etc.), we'll want to print stop event output to the MI
2662 console channel (the stepped-to line, etc.), as if the user
2663 entered the execution command on a real GDB console. */
2664 inferior_thread ()->control.command_interp = command_interp ();
2665
2666 if (debug_infrun)
2667 fprintf_unfiltered (gdb_stdlog,
2668 "infrun: proceed (addr=%s, signal=%s)\n",
2669 paddress (gdbarch, addr),
2670 gdb_signal_to_symbol_string (siggnal));
2671
2672 if (non_stop)
2673 /* In non-stop, each thread is handled individually. The context
2674 must already be set to the right thread here. */
2675 ;
2676 else
2677 {
2678 struct thread_info *step_over;
2679
2680 /* In a multi-threaded task we may select another thread and
2681 then continue or step.
2682
2683 But if the old thread was stopped at a breakpoint, it will
2684 immediately cause another breakpoint stop without any
2685 execution (i.e. it will report a breakpoint hit incorrectly).
2686 So we must step over it first.
2687
2688 Look for a thread other than the current (TP) that reported a
2689 breakpoint hit and hasn't been resumed yet since. */
2690 step_over = find_thread_needs_step_over (tp);
2691 if (step_over != NULL)
2692 {
2693 if (debug_infrun)
2694 fprintf_unfiltered (gdb_stdlog,
2695 "infrun: need to step-over [%s] first\n",
2696 target_pid_to_str (step_over->ptid));
2697
2698 /* Store the prev_pc for the stepping thread too, needed by
2699 switch_back_to_stepped_thread. */
2700 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2701 switch_to_thread (step_over->ptid);
2702 tp = step_over;
2703 }
2704 }
2705
2706 /* If we need to step over a breakpoint, and we're not using
2707 displaced stepping to do so, insert all breakpoints (watchpoints,
2708 etc.) but the one we're stepping over, step one instruction, and
2709 then re-insert the breakpoint when that step is finished. */
2710 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2711 {
2712 struct regcache *regcache = get_current_regcache ();
2713
2714 set_step_over_info (get_regcache_aspace (regcache),
2715 regcache_read_pc (regcache), 0);
2716 }
2717 else
2718 clear_step_over_info ();
2719
2720 insert_breakpoints ();
2721
2722 tp->control.trap_expected = tp->stepping_over_breakpoint;
2723
2724 annotate_starting ();
2725
2726 /* Make sure that output from GDB appears before output from the
2727 inferior. */
2728 gdb_flush (gdb_stdout);
2729
2730 /* Refresh prev_pc value just prior to resuming. This used to be
2731 done in stop_waiting, however, setting prev_pc there did not handle
2732 scenarios such as inferior function calls or returning from
2733 a function via the return command. In those cases, the prev_pc
2734 value was not set properly for subsequent commands. The prev_pc value
2735 is used to initialize the starting line number in the ecs. With an
2736 invalid value, the gdb next command ends up stopping at the position
2737 represented by the next line table entry past our start position.
2738 On platforms that generate one line table entry per line, this
2739 is not a problem. However, on the ia64, the compiler generates
2740 extraneous line table entries that do not increase the line number.
2741 When we issue the gdb next command on the ia64 after an inferior call
2742 or a return command, we often end up a few instructions forward, still
2743 within the original line we started.
2744
2745 An attempt was made to refresh the prev_pc at the same time the
2746 execution_control_state is initialized (for instance, just before
2747 waiting for an inferior event). But this approach did not work
2748 because of platforms that use ptrace, where the pc register cannot
2749 be read unless the inferior is stopped. At that point, we are not
2750 guaranteed the inferior is stopped and so the regcache_read_pc() call
2751 can fail. Setting the prev_pc value here ensures the value is updated
2752 correctly when the inferior is stopped. */
2753 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2754
2755 /* Resume inferior. */
2756 resume (tp->suspend.stop_signal);
2757
2758 /* Wait for it to stop (if not standalone)
2759 and in any case decode why it stopped, and act accordingly. */
2760 /* Do this only if we are not using the event loop, or if the target
2761 does not support asynchronous execution. */
2762 if (!target_can_async_p ())
2763 {
2764 wait_for_inferior ();
2765 normal_stop ();
2766 }
2767 }
2768 \f
2769
2770 /* Start remote-debugging of a machine over a serial link. */
2771
2772 void
2773 start_remote (int from_tty)
2774 {
2775 struct inferior *inferior;
2776
2777 inferior = current_inferior ();
2778 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2779
2780 /* Always go on waiting for the target, regardless of the mode. */
2781 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2782 indicate to wait_for_inferior that a target should timeout if
2783 nothing is returned (instead of just blocking). Because of this,
2784 targets expecting an immediate response need to, internally, set
2785 things up so that the target_wait() is forced to eventually
2786 timeout. */
2787 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2788 differentiate to its caller what the state of the target is after
2789 the initial open has been performed. Here we're assuming that
2790 the target has stopped. It should be possible to eventually have
2791 target_open() return to the caller an indication that the target
2792 is currently running and GDB state should be set to the same as
2793 for an async run. */
2794 wait_for_inferior ();
2795
2796 /* Now that the inferior has stopped, do any bookkeeping like
2797 loading shared libraries. We want to do this before normal_stop,
2798 so that the displayed frame is up to date. */
2799 post_create_inferior (&current_target, from_tty);
2800
2801 normal_stop ();
2802 }
2803
2804 /* Initialize static vars when a new inferior begins. */
2805
2806 void
2807 init_wait_for_inferior (void)
2808 {
2809 /* These are meaningless until the first time through wait_for_inferior. */
2810
2811 breakpoint_init_inferior (inf_starting);
2812
2813 clear_proceed_status (0);
2814
2815 target_last_wait_ptid = minus_one_ptid;
2816
2817 previous_inferior_ptid = inferior_ptid;
2818
2819 /* Discard any skipped inlined frames. */
2820 clear_inline_frame_state (minus_one_ptid);
2821 }
2822
2823 \f
2824 /* Data to be passed around while handling an event. This data is
2825 discarded between events. */
2826 struct execution_control_state
2827 {
2828 ptid_t ptid;
2829 /* The thread that got the event, if this was a thread event; NULL
2830 otherwise. */
2831 struct thread_info *event_thread;
2832
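/* The waitstatus for this event: what kind of event it was (stopped,
   exited, signalled, forked, etc.) and its associated value.  */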
2833 struct target_waitstatus ws;
2834 int stop_func_filled_in;
2835 CORE_ADDR stop_func_start;
2836 CORE_ADDR stop_func_end;
2837 const char *stop_func_name;
2838 int wait_some_more;
2839
2840 /* True if the event thread hit the single-step breakpoint of
2841 another thread. Thus the event doesn't cause a stop, the thread
2842 needs to be single-stepped past the single-step breakpoint before
2843 we can switch back to the original stepping thread. */
2844 int hit_singlestep_breakpoint;
2845 };
2846
2847 static void handle_inferior_event (struct execution_control_state *ecs);
2848
2849 static void handle_step_into_function (struct gdbarch *gdbarch,
2850 struct execution_control_state *ecs);
2851 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2852 struct execution_control_state *ecs);
2853 static void handle_signal_stop (struct execution_control_state *ecs);
2854 static void check_exception_resume (struct execution_control_state *,
2855 struct frame_info *);
2856
2857 static void end_stepping_range (struct execution_control_state *ecs);
2858 static void stop_waiting (struct execution_control_state *ecs);
2859 static void prepare_to_wait (struct execution_control_state *ecs);
2860 static void keep_going (struct execution_control_state *ecs);
2861 static void process_event_stop_test (struct execution_control_state *ecs);
2862 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2863
2864 /* Callback for iterate over threads. If the thread is stopped, but
2865 the user/frontend doesn't know about that yet, go through
2866 normal_stop, as if the thread had just stopped now. ARG points at
2867 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2868 ptid_is_pid(PTID) is true, applies to all threads of the process
2869 pointed at by PTID. Otherwise, apply only to the thread pointed by
2870 PTID. */
2871
2872 static int
2873 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2874 {
2875 ptid_t ptid = * (ptid_t *) arg;
2876
2877 if ((ptid_equal (info->ptid, ptid)
2878 || ptid_equal (minus_one_ptid, ptid)
2879 || (ptid_is_pid (ptid)
2880 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2881 && is_running (info->ptid)
2882 && !is_executing (info->ptid))
2883 {
2884 struct cleanup *old_chain;
2885 struct execution_control_state ecss;
2886 struct execution_control_state *ecs = &ecss;
2887
2888 memset (ecs, 0, sizeof (*ecs));
2889
2890 old_chain = make_cleanup_restore_current_thread ();
2891
2892 overlay_cache_invalid = 1;
2893 /* Flush target cache before starting to handle each event.
2894 Target was running and cache could be stale. This is just a
2895 heuristic. Running threads may modify target memory, but we
2896 don't get any event. */
2897 target_dcache_invalidate ();
2898
2899 /* Go through handle_inferior_event/normal_stop, so we always
2900 have consistent output as if the stop event had been
2901 reported. */
2902 ecs->ptid = info->ptid;
2903 ecs->event_thread = find_thread_ptid (info->ptid);
2904 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2905 ecs->ws.value.sig = GDB_SIGNAL_0;
2906
2907 handle_inferior_event (ecs);
2908
2909 if (!ecs->wait_some_more)
2910 {
2911 struct thread_info *tp;
2912
2913 normal_stop ();
2914
2915 /* Finish off the continuations. */
2916 tp = inferior_thread ();
2917 do_all_intermediate_continuations_thread (tp, 1);
2918 do_all_continuations_thread (tp, 1);
2919 }
2920
2921 do_cleanups (old_chain);
2922 }
2923
2924 return 0;
2925 }
2926
2927 /* This function is attached as a "thread_stop_requested" observer.
2928 Cleanup local state that assumed the PTID was to be resumed, and
2929 report the stop to the frontend. */
2930
2931 static void
2932 infrun_thread_stop_requested (ptid_t ptid)
2933 {
2934 struct displaced_step_inferior_state *displaced;
2935
2936 /* PTID was requested to stop. Remove it from the displaced
2937 stepping queue, so we don't try to resume it automatically. */
2938
2939 for (displaced = displaced_step_inferior_states;
2940 displaced;
2941 displaced = displaced->next)
2942 {
2943 struct displaced_step_request *it, **prev_next_p;
2944
2945 it = displaced->step_request_queue;
2946 prev_next_p = &displaced->step_request_queue;
2947 while (it)
2948 {
2949 if (ptid_match (it->ptid, ptid))
2950 {
2951 *prev_next_p = it->next;
2952 it->next = NULL;
2953 xfree (it);
2954 }
2955 else
2956 {
2957 prev_next_p = &it->next;
2958 }
2959
2960 it = *prev_next_p;
2961 }
2962 }
2963
2964 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2965 }
2966
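/* Observer for the thread_exit event.  If the exiting thread is the
   one we last reported a wait event for, forget that ptid.  */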
2967 static void
2968 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2969 {
2970 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2971 nullify_last_target_wait_ptid ();
2972 }
2973
2974 /* Delete the step resume, single-step and longjmp/exception resume
2975 breakpoints of TP. */
2976
2977 static void
2978 delete_thread_infrun_breakpoints (struct thread_info *tp)
2979 {
2980 delete_step_resume_breakpoint (tp);
2981 delete_exception_resume_breakpoint (tp);
2982 delete_single_step_breakpoints (tp);
2983 }
2984
2985 /* If the target still has execution, call FUNC for each thread that
2986 just stopped. In all-stop, that's all the non-exited threads; in
2987 non-stop, that's the current thread, only. */
2988
2989 typedef void (*for_each_just_stopped_thread_callback_func)
2990 (struct thread_info *tp);
2991
2992 static void
2993 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2994 {
2995 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
2996 return;
2997
2998 if (non_stop)
2999 {
3000 /* If in non-stop mode, only the current thread stopped. */
3001 func (inferior_thread ());
3002 }
3003 else
3004 {
3005 struct thread_info *tp;
3006
3007 /* In all-stop mode, all threads have stopped. */
3008 ALL_NON_EXITED_THREADS (tp)
3009 {
3010 func (tp);
3011 }
3012 }
3013 }
3014
3015 /* Delete the step resume and longjmp/exception resume breakpoints of
3016 the threads that just stopped. */
3017
3018 static void
3019 delete_just_stopped_threads_infrun_breakpoints (void)
3020 {
3021 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3022 }
3023
3024 /* Delete the single-step breakpoints of the threads that just
3025 stopped. */
3026
3027 static void
3028 delete_just_stopped_threads_single_step_breakpoints (void)
3029 {
3030 for_each_just_stopped_thread (delete_single_step_breakpoints);
3031 }
3032
3033 /* A cleanup wrapper. */
3034
3035 static void
3036 delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
3037 {
3038 delete_just_stopped_threads_infrun_breakpoints ();
3039 }
3040
3041 /* Pretty print the results of target_wait, for debugging purposes. */
3042
3043 static void
3044 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3045 const struct target_waitstatus *ws)
3046 {
3047 char *status_string = target_waitstatus_to_string (ws);
3048 struct ui_file *tmp_stream = mem_fileopen ();
3049 char *text;
3050
3051 /* The text is split over several lines because it was getting too long.
3052 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3053 output as a unit; we want only one timestamp printed if debug_timestamp
3054 is set. */
3055
3056 fprintf_unfiltered (tmp_stream,
3057 "infrun: target_wait (%d.%ld.%ld",
3058 ptid_get_pid (waiton_ptid),
3059 ptid_get_lwp (waiton_ptid),
3060 ptid_get_tid (waiton_ptid));
3061 if (ptid_get_pid (waiton_ptid) != -1)
3062 fprintf_unfiltered (tmp_stream,
3063 " [%s]", target_pid_to_str (waiton_ptid));
3064 fprintf_unfiltered (tmp_stream, ", status) =\n");
3065 fprintf_unfiltered (tmp_stream,
3066 "infrun: %d.%ld.%ld [%s],\n",
3067 ptid_get_pid (result_ptid),
3068 ptid_get_lwp (result_ptid),
3069 ptid_get_tid (result_ptid),
3070 target_pid_to_str (result_ptid));
3071 fprintf_unfiltered (tmp_stream,
3072 "infrun: %s\n",
3073 status_string);
3074
3075 text = ui_file_xstrdup (tmp_stream, NULL);
3076
3077 /* This uses %s in part to handle %'s in the text, but also to avoid
3078 a gcc error: the format attribute requires a string literal. */
3079 fprintf_unfiltered (gdb_stdlog, "%s", text);
3080
3081 xfree (status_string);
3082 xfree (text);
3083 ui_file_delete (tmp_stream);
3084 }
3085
3086 /* Prepare and stabilize the inferior for detaching it. E.g.,
3087 detaching while a thread is displaced stepping is a recipe for
3088 crashing it, as nothing would readjust the PC out of the scratch
3089 pad. */
3090
3091 void
3092 prepare_for_detach (void)
3093 {
3094 struct inferior *inf = current_inferior ();
3095 ptid_t pid_ptid = pid_to_ptid (inf->pid);
3096 struct cleanup *old_chain_1;
3097 struct displaced_step_inferior_state *displaced;
3098
3099 displaced = get_displaced_stepping_state (inf->pid);
3100
3101 /* Is any thread of this process displaced stepping? If not,
3102 there's nothing else to do. */
3103 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
3104 return;
3105
3106 if (debug_infrun)
3107 fprintf_unfiltered (gdb_stdlog,
3108 "displaced-stepping in-process while detaching");
3109
3110 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
3111 inf->detaching = 1;
3112
3113 while (!ptid_equal (displaced->step_ptid, null_ptid))
3114 {
3115 struct cleanup *old_chain_2;
3116 struct execution_control_state ecss;
3117 struct execution_control_state *ecs;
3118
3119 ecs = &ecss;
3120 memset (ecs, 0, sizeof (*ecs));
3121
3122 overlay_cache_invalid = 1;
3123 /* Flush target cache before starting to handle each event.
3124 Target was running and cache could be stale. This is just a
3125 heuristic. Running threads may modify target memory, but we
3126 don't get any event. */
3127 target_dcache_invalidate ();
3128
3129 if (deprecated_target_wait_hook)
3130 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
3131 else
3132 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
3133
3134 if (debug_infrun)
3135 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3136
3137 /* If an error happens while handling the event, propagate GDB's
3138 knowledge of the executing state to the frontend/user running
3139 state. */
3140 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
3141 &minus_one_ptid);
3142
3143 /* Now figure out what to do with the result.  */
3144 handle_inferior_event (ecs);
3145
3146 /* No error, don't finish the state yet. */
3147 discard_cleanups (old_chain_2);
3148
3149 /* Breakpoints and watchpoints are not installed on the target
3150 at this point, and signals are passed directly to the
3151 inferior, so this must mean the process is gone. */
3152 if (!ecs->wait_some_more)
3153 {
3154 discard_cleanups (old_chain_1);
3155 error (_("Program exited while detaching"));
3156 }
3157 }
3158
3159 discard_cleanups (old_chain_1);
3160 }
3161
3162 /* Wait for control to return from inferior to debugger.
3163
3164 If inferior gets a signal, we may decide to start it up again
3165 instead of returning. That is why there is a loop in this function.
3166 When this function actually returns it means the inferior
3167 should be left stopped and GDB should read more commands. */
3168
3169 void
3170 wait_for_inferior (void)
3171 {
3172 struct cleanup *old_cleanups;
3173 struct cleanup *thread_state_chain;
3174
3175 if (debug_infrun)
3176 fprintf_unfiltered
3177 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3178
3179 old_cleanups
3180 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3181 NULL);
3182
3183 /* If an error happens while handling the event, propagate GDB's
3184 knowledge of the executing state to the frontend/user running
3185 state. */
3186 thread_state_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3187
3188 while (1)
3189 {
3190 struct execution_control_state ecss;
3191 struct execution_control_state *ecs = &ecss;
3192 ptid_t waiton_ptid = minus_one_ptid;
3193
3194 memset (ecs, 0, sizeof (*ecs));
3195
3196 overlay_cache_invalid = 1;
3197
3198 /* Flush target cache before starting to handle each event.
3199 Target was running and cache could be stale. This is just a
3200 heuristic. Running threads may modify target memory, but we
3201 don't get any event. */
3202 target_dcache_invalidate ();
3203
3204 if (deprecated_target_wait_hook)
3205 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3206 else
3207 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3208
3209 if (debug_infrun)
3210 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3211
3212 /* Now figure out what to do with the result.  */
3213 handle_inferior_event (ecs);
3214
3215 if (!ecs->wait_some_more)
3216 break;
3217 }
3218
3219 /* No error, don't finish the state yet. */
3220 discard_cleanups (thread_state_chain);
3221
3222 do_cleanups (old_cleanups);
3223 }
3224
3225 /* Cleanup that reinstalls the readline callback handler, if the
3226 target is running in the background. If while handling the target
3227 event something triggered a secondary prompt, like e.g., a
3228 pagination prompt, we'll have removed the callback handler (see
3229 gdb_readline_wrapper_line). Need to do this as we go back to the
3230 event loop, ready to process further input. Note this has no
3231 effect if the handler hasn't actually been removed, because calling
3232 rl_callback_handler_install resets the line buffer, thus losing
3233 input. */
3234
3235 static void
3236 reinstall_readline_callback_handler_cleanup (void *arg)
3237 {
3238 if (!interpreter_async)
3239 {
3240 /* We're not going back to the top level event loop yet. Don't
3241 install the readline callback, as it'd prep the terminal,
3242 readline-style (raw, noecho) (e.g., --batch). We'll install
3243 it the next time the prompt is displayed, when we're ready
3244 for input. */
3245 return;
3246 }
3247
3248 if (async_command_editing_p && !sync_execution)
3249 gdb_rl_callback_handler_reinstall ();
3250 }
3251
3252 /* Asynchronous version of wait_for_inferior. It is called by the
3253 event loop whenever a change of state is detected on the file
3254 descriptor corresponding to the target. It can be called more than
3255 once to complete a single execution command. In such cases we need
3256 to keep the state in a global variable ECSS. If it is the last time
3257 that this function is called for a single execution command, then
3258 report to the user that the inferior has stopped, and do the
3259 necessary cleanups. */
3260
3261 void
3262 fetch_inferior_event (void *client_data)
3263 {
3264 struct execution_control_state ecss;
3265 struct execution_control_state *ecs = &ecss;
3266 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
3267 struct cleanup *ts_old_chain;
3268 int was_sync = sync_execution;
3269 int cmd_done = 0;
3270 ptid_t waiton_ptid = minus_one_ptid;
3271
3272 memset (ecs, 0, sizeof (*ecs));
3273
3274 /* End up with readline processing input, if necessary. */
3275 make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);
3276
3277 /* We're handling a live event, so make sure we're doing live
3278 debugging. If we're looking at traceframes while the target is
3279 running, we're going to need to get back to that mode after
3280 handling the event. */
3281 if (non_stop)
3282 {
3283 make_cleanup_restore_current_traceframe ();
3284 set_current_traceframe (-1);
3285 }
3286
3287 if (non_stop)
3288 /* In non-stop mode, the user/frontend should not notice a thread
3289        switch due to internal events.  Make sure we revert to the
3290 user selected thread and frame after handling the event and
3291 running any breakpoint commands. */
3292 make_cleanup_restore_current_thread ();
3293
3294 overlay_cache_invalid = 1;
3295 /* Flush target cache before starting to handle each event. Target
3296 was running and cache could be stale. This is just a heuristic.
3297 Running threads may modify target memory, but we don't get any
3298 event. */
3299 target_dcache_invalidate ();
3300
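  /* Save the current execution direction, to be restored by the cleanup
     once this event has been handled, and pick up the direction the
     target is running in (e.g., reverse execution under the record
     targets).  */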
3301 make_cleanup_restore_integer (&execution_direction);
3302 execution_direction = target_execution_direction ();
3303
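  /* Poll the target for its next event; TARGET_WNOHANG means return
     immediately if nothing is pending, since the event loop only calls
     us when the target has something to report.  */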
3304 if (deprecated_target_wait_hook)
3305 ecs->ptid =
3306 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3307 else
3308 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3309
3310 if (debug_infrun)
3311 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3312
3313 /* If an error happens while handling the event, propagate GDB's
3314 knowledge of the executing state to the frontend/user running
3315 state. */
3316 if (!non_stop)
3317 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3318 else
3319 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3320
3321   /* This runs before the make_cleanup_restore_current_thread above, so
3322      that it still applies to the thread which has thrown the exception.  */
3323 make_bpstat_clear_actions_cleanup ();
3324
3325 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3326
3327   /* Now figure out what to do with the result.  */
3328 handle_inferior_event (ecs);
3329
3330 if (!ecs->wait_some_more)
3331 {
3332 struct inferior *inf = find_inferior_ptid (ecs->ptid);
3333
3334 delete_just_stopped_threads_infrun_breakpoints ();
3335
3336 /* We may not find an inferior if this was a process exit. */
3337 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3338 normal_stop ();
3339
3340 if (target_has_execution
3341 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
3342 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3343 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3344 && ecs->event_thread->step_multi
3345 && ecs->event_thread->control.stop_step)
3346 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3347 else
3348 {
3349 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3350 cmd_done = 1;
3351 }
3352 }
3353
3354 /* No error, don't finish the thread states yet. */
3355 discard_cleanups (ts_old_chain);
3356
3357 /* Revert thread and frame. */
3358 do_cleanups (old_chain);
3359
3360 /* If the inferior was in sync execution mode, and now isn't,
3361 restore the prompt (a synchronous execution command has finished,
3362 and we're ready for input). */
3363 if (interpreter_async && was_sync && !sync_execution)
3364 observer_notify_sync_execution_done ();
3365
3366 if (cmd_done
3367 && !was_sync
3368 && exec_done_display_p
3369 && (ptid_equal (inferior_ptid, null_ptid)
3370 || !is_running (inferior_ptid)))
3371 printf_unfiltered (_("completed.\n"));
3372 }
3373
3374 /* Record the frame and location we're currently stepping through. */
3375 void
3376 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3377 {
3378 struct thread_info *tp = inferior_thread ();
3379
3380 tp->control.step_frame_id = get_frame_id (frame);
3381 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3382
3383 tp->current_symtab = sal.symtab;
3384 tp->current_line = sal.line;
3385 }
3386
3387 /* Clear context switchable stepping state. */
3388
3389 void
3390 init_thread_stepping_state (struct thread_info *tss)
3391 {
3392 tss->stepped_breakpoint = 0;
3393 tss->stepping_over_breakpoint = 0;
3394 tss->stepping_over_watchpoint = 0;
3395 tss->step_after_step_resume_breakpoint = 0;
3396 }
3397
3398 /* Set the cached copy of the last ptid/waitstatus. */
3399
3400 static void
3401 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3402 {
3403 target_last_wait_ptid = ptid;
3404 target_last_waitstatus = status;
3405 }
3406
3407 /* Return the cached copy of the last pid/waitstatus returned by
3408 target_wait()/deprecated_target_wait_hook(). The data is actually
3409 cached by handle_inferior_event(), which gets called immediately
3410 after target_wait()/deprecated_target_wait_hook(). */
3411
3412 void
3413 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3414 {
3415 *ptidp = target_last_wait_ptid;
3416 *status = target_last_waitstatus;
3417 }
3418
3419 void
3420 nullify_last_target_wait_ptid (void)
3421 {
3422 target_last_wait_ptid = minus_one_ptid;
3423 }
3424
3425 /* Switch thread contexts. */
3426
3427 static void
3428 context_switch (ptid_t ptid)
3429 {
3430 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3431 {
3432 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3433 target_pid_to_str (inferior_ptid));
3434 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3435 target_pid_to_str (ptid));
3436 }
3437
3438 switch_to_thread (ptid);
3439 }
3440
3441 static void
3442 adjust_pc_after_break (struct execution_control_state *ecs)
3443 {
3444 struct regcache *regcache;
3445 struct gdbarch *gdbarch;
3446 struct address_space *aspace;
3447 CORE_ADDR breakpoint_pc, decr_pc;
3448
3449 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3450 we aren't, just return.
3451
3452 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3453 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3454 implemented by software breakpoints should be handled through the normal
3455 breakpoint layer.
3456
3457 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3458 different signals (SIGILL or SIGEMT for instance), but it is less
3459 clear where the PC is pointing afterwards. It may not match
3460 gdbarch_decr_pc_after_break. I don't know any specific target that
3461 generates these signals at breakpoints (the code has been in GDB since at
3462 least 1992) so I can not guess how to handle them here.
3463
3464 In earlier versions of GDB, a target with
3465 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3466 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3467 target with both of these set in GDB history, and it seems unlikely to be
3468 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3469
3470 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3471 return;
3472
3473 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3474 return;
3475
3476 /* In reverse execution, when a breakpoint is hit, the instruction
3477 under it has already been de-executed. The reported PC always
3478 points at the breakpoint address, so adjusting it further would
3479 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3480 architecture:
3481
3482 B1 0x08000000 : INSN1
3483 B2 0x08000001 : INSN2
3484 0x08000002 : INSN3
3485 PC -> 0x08000003 : INSN4
3486
3487 Say you're stopped at 0x08000003 as above. Reverse continuing
3488 from that point should hit B2 as below. Reading the PC when the
3489 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3490 been de-executed already.
3491
3492 B1 0x08000000 : INSN1
3493 B2 PC -> 0x08000001 : INSN2
3494 0x08000002 : INSN3
3495 0x08000003 : INSN4
3496
3497 We can't apply the same logic as for forward execution, because
3498 we would wrongly adjust the PC to 0x08000000, since there's a
3499 breakpoint at PC - 1. We'd then report a hit on B1, although
3500 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3501 behaviour. */
3502 if (execution_direction == EXEC_REVERSE)
3503 return;
3504
3505 /* If the target can tell whether the thread hit a SW breakpoint,
3506 trust it. Targets that can tell also adjust the PC
3507 themselves. */
3508 if (target_supports_stopped_by_sw_breakpoint ())
3509 return;
3510
3511 /* Note that relying on whether a breakpoint is planted in memory to
3512      determine this can fail.  E.g., the breakpoint could have been
3513 removed since. Or the thread could have been told to step an
3514 instruction the size of a breakpoint instruction, and only
3515 _after_ was a breakpoint inserted at its address. */
3516
3517 /* If this target does not decrement the PC after breakpoints, then
3518 we have nothing to do. */
3519 regcache = get_thread_regcache (ecs->ptid);
3520 gdbarch = get_regcache_arch (regcache);
3521
3522 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3523 if (decr_pc == 0)
3524 return;
3525
3526 aspace = get_regcache_aspace (regcache);
3527
3528 /* Find the location where (if we've hit a breakpoint) the
3529 breakpoint would be. */
3530 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3531
3532 /* If the target can't tell whether a software breakpoint triggered,
3533 fallback to figuring it out based on breakpoints we think were
3534 inserted in the target, and on whether the thread was stepped or
3535 continued. */
3536
3537 /* Check whether there actually is a software breakpoint inserted at
3538 that location.
3539
3540 If in non-stop mode, a race condition is possible where we've
3541 removed a breakpoint, but stop events for that breakpoint were
3542 already queued and arrive later. To suppress those spurious
3543 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3544 and retire them after a number of stop events are reported. Note
3545      this is a heuristic and can thus get confused.  The real fix is
3546 to get the "stopped by SW BP and needs adjustment" info out of
3547 the target/kernel (and thus never reach here; see above). */
3548 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3549 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3550 {
3551 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3552
3553 if (record_full_is_used ())
3554 record_full_gdb_operation_disable_set ();
3555
3556 /* When using hardware single-step, a SIGTRAP is reported for both
3557 a completed single-step and a software breakpoint. Need to
3558 differentiate between the two, as the latter needs adjusting
3559 but the former does not.
3560
3561 The SIGTRAP can be due to a completed hardware single-step only if
3562 - we didn't insert software single-step breakpoints
3563 - this thread is currently being stepped
3564
3565 	 If either of these conditions does not hold, we must have stopped due
3566 to hitting a software breakpoint, and have to back up to the
3567 breakpoint address.
3568
3569 As a special case, we could have hardware single-stepped a
3570 software breakpoint. In this case (prev_pc == breakpoint_pc),
3571 we also need to back up to the breakpoint address. */
3572
3573 if (thread_has_single_step_breakpoints_set (ecs->event_thread)
3574 || !currently_stepping (ecs->event_thread)
3575 || (ecs->event_thread->stepped_breakpoint
3576 && ecs->event_thread->prev_pc == breakpoint_pc))
3577 regcache_write_pc (regcache, breakpoint_pc);
3578
3579 do_cleanups (old_cleanups);
3580 }
3581 }
3582
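/* Return non-zero if FRAME was stepped into from the frame with id
   STEP_FRAME_ID, i.e., STEP_FRAME_ID is an outer frame of FRAME and
   all frames in between are inline frames.  */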
3583 static int
3584 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3585 {
3586 for (frame = get_prev_frame (frame);
3587 frame != NULL;
3588 frame = get_prev_frame (frame))
3589 {
3590 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3591 return 1;
3592 if (get_frame_type (frame) != INLINE_FRAME)
3593 break;
3594 }
3595
3596 return 0;
3597 }
3598
3599 /* Auxiliary function that handles syscall entry/return events.
3600 It returns 1 if the inferior should keep going (and GDB
3601 should ignore the event), or 0 if the event deserves to be
3602 processed. */
3603
3604 static int
3605 handle_syscall_event (struct execution_control_state *ecs)
3606 {
3607 struct regcache *regcache;
3608 int syscall_number;
3609
3610 if (!ptid_equal (ecs->ptid, inferior_ptid))
3611 context_switch (ecs->ptid);
3612
3613 regcache = get_thread_regcache (ecs->ptid);
3614 syscall_number = ecs->ws.value.syscall_number;
3615 stop_pc = regcache_read_pc (regcache);
3616
3617 if (catch_syscall_enabled () > 0
3618 && catching_syscall_number (syscall_number) > 0)
3619 {
3620 if (debug_infrun)
3621 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3622 syscall_number);
3623
3624 ecs->event_thread->control.stop_bpstat
3625 = bpstat_stop_status (get_regcache_aspace (regcache),
3626 stop_pc, ecs->ptid, &ecs->ws);
3627
3628 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3629 {
3630 /* Catchpoint hit. */
3631 return 0;
3632 }
3633 }
3634
3635 /* If no catchpoint triggered for this, then keep going. */
3636 keep_going (ecs);
3637 return 1;
3638 }
3639
3640 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3641
3642 static void
3643 fill_in_stop_func (struct gdbarch *gdbarch,
3644 struct execution_control_state *ecs)
3645 {
3646 if (!ecs->stop_func_filled_in)
3647 {
3648 /* Don't care about return value; stop_func_start and stop_func_name
3649 will both be 0 if it doesn't work. */
3650 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3651 &ecs->stop_func_start, &ecs->stop_func_end);
3652 ecs->stop_func_start
3653 += gdbarch_deprecated_function_start_offset (gdbarch);
3654
3655 if (gdbarch_skip_entrypoint_p (gdbarch))
3656 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3657 ecs->stop_func_start);
3658
3659 ecs->stop_func_filled_in = 1;
3660 }
3661 }
3662
3663
3664 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3665
3666 static enum stop_kind
3667 get_inferior_stop_soon (ptid_t ptid)
3668 {
3669 struct inferior *inf = find_inferior_ptid (ptid);
3670
3671 gdb_assert (inf != NULL);
3672 return inf->control.stop_soon;
3673 }
3674
3675 /* Given an execution control state that has been freshly filled in by
3676 an event from the inferior, figure out what it means and take
3677 appropriate action.
3678
3679 The alternatives are:
3680
3681 1) stop_waiting and return; to really stop and return to the
3682 debugger.
3683
3684 2) keep_going and return; to wait for the next event (set
3685 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3686 once). */
3687
3688 static void
3689 handle_inferior_event (struct execution_control_state *ecs)
3690 {
3691 enum stop_kind stop_soon;
3692
3693 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3694 {
3695 /* We had an event in the inferior, but we are not interested in
3696 handling it at this level. The lower layers have already
3697 done what needs to be done, if anything.
3698
3699 One of the possible circumstances for this is when the
3700 inferior produces output for the console. The inferior has
3701 not stopped, and we are ignoring the event. Another possible
3702 circumstance is any event which the lower level knows will be
3703 reported multiple times without an intervening resume. */
3704 if (debug_infrun)
3705 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3706 prepare_to_wait (ecs);
3707 return;
3708 }
3709
3710 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3711 && target_can_async_p () && !sync_execution)
3712 {
3713       /* There were no unwaited-for children left in the target, but
3714 we're not synchronously waiting for events either. Just
3715 ignore. Otherwise, if we were running a synchronous
3716 execution command, we need to cancel it and give the user
3717 back the terminal. */
3718 if (debug_infrun)
3719 fprintf_unfiltered (gdb_stdlog,
3720 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3721 prepare_to_wait (ecs);
3722 return;
3723 }
3724
3725 /* Cache the last pid/waitstatus. */
3726 set_last_target_status (ecs->ptid, ecs->ws);
3727
3728 /* Always clear state belonging to the previous time we stopped. */
3729 stop_stack_dummy = STOP_NONE;
3730
3731 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3732 {
3733 /* No unwaited-for children left. IOW, all resumed children
3734 have exited. */
3735 if (debug_infrun)
3736 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3737
3738 stop_print_frame = 0;
3739 stop_waiting (ecs);
3740 return;
3741 }
3742
3743 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3744 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3745 {
3746 ecs->event_thread = find_thread_ptid (ecs->ptid);
3747 /* If it's a new thread, add it to the thread database. */
3748 if (ecs->event_thread == NULL)
3749 ecs->event_thread = add_thread (ecs->ptid);
3750
3751 /* Disable range stepping. If the next step request could use a
3752 	 range, this will end up re-enabled then.  */
3753 ecs->event_thread->control.may_range_step = 0;
3754 }
3755
3756 /* Dependent on valid ECS->EVENT_THREAD. */
3757 adjust_pc_after_break (ecs);
3758
3759 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3760 reinit_frame_cache ();
3761
3762 breakpoint_retire_moribund ();
3763
3764 /* First, distinguish signals caused by the debugger from signals
3765 that have to do with the program's own actions. Note that
3766 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3767 on the operating system version. Here we detect when a SIGILL or
3768 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3769 something similar for SIGSEGV, since a SIGSEGV will be generated
3770 when we're trying to execute a breakpoint instruction on a
3771 non-executable stack. This happens for call dummy breakpoints
3772 for architectures like SPARC that place call dummies on the
3773 stack. */
3774 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3775 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3776 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3777 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3778 {
3779 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3780
3781 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3782 regcache_read_pc (regcache)))
3783 {
3784 if (debug_infrun)
3785 fprintf_unfiltered (gdb_stdlog,
3786 "infrun: Treating signal as SIGTRAP\n");
3787 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3788 }
3789 }
3790
3791 /* Mark the non-executing threads accordingly. In all-stop, all
3792 threads of all processes are stopped when we get any event
3793 reported. In non-stop mode, only the event thread stops. If
3794 we're handling a process exit in non-stop mode, there's nothing
3795 to do, as threads of the dead process are gone, and threads of
3796 any other process were left running. */
3797 if (!non_stop)
3798 set_executing (minus_one_ptid, 0);
3799 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3800 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3801 set_executing (ecs->ptid, 0);
3802
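  /* Dispatch on the kind of event the target reported.  */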
3803 switch (ecs->ws.kind)
3804 {
3805 case TARGET_WAITKIND_LOADED:
3806 if (debug_infrun)
3807 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3808 if (!ptid_equal (ecs->ptid, inferior_ptid))
3809 context_switch (ecs->ptid);
3810       /* Ignore this gracefully during startup of the inferior, as it might
3811          be the shell which has just loaded some objects; otherwise,
3812          add the symbols for the newly loaded objects.  Also ignore at
3813 the beginning of an attach or remote session; we will query
3814 the full list of libraries once the connection is
3815 established. */
3816
3817 stop_soon = get_inferior_stop_soon (ecs->ptid);
3818 if (stop_soon == NO_STOP_QUIETLY)
3819 {
3820 struct regcache *regcache;
3821
3822 regcache = get_thread_regcache (ecs->ptid);
3823
3824 handle_solib_event ();
3825
3826 ecs->event_thread->control.stop_bpstat
3827 = bpstat_stop_status (get_regcache_aspace (regcache),
3828 stop_pc, ecs->ptid, &ecs->ws);
3829
3830 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3831 {
3832 /* A catchpoint triggered. */
3833 process_event_stop_test (ecs);
3834 return;
3835 }
3836
3837 /* If requested, stop when the dynamic linker notifies
3838 gdb of events. This allows the user to get control
3839 and place breakpoints in initializer routines for
3840 dynamically loaded objects (among other things). */
3841 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3842 if (stop_on_solib_events)
3843 {
3844 /* Make sure we print "Stopped due to solib-event" in
3845 normal_stop. */
3846 stop_print_frame = 1;
3847
3848 stop_waiting (ecs);
3849 return;
3850 }
3851 }
3852
3853 /* If we are skipping through a shell, or through shared library
3854 loading that we aren't interested in, resume the program. If
3855 we're running the program normally, also resume. */
3856 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3857 {
3858 /* Loading of shared libraries might have changed breakpoint
3859 addresses. Make sure new breakpoints are inserted. */
3860 if (stop_soon == NO_STOP_QUIETLY)
3861 insert_breakpoints ();
3862 resume (GDB_SIGNAL_0);
3863 prepare_to_wait (ecs);
3864 return;
3865 }
3866
3867 /* But stop if we're attaching or setting up a remote
3868 connection. */
3869 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3870 || stop_soon == STOP_QUIETLY_REMOTE)
3871 {
3872 if (debug_infrun)
3873 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3874 stop_waiting (ecs);
3875 return;
3876 }
3877
3878 internal_error (__FILE__, __LINE__,
3879 _("unhandled stop_soon: %d"), (int) stop_soon);
3880
3881 case TARGET_WAITKIND_SPURIOUS:
3882 if (debug_infrun)
3883 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3884 if (!ptid_equal (ecs->ptid, inferior_ptid))
3885 context_switch (ecs->ptid);
3886 resume (GDB_SIGNAL_0);
3887 prepare_to_wait (ecs);
3888 return;
3889
3890 case TARGET_WAITKIND_EXITED:
3891 case TARGET_WAITKIND_SIGNALLED:
3892 if (debug_infrun)
3893 {
3894 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3895 fprintf_unfiltered (gdb_stdlog,
3896 "infrun: TARGET_WAITKIND_EXITED\n");
3897 else
3898 fprintf_unfiltered (gdb_stdlog,
3899 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3900 }
3901
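      /* Switch to the exiting (or signalled) inferior's context, so the
	 exit-status bookkeeping and mourning below apply to the right
	 inferior and program space.  */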
3902 inferior_ptid = ecs->ptid;
3903 set_current_inferior (find_inferior_ptid (ecs->ptid));
3904 set_current_program_space (current_inferior ()->pspace);
3905 handle_vfork_child_exec_or_exit (0);
3906 target_terminal_ours (); /* Must do this before mourn anyway. */
3907
3908       /* Clear any previous state of convenience variables.  */
3909 clear_exit_convenience_vars ();
3910
3911 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3912 {
3913 /* Record the exit code in the convenience variable $_exitcode, so
3914 that the user can inspect this again later. */
3915 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3916 (LONGEST) ecs->ws.value.integer);
3917
3918 /* Also record this in the inferior itself. */
3919 current_inferior ()->has_exit_code = 1;
3920 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3921
3922 /* Support the --return-child-result option. */
3923 return_child_result_value = ecs->ws.value.integer;
3924
3925 observer_notify_exited (ecs->ws.value.integer);
3926 }
3927 else
3928 {
3929 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3930 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3931
3932 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3933 {
3934 /* Set the value of the internal variable $_exitsignal,
3935 which holds the signal uncaught by the inferior. */
3936 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3937 gdbarch_gdb_signal_to_target (gdbarch,
3938 ecs->ws.value.sig));
3939 }
3940 else
3941 {
3942 /* We don't have access to the target's method used for
3943 converting between signal numbers (GDB's internal
3944 representation <-> target's representation).
3945 Therefore, we cannot do a good job at displaying this
3946 information to the user. It's better to just warn
3947 her about it (if infrun debugging is enabled), and
3948 give up. */
3949 if (debug_infrun)
3950 fprintf_filtered (gdb_stdlog, _("\
3951 Cannot fill $_exitsignal with the correct signal number.\n"));
3952 }
3953
3954 observer_notify_signal_exited (ecs->ws.value.sig);
3955 }
3956
3957 gdb_flush (gdb_stdout);
3958 target_mourn_inferior ();
3959 stop_print_frame = 0;
3960 stop_waiting (ecs);
3961 return;
3962
3963 /* The following are the only cases in which we keep going;
3964        the above cases all end by returning.  */
3965 case TARGET_WAITKIND_FORKED:
3966 case TARGET_WAITKIND_VFORKED:
3967 if (debug_infrun)
3968 {
3969 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3970 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3971 else
3972 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3973 }
3974
3975 /* Check whether the inferior is displaced stepping. */
3976 {
3977 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3978 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3979 struct displaced_step_inferior_state *displaced
3980 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3981
3982 	/* If displaced stepping is in use, and thread ecs->ptid is the
3983 	   one that is displaced stepping, finish the step on its behalf.  */
3984 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3985 {
3986 struct inferior *parent_inf
3987 = find_inferior_ptid (ecs->ptid);
3988 struct regcache *child_regcache;
3989 CORE_ADDR parent_pc;
3990
3991 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3992 	       indicating that the displaced stepping of the syscall instruction
3993 	       has finished.  Perform cleanup for the parent process here.  Note
3994 that this operation also cleans up the child process for vfork,
3995 because their pages are shared. */
3996 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3997
3998 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3999 {
4000 /* Restore scratch pad for child process. */
4001 displaced_step_restore (displaced, ecs->ws.value.related_pid);
4002 }
4003
4004 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
4005 the child's PC is also within the scratchpad. Set the child's PC
4006 to the parent's PC value, which has already been fixed up.
4007 FIXME: we use the parent's aspace here, although we're touching
4008 the child, because the child hasn't been added to the inferior
4009 list yet at this point. */
4010
4011 child_regcache
4012 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
4013 gdbarch,
4014 parent_inf->aspace);
4015 /* Read PC value of parent process. */
4016 parent_pc = regcache_read_pc (regcache);
4017
4018 if (debug_displaced)
4019 fprintf_unfiltered (gdb_stdlog,
4020 "displaced: write child pc from %s to %s\n",
4021 paddress (gdbarch,
4022 regcache_read_pc (child_regcache)),
4023 paddress (gdbarch, parent_pc));
4024
4025 regcache_write_pc (child_regcache, parent_pc);
4026 }
4027 }
4028
4029 if (!ptid_equal (ecs->ptid, inferior_ptid))
4030 context_switch (ecs->ptid);
4031
4032 /* Immediately detach breakpoints from the child before there's
4033 any chance of letting the user delete breakpoints from the
4034 breakpoint lists. If we don't do this early, it's easy to
4035 	 leave left-over traps in the child, viz.: "break foo; catch
4036 fork; c; <fork>; del; c; <child calls foo>". We only follow
4037 the fork on the last `continue', and by that time the
4038 breakpoint at "foo" is long gone from the breakpoint table.
4039 If we vforked, then we don't need to unpatch here, since both
4040 parent and child are sharing the same memory pages; we'll
4041 need to unpatch at follow/detach time instead to be certain
4042 that new breakpoints added between catchpoint hit time and
4043 vfork follow are detached. */
4044 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
4045 {
4046 /* This won't actually modify the breakpoint list, but will
4047 physically remove the breakpoints from the child. */
4048 detach_breakpoints (ecs->ws.value.related_pid);
4049 }
4050
4051 delete_just_stopped_threads_single_step_breakpoints ();
4052
4053 /* In case the event is caught by a catchpoint, remember that
4054 the event is to be followed at the next resume of the thread,
4055 and not immediately. */
4056 ecs->event_thread->pending_follow = ecs->ws;
4057
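      /* Record the PC at which the fork event was reported;
	 bpstat_stop_status below uses it to check whether a fork/vfork
	 catchpoint explains the stop.  */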
4058 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4059
4060 ecs->event_thread->control.stop_bpstat
4061 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4062 stop_pc, ecs->ptid, &ecs->ws);
4063
4064 /* If no catchpoint triggered for this, then keep going. Note
4065 that we're interested in knowing the bpstat actually causes a
4066 stop, not just if it may explain the signal. Software
4067 watchpoints, for example, always appear in the bpstat. */
4068 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4069 {
4070 ptid_t parent;
4071 ptid_t child;
4072 int should_resume;
4073 int follow_child
4074 = (follow_fork_mode_string == follow_fork_mode_child);
4075
4076 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4077
4078 should_resume = follow_fork ();
4079
4080 parent = ecs->ptid;
4081 child = ecs->ws.value.related_pid;
4082
4083 /* In non-stop mode, also resume the other branch. */
4084 if (non_stop && !detach_fork)
4085 {
4086 if (follow_child)
4087 switch_to_thread (parent);
4088 else
4089 switch_to_thread (child);
4090
4091 ecs->event_thread = inferior_thread ();
4092 ecs->ptid = inferior_ptid;
4093 keep_going (ecs);
4094 }
4095
4096 if (follow_child)
4097 switch_to_thread (child);
4098 else
4099 switch_to_thread (parent);
4100
4101 ecs->event_thread = inferior_thread ();
4102 ecs->ptid = inferior_ptid;
4103
4104 if (should_resume)
4105 keep_going (ecs);
4106 else
4107 stop_waiting (ecs);
4108 return;
4109 }
4110 process_event_stop_test (ecs);
4111 return;
4112
4113 case TARGET_WAITKIND_VFORK_DONE:
4114 /* Done with the shared memory region. Re-insert breakpoints in
4115 the parent, and keep going. */
4116
4117 if (debug_infrun)
4118 fprintf_unfiltered (gdb_stdlog,
4119 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
4120
4121 if (!ptid_equal (ecs->ptid, inferior_ptid))
4122 context_switch (ecs->ptid);
4123
4124 current_inferior ()->waiting_for_vfork_done = 0;
4125 current_inferior ()->pspace->breakpoints_not_allowed = 0;
4126 /* This also takes care of reinserting breakpoints in the
4127 previously locked inferior. */
4128 keep_going (ecs);
4129 return;
4130
4131 case TARGET_WAITKIND_EXECD:
4132 if (debug_infrun)
4133 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
4134
4135 if (!ptid_equal (ecs->ptid, inferior_ptid))
4136 context_switch (ecs->ptid);
4137
4138 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4139
4140 /* Do whatever is necessary to the parent branch of the vfork. */
4141 handle_vfork_child_exec_or_exit (1);
4142
4143 /* This causes the eventpoints and symbol table to be reset.
4144 Must do this now, before trying to determine whether to
4145 stop. */
4146 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
4147
4148 ecs->event_thread->control.stop_bpstat
4149 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4150 stop_pc, ecs->ptid, &ecs->ws);
4151
4152 /* Note that this may be referenced from inside
4153 bpstat_stop_status above, through inferior_has_execd. */
4154 xfree (ecs->ws.value.execd_pathname);
4155 ecs->ws.value.execd_pathname = NULL;
4156
4157 /* If no catchpoint triggered for this, then keep going. */
4158 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4159 {
4160 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4161 keep_going (ecs);
4162 return;
4163 }
4164 process_event_stop_test (ecs);
4165 return;
4166
4167 /* Be careful not to try to gather much state about a thread
4168 that's in a syscall. It's frequently a losing proposition. */
4169 case TARGET_WAITKIND_SYSCALL_ENTRY:
4170 if (debug_infrun)
4171 fprintf_unfiltered (gdb_stdlog,
4172 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
4173       /* Get the current syscall number.  */
4174 if (handle_syscall_event (ecs) == 0)
4175 process_event_stop_test (ecs);
4176 return;
4177
4178 /* Before examining the threads further, step this thread to
4179 get it entirely out of the syscall. (We get notice of the
4180 event when the thread is just on the verge of exiting a
4181 syscall. Stepping one instruction seems to get it back
4182 into user code.) */
4183 case TARGET_WAITKIND_SYSCALL_RETURN:
4184 if (debug_infrun)
4185 fprintf_unfiltered (gdb_stdlog,
4186 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
4187 if (handle_syscall_event (ecs) == 0)
4188 process_event_stop_test (ecs);
4189 return;
4190
4191 case TARGET_WAITKIND_STOPPED:
4192 if (debug_infrun)
4193 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4194 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4195 handle_signal_stop (ecs);
4196 return;
4197
4198 case TARGET_WAITKIND_NO_HISTORY:
4199 if (debug_infrun)
4200 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4201 /* Reverse execution: target ran out of history info. */
4202
4203 delete_just_stopped_threads_single_step_breakpoints ();
4204 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4205 observer_notify_no_history ();
4206 stop_waiting (ecs);
4207 return;
4208 }
4209 }
4210
4211 /* Come here when the program has stopped with a signal. */
4212
4213 static void
4214 handle_signal_stop (struct execution_control_state *ecs)
4215 {
4216 struct frame_info *frame;
4217 struct gdbarch *gdbarch;
4218 int stopped_by_watchpoint;
4219 enum stop_kind stop_soon;
4220 int random_signal;
4221
4222 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4223
4224 /* Do we need to clean up the state of a thread that has
4225 completed a displaced single-step? (Doing so usually affects
4226 the PC, so do it here, before we set stop_pc.) */
4227 displaced_step_fixup (ecs->ptid,
4228 ecs->event_thread->suspend.stop_signal);
4229
4230 /* If we either finished a single-step or hit a breakpoint, but
4231 the user wanted this thread to be stopped, pretend we got a
4232 SIG0 (generic unsignaled stop). */
4233 if (ecs->event_thread->stop_requested
4234 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4235 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4236
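  /* Cache the event thread's PC; the checks below compare it against
     breakpoint locations and stepping ranges.  */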
4237 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4238
4239 if (debug_infrun)
4240 {
4241 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4242 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4243 struct cleanup *old_chain = save_inferior_ptid ();
4244
4245 inferior_ptid = ecs->ptid;
4246
4247 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4248 paddress (gdbarch, stop_pc));
4249 if (target_stopped_by_watchpoint ())
4250 {
4251 CORE_ADDR addr;
4252
4253 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4254
4255 if (target_stopped_data_address (&current_target, &addr))
4256 fprintf_unfiltered (gdb_stdlog,
4257 "infrun: stopped data address = %s\n",
4258 paddress (gdbarch, addr));
4259 else
4260 fprintf_unfiltered (gdb_stdlog,
4261 "infrun: (no data address available)\n");
4262 }
4263
4264 do_cleanups (old_chain);
4265 }
4266
4267   /* This originates from start_remote(), start_inferior() and
4268      shared library hook functions.  */
4269 stop_soon = get_inferior_stop_soon (ecs->ptid);
4270 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4271 {
4272 if (!ptid_equal (ecs->ptid, inferior_ptid))
4273 context_switch (ecs->ptid);
4274 if (debug_infrun)
4275 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4276 stop_print_frame = 1;
4277 stop_waiting (ecs);
4278 return;
4279 }
4280
4281 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4282 && stop_after_trap)
4283 {
4284 if (!ptid_equal (ecs->ptid, inferior_ptid))
4285 context_switch (ecs->ptid);
4286 if (debug_infrun)
4287 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4288 stop_print_frame = 0;
4289 stop_waiting (ecs);
4290 return;
4291 }
4292
4293 /* This originates from attach_command(). We need to overwrite
4294 the stop_signal here, because some kernels don't ignore a
4295 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4296 See more comments in inferior.h. On the other hand, if we
4297 get a non-SIGSTOP, report it to the user - assume the backend
4298 will handle the SIGSTOP if it should show up later.
4299
4300 Also consider that the attach is complete when we see a
4301      SIGTRAP.  Some systems (e.g. Windows) and stubs supporting
4302      target extended-remote (e.g. gdbserver) report it instead of a
4303      SIGSTOP.  We already rely on SIGTRAP being our
4304 signal, so this is no exception.
4305
4306 Also consider that the attach is complete when we see a
4307 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4308 the target to stop all threads of the inferior, in case the
4309 low level attach operation doesn't stop them implicitly. If
4310 they weren't stopped implicitly, then the stub will report a
4311 GDB_SIGNAL_0, meaning: stopped for no particular reason
4312 other than GDB's request. */
4313 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4314 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4315 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4316 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4317 {
4318 stop_print_frame = 1;
4319 stop_waiting (ecs);
4320 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4321 return;
4322 }
4323
4324 /* See if something interesting happened to the non-current thread. If
4325 so, then switch to that thread. */
4326 if (!ptid_equal (ecs->ptid, inferior_ptid))
4327 {
4328 if (debug_infrun)
4329 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4330
4331 context_switch (ecs->ptid);
4332
4333 if (deprecated_context_hook)
4334 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4335 }
4336
4337 /* At this point, get hold of the now-current thread's frame. */
4338 frame = get_current_frame ();
4339 gdbarch = get_frame_arch (frame);
4340
4341 /* Pull the single step breakpoints out of the target. */
4342 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4343 {
4344 struct regcache *regcache;
4345 struct address_space *aspace;
4346 CORE_ADDR pc;
4347
4348 regcache = get_thread_regcache (ecs->ptid);
4349 aspace = get_regcache_aspace (regcache);
4350 pc = regcache_read_pc (regcache);
4351
4352 /* However, before doing so, if this single-step breakpoint was
4353 actually for another thread, set this thread up for moving
4354 past it. */
4355 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4356 aspace, pc))
4357 {
4358 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4359 {
4360 if (debug_infrun)
4361 {
4362 fprintf_unfiltered (gdb_stdlog,
4363 "infrun: [%s] hit another thread's "
4364 "single-step breakpoint\n",
4365 target_pid_to_str (ecs->ptid));
4366 }
4367 ecs->hit_singlestep_breakpoint = 1;
4368 }
4369 }
4370 else
4371 {
4372 if (debug_infrun)
4373 {
4374 fprintf_unfiltered (gdb_stdlog,
4375 "infrun: [%s] hit its "
4376 "single-step breakpoint\n",
4377 target_pid_to_str (ecs->ptid));
4378 }
4379 }
4380 }
4381 delete_just_stopped_threads_single_step_breakpoints ();
4382
4383 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4384 && ecs->event_thread->control.trap_expected
4385 && ecs->event_thread->stepping_over_watchpoint)
4386 stopped_by_watchpoint = 0;
4387 else
4388 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4389
4390 /* If necessary, step over this watchpoint. We'll be back to display
4391 it in a moment. */
4392 if (stopped_by_watchpoint
4393 && (target_have_steppable_watchpoint
4394 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4395 {
4396 /* At this point, we are stopped at an instruction which has
4397 attempted to write to a piece of memory under control of
4398 a watchpoint. The instruction hasn't actually executed
4399 yet. If we were to evaluate the watchpoint expression
4400 now, we would get the old value, and therefore no change
4401 would seem to have occurred.
4402
4403 In order to make watchpoints work `right', we really need
4404 to complete the memory write, and then evaluate the
4405 watchpoint expression. We do this by single-stepping the
4406 target.
4407
4408 It may not be necessary to disable the watchpoint to step over
4409 it. For example, the PA can (with some kernel cooperation)
4410 single step over a watchpoint without disabling the watchpoint.
4411
4412 It is far more common to need to disable a watchpoint to step
4413 the inferior over it. If we have non-steppable watchpoints,
4414 we must disable the current watchpoint; it's simplest to
4415 disable all watchpoints.
4416
4417 Any breakpoint at PC must also be stepped over -- if there's
4418 one, it will have already triggered before the watchpoint
4419 triggered, and we either already reported it to the user, or
4420 it didn't cause a stop and we called keep_going. In either
4421 case, if there was a breakpoint at PC, we must be trying to
4422 step past it. */
4423 ecs->event_thread->stepping_over_watchpoint = 1;
4424 keep_going (ecs);
4425 return;
4426 }
4427
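  /* Reset per-stop state before working out what this stop means.  */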
4428 ecs->event_thread->stepping_over_breakpoint = 0;
4429 ecs->event_thread->stepping_over_watchpoint = 0;
4430 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4431 ecs->event_thread->control.stop_step = 0;
4432 stop_print_frame = 1;
4433 stopped_by_random_signal = 0;
4434
4435 /* Hide inlined functions starting here, unless we just performed stepi or
4436 nexti. After stepi and nexti, always show the innermost frame (not any
4437 inline function call sites). */
4438 if (ecs->event_thread->control.step_range_end != 1)
4439 {
4440 struct address_space *aspace =
4441 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4442
4443 /* skip_inline_frames is expensive, so we avoid it if we can
4444 determine that the address is one where functions cannot have
4445 been inlined. This improves performance with inferiors that
4446 load a lot of shared libraries, because the solib event
4447 breakpoint is defined as the address of a function (i.e. not
4448 inline). Note that we have to check the previous PC as well
4449 as the current one to catch cases when we have just
4450 single-stepped off a breakpoint prior to reinstating it.
4451 Note that we're assuming that the code we single-step to is
4452 not inline, but that's not definitive: there's nothing
4453 preventing the event breakpoint function from containing
4454 inlined code, and the single-step ending up there. If the
4455 user had set a breakpoint on that inlined code, the missing
4456 skip_inline_frames call would break things. Fortunately
4457 that's an extremely unlikely scenario. */
4458 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4459 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4460 && ecs->event_thread->control.trap_expected
4461 && pc_at_non_inline_function (aspace,
4462 ecs->event_thread->prev_pc,
4463 &ecs->ws)))
4464 {
4465 skip_inline_frames (ecs->ptid);
4466
4467 /* Re-fetch current thread's frame in case that invalidated
4468 the frame cache. */
4469 frame = get_current_frame ();
4470 gdbarch = get_frame_arch (frame);
4471 }
4472 }
4473
4474 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4475 && ecs->event_thread->control.trap_expected
4476 && gdbarch_single_step_through_delay_p (gdbarch)
4477 && currently_stepping (ecs->event_thread))
4478 {
4479 /* We're trying to step off a breakpoint. Turns out that we're
4480 also on an instruction that needs to be stepped multiple
4481 	 times before it has been fully executed.  E.g., architectures
4482 with a delay slot. It needs to be stepped twice, once for
4483 the instruction and once for the delay slot. */
4484 int step_through_delay
4485 = gdbarch_single_step_through_delay (gdbarch, frame);
4486
4487 if (debug_infrun && step_through_delay)
4488 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4489 if (ecs->event_thread->control.step_range_end == 0
4490 && step_through_delay)
4491 {
4492 /* The user issued a continue when stopped at a breakpoint.
4493 Set up for another trap and get out of here. */
4494 ecs->event_thread->stepping_over_breakpoint = 1;
4495 keep_going (ecs);
4496 return;
4497 }
4498 else if (step_through_delay)
4499 {
4500 /* The user issued a step when stopped at a breakpoint.
4501 Maybe we should stop, maybe we should not - the delay
4502 slot *might* correspond to a line of source. In any
4503 case, don't decide that here, just set
4504 ecs->stepping_over_breakpoint, making sure we
4505 single-step again before breakpoints are re-inserted. */
4506 ecs->event_thread->stepping_over_breakpoint = 1;
4507 }
4508 }
4509
4510 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4511 handles this event. */
4512 ecs->event_thread->control.stop_bpstat
4513 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4514 stop_pc, ecs->ptid, &ecs->ws);
4515
4516   /* The following is needed in case a breakpoint condition called
4517      a function.  */
4518 stop_print_frame = 1;
4519
4520 /* This is where we handle "moribund" watchpoints. Unlike
4521 software breakpoints traps, hardware watchpoint traps are
4522 always distinguishable from random traps. If no high-level
4523 watchpoint is associated with the reported stop data address
4524 anymore, then the bpstat does not explain the signal ---
4525 simply make sure to ignore it if `stopped_by_watchpoint' is
4526 set. */
4527
4528 if (debug_infrun
4529 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4530 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4531 GDB_SIGNAL_TRAP)
4532 && stopped_by_watchpoint)
4533 fprintf_unfiltered (gdb_stdlog,
4534 "infrun: no user watchpoint explains "
4535 "watchpoint SIGTRAP, ignoring\n");
4536
4537 /* NOTE: cagney/2003-03-29: These checks for a random signal
4538 at one stage in the past included checks for an inferior
4539 function call's call dummy's return breakpoint. The original
4540 comment, that went with the test, read:
4541
4542 ``End of a stack dummy. Some systems (e.g. Sony news) give
4543 another signal besides SIGTRAP, so check here as well as
4544 above.''
4545
4546      If someone ever tries to get call dummies on a
4547 non-executable stack to work (where the target would stop
4548 with something like a SIGSEGV), then those tests might need
4549 to be re-instated. Given, however, that the tests were only
4550 enabled when momentary breakpoints were not being used, I
4551 suspect that it won't be the case.
4552
4553 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4554 be necessary for call dummies on a non-executable stack on
4555 SPARC. */
4556
4557 /* See if the breakpoints module can explain the signal. */
4558 random_signal
4559 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4560 ecs->event_thread->suspend.stop_signal);
4561
4562 /* Maybe this was a trap for a software breakpoint that has since
4563 been removed. */
4564 if (random_signal && target_stopped_by_sw_breakpoint ())
4565 {
4566 if (program_breakpoint_here_p (gdbarch, stop_pc))
4567 {
4568 struct regcache *regcache;
4569 int decr_pc;
4570
4571 /* Re-adjust PC to what the program would see if GDB was not
4572 debugging it. */
4573 regcache = get_thread_regcache (ecs->event_thread->ptid);
4574 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4575 if (decr_pc != 0)
4576 {
4577 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
4578
4579 if (record_full_is_used ())
4580 record_full_gdb_operation_disable_set ();
4581
4582 regcache_write_pc (regcache, stop_pc + decr_pc);
4583
4584 do_cleanups (old_cleanups);
4585 }
4586 }
4587 else
4588 {
4589 /* A delayed software breakpoint event. Ignore the trap. */
4590 if (debug_infrun)
4591 fprintf_unfiltered (gdb_stdlog,
4592 "infrun: delayed software breakpoint "
4593 "trap, ignoring\n");
4594 random_signal = 0;
4595 }
4596 }
4597
4598 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
4599 has since been removed. */
4600 if (random_signal && target_stopped_by_hw_breakpoint ())
4601 {
4602 /* A delayed hardware breakpoint event. Ignore the trap. */
4603 if (debug_infrun)
4604 fprintf_unfiltered (gdb_stdlog,
4605 "infrun: delayed hardware breakpoint/watchpoint "
4606 "trap, ignoring\n");
4607 random_signal = 0;
4608 }
4609
4610 /* If not, perhaps stepping/nexting can. */
4611 if (random_signal)
4612 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4613 && currently_stepping (ecs->event_thread));
4614
4615 /* Perhaps the thread hit a single-step breakpoint of _another_
4616 thread. Single-step breakpoints are transparent to the
4617 breakpoints module. */
4618 if (random_signal)
4619 random_signal = !ecs->hit_singlestep_breakpoint;
4620
4621 /* No? Perhaps we got a moribund watchpoint. */
4622 if (random_signal)
4623 random_signal = !stopped_by_watchpoint;
4624
4625 /* For the program's own signals, act according to
4626 the signal handling tables. */
4627
4628 if (random_signal)
4629 {
4630 /* Signal not for debugging purposes. */
4631 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4632 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4633
4634 if (debug_infrun)
4635 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4636 gdb_signal_to_symbol_string (stop_signal));
4637
4638 stopped_by_random_signal = 1;
4639
4640 /* Always stop on signals if we're either just gaining control
4641 of the program, or the user explicitly requested this thread
4642 to remain stopped. */
4643 if (stop_soon != NO_STOP_QUIETLY
4644 || ecs->event_thread->stop_requested
4645 || (!inf->detaching
4646 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4647 {
4648 stop_waiting (ecs);
4649 return;
4650 }
4651
4652 /* Notify observers the signal has "handle print" set. Note we
4653 returned early above if stopping; normal_stop handles the
4654 printing in that case. */
4655 if (signal_print[ecs->event_thread->suspend.stop_signal])
4656 {
4657 /* The signal table tells us to print about this signal. */
4658 target_terminal_ours_for_output ();
4659 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4660 target_terminal_inferior ();
4661 }
4662
4663 /* Clear the signal if it should not be passed. */
4664 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4665 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4666
4667 if (ecs->event_thread->prev_pc == stop_pc
4668 && ecs->event_thread->control.trap_expected
4669 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4670 {
4671 /* We were just starting a new sequence, attempting to
4672 single-step off of a breakpoint and expecting a SIGTRAP.
4673 Instead this signal arrives. This signal will take us out
4674 	     of the stepping range, so GDB needs to remember to resume
4675 	     stepping off that breakpoint when the signal handler
4676 	     returns.  */
4677 /* To simplify things, "continue" is forced to use the same
4678 code paths as single-step - set a breakpoint at the
4679 signal return address and then, once hit, step off that
4680 breakpoint. */
4681 if (debug_infrun)
4682 fprintf_unfiltered (gdb_stdlog,
4683 "infrun: signal arrived while stepping over "
4684 "breakpoint\n");
4685
4686 insert_hp_step_resume_breakpoint_at_frame (frame);
4687 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4688 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4689 ecs->event_thread->control.trap_expected = 0;
4690
4691 /* If we were nexting/stepping some other thread, switch to
4692 it, so that we don't continue it, losing control. */
4693 if (!switch_back_to_stepped_thread (ecs))
4694 keep_going (ecs);
4695 return;
4696 }
4697
4698 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4699 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4700 || ecs->event_thread->control.step_range_end == 1)
4701 && frame_id_eq (get_stack_frame_id (frame),
4702 ecs->event_thread->control.step_stack_frame_id)
4703 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4704 {
4705 /* The inferior is about to take a signal that will take it
4706 out of the single step range. Set a breakpoint at the
4707 current PC (which is presumably where the signal handler
4708 will eventually return) and then allow the inferior to
4709 run free.
4710
4711 Note that this is only needed for a signal delivered
4712 while in the single-step range. Nested signals aren't a
4713 problem as they eventually all return. */
4714 if (debug_infrun)
4715 fprintf_unfiltered (gdb_stdlog,
4716 "infrun: signal may take us out of "
4717 "single-step range\n");
4718
4719 insert_hp_step_resume_breakpoint_at_frame (frame);
4720 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4721 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4722 ecs->event_thread->control.trap_expected = 0;
4723 keep_going (ecs);
4724 return;
4725 }
4726
4727       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4728 when either there's a nested signal, or when there's a
4729 pending signal enabled just as the signal handler returns
4730 (leaving the inferior at the step-resume-breakpoint without
4731 actually executing it). Either way continue until the
4732 breakpoint is really hit. */
4733
4734 if (!switch_back_to_stepped_thread (ecs))
4735 {
4736 if (debug_infrun)
4737 fprintf_unfiltered (gdb_stdlog,
4738 "infrun: random signal, keep going\n");
4739
4740 keep_going (ecs);
4741 }
4742 return;
4743 }
4744
4745 process_event_stop_test (ecs);
4746 }
4747
4748 /* Come here when we've got some debug event / signal we can explain
4749 (IOW, not a random signal), and test whether it should cause a
4750 stop, or whether we should resume the inferior (transparently).
4751 E.g., could be a breakpoint whose condition evaluates false; we
4752 could be still stepping within the line; etc. */
4753
4754 static void
4755 process_event_stop_test (struct execution_control_state *ecs)
4756 {
4757 struct symtab_and_line stop_pc_sal;
4758 struct frame_info *frame;
4759 struct gdbarch *gdbarch;
4760 CORE_ADDR jmp_buf_pc;
4761 struct bpstat_what what;
4762
4763 /* Handle cases caused by hitting a breakpoint. */
4764
4765 frame = get_current_frame ();
4766 gdbarch = get_frame_arch (frame);
4767
4768 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4769
4770 if (what.call_dummy)
4771 {
4772 stop_stack_dummy = what.call_dummy;
4773 }
4774
4775 /* If we hit an internal event that triggers symbol changes, the
4776 current frame will be invalidated within bpstat_what (e.g., if we
4777 hit an internal solib event). Re-fetch it. */
4778 frame = get_current_frame ();
4779 gdbarch = get_frame_arch (frame);
4780
4781 switch (what.main_action)
4782 {
4783 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4784 /* If we hit the breakpoint at longjmp while stepping, we
4785 install a momentary breakpoint at the target of the
4786 jmp_buf. */
4787
4788 if (debug_infrun)
4789 fprintf_unfiltered (gdb_stdlog,
4790 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4791
4792 ecs->event_thread->stepping_over_breakpoint = 1;
4793
4794 if (what.is_longjmp)
4795 {
4796 struct value *arg_value;
4797
4798 /* If we set the longjmp breakpoint via a SystemTap probe,
4799 then use it to extract the arguments. The destination PC
4800 is the third argument to the probe. */
4801 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4802 if (arg_value)
4803 {
4804 jmp_buf_pc = value_as_address (arg_value);
4805 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4806 }
4807 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4808 || !gdbarch_get_longjmp_target (gdbarch,
4809 frame, &jmp_buf_pc))
4810 {
4811 if (debug_infrun)
4812 fprintf_unfiltered (gdb_stdlog,
4813 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4814 "(!gdbarch_get_longjmp_target)\n");
4815 keep_going (ecs);
4816 return;
4817 }
4818
4819 /* Insert a breakpoint at resume address. */
4820 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4821 }
4822 else
4823 check_exception_resume (ecs, frame);
4824 keep_going (ecs);
4825 return;
4826
4827 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4828 {
4829 struct frame_info *init_frame;
4830
4831 /* There are several cases to consider.
4832
4833 1. The initiating frame no longer exists. In this case we
4834 must stop, because the exception or longjmp has gone too
4835 far.
4836
4837 2. The initiating frame exists, and is the same as the
4838 current frame. We stop, because the exception or longjmp
4839 has been caught.
4840
4841 3. The initiating frame exists and is different from the
4842 current frame. This means the exception or longjmp has
4843 been caught beneath the initiating frame, so keep going.
4844
4845 	   4. The longjmp breakpoint has been placed just to protect
4846 	   against stale dummy frames and the user is not interested in
4847 	   stopping around longjmps.  */
4848
4849 if (debug_infrun)
4850 fprintf_unfiltered (gdb_stdlog,
4851 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4852
4853 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4854 != NULL);
4855 delete_exception_resume_breakpoint (ecs->event_thread);
4856
4857 if (what.is_longjmp)
4858 {
4859 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4860
4861 if (!frame_id_p (ecs->event_thread->initiating_frame))
4862 {
4863 /* Case 4. */
4864 keep_going (ecs);
4865 return;
4866 }
4867 }
4868
4869 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4870
4871 if (init_frame)
4872 {
4873 struct frame_id current_id
4874 = get_frame_id (get_current_frame ());
4875 if (frame_id_eq (current_id,
4876 ecs->event_thread->initiating_frame))
4877 {
4878 /* Case 2. Fall through. */
4879 }
4880 else
4881 {
4882 /* Case 3. */
4883 keep_going (ecs);
4884 return;
4885 }
4886 }
4887
4888 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4889 exists. */
4890 delete_step_resume_breakpoint (ecs->event_thread);
4891
4892 end_stepping_range (ecs);
4893 }
4894 return;
4895
4896 case BPSTAT_WHAT_SINGLE:
4897 if (debug_infrun)
4898 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4899 ecs->event_thread->stepping_over_breakpoint = 1;
4900 /* Still need to check other stuff, at least the case where we
4901 are stepping and step out of the right range. */
4902 break;
4903
4904 case BPSTAT_WHAT_STEP_RESUME:
4905 if (debug_infrun)
4906 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4907
4908 delete_step_resume_breakpoint (ecs->event_thread);
4909 if (ecs->event_thread->control.proceed_to_finish
4910 && execution_direction == EXEC_REVERSE)
4911 {
4912 struct thread_info *tp = ecs->event_thread;
4913
4914 /* We are finishing a function in reverse, and just hit the
4915 step-resume breakpoint at the start address of the
4916 function, and we're almost there -- just need to back up
4917 by one more single-step, which should take us back to the
4918 function call. */
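/* (A start == end == 1 range is the same sentinel used for
instruction-level stepping; see the step_range_end == 1 check
further down in this function.  Explanatory note.)  */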
4919 tp->control.step_range_start = tp->control.step_range_end = 1;
4920 keep_going (ecs);
4921 return;
4922 }
4923 fill_in_stop_func (gdbarch, ecs);
4924 if (stop_pc == ecs->stop_func_start
4925 && execution_direction == EXEC_REVERSE)
4926 {
4927 /* We are stepping over a function call in reverse, and just
4928 hit the step-resume breakpoint at the start address of
4929 the function. Go back to single-stepping, which should
4930 take us back to the function call. */
4931 ecs->event_thread->stepping_over_breakpoint = 1;
4932 keep_going (ecs);
4933 return;
4934 }
4935 break;
4936
4937 case BPSTAT_WHAT_STOP_NOISY:
4938 if (debug_infrun)
4939 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4940 stop_print_frame = 1;
4941
4942 /* Assume the thread stopped for a breakpoint. We'll still check
4943 whether a/the breakpoint is there when the thread is next
4944 resumed. */
4945 ecs->event_thread->stepping_over_breakpoint = 1;
4946
4947 stop_waiting (ecs);
4948 return;
4949
4950 case BPSTAT_WHAT_STOP_SILENT:
4951 if (debug_infrun)
4952 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4953 stop_print_frame = 0;
4954
4955 /* Assume the thread stopped for a breakpoint. We'll still check
4956 whether a/the breakpoint is there when the thread is next
4957 resumed. */
4958 ecs->event_thread->stepping_over_breakpoint = 1;
4959 stop_waiting (ecs);
4960 return;
4961
4962 case BPSTAT_WHAT_HP_STEP_RESUME:
4963 if (debug_infrun)
4964 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4965
4966 delete_step_resume_breakpoint (ecs->event_thread);
4967 if (ecs->event_thread->step_after_step_resume_breakpoint)
4968 {
4969 /* Back when the step-resume breakpoint was inserted, we
4970 were trying to single-step off a breakpoint. Go back to
4971 doing that. */
4972 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4973 ecs->event_thread->stepping_over_breakpoint = 1;
4974 keep_going (ecs);
4975 return;
4976 }
4977 break;
4978
4979 case BPSTAT_WHAT_KEEP_CHECKING:
4980 break;
4981 }
4982
4983 /* If we stepped a permanent breakpoint and we had a high priority
4984 step-resume breakpoint for the address we stepped, but we didn't
4985 hit it, then we must have stepped into the signal handler. The
4986 step-resume was only necessary to catch the case of _not_
4987 stepping into the handler, so delete it, and fall through to
4988 checking whether the step finished. */
4989 if (ecs->event_thread->stepped_breakpoint)
4990 {
4991 struct breakpoint *sr_bp
4992 = ecs->event_thread->control.step_resume_breakpoint;
4993
4994 if (sr_bp != NULL
4995 && sr_bp->loc->permanent
4996 && sr_bp->type == bp_hp_step_resume
4997 && sr_bp->loc->address == ecs->event_thread->prev_pc)
4998 {
4999 if (debug_infrun)
5000 fprintf_unfiltered (gdb_stdlog,
5001 "infrun: stepped permanent breakpoint, stopped in "
5002 "handler\n");
5003 delete_step_resume_breakpoint (ecs->event_thread);
5004 ecs->event_thread->step_after_step_resume_breakpoint = 0;
5005 }
5006 }
5007
5008 /* We come here if we hit a breakpoint but should not stop for it.
5009 Possibly we also were stepping and should stop for that. So fall
5010 through and test for stepping. But, if not stepping, do not
5011 stop. */
5012
5013 /* In all-stop mode, if we're currently stepping but have stopped in
5014 some other thread, we need to switch back to the stepped thread. */
5015 if (switch_back_to_stepped_thread (ecs))
5016 return;
5017
5018 if (ecs->event_thread->control.step_resume_breakpoint)
5019 {
5020 if (debug_infrun)
5021 fprintf_unfiltered (gdb_stdlog,
5022 "infrun: step-resume breakpoint is inserted\n");
5023
5024 /* Having a step-resume breakpoint overrides anything
5025 else having to do with stepping commands until
5026 that breakpoint is reached. */
5027 keep_going (ecs);
5028 return;
5029 }
5030
5031 if (ecs->event_thread->control.step_range_end == 0)
5032 {
5033 if (debug_infrun)
5034 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
5035 /* Likewise if we aren't even stepping. */
5036 keep_going (ecs);
5037 return;
5038 }
5039
5040 /* Re-fetch current thread's frame in case the code above caused
5041 the frame cache to be re-initialized, making our FRAME variable
5042 a dangling pointer. */
5043 frame = get_current_frame ();
5044 gdbarch = get_frame_arch (frame);
5045 fill_in_stop_func (gdbarch, ecs);
5046
5047 /* If stepping through a line, keep going if still within it.
5048
5049 Note that step_range_end is the address of the first instruction
5050 beyond the step range, and NOT the address of the last instruction
5051 within it!
5052
5053 Note also that during reverse execution, we may be stepping
5054 through a function epilogue and therefore must detect when
5055 the current-frame changes in the middle of a line. */
5056
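/* Roughly speaking, pc_in_thread_step_range tests
step_range_start <= stop_pc && stop_pc < step_range_end,
i.e. the end address is exclusive, matching the note above.
(Explanatory sketch, not part of the original source.)  */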
5057 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
5058 && (execution_direction != EXEC_REVERSE
5059 || frame_id_eq (get_frame_id (frame),
5060 ecs->event_thread->control.step_frame_id)))
5061 {
5062 if (debug_infrun)
5063 fprintf_unfiltered
5064 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
5065 paddress (gdbarch, ecs->event_thread->control.step_range_start),
5066 paddress (gdbarch, ecs->event_thread->control.step_range_end));
5067
5068 /* Tentatively re-enable range stepping; `resume' disables it if
5069 necessary (e.g., if we're stepping over a breakpoint or we
5070 have software watchpoints). */
5071 ecs->event_thread->control.may_range_step = 1;
5072
5073 /* When stepping backward, stop at beginning of line range
5074 (unless it's the function entry point, in which case
5075 keep going back to the call point). */
5076 if (stop_pc == ecs->event_thread->control.step_range_start
5077 && stop_pc != ecs->stop_func_start
5078 && execution_direction == EXEC_REVERSE)
5079 end_stepping_range (ecs);
5080 else
5081 keep_going (ecs);
5082
5083 return;
5084 }
5085
5086 /* We stepped out of the stepping range. */
5087
5088 /* If we are stepping at the source level and entered the runtime
5089 loader dynamic symbol resolution code...
5090
5091 EXEC_FORWARD: we keep on single stepping until we exit the run
5092 time loader code and reach the callee's address.
5093
5094 EXEC_REVERSE: we've already executed the callee (backward), and
5095 the runtime loader code is handled just like any other
5096 undebuggable function call. Now we need only keep stepping
5097 backward through the trampoline code, and that's handled further
5098 down, so there is nothing for us to do here. */
5099
5100 if (execution_direction != EXEC_REVERSE
5101 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5102 && in_solib_dynsym_resolve_code (stop_pc))
5103 {
5104 CORE_ADDR pc_after_resolver =
5105 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
5106
5107 if (debug_infrun)
5108 fprintf_unfiltered (gdb_stdlog,
5109 "infrun: stepped into dynsym resolve code\n");
5110
5111 if (pc_after_resolver)
5112 {
5113 /* Set up a step-resume breakpoint at the address
5114 indicated by SKIP_SOLIB_RESOLVER. */
5115 struct symtab_and_line sr_sal;
5116
5117 init_sal (&sr_sal);
5118 sr_sal.pc = pc_after_resolver;
5119 sr_sal.pspace = get_frame_program_space (frame);
5120
5121 insert_step_resume_breakpoint_at_sal (gdbarch,
5122 sr_sal, null_frame_id);
5123 }
5124
5125 keep_going (ecs);
5126 return;
5127 }
5128
5129 if (ecs->event_thread->control.step_range_end != 1
5130 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5131 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5132 && get_frame_type (frame) == SIGTRAMP_FRAME)
5133 {
5134 if (debug_infrun)
5135 fprintf_unfiltered (gdb_stdlog,
5136 "infrun: stepped into signal trampoline\n");
5137 /* The inferior, while doing a "step" or "next", has ended up in
5138 a signal trampoline (either by a signal being delivered or by
5139 the signal handler returning). Just single-step until the
5140 inferior leaves the trampoline (either by calling the handler
5141 or returning). */
5142 keep_going (ecs);
5143 return;
5144 }
5145
5146 /* If we're in the return path from a shared library trampoline,
5147 we want to proceed through the trampoline when stepping. */
5148 /* macro/2012-04-25: This needs to come before the subroutine
5149 call check below as on some targets return trampolines look
5150 like subroutine calls (MIPS16 return thunks). */
5151 if (gdbarch_in_solib_return_trampoline (gdbarch,
5152 stop_pc, ecs->stop_func_name)
5153 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5154 {
5155 /* Determine where this trampoline returns. */
5156 CORE_ADDR real_stop_pc;
5157
5158 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5159
5160 if (debug_infrun)
5161 fprintf_unfiltered (gdb_stdlog,
5162 "infrun: stepped into solib return tramp\n");
5163
5164 /* Only proceed through if we know where it's going. */
5165 if (real_stop_pc)
5166 {
5167 /* And put the step-breakpoint there and go until there. */
5168 struct symtab_and_line sr_sal;
5169
5170 init_sal (&sr_sal); /* initialize to zeroes */
5171 sr_sal.pc = real_stop_pc;
5172 sr_sal.section = find_pc_overlay (sr_sal.pc);
5173 sr_sal.pspace = get_frame_program_space (frame);
5174
5175 /* Do not specify what the fp should be when we stop since
5176 on some machines the prologue is where the new fp value
5177 is established. */
5178 insert_step_resume_breakpoint_at_sal (gdbarch,
5179 sr_sal, null_frame_id);
5180
5181 /* Restart without fiddling with the step ranges or
5182 other state. */
5183 keep_going (ecs);
5184 return;
5185 }
5186 }
5187
5188 /* Check for subroutine calls. The check for the current frame
5189 equalling the step ID is not necessary - the check of the
5190 previous frame's ID is sufficient - but it is a common case and
5191 cheaper than checking the previous frame's ID.
5192
5193 NOTE: frame_id_eq will never report two invalid frame IDs as
5194 being equal, so to get into this block, both the current and
5195 previous frame must have valid frame IDs. */
5196 /* The outer_frame_id check is a heuristic to detect stepping
5197 through startup code. If we step over an instruction which
5198 sets the stack pointer from an invalid value to a valid value,
5199 we may detect that as a subroutine call from the mythical
5200 "outermost" function. This could be fixed by marking
5201 outermost frames as !stack_p,code_p,special_p. Then the
5202 initial outermost frame, before sp was valid, would
5203 have code_addr == &_start. See the comment in frame_id_eq
5204 for more. */
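/* Restated, the test below asks: is the frame we stopped in different
from the frame the step started in, while its caller *is* the step's
starting frame -- i.e., do we appear to have stepped into a callee?
The outer_frame_id clause additionally requires the function to have
changed when the starting frame was the mythical outermost one.
(Explanatory note.)  */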
5205 if (!frame_id_eq (get_stack_frame_id (frame),
5206 ecs->event_thread->control.step_stack_frame_id)
5207 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
5208 ecs->event_thread->control.step_stack_frame_id)
5209 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
5210 outer_frame_id)
5211 || (ecs->event_thread->control.step_start_function
5212 != find_pc_function (stop_pc)))))
5213 {
5214 CORE_ADDR real_stop_pc;
5215
5216 if (debug_infrun)
5217 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
5218
5219 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
5220 {
5221 /* I presume that step_over_calls is only 0 when we're
5222 supposed to be stepping at the assembly language level
5223 ("stepi"). Just stop. */
5224 /* And this works the same backward as frontward. MVS */
5225 end_stepping_range (ecs);
5226 return;
5227 }
5228
5229 /* Reverse stepping through solib trampolines. */
5230
5231 if (execution_direction == EXEC_REVERSE
5232 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5233 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5234 || (ecs->stop_func_start == 0
5235 && in_solib_dynsym_resolve_code (stop_pc))))
5236 {
5237 /* Any solib trampoline code can be handled in reverse
5238 by simply continuing to single-step. We have already
5239 executed the solib function (backwards), and a few
5240 steps will take us back through the trampoline to the
5241 caller. */
5242 keep_going (ecs);
5243 return;
5244 }
5245
5246 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5247 {
5248 /* We're doing a "next".
5249
5250 Normal (forward) execution: set a breakpoint at the
5251 callee's return address (the address at which the caller
5252 will resume).
5253
5254 Reverse (backward) execution: set the step-resume
5255 breakpoint at the start of the function that we just
5256 stepped into (backwards), and continue to there. When we
5257 get there, we'll need to single-step back to the caller. */
5258
5259 if (execution_direction == EXEC_REVERSE)
5260 {
5261 /* If we're already at the start of the function, we've either
5262 just stepped backward into a single instruction function,
5263 or stepped back out of a signal handler to the first instruction
5264 of the function. Just keep going, which will single-step back
5265 to the caller. */
5266 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5267 {
5268 struct symtab_and_line sr_sal;
5269
5270 /* Normal function call return (static or dynamic). */
5271 init_sal (&sr_sal);
5272 sr_sal.pc = ecs->stop_func_start;
5273 sr_sal.pspace = get_frame_program_space (frame);
5274 insert_step_resume_breakpoint_at_sal (gdbarch,
5275 sr_sal, null_frame_id);
5276 }
5277 }
5278 else
5279 insert_step_resume_breakpoint_at_caller (frame);
5280
5281 keep_going (ecs);
5282 return;
5283 }
5284
5285 /* If we are in a function call trampoline (a stub between the
5286 calling routine and the real function), locate the real
5287 function. That's what tells us (a) whether we want to step
5288 into it at all, and (b) what prologue we want to run to the
5289 end of, if we do step into it. */
5290 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5291 if (real_stop_pc == 0)
5292 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5293 if (real_stop_pc != 0)
5294 ecs->stop_func_start = real_stop_pc;
5295
5296 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5297 {
5298 struct symtab_and_line sr_sal;
5299
5300 init_sal (&sr_sal);
5301 sr_sal.pc = ecs->stop_func_start;
5302 sr_sal.pspace = get_frame_program_space (frame);
5303
5304 insert_step_resume_breakpoint_at_sal (gdbarch,
5305 sr_sal, null_frame_id);
5306 keep_going (ecs);
5307 return;
5308 }
5309
5310 /* If we have line number information for the function we are
5311 thinking of stepping into and the function isn't on the skip
5312 list, step into it.
5313
5314 If there are several symtabs at that PC (e.g. with include
5315 files), we just want to know whether *any* of them have line
5316 numbers. find_pc_line handles this. */
5317 {
5318 struct symtab_and_line tmp_sal;
5319
5320 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5321 if (tmp_sal.line != 0
5322 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5323 &tmp_sal))
5324 {
5325 if (execution_direction == EXEC_REVERSE)
5326 handle_step_into_function_backward (gdbarch, ecs);
5327 else
5328 handle_step_into_function (gdbarch, ecs);
5329 return;
5330 }
5331 }
5332
5333 /* If we have no line number and the step-stop-if-no-debug is
5334 set, we stop the step so that the user has a chance to switch
5335 to assembly mode. */
5336 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5337 && step_stop_if_no_debug)
5338 {
5339 end_stepping_range (ecs);
5340 return;
5341 }
5342
5343 if (execution_direction == EXEC_REVERSE)
5344 {
5345 /* If we're already at the start of the function, we've either just
5346 stepped backward into a single instruction function without line
5347 number info, or stepped back out of a signal handler to the first
5348 instruction of the function without line number info. Just keep
5349 going, which will single-step back to the caller. */
5350 if (ecs->stop_func_start != stop_pc)
5351 {
5352 /* Set a breakpoint at callee's start address.
5353 From there we can step once and be back in the caller. */
5354 struct symtab_and_line sr_sal;
5355
5356 init_sal (&sr_sal);
5357 sr_sal.pc = ecs->stop_func_start;
5358 sr_sal.pspace = get_frame_program_space (frame);
5359 insert_step_resume_breakpoint_at_sal (gdbarch,
5360 sr_sal, null_frame_id);
5361 }
5362 }
5363 else
5364 /* Set a breakpoint at callee's return address (the address
5365 at which the caller will resume). */
5366 insert_step_resume_breakpoint_at_caller (frame);
5367
5368 keep_going (ecs);
5369 return;
5370 }
5371
5372 /* Reverse stepping through solib trampolines. */
5373
5374 if (execution_direction == EXEC_REVERSE
5375 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5376 {
5377 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5378 || (ecs->stop_func_start == 0
5379 && in_solib_dynsym_resolve_code (stop_pc)))
5380 {
5381 /* Any solib trampoline code can be handled in reverse
5382 by simply continuing to single-step. We have already
5383 executed the solib function (backwards), and a few
5384 steps will take us back through the trampoline to the
5385 caller. */
5386 keep_going (ecs);
5387 return;
5388 }
5389 else if (in_solib_dynsym_resolve_code (stop_pc))
5390 {
5391 /* Stepped backward into the solib dynsym resolver.
5392 Set a breakpoint at its start and continue, then
5393 one more step will take us out. */
5394 struct symtab_and_line sr_sal;
5395
5396 init_sal (&sr_sal);
5397 sr_sal.pc = ecs->stop_func_start;
5398 sr_sal.pspace = get_frame_program_space (frame);
5399 insert_step_resume_breakpoint_at_sal (gdbarch,
5400 sr_sal, null_frame_id);
5401 keep_going (ecs);
5402 return;
5403 }
5404 }
5405
5406 stop_pc_sal = find_pc_line (stop_pc, 0);
5407
5408 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5409 the trampoline processing logic; however, there are some trampolines
5410 that have no names, so we should do trampoline handling first. */
5411 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5412 && ecs->stop_func_name == NULL
5413 && stop_pc_sal.line == 0)
5414 {
5415 if (debug_infrun)
5416 fprintf_unfiltered (gdb_stdlog,
5417 "infrun: stepped into undebuggable function\n");
5418
5419 /* The inferior just stepped into, or returned to, an
5420 undebuggable function (where there is no debugging information
5421 and no line number corresponding to the address where the
5422 inferior stopped). Since we want to skip this kind of code,
5423 we keep going until the inferior returns from this
5424 function - unless the user has asked us not to (via
5425 set step-mode) or we no longer know how to get back
5426 to the call site. */
5427 if (step_stop_if_no_debug
5428 || !frame_id_p (frame_unwind_caller_id (frame)))
5429 {
5430 /* If we have no line number and the step-stop-if-no-debug
5431 is set, we stop the step so that the user has a chance to
5432 switch to assembly mode. */
5433 end_stepping_range (ecs);
5434 return;
5435 }
5436 else
5437 {
5438 /* Set a breakpoint at callee's return address (the address
5439 at which the caller will resume). */
5440 insert_step_resume_breakpoint_at_caller (frame);
5441 keep_going (ecs);
5442 return;
5443 }
5444 }
5445
5446 if (ecs->event_thread->control.step_range_end == 1)
5447 {
5448 /* It is stepi or nexti. We always want to stop stepping after
5449 one instruction. */
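/* (As noted earlier, step_range_end == 1 is the sentinel for
instruction-level stepping.)  */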
5450 if (debug_infrun)
5451 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5452 end_stepping_range (ecs);
5453 return;
5454 }
5455
5456 if (stop_pc_sal.line == 0)
5457 {
5458 /* We have no line number information. That means to stop
5459 stepping (does this always happen right after one instruction,
5460 when we do "s" in a function with no line numbers,
5461 or can this happen as a result of a return or longjmp?). */
5462 if (debug_infrun)
5463 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5464 end_stepping_range (ecs);
5465 return;
5466 }
5467
5468 /* Look for "calls" to inlined functions, part one. If the inline
5469 frame machinery detected some skipped call sites, we have entered
5470 a new inline function. */
5471
5472 if (frame_id_eq (get_frame_id (get_current_frame ()),
5473 ecs->event_thread->control.step_frame_id)
5474 && inline_skipped_frames (ecs->ptid))
5475 {
5476 struct symtab_and_line call_sal;
5477
5478 if (debug_infrun)
5479 fprintf_unfiltered (gdb_stdlog,
5480 "infrun: stepped into inlined function\n");
5481
5482 find_frame_sal (get_current_frame (), &call_sal);
5483
5484 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5485 {
5486 /* For "step", we're going to stop. But if the call site
5487 for this inlined function is on the same source line as
5488 we were previously stepping, go down into the function
5489 first. Otherwise stop at the call site. */
5490
5491 if (call_sal.line == ecs->event_thread->current_line
5492 && call_sal.symtab == ecs->event_thread->current_symtab)
5493 step_into_inline_frame (ecs->ptid);
5494
5495 end_stepping_range (ecs);
5496 return;
5497 }
5498 else
5499 {
5500 /* For "next", we should stop at the call site if it is on a
5501 different source line. Otherwise continue through the
5502 inlined function. */
5503 if (call_sal.line == ecs->event_thread->current_line
5504 && call_sal.symtab == ecs->event_thread->current_symtab)
5505 keep_going (ecs);
5506 else
5507 end_stepping_range (ecs);
5508 return;
5509 }
5510 }
5511
5512 /* Look for "calls" to inlined functions, part two. If we are still
5513 in the same real function we were stepping through, but we have
5514 to go further up to find the exact frame ID, we are stepping
5515 through a more inlined call beyond its call site. */
5516
5517 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5518 && !frame_id_eq (get_frame_id (get_current_frame ()),
5519 ecs->event_thread->control.step_frame_id)
5520 && stepped_in_from (get_current_frame (),
5521 ecs->event_thread->control.step_frame_id))
5522 {
5523 if (debug_infrun)
5524 fprintf_unfiltered (gdb_stdlog,
5525 "infrun: stepping through inlined function\n");
5526
5527 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5528 keep_going (ecs);
5529 else
5530 end_stepping_range (ecs);
5531 return;
5532 }
5533
5534 if ((stop_pc == stop_pc_sal.pc)
5535 && (ecs->event_thread->current_line != stop_pc_sal.line
5536 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5537 {
5538 /* We are at the start of a different line. So stop. Note that
5539 we don't stop if we step into the middle of a different line.
5540 That is said to make things like for (;;) statements work
5541 better. */
5542 if (debug_infrun)
5543 fprintf_unfiltered (gdb_stdlog,
5544 "infrun: stepped to a different line\n");
5545 end_stepping_range (ecs);
5546 return;
5547 }
5548
5549 /* We aren't done stepping.
5550
5551 Optimize by setting the stepping range to the line.
5552 (We might not be in the original line, but if we entered a
5553 new line in mid-statement, we continue stepping. This makes
5554 things like for(;;) statements work better.) */
5555
5556 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5557 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5558 ecs->event_thread->control.may_range_step = 1;
5559 set_step_info (frame, stop_pc_sal);
5560
5561 if (debug_infrun)
5562 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5563 keep_going (ecs);
5564 }
5565
5566 /* In all-stop mode, if we're currently stepping but have stopped in
5567 some other thread, we may need to switch back to the stepped
5568 thread. Returns true if we set the inferior running, false if we left
5569 it stopped (and the event needs further processing). */
5570
5571 static int
5572 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5573 {
5574 if (!non_stop)
5575 {
5576 struct thread_info *tp;
5577 struct thread_info *stepping_thread;
5578 struct thread_info *step_over;
5579
5580 /* If any thread is blocked on some internal breakpoint, and we
5581 simply need to step over that breakpoint to get it going
5582 again, do that first. */
5583
5584 /* However, if we see an event for the stepping thread, then we
5585 know all other threads have been moved past their breakpoints
5586 already. Let the caller check whether the step is finished,
5587 etc., before deciding to move it past a breakpoint. */
5588 if (ecs->event_thread->control.step_range_end != 0)
5589 return 0;
5590
5591 /* Check if the current thread is blocked on an incomplete
5592 step-over, interrupted by a random signal. */
5593 if (ecs->event_thread->control.trap_expected
5594 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5595 {
5596 if (debug_infrun)
5597 {
5598 fprintf_unfiltered (gdb_stdlog,
5599 "infrun: need to finish step-over of [%s]\n",
5600 target_pid_to_str (ecs->event_thread->ptid));
5601 }
5602 keep_going (ecs);
5603 return 1;
5604 }
5605
5606 /* Check if the current thread is blocked by a single-step
5607 breakpoint of another thread. */
5608 if (ecs->hit_singlestep_breakpoint)
5609 {
5610 if (debug_infrun)
5611 {
5612 fprintf_unfiltered (gdb_stdlog,
5613 "infrun: need to step [%s] over single-step "
5614 "breakpoint\n",
5615 target_pid_to_str (ecs->ptid));
5616 }
5617 keep_going (ecs);
5618 return 1;
5619 }
5620
5621 /* Otherwise, we no longer expect a trap in the current thread.
5622 Clear the trap_expected flag before switching back -- this is
5623 what keep_going does as well, if we call it. */
5624 ecs->event_thread->control.trap_expected = 0;
5625
5626 /* Likewise, clear the signal if it should not be passed. */
5627 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5628 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5629
5630 /* If scheduler locking applies even if not stepping, there's no
5631 need to walk over threads. Above we've checked whether the
5632 current thread is stepping. If some other thread not the
5633 event thread is stepping, then it must be that scheduler
5634 locking is not in effect. */
5635 if (schedlock_applies (ecs->event_thread))
5636 return 0;
5637
5638 /* Look for the stepping/nexting thread, and check whether any
5639 thread other than the stepping thread needs to start a
5640 step-over. Do all step-overs before actually proceeding with
5641 step/next/etc. */
5642 stepping_thread = NULL;
5643 step_over = NULL;
5644 ALL_NON_EXITED_THREADS (tp)
5645 {
5646 /* Ignore threads of processes we're not resuming. */
5647 if (!sched_multi
5648 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5649 continue;
5650
5651 /* When stepping over a breakpoint, we lock all threads
5652 except the one that needs to move past the breakpoint.
5653 If a non-event thread has this set, the "incomplete
5654 step-over" check above should have caught it earlier. */
5655 gdb_assert (!tp->control.trap_expected);
5656
5657 /* Did we find the stepping thread? */
5658 if (tp->control.step_range_end)
5659 {
5660 /* Yep. There should only be one, though. */
5661 gdb_assert (stepping_thread == NULL);
5662
5663 /* The event thread is handled at the top, before we
5664 enter this loop. */
5665 gdb_assert (tp != ecs->event_thread);
5666
5667 /* If some thread other than the event thread is
5668 stepping, then scheduler locking can't be in effect,
5669 otherwise we wouldn't have resumed the current event
5670 thread in the first place. */
5671 gdb_assert (!schedlock_applies (tp));
5672
5673 stepping_thread = tp;
5674 }
5675 else if (thread_still_needs_step_over (tp))
5676 {
5677 step_over = tp;
5678
5679 /* At the top we've returned early if the event thread
5680 is stepping. If some other thread not the event
5681 thread is stepping, then scheduler locking can't be
5682 in effect, and we can resume this thread. No need to
5683 keep looking for the stepping thread then. */
5684 break;
5685 }
5686 }
5687
5688 if (step_over != NULL)
5689 {
5690 tp = step_over;
5691 if (debug_infrun)
5692 {
5693 fprintf_unfiltered (gdb_stdlog,
5694 "infrun: need to step-over [%s]\n",
5695 target_pid_to_str (tp->ptid));
5696 }
5697
5698 /* Only the stepping thread should have this set. */
5699 gdb_assert (tp->control.step_range_end == 0);
5700
5701 ecs->ptid = tp->ptid;
5702 ecs->event_thread = tp;
5703 switch_to_thread (ecs->ptid);
5704 keep_going (ecs);
5705 return 1;
5706 }
5707
5708 if (stepping_thread != NULL)
5709 {
5710 struct frame_info *frame;
5711 struct gdbarch *gdbarch;
5712
5713 tp = stepping_thread;
5714
5715 /* If the stepping thread exited, then don't try to switch
5716 back and resume it, which could fail in several different
5717 ways depending on the target. Instead, just keep going.
5718
5719 We can find a stepping dead thread in the thread list in
5720 two cases:
5721
5722 - The target supports thread exit events, and when the
5723 target tries to delete the thread from the thread list,
5724 inferior_ptid pointed at the exiting thread. In such
5725 case, calling delete_thread does not really remove the
5726 thread from the list; instead, the thread is left listed,
5727 with 'exited' state.
5728
5729 - The target's debug interface does not support thread
5730 exit events, and so we have no idea whatsoever if the
5731 previously stepping thread is still alive. For that
5732 reason, we need to synchronously query the target
5733 now. */
5734 if (is_exited (tp->ptid)
5735 || !target_thread_alive (tp->ptid))
5736 {
5737 if (debug_infrun)
5738 fprintf_unfiltered (gdb_stdlog,
5739 "infrun: not switching back to "
5740 "stepped thread, it has vanished\n");
5741
5742 delete_thread (tp->ptid);
5743 keep_going (ecs);
5744 return 1;
5745 }
5746
5747 if (debug_infrun)
5748 fprintf_unfiltered (gdb_stdlog,
5749 "infrun: switching back to stepped thread\n");
5750
5751 ecs->event_thread = tp;
5752 ecs->ptid = tp->ptid;
5753 context_switch (ecs->ptid);
5754
5755 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5756 frame = get_current_frame ();
5757 gdbarch = get_frame_arch (frame);
5758
5759 /* If the PC of the thread we were trying to single-step has
5760 changed, then that thread has trapped or been signaled,
5761 but the event has not been reported to GDB yet. Re-poll
5762 the target looking for this particular thread's event
5763 (i.e. temporarily enable schedlock) by:
5764
5765 - setting a break at the current PC
5766 - resuming that particular thread, only (by setting
5767 trap expected)
5768
5769 This prevents us continuously moving the single-step
5770 breakpoint forward, one instruction at a time,
5771 overstepping. */
5772
5773 if (stop_pc != tp->prev_pc)
5774 {
5775 ptid_t resume_ptid;
5776
5777 if (debug_infrun)
5778 fprintf_unfiltered (gdb_stdlog,
5779 "infrun: expected thread advanced also\n");
5780
5781 /* Clear the info of the previous step-over, as it's no
5782 longer valid. It's what keep_going would do too, if
5783 we called it. Must do this before trying to insert
5784 the sss breakpoint, otherwise if we were previously
5785 trying to step over this exact address in another
5786 thread, the breakpoint ends up not installed. */
5787 clear_step_over_info ();
5788
5789 insert_single_step_breakpoint (get_frame_arch (frame),
5790 get_frame_address_space (frame),
5791 stop_pc);
5792
5793 resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
5794 do_target_resume (resume_ptid,
5795 currently_stepping (tp), GDB_SIGNAL_0);
5796 prepare_to_wait (ecs);
5797 }
5798 else
5799 {
5800 if (debug_infrun)
5801 fprintf_unfiltered (gdb_stdlog,
5802 "infrun: expected thread still "
5803 "hasn't advanced\n");
5804 keep_going (ecs);
5805 }
5806
5807 return 1;
5808 }
5809 }
5810 return 0;
5811 }
5812
5813 /* Is thread TP in the middle of single-stepping? */
5814
5815 static int
5816 currently_stepping (struct thread_info *tp)
5817 {
5818 return ((tp->control.step_range_end
5819 && tp->control.step_resume_breakpoint == NULL)
5820 || tp->control.trap_expected
5821 || tp->stepped_breakpoint
5822 || bpstat_should_step ());
5823 }
5824
5825 /* Inferior has stepped into a subroutine call with source code that
5826 we should not step over. Step to the first line of code in
5827 it. */
5828
5829 static void
5830 handle_step_into_function (struct gdbarch *gdbarch,
5831 struct execution_control_state *ecs)
5832 {
5833 struct compunit_symtab *cust;
5834 struct symtab_and_line stop_func_sal, sr_sal;
5835
5836 fill_in_stop_func (gdbarch, ecs);
5837
5838 cust = find_pc_compunit_symtab (stop_pc);
5839 if (cust != NULL && compunit_language (cust) != language_asm)
5840 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5841 ecs->stop_func_start);
5842
5843 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5844 /* Use the step_resume_break to step until the end of the prologue,
5845 even if that involves jumps (as it seems to on the vax under
5846 4.2). */
5847 /* If the prologue ends in the middle of a source line, continue to
5848 the end of that source line (if it is still within the function).
5849 Otherwise, just go to end of prologue. */
5850 if (stop_func_sal.end
5851 && stop_func_sal.pc != ecs->stop_func_start
5852 && stop_func_sal.end < ecs->stop_func_end)
5853 ecs->stop_func_start = stop_func_sal.end;
5854
5855 /* Architectures which require breakpoint adjustment might not be able
5856 to place a breakpoint at the computed address. If so, the test
5857 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5858 ecs->stop_func_start to an address at which a breakpoint may be
5859 legitimately placed.
5860
5861 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5862 made, GDB will enter an infinite loop when stepping through
5863 optimized code consisting of VLIW instructions which contain
5864 subinstructions corresponding to different source lines. On
5865 FR-V, it's not permitted to place a breakpoint on any but the
5866 first subinstruction of a VLIW instruction. When a breakpoint is
5867 set, GDB will adjust the breakpoint address to the beginning of
5868 the VLIW instruction. Thus, we need to make the corresponding
5869 adjustment here when computing the stop address. */
5870
5871 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5872 {
5873 ecs->stop_func_start
5874 = gdbarch_adjust_breakpoint_address (gdbarch,
5875 ecs->stop_func_start);
5876 }
5877
5878 if (ecs->stop_func_start == stop_pc)
5879 {
5880 /* We are already there: stop now. */
5881 end_stepping_range (ecs);
5882 return;
5883 }
5884 else
5885 {
5886 /* Put the step-breakpoint there and go until there. */
5887 init_sal (&sr_sal); /* initialize to zeroes */
5888 sr_sal.pc = ecs->stop_func_start;
5889 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5890 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5891
5892 /* Do not specify what the fp should be when we stop since on
5893 some machines the prologue is where the new fp value is
5894 established. */
5895 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5896
5897 /* And make sure stepping stops right away then. */
5898 ecs->event_thread->control.step_range_end
5899 = ecs->event_thread->control.step_range_start;
5900 }
5901 keep_going (ecs);
5902 }
5903
5904 /* Inferior has stepped backward into a subroutine call with source
5905 code that we should not step over. Step to the beginning of the
5906 last line of code in it. */
5907
5908 static void
5909 handle_step_into_function_backward (struct gdbarch *gdbarch,
5910 struct execution_control_state *ecs)
5911 {
5912 struct compunit_symtab *cust;
5913 struct symtab_and_line stop_func_sal;
5914
5915 fill_in_stop_func (gdbarch, ecs);
5916
5917 cust = find_pc_compunit_symtab (stop_pc);
5918 if (cust != NULL && compunit_language (cust) != language_asm)
5919 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5920 ecs->stop_func_start);
5921
5922 stop_func_sal = find_pc_line (stop_pc, 0);
5923
5924 /* OK, we're just going to keep stepping here. */
5925 if (stop_func_sal.pc == stop_pc)
5926 {
5927 /* We're there already. Just stop stepping now. */
5928 end_stepping_range (ecs);
5929 }
5930 else
5931 {
5932 /* Else just reset the step range and keep going.
5933 No step-resume breakpoint, they don't work for
5934 epilogues, which can have multiple entry paths. */
5935 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5936 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5937 keep_going (ecs);
5938 }
5939 return;
5940 }
5941
5942 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5943 This is used both to skip over functions and to skip over other code. */
5944
5945 static void
5946 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5947 struct symtab_and_line sr_sal,
5948 struct frame_id sr_id,
5949 enum bptype sr_type)
5950 {
5951 /* There should never be more than one step-resume or longjmp-resume
5952 breakpoint per thread, so we should never be setting a new
5953 step_resume_breakpoint when one is already active. */
5954 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5955 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5956
5957 if (debug_infrun)
5958 fprintf_unfiltered (gdb_stdlog,
5959 "infrun: inserting step-resume breakpoint at %s\n",
5960 paddress (gdbarch, sr_sal.pc));
5961
5962 inferior_thread ()->control.step_resume_breakpoint
5963 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5964 }
5965
5966 void
5967 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5968 struct symtab_and_line sr_sal,
5969 struct frame_id sr_id)
5970 {
5971 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5972 sr_sal, sr_id,
5973 bp_step_resume);
5974 }
5975
5976 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5977 This is used to skip a potential signal handler.
5978
5979 This is called with the interrupted function's frame. The signal
5980 handler, when it returns, will resume the interrupted function at
5981 RETURN_FRAME.pc. */
5982
5983 static void
5984 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5985 {
5986 struct symtab_and_line sr_sal;
5987 struct gdbarch *gdbarch;
5988
5989 gdb_assert (return_frame != NULL);
5990 init_sal (&sr_sal); /* initialize to zeros */
5991
5992 gdbarch = get_frame_arch (return_frame);
5993 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5994 sr_sal.section = find_pc_overlay (sr_sal.pc);
5995 sr_sal.pspace = get_frame_program_space (return_frame);
5996
5997 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5998 get_stack_frame_id (return_frame),
5999 bp_hp_step_resume);
6000 }
6001
6002 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
6003 is used to skip a function after stepping into it (for "next" or if
6004 the called function has no debugging information).
6005
6006 The current function has almost always been reached by single
6007 stepping a call or return instruction. NEXT_FRAME belongs to the
6008 current function, and the breakpoint will be set at the caller's
6009 resume address.
6010
6011 This is a separate function rather than reusing
6012 insert_hp_step_resume_breakpoint_at_frame in order to avoid
6013 get_prev_frame, which may stop prematurely (see the implementation
6014 of frame_unwind_caller_id for an example). */
6015
6016 static void
6017 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
6018 {
6019 struct symtab_and_line sr_sal;
6020 struct gdbarch *gdbarch;
6021
6022 /* We shouldn't have gotten here if we don't know where the call site
6023 is. */
6024 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
6025
6026 init_sal (&sr_sal); /* initialize to zeros */
6027
6028 gdbarch = frame_unwind_caller_arch (next_frame);
6029 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
6030 frame_unwind_caller_pc (next_frame));
6031 sr_sal.section = find_pc_overlay (sr_sal.pc);
6032 sr_sal.pspace = frame_unwind_program_space (next_frame);
6033
6034 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
6035 frame_unwind_caller_id (next_frame));
6036 }
6037
6038 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
6039 new breakpoint at the target of a jmp_buf. The handling of
6040 longjmp-resume uses the same mechanisms used for handling
6041 "step-resume" breakpoints. */
6042
6043 static void
6044 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
6045 {
6046 /* There should never be more than one longjmp-resume breakpoint per
6047 thread, so we should never be setting a new
6048 longjmp_resume_breakpoint when one is already active. */
6049 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
6050
6051 if (debug_infrun)
6052 fprintf_unfiltered (gdb_stdlog,
6053 "infrun: inserting longjmp-resume breakpoint at %s\n",
6054 paddress (gdbarch, pc));
6055
6056 inferior_thread ()->control.exception_resume_breakpoint =
6057 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
6058 }
6059
6060 /* Insert an exception resume breakpoint. TP is the thread throwing
6061 the exception. The block B is the block of the unwinder debug hook
6062 function. FRAME is the frame corresponding to the call to this
6063 function. SYM is the symbol of the function argument holding the
6064 target PC of the exception. */
6065
6066 static void
6067 insert_exception_resume_breakpoint (struct thread_info *tp,
6068 const struct block *b,
6069 struct frame_info *frame,
6070 struct symbol *sym)
6071 {
6072 TRY
6073 {
6074 struct symbol *vsym;
6075 struct value *value;
6076 CORE_ADDR handler;
6077 struct breakpoint *bp;
6078
6079 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
6080 value = read_var_value (vsym, frame);
6081 /* If the value was optimized out, revert to the old behavior. */
6082 if (! value_optimized_out (value))
6083 {
6084 handler = value_as_address (value);
6085
6086 if (debug_infrun)
6087 fprintf_unfiltered (gdb_stdlog,
6088 "infrun: exception resume at %lx\n",
6089 (unsigned long) handler);
6090
6091 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6092 handler, bp_exception_resume);
6093
6094 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
6095 frame = NULL;
6096
6097 bp->thread = tp->num;
6098 inferior_thread ()->control.exception_resume_breakpoint = bp;
6099 }
6100 }
6101 CATCH (e, RETURN_MASK_ERROR)
6102 {
6103 /* We want to ignore errors here. */
6104 }
6105 END_CATCH
6106 }
6107
6108 /* A helper for check_exception_resume that sets an
6109 exception-breakpoint based on a SystemTap probe. */
6110
6111 static void
6112 insert_exception_resume_from_probe (struct thread_info *tp,
6113 const struct bound_probe *probe,
6114 struct frame_info *frame)
6115 {
6116 struct value *arg_value;
6117 CORE_ADDR handler;
6118 struct breakpoint *bp;
6119
6120 arg_value = probe_safe_evaluate_at_pc (frame, 1);
6121 if (!arg_value)
6122 return;
6123
6124 handler = value_as_address (arg_value);
6125
6126 if (debug_infrun)
6127 fprintf_unfiltered (gdb_stdlog,
6128 "infrun: exception resume at %s\n",
6129 paddress (get_objfile_arch (probe->objfile),
6130 handler));
6131
6132 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6133 handler, bp_exception_resume);
6134 bp->thread = tp->num;
6135 inferior_thread ()->control.exception_resume_breakpoint = bp;
6136 }
6137
6138 /* This is called when an exception has been intercepted. Check to
6139 see whether the exception's destination is of interest, and if so,
6140 set an exception resume breakpoint there. */
6141
6142 static void
6143 check_exception_resume (struct execution_control_state *ecs,
6144 struct frame_info *frame)
6145 {
6146 struct bound_probe probe;
6147 struct symbol *func;
6148
6149 /* First see if this exception unwinding breakpoint was set via a
6150 SystemTap probe point. If so, the probe has two arguments: the
6151 CFA and the HANDLER. We ignore the CFA, extract the handler, and
6152 set a breakpoint there. */
6153 probe = find_probe_by_pc (get_frame_pc (frame));
6154 if (probe.probe)
6155 {
6156 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
6157 return;
6158 }
6159
6160 func = get_frame_function (frame);
6161 if (!func)
6162 return;
6163
6164 TRY
6165 {
6166 const struct block *b;
6167 struct block_iterator iter;
6168 struct symbol *sym;
6169 int argno = 0;
6170
6171 /* The exception breakpoint is a thread-specific breakpoint on
6172 the unwinder's debug hook, declared as:
6173
6174 void _Unwind_DebugHook (void *cfa, void *handler);
6175
6176 The CFA argument indicates the frame to which control is
6177 about to be transferred. HANDLER is the destination PC.
6178
6179 We ignore the CFA and set a temporary breakpoint at HANDLER.
6180 This is not extremely efficient but it avoids issues in gdb
6181 with computing the DWARF CFA, and it also works even in weird
6182 cases such as throwing an exception from inside a signal
6183 handler. */
6184
6185 b = SYMBOL_BLOCK_VALUE (func);
6186 ALL_BLOCK_SYMBOLS (b, iter, sym)
6187 {
6188 if (!SYMBOL_IS_ARGUMENT (sym))
6189 continue;
6190
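/* Per the comment above, the first argument (the CFA) is skipped;
the second argument is the handler PC we break at.  */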
6191 if (argno == 0)
6192 ++argno;
6193 else
6194 {
6195 insert_exception_resume_breakpoint (ecs->event_thread,
6196 b, frame, sym);
6197 break;
6198 }
6199 }
6200 }
6201 CATCH (e, RETURN_MASK_ERROR)
6202 {
6203 }
6204 END_CATCH
6205 }
6206
6207 static void
6208 stop_waiting (struct execution_control_state *ecs)
6209 {
6210 if (debug_infrun)
6211 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
6212
6213 clear_step_over_info ();
6214
6215 /* Let callers know we don't want to wait for the inferior anymore. */
6216 ecs->wait_some_more = 0;
6217 }
6218
6219 /* Called when we should continue running the inferior, because the
6220 current event doesn't cause a user visible stop. This does the
6221 resuming part; waiting for the next event is done elsewhere. */
6222
6223 static void
6224 keep_going (struct execution_control_state *ecs)
6225 {
6226 /* Make sure normal_stop is called if we get a QUIT handled before
6227 reaching resume. */
6228 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
6229
6230 /* Save the pc before execution, to compare with pc after stop. */
6231 ecs->event_thread->prev_pc
6232 = regcache_read_pc (get_thread_regcache (ecs->ptid));
6233
6234 if (ecs->event_thread->control.trap_expected
6235 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
6236 {
6237 /* We haven't yet gotten our trap, and either: intercepted a
6238 non-signal event (e.g., a fork); or took a signal which we
6239 are supposed to pass through to the inferior. Simply
6240 continue. */
6241 discard_cleanups (old_cleanups);
6242 resume (ecs->event_thread->suspend.stop_signal);
6243 }
6244 else
6245 {
6246 struct regcache *regcache = get_current_regcache ();
6247 int remove_bp;
6248 int remove_wps;
6249
6250 /* Either the trap was not expected, but we are continuing
6251 anyway (if we got a signal, the user asked it be passed to
6252 the child)
6253 -- or --
6254 We got our expected trap, but decided we should resume from
6255 it.
6256
6257 We're going to run this baby now!
6258
6259 Note that insert_breakpoints won't try to re-insert
6260 already inserted breakpoints. Therefore, we don't
6261 care if breakpoints were already inserted, or not. */
6262
6263 /* If we need to step over a breakpoint, and we're not using
6264 displaced stepping to do so, insert all breakpoints
6265 (watchpoints, etc.) but the one we're stepping over, step one
6266 instruction, and then re-insert the breakpoint when that step
6267 is finished. */
6268
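/* REMOVE_BP: this thread still needs to step over a breakpoint
in-line.  REMOVE_WPS: it needs to step over a watchpoint the target
cannot step over transparently.  Recording these in the step-over
info below is how breakpoint insertion knows to leave those
locations out while the single step is in flight.  (Explanatory
note.)  */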
6269 remove_bp = (ecs->hit_singlestep_breakpoint
6270 || thread_still_needs_step_over (ecs->event_thread));
6271 remove_wps = (ecs->event_thread->stepping_over_watchpoint
6272 && !target_have_steppable_watchpoint);
6273
6274 if (remove_bp && !use_displaced_stepping (get_regcache_arch (regcache)))
6275 {
6276 set_step_over_info (get_regcache_aspace (regcache),
6277 regcache_read_pc (regcache), remove_wps);
6278 }
6279 else if (remove_wps)
6280 set_step_over_info (NULL, 0, remove_wps);
6281 else
6282 clear_step_over_info ();
6283
6284 /* Stop stepping if inserting breakpoints fails. */
6285 TRY
6286 {
6287 insert_breakpoints ();
6288 }
6289 CATCH (e, RETURN_MASK_ERROR)
6290 {
6291 exception_print (gdb_stderr, e);
6292 stop_waiting (ecs);
6293 discard_cleanups (old_cleanups);
6294 return;
6295 }
6296 END_CATCH
6297
6298 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
6299
6300 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
6301 explicitly specifies that such a signal should be delivered
6302 to the target program). Typically, that would occur when a
6303 user is debugging a target monitor on a simulator: the target
6304 monitor sets a breakpoint; the simulator encounters this
6305 breakpoint and halts the simulation handing control to GDB;
6306 GDB, noting that the stop address doesn't map to any known
6307 breakpoint, returns control back to the simulator; the
6308 simulator then delivers the hardware equivalent of a
6309 GDB_SIGNAL_TRAP to the program being debugged. */
6310 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6311 && !signal_program[ecs->event_thread->suspend.stop_signal])
6312 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6313
6314 discard_cleanups (old_cleanups);
6315 resume (ecs->event_thread->suspend.stop_signal);
6316 }
6317
6318 prepare_to_wait (ecs);
6319 }
6320
6321 /* This function normally comes after a resume, before
6322 handle_inferior_event exits. It takes care of any last bits of
6323 housekeeping, and sets the all-important wait_some_more flag. */
6324
6325 static void
6326 prepare_to_wait (struct execution_control_state *ecs)
6327 {
6328 if (debug_infrun)
6329 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6330
6331 /* This is the old end of the while loop. Let everybody know we
6332 want to wait for the inferior some more and get called again
6333 soon. */
6334 ecs->wait_some_more = 1;
6335 }
6336
6337 /* We are done with the step range of a step/next/si/ni command.
6338 Called once for each n of a "step n" operation. */
6339
6340 static void
6341 end_stepping_range (struct execution_control_state *ecs)
6342 {
6343 ecs->event_thread->control.stop_step = 1;
6344 stop_waiting (ecs);
6345 }
6346
6347 /* Several print_*_reason functions to print why the inferior has stopped.
6348 We always print something when the inferior exits, or receives a signal.
6349 The rest of the cases are dealt with later on in normal_stop and
6350 print_it_typical. Ideally there should be a call to one of these
6351 print_*_reason functions from handle_inferior_event each time
6352 stop_waiting is called.
6353
6354 Note that we don't call these directly, instead we delegate that to
6355 the interpreters, through observers. Interpreters then call these
6356 with whatever uiout is right. */
6357
6358 void
6359 print_end_stepping_range_reason (struct ui_out *uiout)
6360 {
6361 /* For CLI-like interpreters, print nothing. */
6362
6363 if (ui_out_is_mi_like_p (uiout))
6364 {
6365 ui_out_field_string (uiout, "reason",
6366 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6367 }
6368 }
6369
6370 void
6371 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6372 {
6373 annotate_signalled ();
6374 if (ui_out_is_mi_like_p (uiout))
6375 ui_out_field_string
6376 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
6377 ui_out_text (uiout, "\nProgram terminated with signal ");
6378 annotate_signal_name ();
6379 ui_out_field_string (uiout, "signal-name",
6380 gdb_signal_to_name (siggnal));
6381 annotate_signal_name_end ();
6382 ui_out_text (uiout, ", ");
6383 annotate_signal_string ();
6384 ui_out_field_string (uiout, "signal-meaning",
6385 gdb_signal_to_string (siggnal));
6386 annotate_signal_string_end ();
6387 ui_out_text (uiout, ".\n");
6388 ui_out_text (uiout, "The program no longer exists.\n");
6389 }
6390
6391 void
6392 print_exited_reason (struct ui_out *uiout, int exitstatus)
6393 {
6394 struct inferior *inf = current_inferior ();
6395 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6396
6397 annotate_exited (exitstatus);
6398 if (exitstatus)
6399 {
6400 if (ui_out_is_mi_like_p (uiout))
6401 ui_out_field_string (uiout, "reason",
6402 async_reason_lookup (EXEC_ASYNC_EXITED));
6403 ui_out_text (uiout, "[Inferior ");
6404 ui_out_text (uiout, plongest (inf->num));
6405 ui_out_text (uiout, " (");
6406 ui_out_text (uiout, pidstr);
6407 ui_out_text (uiout, ") exited with code ");
6408 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6409 ui_out_text (uiout, "]\n");
6410 }
6411 else
6412 {
6413 if (ui_out_is_mi_like_p (uiout))
6414 ui_out_field_string
6415 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6416 ui_out_text (uiout, "[Inferior ");
6417 ui_out_text (uiout, plongest (inf->num));
6418 ui_out_text (uiout, " (");
6419 ui_out_text (uiout, pidstr);
6420 ui_out_text (uiout, ") exited normally]\n");
6421 }
6422 }
6423
6424 void
6425 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6426 {
6427 annotate_signal ();
6428
6429 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
6430 {
6431 struct thread_info *t = inferior_thread ();
6432
6433 ui_out_text (uiout, "\n[");
6434 ui_out_field_string (uiout, "thread-name",
6435 target_pid_to_str (t->ptid));
6436 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6437 ui_out_text (uiout, " stopped");
6438 }
6439 else
6440 {
6441 ui_out_text (uiout, "\nProgram received signal ");
6442 annotate_signal_name ();
6443 if (ui_out_is_mi_like_p (uiout))
6444 ui_out_field_string
6445 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6446 ui_out_field_string (uiout, "signal-name",
6447 gdb_signal_to_name (siggnal));
6448 annotate_signal_name_end ();
6449 ui_out_text (uiout, ", ");
6450 annotate_signal_string ();
6451 ui_out_field_string (uiout, "signal-meaning",
6452 gdb_signal_to_string (siggnal));
6453 annotate_signal_string_end ();
6454 }
6455 ui_out_text (uiout, ".\n");
6456 }
6457
6458 void
6459 print_no_history_reason (struct ui_out *uiout)
6460 {
6461 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6462 }
6463
6464 /* Print current location without a level number, if we have changed
6465 functions or hit a breakpoint. Print source line if we have one.
6466 bpstat_print contains the logic deciding in detail what to print,
6467 based on the event(s) that just occurred. */
6468
6469 void
6470 print_stop_event (struct target_waitstatus *ws)
6471 {
6472 int bpstat_ret;
6473 int source_flag;
6474 int do_frame_printing = 1;
6475 struct thread_info *tp = inferior_thread ();
6476
6477 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6478 switch (bpstat_ret)
6479 {
6480 case PRINT_UNKNOWN:
6481 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6482 should) carry around the function and does (or should) use
6483 that when doing a frame comparison. */
6484 if (tp->control.stop_step
6485 && frame_id_eq (tp->control.step_frame_id,
6486 get_frame_id (get_current_frame ()))
6487 && tp->control.step_start_function == find_pc_function (stop_pc))
6488 {
6489 /* Finished step, just print source line. */
6490 source_flag = SRC_LINE;
6491 }
6492 else
6493 {
6494 /* Print location and source line. */
6495 source_flag = SRC_AND_LOC;
6496 }
6497 break;
6498 case PRINT_SRC_AND_LOC:
6499 /* Print location and source line. */
6500 source_flag = SRC_AND_LOC;
6501 break;
6502 case PRINT_SRC_ONLY:
6503 source_flag = SRC_LINE;
6504 break;
6505 case PRINT_NOTHING:
6506 /* Something bogus. */
6507 source_flag = SRC_LINE;
6508 do_frame_printing = 0;
6509 break;
6510 default:
6511 internal_error (__FILE__, __LINE__, _("Unknown value."));
6512 }
6513
6514 /* The behavior of this routine with respect to the source
6515 flag is:
6516 SRC_LINE: Print only source line
6517 LOCATION: Print only location
6518 SRC_AND_LOC: Print location and source line. */
6519 if (do_frame_printing)
6520 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6521
6522 /* Display the auto-display expressions. */
6523 do_displays ();
6524 }
6525
6526 /* Here to return control to GDB when the inferior stops for real.
6527 Print appropriate messages, remove breakpoints, give terminal our modes.
6528
6529 The global STOP_PRINT_FRAME nonzero means print the executing frame
6530 (pc, function, args, file, line number and line text). */
6533
6534 void
6535 normal_stop (void)
6536 {
6537 struct target_waitstatus last;
6538 ptid_t last_ptid;
6539 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6540
6541 get_last_target_status (&last_ptid, &last);
6542
6543 /* If an exception is thrown from this point on, make sure to
6544 propagate GDB's knowledge of the executing state to the
6545 frontend/user running state. A QUIT is an easy exception to see
6546 here, so do this before any filtered output. */
6547 if (!non_stop)
6548 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6549 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6550 && last.kind != TARGET_WAITKIND_EXITED
6551 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6552 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6553
6554 /* As we're presenting a stop, and potentially removing breakpoints,
6555 update the thread list so we can tell whether there are threads
6556 running on the target. With target remote, for example, we can
6557 only learn about new threads when we explicitly update the thread
6558 list. Do this before notifying the interpreters about signal
6559 stops, end of stepping ranges, etc., so that the "new thread"
6560 output is emitted before e.g., "Program received signal FOO",
6561 instead of after. */
6562 update_thread_list ();
6563
6564 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
6565 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
6566
6567 /* As with the notification of thread events, we want to delay
6568 notifying the user that we've switched thread context until
6569 the inferior actually stops.
6570
6571 There's no point in saying anything if the inferior has exited.
6572 Note that SIGNALLED here means "exited with a signal", not
6573 "received a signal".
6574
6575 Also skip saying anything in non-stop mode. In that mode, as we
6576 don't want GDB to switch threads behind the user's back, to avoid
6577 races where the user is typing a command to apply to thread x,
6578 but GDB switches to thread y before the user finishes entering
6579 the command, fetch_inferior_event installs a cleanup to restore
6580 the current thread back to the thread the user had selected right
6581 after this event is handled, so we're not really switching, only
6582 informing of a stop. */
6583 if (!non_stop
6584 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6585 && target_has_execution
6586 && last.kind != TARGET_WAITKIND_SIGNALLED
6587 && last.kind != TARGET_WAITKIND_EXITED
6588 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6589 {
6590 target_terminal_ours_for_output ();
6591 printf_filtered (_("[Switching to %s]\n"),
6592 target_pid_to_str (inferior_ptid));
6593 annotate_thread_changed ();
6594 previous_inferior_ptid = inferior_ptid;
6595 }
6596
6597 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6598 {
6599 gdb_assert (sync_execution || !target_can_async_p ());
6600
6601 target_terminal_ours_for_output ();
6602 printf_filtered (_("No unwaited-for children left.\n"));
6603 }
6604
6605 /* Note: this depends on the update_thread_list call above. */
6606 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6607 {
6608 if (remove_breakpoints ())
6609 {
6610 target_terminal_ours_for_output ();
6611 printf_filtered (_("Cannot remove breakpoints because "
6612 "program is no longer writable.\nFurther "
6613 "execution is probably impossible.\n"));
6614 }
6615 }
6616
6617 /* If an auto-display called a function and that got a signal,
6618 delete that auto-display to avoid an infinite recursion. */
6619
6620 if (stopped_by_random_signal)
6621 disable_current_display ();
6622
6623 /* Notify observers if we finished a "step"-like command, etc. */
6624 if (target_has_execution
6625 && last.kind != TARGET_WAITKIND_SIGNALLED
6626 && last.kind != TARGET_WAITKIND_EXITED
6627 && inferior_thread ()->control.stop_step)
6628 {
6629 /* But not if in the middle of doing a "step n" operation for
6630 n > 1 */
6631 if (inferior_thread ()->step_multi)
6632 goto done;
6633
6634 observer_notify_end_stepping_range ();
6635 }
6636
6637 target_terminal_ours ();
6638 async_enable_stdin ();
6639
6640 /* Set the current source location. This will also happen if we
6641 display the frame below, but the current SAL will be incorrect
6642 during a user hook-stop function. */
6643 if (has_stack_frames () && !stop_stack_dummy)
6644 set_current_sal_from_frame (get_current_frame ());
6645
6646 /* Let the user/frontend see the threads as stopped, but do nothing
6647 if the thread was running an infcall. We may be e.g., evaluating
6648 a breakpoint condition. In that case, the thread had state
6649 THREAD_RUNNING before the infcall, and shall remain set to
6650 running, all without informing the user/frontend about state
6651 transition changes. If this is actually a call command, then the
6652 thread was originally already stopped, so there's no state to
6653 finish either. */
6654 if (target_has_execution && inferior_thread ()->control.in_infcall)
6655 discard_cleanups (old_chain);
6656 else
6657 do_cleanups (old_chain);
6658
6659 /* Look up the hook_stop and run it (CLI internally handles problem
6660 of stop_command's pre-hook not existing). */
6661 if (stop_command)
6662 catch_errors (hook_stop_stub, stop_command,
6663 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6664
6665 if (!has_stack_frames ())
6666 goto done;
6667
6668 if (last.kind == TARGET_WAITKIND_SIGNALLED
6669 || last.kind == TARGET_WAITKIND_EXITED)
6670 goto done;
6671
6672 /* Select innermost stack frame - i.e., current frame is frame 0,
6673 and current location is based on that.
6674 Don't do this on return from a stack dummy routine,
6675 or if the program has exited. */
6676
6677 if (!stop_stack_dummy)
6678 {
6679 select_frame (get_current_frame ());
6680
6681 /* If --batch-silent is enabled then there's no need to print the current
6682 source location, and to try risks causing an error message about
6683 missing source files. */
6684 if (stop_print_frame && !batch_silent)
6685 print_stop_event (&last);
6686 }
6687
6688 /* Save the function value return registers, if we care.
6689 We might be about to restore their previous contents. */
6690 if (inferior_thread ()->control.proceed_to_finish
6691 && execution_direction != EXEC_REVERSE)
6692 {
6693 /* This should not be necessary. */
6694 if (stop_registers)
6695 regcache_xfree (stop_registers);
6696
6697 /* NB: The copy goes through to the target picking up the value of
6698 all the registers. */
6699 stop_registers = regcache_dup (get_current_regcache ());
6700 }
6701
6702 if (stop_stack_dummy == STOP_STACK_DUMMY)
6703 {
6704 /* Pop the empty frame that contains the stack dummy.
6705 This also restores inferior state prior to the call
6706 (struct infcall_suspend_state). */
6707 struct frame_info *frame = get_current_frame ();
6708
6709 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6710 frame_pop (frame);
6711 /* frame_pop() calls reinit_frame_cache as the last thing it
6712 does which means there's currently no selected frame. We
6713 don't need to re-establish a selected frame if the dummy call
6714 returns normally, that will be done by
6715 restore_infcall_control_state. However, we do have to handle
6716 the case where the dummy call is returning after being
6717 stopped (e.g. the dummy call previously hit a breakpoint).
6718 We can't know which case we have so just always re-establish
6719 a selected frame here. */
6720 select_frame (get_current_frame ());
6721 }
6722
6723 done:
6724 annotate_stopped ();
6725
6726 /* Suppress the stop observer if we're in the middle of:
6727
6728 - a step n (n > 1), as there still more steps to be done.
6729
6730 - a "finish" command, as the observer will be called in
6731 finish_command_continuation, so it can include the inferior
6732 function's return value.
6733
6734 - calling an inferior function, as we pretend we inferior didn't
6735 run at all. The return value of the call is handled by the
6736 expression evaluator, through call_function_by_hand. */
6737
6738 if (!target_has_execution
6739 || last.kind == TARGET_WAITKIND_SIGNALLED
6740 || last.kind == TARGET_WAITKIND_EXITED
6741 || last.kind == TARGET_WAITKIND_NO_RESUMED
6742 || (!(inferior_thread ()->step_multi
6743 && inferior_thread ()->control.stop_step)
6744 && !(inferior_thread ()->control.stop_bpstat
6745 && inferior_thread ()->control.proceed_to_finish)
6746 && !inferior_thread ()->control.in_infcall))
6747 {
6748 if (!ptid_equal (inferior_ptid, null_ptid))
6749 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6750 stop_print_frame);
6751 else
6752 observer_notify_normal_stop (NULL, stop_print_frame);
6753 }
6754
6755 if (target_has_execution)
6756 {
6757 if (last.kind != TARGET_WAITKIND_SIGNALLED
6758 && last.kind != TARGET_WAITKIND_EXITED)
6759 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6760 Delete any breakpoint that is to be deleted at the next stop. */
6761 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6762 }
6763
6764 /* Try to get rid of automatically added inferiors that are no
6765 longer needed. Keeping those around slows down things linearly.
6766 Note that this never removes the current inferior. */
6767 prune_inferiors ();
6768 }
6769
6770 static int
6771 hook_stop_stub (void *cmd)
6772 {
6773 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6774 return (0);
6775 }
6776 \f
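/* Accessors for the per-signal tables maintained by the "handle"
   command: whether GDB stops on, prints, or passes signal SIGNO.  */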
6777 int
6778 signal_stop_state (int signo)
6779 {
6780 return signal_stop[signo];
6781 }
6782
6783 int
6784 signal_print_state (int signo)
6785 {
6786 return signal_print[signo];
6787 }
6788
6789 int
6790 signal_pass_state (int signo)
6791 {
6792 return signal_program[signo];
6793 }
6794
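/* Recompute the cached signal_pass entry for SIGNO, or for every
   signal if SIGNO is -1.  A signal can be delivered straight to the
   inferior only when GDB does not stop on it, does not print it,
   does let the program see it, and does not catch it.  */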
6795 static void
6796 signal_cache_update (int signo)
6797 {
6798 if (signo == -1)
6799 {
6800 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6801 signal_cache_update (signo);
6802
6803 return;
6804 }
6805
6806 signal_pass[signo] = (signal_stop[signo] == 0
6807 && signal_print[signo] == 0
6808 && signal_program[signo] == 1
6809 && signal_catch[signo] == 0);
6810 }
6811
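/* Setters for the same tables: each records the new STATE for SIGNO,
   refreshes the signal_pass cache, and returns the previous
   setting.  */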
6812 int
6813 signal_stop_update (int signo, int state)
6814 {
6815 int ret = signal_stop[signo];
6816
6817 signal_stop[signo] = state;
6818 signal_cache_update (signo);
6819 return ret;
6820 }
6821
6822 int
6823 signal_print_update (int signo, int state)
6824 {
6825 int ret = signal_print[signo];
6826
6827 signal_print[signo] = state;
6828 signal_cache_update (signo);
6829 return ret;
6830 }
6831
6832 int
6833 signal_pass_update (int signo, int state)
6834 {
6835 int ret = signal_program[signo];
6836
6837 signal_program[signo] = state;
6838 signal_cache_update (signo);
6839 return ret;
6840 }
6841
6842 /* Update the global 'signal_catch' from INFO and notify the
6843 target. */
6844
6845 void
6846 signal_catch_update (const unsigned int *info)
6847 {
6848 int i;
6849
6850 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6851 signal_catch[i] = info[i] > 0;
6852 signal_cache_update (-1);
6853 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6854 }
6855
6856 static void
6857 sig_print_header (void)
6858 {
6859 printf_filtered (_("Signal Stop\tPrint\tPass "
6860 "to program\tDescription\n"));
6861 }
6862
6863 static void
6864 sig_print_info (enum gdb_signal oursig)
6865 {
6866 const char *name = gdb_signal_to_name (oursig);
6867 int name_padding = 13 - strlen (name);
6868
6869 if (name_padding <= 0)
6870 name_padding = 0;
6871
6872 printf_filtered ("%s", name);
6873 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6874 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6875 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6876 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6877 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6878 }
6879
6880 /* Specify how various signals in the inferior should be handled. */
6881
6882 static void
6883 handle_command (char *args, int from_tty)
6884 {
6885 char **argv;
6886 int digits, wordlen;
6887 int sigfirst, signum, siglast;
6888 enum gdb_signal oursig;
6889 int allsigs;
6890 int nsigs;
6891 unsigned char *sigs;
6892 struct cleanup *old_chain;
6893
6894 if (args == NULL)
6895 {
6896 error_no_arg (_("signal to handle"));
6897 }
6898
6899 /* Allocate and zero an array of flags for which signals to handle. */
6900
6901 nsigs = (int) GDB_SIGNAL_LAST;
6902 sigs = (unsigned char *) alloca (nsigs);
6903 memset (sigs, 0, nsigs);
6904
6905 /* Break the command line up into args. */
6906
6907 argv = gdb_buildargv (args);
6908 old_chain = make_cleanup_freeargv (argv);
6909
6910 /* Walk through the args, looking for signal oursigs, signal names, and
6911 actions. Signal numbers and signal names may be interspersed with
6912 actions, with the actions being performed for all signals cumulatively
6913 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
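/* For example, "handle SIGUSR1 SIGUSR2 nostop noprint pass" applies
   all three actions to both signals, and a numeric range such as
   "handle 14-15 print" is expanded below via
   gdb_signal_from_command.  */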
6914
6915 while (*argv != NULL)
6916 {
6917 wordlen = strlen (*argv);
6918 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6919 {;
6920 }
6921 allsigs = 0;
6922 sigfirst = siglast = -1;
6923
6924 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6925 {
6926 /* Apply action to all signals except those used by the
6927 debugger. Silently skip those. */
6928 allsigs = 1;
6929 sigfirst = 0;
6930 siglast = nsigs - 1;
6931 }
6932 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6933 {
6934 SET_SIGS (nsigs, sigs, signal_stop);
6935 SET_SIGS (nsigs, sigs, signal_print);
6936 }
6937 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6938 {
6939 UNSET_SIGS (nsigs, sigs, signal_program);
6940 }
6941 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6942 {
6943 SET_SIGS (nsigs, sigs, signal_print);
6944 }
6945 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6946 {
6947 SET_SIGS (nsigs, sigs, signal_program);
6948 }
6949 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6950 {
6951 UNSET_SIGS (nsigs, sigs, signal_stop);
6952 }
6953 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6954 {
6955 SET_SIGS (nsigs, sigs, signal_program);
6956 }
6957 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6958 {
6959 UNSET_SIGS (nsigs, sigs, signal_print);
6960 UNSET_SIGS (nsigs, sigs, signal_stop);
6961 }
6962 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6963 {
6964 UNSET_SIGS (nsigs, sigs, signal_program);
6965 }
6966 else if (digits > 0)
6967 {
6968 /* It is numeric. The numeric signal refers to our own
6969 internal signal numbering from target.h, not to host/target
6970 signal number. This is a feature; users really should be
6971 using symbolic names anyway, and the common ones like
6972 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6973
6974 sigfirst = siglast = (int)
6975 gdb_signal_from_command (atoi (*argv));
6976 if ((*argv)[digits] == '-')
6977 {
6978 siglast = (int)
6979 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6980 }
6981 if (sigfirst > siglast)
6982 {
6983 /* Bet he didn't figure we'd think of this case... */
6984 signum = sigfirst;
6985 sigfirst = siglast;
6986 siglast = signum;
6987 }
6988 }
6989 else
6990 {
6991 oursig = gdb_signal_from_name (*argv);
6992 if (oursig != GDB_SIGNAL_UNKNOWN)
6993 {
6994 sigfirst = siglast = (int) oursig;
6995 }
6996 else
6997 {
6998 /* Not a number and not a recognized flag word => complain. */
6999 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
7000 }
7001 }
7002
7003 /* If any signal numbers or symbol names were found, set flags for
7004 which signals to apply actions to. */
7005
7006 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
7007 {
7008 switch ((enum gdb_signal) signum)
7009 {
7010 case GDB_SIGNAL_TRAP:
7011 case GDB_SIGNAL_INT:
7012 if (!allsigs && !sigs[signum])
7013 {
7014 if (query (_("%s is used by the debugger.\n\
7015 Are you sure you want to change it? "),
7016 gdb_signal_to_name ((enum gdb_signal) signum)))
7017 {
7018 sigs[signum] = 1;
7019 }
7020 else
7021 {
7022 printf_unfiltered (_("Not confirmed, unchanged.\n"));
7023 gdb_flush (gdb_stdout);
7024 }
7025 }
7026 break;
7027 case GDB_SIGNAL_0:
7028 case GDB_SIGNAL_DEFAULT:
7029 case GDB_SIGNAL_UNKNOWN:
7030 /* Make sure that "all" doesn't print these. */
7031 break;
7032 default:
7033 sigs[signum] = 1;
7034 break;
7035 }
7036 }
7037
7038 argv++;
7039 }
7040
7041 for (signum = 0; signum < nsigs; signum++)
7042 if (sigs[signum])
7043 {
7044 signal_cache_update (-1);
7045 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
7046 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
7047
7048 if (from_tty)
7049 {
7050 /* Show the results. */
7051 sig_print_header ();
7052 for (; signum < nsigs; signum++)
7053 if (sigs[signum])
7054 sig_print_info (signum);
7055 }
7056
7057 break;
7058 }
7059
7060 do_cleanups (old_chain);
7061 }
7062
7063 /* Complete the "handle" command. */
7064
7065 static VEC (char_ptr) *
7066 handle_completer (struct cmd_list_element *ignore,
7067 const char *text, const char *word)
7068 {
7069 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
7070 static const char * const keywords[] =
7071 {
7072 "all",
7073 "stop",
7074 "ignore",
7075 "print",
7076 "pass",
7077 "nostop",
7078 "noignore",
7079 "noprint",
7080 "nopass",
7081 NULL,
7082 };
7083
7084 vec_signals = signal_completer (ignore, text, word);
7085 vec_keywords = complete_on_enum (keywords, word, word);
7086
7087 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
7088 VEC_free (char_ptr, vec_signals);
7089 VEC_free (char_ptr, vec_keywords);
7090 return return_val;
7091 }
7092
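/* XDB-compatible "z" command: translate XDB's single-letter flags
   ("s", "i", "r", "Q") into the corresponding "handle" arguments and
   delegate to handle_command.  */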
7093 static void
7094 xdb_handle_command (char *args, int from_tty)
7095 {
7096 char **argv;
7097 struct cleanup *old_chain;
7098
7099 if (args == NULL)
7100 error_no_arg (_("xdb command"));
7101
7102 /* Break the command line up into args. */
7103
7104 argv = gdb_buildargv (args);
7105 old_chain = make_cleanup_freeargv (argv);
7106 if (argv[1] != (char *) NULL)
7107 {
7108 char *argBuf;
7109 int bufLen;
7110
7111 bufLen = strlen (argv[0]) + 20;
7112 argBuf = (char *) xmalloc (bufLen);
7113 if (argBuf)
7114 {
7115 int validFlag = 1;
7116 enum gdb_signal oursig;
7117
7118 oursig = gdb_signal_from_name (argv[0]);
7119 memset (argBuf, 0, bufLen);
7120 if (strcmp (argv[1], "Q") == 0)
7121 sprintf (argBuf, "%s %s", argv[0], "noprint");
7122 else
7123 {
7124 if (strcmp (argv[1], "s") == 0)
7125 {
7126 if (!signal_stop[oursig])
7127 sprintf (argBuf, "%s %s", argv[0], "stop");
7128 else
7129 sprintf (argBuf, "%s %s", argv[0], "nostop");
7130 }
7131 else if (strcmp (argv[1], "i") == 0)
7132 {
7133 if (!signal_program[oursig])
7134 sprintf (argBuf, "%s %s", argv[0], "pass");
7135 else
7136 sprintf (argBuf, "%s %s", argv[0], "nopass");
7137 }
7138 else if (strcmp (argv[1], "r") == 0)
7139 {
7140 if (!signal_print[oursig])
7141 sprintf (argBuf, "%s %s", argv[0], "print");
7142 else
7143 sprintf (argBuf, "%s %s", argv[0], "noprint");
7144 }
7145 else
7146 validFlag = 0;
7147 }
7148 if (validFlag)
7149 handle_command (argBuf, from_tty);
7150 else
7151 printf_filtered (_("Invalid signal handling flag.\n"));
7152 if (argBuf)
7153 xfree (argBuf);
7154 }
7155 }
7156 do_cleanups (old_chain);
7157 }
7158
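/* Convert a numeric signal given to a CLI command into a gdb_signal;
   only the historical range 1-15 is accepted.  */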
7159 enum gdb_signal
7160 gdb_signal_from_command (int num)
7161 {
7162 if (num >= 1 && num <= 15)
7163 return (enum gdb_signal) num;
7164 error (_("Only signals 1-15 are valid as numeric signals.\n\
7165 Use \"info signals\" for a list of symbolic signals."));
7166 }
7167
7168 /* Print current contents of the tables set by the handle command.
7169 It is possible we should just be printing signals actually used
7170 by the current target (but for things to work right when switching
7171 targets, all signals should be in the signal tables). */
7172
7173 static void
7174 signals_info (char *signum_exp, int from_tty)
7175 {
7176 enum gdb_signal oursig;
7177
7178 sig_print_header ();
7179
7180 if (signum_exp)
7181 {
7182 /* First see if this is a symbol name. */
7183 oursig = gdb_signal_from_name (signum_exp);
7184 if (oursig == GDB_SIGNAL_UNKNOWN)
7185 {
7186 /* No, try numeric. */
7187 oursig =
7188 gdb_signal_from_command (parse_and_eval_long (signum_exp));
7189 }
7190 sig_print_info (oursig);
7191 return;
7192 }
7193
7194 printf_filtered ("\n");
7195 /* These ugly casts brought to you by the native VAX compiler. */
7196 for (oursig = GDB_SIGNAL_FIRST;
7197 (int) oursig < (int) GDB_SIGNAL_LAST;
7198 oursig = (enum gdb_signal) ((int) oursig + 1))
7199 {
7200 QUIT;
7201
7202 if (oursig != GDB_SIGNAL_UNKNOWN
7203 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
7204 sig_print_info (oursig);
7205 }
7206
7207 printf_filtered (_("\nUse the \"handle\" command "
7208 "to change these tables.\n"));
7209 }
7210
7211 /* Check if it makes sense to read $_siginfo from the current thread
7212 at this point. If not, throw an error. */
7213
7214 static void
7215 validate_siginfo_access (void)
7216 {
7217 /* No current inferior, no siginfo. */
7218 if (ptid_equal (inferior_ptid, null_ptid))
7219 error (_("No thread selected."));
7220
7221 /* Don't try to read from a dead thread. */
7222 if (is_exited (inferior_ptid))
7223 error (_("The current thread has terminated"));
7224
7225 /* ... or from a spinning thread. */
7226 if (is_running (inferior_ptid))
7227 error (_("Selected thread is running."));
7228 }
7229
7230 /* The $_siginfo convenience variable is a bit special. We don't know
7231 for sure the type of the value until we actually have a chance to
7232 fetch the data. The type can change depending on gdbarch, so it is
7233 also dependent on which thread you have selected. We handle this by:
7234
7235 1. making $_siginfo be an internalvar that creates a new value on
7236 access.
7237
7238 2. making the value of $_siginfo be an lval_computed value. */
7239
7240 /* This function implements the lval_computed support for reading a
7241 $_siginfo value. */
7242
7243 static void
7244 siginfo_value_read (struct value *v)
7245 {
7246 LONGEST transferred;
7247
7248 validate_siginfo_access ();
7249
7250 transferred =
7251 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
7252 NULL,
7253 value_contents_all_raw (v),
7254 value_offset (v),
7255 TYPE_LENGTH (value_type (v)));
7256
7257 if (transferred != TYPE_LENGTH (value_type (v)))
7258 error (_("Unable to read siginfo"));
7259 }
7260
7261 /* This function implements the lval_computed support for writing a
7262 $_siginfo value. */
7263
7264 static void
7265 siginfo_value_write (struct value *v, struct value *fromval)
7266 {
7267 LONGEST transferred;
7268
7269 validate_siginfo_access ();
7270
7271 transferred = target_write (&current_target,
7272 TARGET_OBJECT_SIGNAL_INFO,
7273 NULL,
7274 value_contents_all_raw (fromval),
7275 value_offset (v),
7276 TYPE_LENGTH (value_type (fromval)));
7277
7278 if (transferred != TYPE_LENGTH (value_type (fromval)))
7279 error (_("Unable to write siginfo"));
7280 }
7281
7282 static const struct lval_funcs siginfo_value_funcs =
7283 {
7284 siginfo_value_read,
7285 siginfo_value_write
7286 };
7287
7288 /* Return a new value with the correct type for the siginfo object of
7289 the current thread using architecture GDBARCH. Return a void value
7290 if there's no object available. */
7291
7292 static struct value *
7293 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7294 void *ignore)
7295 {
7296 if (target_has_stack
7297 && !ptid_equal (inferior_ptid, null_ptid)
7298 && gdbarch_get_siginfo_type_p (gdbarch))
7299 {
7300 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7301
7302 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7303 }
7304
7305 return allocate_value (builtin_type (gdbarch)->builtin_void);
7306 }
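/* For example, on targets whose gdbarch describes a siginfo type
   (e.g. GNU/Linux), "print $_siginfo.si_signo" evaluates the lazy
   "_siginfo" internalvar created in _initialize_infrun below and
   ends up reading through siginfo_value_read.  */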
7307
7308 \f
7309 /* infcall_suspend_state contains state about the program itself like its
7310 registers and any signal it received when it last stopped.
7311 This state must be restored regardless of how the inferior function call
7312 ends (either successfully, or after it hits a breakpoint or signal)
7313 if the program is to properly continue where it left off. */
7314
7315 struct infcall_suspend_state
7316 {
7317 struct thread_suspend_state thread_suspend;
7318 #if 0 /* Currently unused and empty structures are not valid C. */
7319 struct inferior_suspend_state inferior_suspend;
7320 #endif
7321
7322 /* Other fields: */
7323 CORE_ADDR stop_pc;
7324 struct regcache *registers;
7325
7326 /* Format of SIGINFO_DATA or NULL if it is not present. */
7327 struct gdbarch *siginfo_gdbarch;
7328
7329 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
7330 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
7331 content would be invalid. */
7332 gdb_byte *siginfo_data;
7333 };
7334
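/* Snapshot the state an inferior function call can clobber: the
   thread's suspend state, the stop PC, the registers, and, when the
   architecture describes it, the raw siginfo data.  */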
7335 struct infcall_suspend_state *
7336 save_infcall_suspend_state (void)
7337 {
7338 struct infcall_suspend_state *inf_state;
7339 struct thread_info *tp = inferior_thread ();
7340 #if 0
7341 struct inferior *inf = current_inferior ();
7342 #endif
7343 struct regcache *regcache = get_current_regcache ();
7344 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7345 gdb_byte *siginfo_data = NULL;
7346
7347 if (gdbarch_get_siginfo_type_p (gdbarch))
7348 {
7349 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7350 size_t len = TYPE_LENGTH (type);
7351 struct cleanup *back_to;
7352
7353 siginfo_data = xmalloc (len);
7354 back_to = make_cleanup (xfree, siginfo_data);
7355
7356 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7357 siginfo_data, 0, len) == len)
7358 discard_cleanups (back_to);
7359 else
7360 {
7361 /* Errors ignored. */
7362 do_cleanups (back_to);
7363 siginfo_data = NULL;
7364 }
7365 }
7366
7367 inf_state = XCNEW (struct infcall_suspend_state);
7368
7369 if (siginfo_data)
7370 {
7371 inf_state->siginfo_gdbarch = gdbarch;
7372 inf_state->siginfo_data = siginfo_data;
7373 }
7374
7375 inf_state->thread_suspend = tp->suspend;
7376 #if 0 /* Currently unused and empty structures are not valid C. */
7377 inf_state->inferior_suspend = inf->suspend;
7378 #endif
7379
7380 /* run_inferior_call will not use the signal due to its `proceed' call with
7381 GDB_SIGNAL_0 anyway. */
7382 tp->suspend.stop_signal = GDB_SIGNAL_0;
7383
7384 inf_state->stop_pc = stop_pc;
7385
7386 inf_state->registers = regcache_dup (regcache);
7387
7388 return inf_state;
7389 }
7390
7391 /* Restore inferior session state to INF_STATE. */
7392
7393 void
7394 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7395 {
7396 struct thread_info *tp = inferior_thread ();
7397 #if 0
7398 struct inferior *inf = current_inferior ();
7399 #endif
7400 struct regcache *regcache = get_current_regcache ();
7401 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7402
7403 tp->suspend = inf_state->thread_suspend;
7404 #if 0 /* Currently unused and empty structures are not valid C. */
7405 inf->suspend = inf_state->inferior_suspend;
7406 #endif
7407
7408 stop_pc = inf_state->stop_pc;
7409
7410 if (inf_state->siginfo_gdbarch == gdbarch)
7411 {
7412 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7413
7414 /* Errors ignored. */
7415 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7416 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
7417 }
7418
7419 /* The inferior can be gone if the user types "print exit(0)"
7420 (and perhaps other times). */
7421 if (target_has_execution)
7422 /* NB: The register write goes through to the target. */
7423 regcache_cpy (regcache, inf_state->registers);
7424
7425 discard_infcall_suspend_state (inf_state);
7426 }
7427
7428 static void
7429 do_restore_infcall_suspend_state_cleanup (void *state)
7430 {
7431 restore_infcall_suspend_state (state);
7432 }
7433
7434 struct cleanup *
7435 make_cleanup_restore_infcall_suspend_state
7436 (struct infcall_suspend_state *inf_state)
7437 {
7438 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7439 }
7440
7441 void
7442 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7443 {
7444 regcache_xfree (inf_state->registers);
7445 xfree (inf_state->siginfo_data);
7446 xfree (inf_state);
7447 }
7448
7449 struct regcache *
7450 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7451 {
7452 return inf_state->registers;
7453 }
7454
7455 /* infcall_control_state contains state regarding gdb's control of the
7456 inferior itself like stepping control. It also contains session state like
7457 the user's currently selected frame. */
7458
7459 struct infcall_control_state
7460 {
7461 struct thread_control_state thread_control;
7462 struct inferior_control_state inferior_control;
7463
7464 /* Other fields: */
7465 enum stop_stack_kind stop_stack_dummy;
7466 int stopped_by_random_signal;
7467 int stop_after_trap;
7468
7469 /* ID of the selected frame when the inferior function call was made. */
7470 struct frame_id selected_frame_id;
7471 };
7472
7473 /* Save all of the information associated with the inferior<==>gdb
7474 connection. */
7475
7476 struct infcall_control_state *
7477 save_infcall_control_state (void)
7478 {
7479 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7480 struct thread_info *tp = inferior_thread ();
7481 struct inferior *inf = current_inferior ();
7482
7483 inf_status->thread_control = tp->control;
7484 inf_status->inferior_control = inf->control;
7485
7486 tp->control.step_resume_breakpoint = NULL;
7487 tp->control.exception_resume_breakpoint = NULL;
7488
7489 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7490 chain. If caller's caller is walking the chain, they'll be happier if we
7491 hand them back the original chain when restore_infcall_control_state is
7492 called. */
7493 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7494
7495 /* Other fields: */
7496 inf_status->stop_stack_dummy = stop_stack_dummy;
7497 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7498 inf_status->stop_after_trap = stop_after_trap;
7499
7500 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7501
7502 return inf_status;
7503 }
7504
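/* catch_errors callback: re-select the frame identified by ARGS (a
   struct frame_id *), warning if it can no longer be found.  */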
7505 static int
7506 restore_selected_frame (void *args)
7507 {
7508 struct frame_id *fid = (struct frame_id *) args;
7509 struct frame_info *frame;
7510
7511 frame = frame_find_by_id (*fid);
7512
7513 /* frame_find_by_id returns NULL when the previously selected frame
7514 can no longer be found. */
7515 if (frame == NULL)
7516 {
7517 warning (_("Unable to restore previously selected frame."));
7518 return 0;
7519 }
7520
7521 select_frame (frame);
7522
7523 return (1);
7524 }
7525
7526 /* Restore inferior session state to INF_STATUS. */
7527
7528 void
7529 restore_infcall_control_state (struct infcall_control_state *inf_status)
7530 {
7531 struct thread_info *tp = inferior_thread ();
7532 struct inferior *inf = current_inferior ();
7533
7534 if (tp->control.step_resume_breakpoint)
7535 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7536
7537 if (tp->control.exception_resume_breakpoint)
7538 tp->control.exception_resume_breakpoint->disposition
7539 = disp_del_at_next_stop;
7540
7541 /* Handle the bpstat_copy of the chain. */
7542 bpstat_clear (&tp->control.stop_bpstat);
7543
7544 tp->control = inf_status->thread_control;
7545 inf->control = inf_status->inferior_control;
7546
7547 /* Other fields: */
7548 stop_stack_dummy = inf_status->stop_stack_dummy;
7549 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7550 stop_after_trap = inf_status->stop_after_trap;
7551
7552 if (target_has_stack)
7553 {
7554 /* The point of catch_errors is that if the stack is clobbered,
7555 walking the stack might encounter a garbage pointer and
7556 error() trying to dereference it. */
7557 if (catch_errors
7558 (restore_selected_frame, &inf_status->selected_frame_id,
7559 "Unable to restore previously selected frame:\n",
7560 RETURN_MASK_ERROR) == 0)
7561 /* Error in restoring the selected frame. Select the innermost
7562 frame. */
7563 select_frame (get_current_frame ());
7564 }
7565
7566 xfree (inf_status);
7567 }
7568
7569 static void
7570 do_restore_infcall_control_state_cleanup (void *sts)
7571 {
7572 restore_infcall_control_state (sts);
7573 }
7574
7575 struct cleanup *
7576 make_cleanup_restore_infcall_control_state
7577 (struct infcall_control_state *inf_status)
7578 {
7579 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7580 }
7581
7582 void
7583 discard_infcall_control_state (struct infcall_control_state *inf_status)
7584 {
7585 if (inf_status->thread_control.step_resume_breakpoint)
7586 inf_status->thread_control.step_resume_breakpoint->disposition
7587 = disp_del_at_next_stop;
7588
7589 if (inf_status->thread_control.exception_resume_breakpoint)
7590 inf_status->thread_control.exception_resume_breakpoint->disposition
7591 = disp_del_at_next_stop;
7592
7593 /* See save_infcall_control_state for info on stop_bpstat. */
7594 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7595
7596 xfree (inf_status);
7597 }
7598 \f
7599 /* restore_inferior_ptid() will be used by the cleanup machinery
7600 to restore the inferior_ptid value saved in a call to
7601 save_inferior_ptid(). */
7602
7603 static void
7604 restore_inferior_ptid (void *arg)
7605 {
7606 ptid_t *saved_ptid_ptr = arg;
7607
7608 inferior_ptid = *saved_ptid_ptr;
7609 xfree (arg);
7610 }
7611
7612 /* Save the value of inferior_ptid so that it may be restored by a
7613 later call to do_cleanups(). Returns the struct cleanup pointer
7614 needed for later doing the cleanup. */
7615
7616 struct cleanup *
7617 save_inferior_ptid (void)
7618 {
7619 ptid_t *saved_ptid_ptr;
7620
7621 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7622 *saved_ptid_ptr = inferior_ptid;
7623 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7624 }
7625
7626 /* See infrun.h. */
7627
7628 void
7629 clear_exit_convenience_vars (void)
7630 {
7631 clear_internalvar (lookup_internalvar ("_exitsignal"));
7632 clear_internalvar (lookup_internalvar ("_exitcode"));
7633 }
7634 \f
7635
7636 /* User interface for reverse debugging:
7637 Set exec-direction / show exec-direction commands
7638 (returns error unless target implements to_set_exec_direction method). */
7639
7640 int execution_direction = EXEC_FORWARD;
7641 static const char exec_forward[] = "forward";
7642 static const char exec_reverse[] = "reverse";
7643 static const char *exec_direction = exec_forward;
7644 static const char *const exec_direction_names[] = {
7645 exec_forward,
7646 exec_reverse,
7647 NULL
7648 };
7649
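/* "set exec-direction" callback: update execution_direction from the
   user-visible exec_direction string, refusing the change if the
   target cannot execute in reverse.  */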
7650 static void
7651 set_exec_direction_func (char *args, int from_tty,
7652 struct cmd_list_element *cmd)
7653 {
7654 if (target_can_execute_reverse)
7655 {
7656 if (!strcmp (exec_direction, exec_forward))
7657 execution_direction = EXEC_FORWARD;
7658 else if (!strcmp (exec_direction, exec_reverse))
7659 execution_direction = EXEC_REVERSE;
7660 }
7661 else
7662 {
7663 exec_direction = exec_forward;
7664 error (_("Target does not support this operation."));
7665 }
7666 }
7667
7668 static void
7669 show_exec_direction_func (struct ui_file *out, int from_tty,
7670 struct cmd_list_element *cmd, const char *value)
7671 {
7672 switch (execution_direction) {
7673 case EXEC_FORWARD:
7674 fprintf_filtered (out, _("Forward.\n"));
7675 break;
7676 case EXEC_REVERSE:
7677 fprintf_filtered (out, _("Reverse.\n"));
7678 break;
7679 default:
7680 internal_error (__FILE__, __LINE__,
7681 _("bogus execution_direction value: %d"),
7682 (int) execution_direction);
7683 }
7684 }
7685
7686 static void
7687 show_schedule_multiple (struct ui_file *file, int from_tty,
7688 struct cmd_list_element *c, const char *value)
7689 {
7690 fprintf_filtered (file, _("Resuming the execution of threads "
7691 "of all processes is %s.\n"), value);
7692 }
7693
7694 /* Implementation of `siginfo' variable. */
7695
7696 static const struct internalvar_funcs siginfo_funcs =
7697 {
7698 siginfo_make_value,
7699 NULL,
7700 NULL
7701 };
7702
7703 void
7704 _initialize_infrun (void)
7705 {
7706 int i;
7707 int numsigs;
7708 struct cmd_list_element *c;
7709
7710 add_info ("signals", signals_info, _("\
7711 What debugger does when program gets various signals.\n\
7712 Specify a signal as argument to print info on that signal only."));
7713 add_info_alias ("handle", "signals", 0);
7714
7715 c = add_com ("handle", class_run, handle_command, _("\
7716 Specify how to handle signals.\n\
7717 Usage: handle SIGNAL [ACTIONS]\n\
7718 Args are signals and actions to apply to those signals.\n\
7719 If no actions are specified, the current settings for the specified signals\n\
7720 will be displayed instead.\n\
7721 \n\
7722 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7723 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7724 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7725 The special arg \"all\" is recognized to mean all signals except those\n\
7726 used by the debugger, typically SIGTRAP and SIGINT.\n\
7727 \n\
7728 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7729 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7730 Stop means reenter debugger if this signal happens (implies print).\n\
7731 Print means print a message if this signal happens.\n\
7732 Pass means let program see this signal; otherwise program doesn't know.\n\
7733 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7734 Pass and Stop may be combined.\n\
7735 \n\
7736 Multiple signals may be specified. Signal numbers and signal names\n\
7737 may be interspersed with actions, with the actions being performed for\n\
7738 all signals cumulatively specified."));
7739 set_cmd_completer (c, handle_completer);
7740
7741 if (xdb_commands)
7742 {
7743 add_com ("lz", class_info, signals_info, _("\
7744 What debugger does when program gets various signals.\n\
7745 Specify a signal as argument to print info on that signal only."));
7746 add_com ("z", class_run, xdb_handle_command, _("\
7747 Specify how to handle a signal.\n\
7748 Args are signals and actions to apply to those signals.\n\
7749 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7750 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7751 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7752 The special arg \"all\" is recognized to mean all signals except those\n\
7753 used by the debugger, typically SIGTRAP and SIGINT.\n\
7754 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7755 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7756 nopass), \"Q\" (noprint)\n\
7757 Stop means reenter debugger if this signal happens (implies print).\n\
7758 Print means print a message if this signal happens.\n\
7759 Pass means let program see this signal; otherwise program doesn't know.\n\
7760 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7761 Pass and Stop may be combined."));
7762 }
7763
7764 if (!dbx_commands)
7765 stop_command = add_cmd ("stop", class_obscure,
7766 not_just_help_class_command, _("\
7767 There is no `stop' command, but you can set a hook on `stop'.\n\
7768 This allows you to set a list of commands to be run each time execution\n\
7769 of the program stops."), &cmdlist);
7770
7771 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7772 Set inferior debugging."), _("\
7773 Show inferior debugging."), _("\
7774 When non-zero, inferior specific debugging is enabled."),
7775 NULL,
7776 show_debug_infrun,
7777 &setdebuglist, &showdebuglist);
7778
7779 add_setshow_boolean_cmd ("displaced", class_maintenance,
7780 &debug_displaced, _("\
7781 Set displaced stepping debugging."), _("\
7782 Show displaced stepping debugging."), _("\
7783 When non-zero, displaced stepping specific debugging is enabled."),
7784 NULL,
7785 show_debug_displaced,
7786 &setdebuglist, &showdebuglist);
7787
7788 add_setshow_boolean_cmd ("non-stop", no_class,
7789 &non_stop_1, _("\
7790 Set whether gdb controls the inferior in non-stop mode."), _("\
7791 Show whether gdb controls the inferior in non-stop mode."), _("\
7792 When debugging a multi-threaded program and this setting is\n\
7793 off (the default, also called all-stop mode), when one thread stops\n\
7794 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7795 all other threads in the program while you interact with the thread of\n\
7796 interest. When you continue or step a thread, you can allow the other\n\
7797 threads to run, or have them remain stopped, but while you inspect any\n\
7798 thread's state, all threads stop.\n\
7799 \n\
7800 In non-stop mode, when one thread stops, other threads can continue\n\
7801 to run freely. You'll be able to step each thread independently,\n\
7802 leave it stopped or free to run as needed."),
7803 set_non_stop,
7804 show_non_stop,
7805 &setlist,
7806 &showlist);
7807
7808 numsigs = (int) GDB_SIGNAL_LAST;
7809 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7810 signal_print = (unsigned char *)
7811 xmalloc (sizeof (signal_print[0]) * numsigs);
7812 signal_program = (unsigned char *)
7813 xmalloc (sizeof (signal_program[0]) * numsigs);
7814 signal_catch = (unsigned char *)
7815 xmalloc (sizeof (signal_catch[0]) * numsigs);
7816 signal_pass = (unsigned char *)
7817 xmalloc (sizeof (signal_pass[0]) * numsigs);
7818 for (i = 0; i < numsigs; i++)
7819 {
7820 signal_stop[i] = 1;
7821 signal_print[i] = 1;
7822 signal_program[i] = 1;
7823 signal_catch[i] = 0;
7824 }
7825
7826 /* Signals caused by debugger's own actions
7827 should not be given to the program afterwards. */
7828 signal_program[GDB_SIGNAL_TRAP] = 0;
7829 signal_program[GDB_SIGNAL_INT] = 0;
7830
7831 /* Signals that are not errors should not normally enter the debugger. */
7832 signal_stop[GDB_SIGNAL_ALRM] = 0;
7833 signal_print[GDB_SIGNAL_ALRM] = 0;
7834 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7835 signal_print[GDB_SIGNAL_VTALRM] = 0;
7836 signal_stop[GDB_SIGNAL_PROF] = 0;
7837 signal_print[GDB_SIGNAL_PROF] = 0;
7838 signal_stop[GDB_SIGNAL_CHLD] = 0;
7839 signal_print[GDB_SIGNAL_CHLD] = 0;
7840 signal_stop[GDB_SIGNAL_IO] = 0;
7841 signal_print[GDB_SIGNAL_IO] = 0;
7842 signal_stop[GDB_SIGNAL_POLL] = 0;
7843 signal_print[GDB_SIGNAL_POLL] = 0;
7844 signal_stop[GDB_SIGNAL_URG] = 0;
7845 signal_print[GDB_SIGNAL_URG] = 0;
7846 signal_stop[GDB_SIGNAL_WINCH] = 0;
7847 signal_print[GDB_SIGNAL_WINCH] = 0;
7848 signal_stop[GDB_SIGNAL_PRIO] = 0;
7849 signal_print[GDB_SIGNAL_PRIO] = 0;
7850
7851 /* These signals are used internally by user-level thread
7852 implementations. (See signal(5) on Solaris.) Like the above
7853 signals, a healthy program receives and handles them as part of
7854 its normal operation. */
7855 signal_stop[GDB_SIGNAL_LWP] = 0;
7856 signal_print[GDB_SIGNAL_LWP] = 0;
7857 signal_stop[GDB_SIGNAL_WAITING] = 0;
7858 signal_print[GDB_SIGNAL_WAITING] = 0;
7859 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7860 signal_print[GDB_SIGNAL_CANCEL] = 0;
7861
7862 /* Update cached state. */
7863 signal_cache_update (-1);
7864
7865 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7866 &stop_on_solib_events, _("\
7867 Set stopping for shared library events."), _("\
7868 Show stopping for shared library events."), _("\
7869 If nonzero, gdb will give control to the user when the dynamic linker\n\
7870 notifies gdb of shared library events. The most common event of interest\n\
7871 to the user would be loading/unloading of a new library."),
7872 set_stop_on_solib_events,
7873 show_stop_on_solib_events,
7874 &setlist, &showlist);
7875
7876 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7877 follow_fork_mode_kind_names,
7878 &follow_fork_mode_string, _("\
7879 Set debugger response to a program call of fork or vfork."), _("\
7880 Show debugger response to a program call of fork or vfork."), _("\
7881 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7882 parent - the original process is debugged after a fork\n\
7883 child - the new process is debugged after a fork\n\
7884 The unfollowed process will continue to run.\n\
7885 By default, the debugger will follow the parent process."),
7886 NULL,
7887 show_follow_fork_mode_string,
7888 &setlist, &showlist);
7889
7890 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7891 follow_exec_mode_names,
7892 &follow_exec_mode_string, _("\
7893 Set debugger response to a program call of exec."), _("\
7894 Show debugger response to a program call of exec."), _("\
7895 An exec call replaces the program image of a process.\n\
7896 \n\
7897 follow-exec-mode can be:\n\
7898 \n\
7899 new - the debugger creates a new inferior and rebinds the process\n\
7900 to this new inferior. The program the process was running before\n\
7901 the exec call can be restarted afterwards by restarting the original\n\
7902 inferior.\n\
7903 \n\
7904 same - the debugger keeps the process bound to the same inferior.\n\
7905 The new executable image replaces the previous executable loaded in\n\
7906 the inferior. Restarting the inferior after the exec call restarts\n\
7907 the executable the process was running after the exec call.\n\
7908 \n\
7909 By default, the debugger will use the same inferior."),
7910 NULL,
7911 show_follow_exec_mode_string,
7912 &setlist, &showlist);
7913
7914 add_setshow_enum_cmd ("scheduler-locking", class_run,
7915 scheduler_enums, &scheduler_mode, _("\
7916 Set mode for locking scheduler during execution."), _("\
7917 Show mode for locking scheduler during execution."), _("\
7918 off == no locking (threads may preempt at any time)\n\
7919 on == full locking (no thread except the current thread may run)\n\
7920 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
7921 In this mode, other threads may run during other commands."),
7922 set_schedlock_func, /* traps on target vector */
7923 show_scheduler_mode,
7924 &setlist, &showlist);
7925
7926 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7927 Set mode for resuming threads of all processes."), _("\
7928 Show mode for resuming threads of all processes."), _("\
7929 When on, execution commands (such as 'continue' or 'next') resume all\n\
7930 threads of all processes. When off (which is the default), execution\n\
7931 commands only resume the threads of the current process. The set of\n\
7932 threads that are resumed is further refined by the scheduler-locking\n\
7933 mode (see help set scheduler-locking)."),
7934 NULL,
7935 show_schedule_multiple,
7936 &setlist, &showlist);
7937
7938 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7939 Set mode of the step operation."), _("\
7940 Show mode of the step operation."), _("\
7941 When set, doing a step over a function without debug line information\n\
7942 will stop at the first instruction of that function. Otherwise, the\n\
7943 function is skipped and the step command stops at a different source line."),
7944 NULL,
7945 show_step_stop_if_no_debug,
7946 &setlist, &showlist);
7947
7948 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7949 &can_use_displaced_stepping, _("\
7950 Set debugger's willingness to use displaced stepping."), _("\
7951 Show debugger's willingness to use displaced stepping."), _("\
7952 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7953 supported by the target architecture. If off, gdb will not use displaced\n\
7954 stepping to step over breakpoints, even if such is supported by the target\n\
7955 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7956 if the target architecture supports it and non-stop mode is active, but will not\n\
7957 use it in all-stop mode (see help set non-stop)."),
7958 NULL,
7959 show_can_use_displaced_stepping,
7960 &setlist, &showlist);
7961
7962 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7963 &exec_direction, _("Set direction of execution.\n\
7964 Options are 'forward' or 'reverse'."),
7965 _("Show direction of execution (forward/reverse)."),
7966 _("Tells gdb whether to execute forward or backward."),
7967 set_exec_direction_func, show_exec_direction_func,
7968 &setlist, &showlist);
7969
7970 /* Set/show detach-on-fork: user-settable mode. */
7971
7972 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7973 Set whether gdb will detach the child of a fork."), _("\
7974 Show whether gdb will detach the child of a fork."), _("\
7975 Tells gdb whether to detach the child of a fork."),
7976 NULL, NULL, &setlist, &showlist);
7977
7978 /* Set/show disable address space randomization mode. */
7979
7980 add_setshow_boolean_cmd ("disable-randomization", class_support,
7981 &disable_randomization, _("\
7982 Set disabling of debuggee's virtual address space randomization."), _("\
7983 Show disabling of debuggee's virtual address space randomization."), _("\
7984 When this mode is on (which is the default), randomization of the virtual\n\
7985 address space is disabled. Standalone programs run with the randomization\n\
7986 enabled by default on some platforms."),
7987 &set_disable_randomization,
7988 &show_disable_randomization,
7989 &setlist, &showlist);
7990
7991 /* ptid initializations */
7992 inferior_ptid = null_ptid;
7993 target_last_wait_ptid = minus_one_ptid;
7994
7995 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7996 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7997 observer_attach_thread_exit (infrun_thread_thread_exit);
7998 observer_attach_inferior_exit (infrun_inferior_exit);
7999
8000 /* Explicitly create without lookup, since that tries to create a
8001 value with a void typed value, and when we get here, gdbarch
8002 isn't initialized yet. At this point, we're quite sure there
8003 isn't another convenience variable of the same name. */
8004 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
8005
8006 add_setshow_boolean_cmd ("observer", no_class,
8007 &observer_mode_1, _("\
8008 Set whether gdb controls the inferior in observer mode."), _("\
8009 Show whether gdb controls the inferior in observer mode."), _("\
8010 In observer mode, GDB can get data from the inferior, but not\n\
8011 affect its execution. Registers and memory may not be changed,\n\
8012 breakpoints may not be set, and the program cannot be interrupted\n\
8013 or signalled."),
8014 set_observer_mode,
8015 show_observer_mode,
8016 &setlist,
8017 &showlist);
8018 }