Remove --xdb
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2015 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 void _initialize_infrun (void);
92
93 void nullify_last_target_wait_ptid (void);
94
95 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
96
97 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
98
99 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
100
101 /* When set, stop the 'step' command if we enter a function which has
102 no line number information. The normal behavior is that we step
103 over such function. */
104 int step_stop_if_no_debug = 0;
/* "show step-mode" callback: report whether `step' stops when entering
   a function that has no line number information.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  const char *mode = value;

  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), mode);
}
111
112 /* In asynchronous mode, but simulating synchronous execution. */
113
114 int sync_execution = 0;
115
116 /* proceed and normal_stop use this to notify the user when the
117 inferior stopped in a different thread than it had been running
118 in. */
119
120 static ptid_t previous_inferior_ptid;
121
122 /* If set (default for legacy reasons), when following a fork, GDB
123 will detach from one of the fork branches, child or parent.
124 Exactly which branch is detached depends on 'set follow-fork-mode'
125 setting. */
126
127 static int detach_fork = 1;
128
129 int debug_displaced = 0;
/* "show debug displaced" callback.

   Fixes a typo in the user-visible message: the feature is called
   "displaced stepping", but the message said "Displace stepping".  */
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}
136
137 unsigned int debug_infrun = 0;
/* "show debug infrun" callback: report the inferior-debugging
   verbosity level.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  const char *level = value;

  fprintf_filtered (file, _("Inferior debugging is %s.\n"), level);
}
144
145
146 /* Support for disabling address space randomization. */
147
148 int disable_randomization = 1;
149
/* "show disable-randomization" callback.  On targets that cannot
   control address space randomization, say so instead of printing the
   (meaningless) current setting.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (!target_supports_disable_randomization ())
    {
      fputs_filtered (_("Disabling randomization of debuggee's "
                        "virtual address space is unsupported on\n"
                        "this platform.\n"), file);
      return;
    }

  fprintf_filtered (file,
                    _("Disabling randomization of debuggee's "
                      "virtual address space is %s.\n"),
                    value);
}
164
/* "set disable-randomization" callback: reject the change on targets
   that cannot honor the setting.  There is nothing to record here;
   the setter machinery has already stored the new value.  */
static void
set_disable_randomization (char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (target_supports_disable_randomization ())
    return;

  error (_("Disabling randomization of debuggee's "
           "virtual address space is unsupported on\n"
           "this platform."));
}
174
175 /* User interface for non-stop mode. */
176
177 int non_stop = 0;
178 static int non_stop_1 = 0;
179
180 static void
181 set_non_stop (char *args, int from_tty,
182 struct cmd_list_element *c)
183 {
184 if (target_has_execution)
185 {
186 non_stop_1 = non_stop;
187 error (_("Cannot change this setting while the inferior is running."));
188 }
189
190 non_stop = non_stop_1;
191 }
192
/* "show non-stop" callback.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  const char *setting = value;

  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    setting);
}
201
202 /* "Observer mode" is somewhat like a more extreme version of
203 non-stop, in which all GDB operations that might affect the
204 target's execution have been disabled. */
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 if (target_has_execution)
214 {
215 observer_mode_1 = observer_mode;
216 error (_("Cannot change this setting while the inferior is running."));
217 }
218
219 observer_mode = observer_mode_1;
220
221 may_write_registers = !observer_mode;
222 may_write_memory = !observer_mode;
223 may_insert_breakpoints = !observer_mode;
224 may_insert_tracepoints = !observer_mode;
225 /* We can insert fast tracepoints in or out of observer mode,
226 but enable them if we're going into this mode. */
227 if (observer_mode)
228 may_insert_fast_tracepoints = 1;
229 may_stop = !observer_mode;
230 update_target_permissions ();
231
232 /* Going *into* observer mode we must force non-stop, then
233 going out we leave it that way. */
234 if (observer_mode)
235 {
236 pagination_enabled = 0;
237 non_stop = non_stop_1 = 1;
238 }
239
240 if (from_tty)
241 printf_filtered (_("Observer mode is now %s.\n"),
242 (observer_mode ? "on" : "off"));
243 }
244
/* "show observer" callback.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  const char *state = value;

  fprintf_filtered (file, _("Observer mode is %s.\n"), state);
}
251
252 /* This updates the value of observer mode based on changes in
253 permissions. Note that we are deliberately ignoring the values of
254 may-write-registers and may-write-memory, since the user may have
255 reason to enable these during a session, for instance to turn on a
256 debugging-related global. */
257
258 void
259 update_observer_mode (void)
260 {
261 int newval;
262
263 newval = (!may_insert_breakpoints
264 && !may_insert_tracepoints
265 && may_insert_fast_tracepoints
266 && !may_stop
267 && non_stop);
268
269 /* Let the user know if things change. */
270 if (newval != observer_mode)
271 printf_filtered (_("Observer mode is now %s.\n"),
272 (newval ? "on" : "off"));
273
274 observer_mode = observer_mode_1 = newval;
275 }
276
277 /* Tables of how to react to signals; the user sets them. */
278
279 static unsigned char *signal_stop;
280 static unsigned char *signal_print;
281 static unsigned char *signal_program;
282
283 /* Table of signals that are registered with "catch signal". A
284 non-zero entry indicates that the signal is caught by some "catch
285 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
286 signals. */
287 static unsigned char *signal_catch;
288
289 /* Table of signals that the target may silently handle.
290 This is automatically determined from the flags above,
291 and simply cached here. */
292 static unsigned char *signal_pass;
293
/* For each of the first NSIGS signals, if SIGS[signum] is nonzero,
   set FLAGS[signum] to 1.  Used to turn on stop/print/pass handling
   for a user-specified set of signals.  Multi-statement macro, hence
   the do/while (0) wrapper.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

/* Inverse of SET_SIGS: clear FLAGS[signum] for every signal marked in
   SIGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)
309
310 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
311 this function is to avoid exporting `signal_program'. */
312
313 void
314 update_signals_program_target (void)
315 {
316 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
317 }
318
319 /* Value to pass to target_resume() to cause all threads to resume. */
320
321 #define RESUME_ALL minus_one_ptid
322
323 /* Command list pointer for the "stop" placeholder. */
324
325 static struct cmd_list_element *stop_command;
326
327 /* Nonzero if we want to give control to the user when we're notified
328 of shared library events by the dynamic linker. */
329 int stop_on_solib_events;
330
331 /* Enable or disable optional shared library event breakpoints
332 as appropriate when the above flag is changed. */
333
static void
set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
{
  /* Re-derive which optional shared-library event breakpoints should
     be inserted now that stop_on_solib_events has changed.  */
  update_solib_breakpoints ();
}
339
/* "show stop-on-solib-events" callback.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  const char *setting = value;

  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    setting);
}
347
348 /* Nonzero means expecting a trace trap
349 and should stop the inferior and return silently when it happens. */
350
351 int stop_after_trap;
352
353 /* Save register contents here when executing a "finish" command or are
354 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
355 Thus this contains the return value from the called function (assuming
356 values are returned in a register). */
357
358 struct regcache *stop_registers;
359
360 /* Nonzero after stop if current stack frame should be printed. */
361
362 static int stop_print_frame;
363
364 /* This is a cached copy of the pid/waitstatus of the last event
365 returned by target_wait()/deprecated_target_wait_hook(). This
366 information is returned by get_last_target_status(). */
367 static ptid_t target_last_wait_ptid;
368 static struct target_waitstatus target_last_waitstatus;
369
370 static void context_switch (ptid_t ptid);
371
372 void init_thread_stepping_state (struct thread_info *tss);
373
374 static const char follow_fork_mode_child[] = "child";
375 static const char follow_fork_mode_parent[] = "parent";
376
377 static const char *const follow_fork_mode_kind_names[] = {
378 follow_fork_mode_child,
379 follow_fork_mode_parent,
380 NULL
381 };
382
383 static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* "show follow-fork-mode" callback: report whether GDB follows the
   parent or the child after a fork/vfork.  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  const char *mode = value;

  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    mode);
}
393 \f
394
395 /* Handle changes to the inferior list based on the type of fork,
396 which process is being followed, and whether the other process
397 should be detached. On entry inferior_ptid must be the ptid of
398 the fork parent. At return inferior_ptid is the ptid of the
399 followed inferior. */
400
401 static int
402 follow_fork_inferior (int follow_child, int detach_fork)
403 {
404 int has_vforked;
405 ptid_t parent_ptid, child_ptid;
406
407 has_vforked = (inferior_thread ()->pending_follow.kind
408 == TARGET_WAITKIND_VFORKED);
409 parent_ptid = inferior_ptid;
410 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
411
412 if (has_vforked
413 && !non_stop /* Non-stop always resumes both branches. */
414 && (!target_is_async_p () || sync_execution)
415 && !(follow_child || detach_fork || sched_multi))
416 {
417 /* The parent stays blocked inside the vfork syscall until the
418 child execs or exits. If we don't let the child run, then
419 the parent stays blocked. If we're telling the parent to run
420 in the foreground, the user will not be able to ctrl-c to get
421 back the terminal, effectively hanging the debug session. */
422 fprintf_filtered (gdb_stderr, _("\
423 Can not resume the parent process over vfork in the foreground while\n\
424 holding the child stopped. Try \"set detach-on-fork\" or \
425 \"set schedule-multiple\".\n"));
426 /* FIXME output string > 80 columns. */
427 return 1;
428 }
429
430 if (!follow_child)
431 {
432 /* Detach new forked process? */
433 if (detach_fork)
434 {
435 struct cleanup *old_chain;
436
437 /* Before detaching from the child, remove all breakpoints
438 from it. If we forked, then this has already been taken
439 care of by infrun.c. If we vforked however, any
440 breakpoint inserted in the parent is visible in the
441 child, even those added while stopped in a vfork
442 catchpoint. This will remove the breakpoints from the
443 parent also, but they'll be reinserted below. */
444 if (has_vforked)
445 {
446 /* Keep breakpoints list in sync. */
447 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
448 }
449
450 if (info_verbose || debug_infrun)
451 {
452 target_terminal_ours_for_output ();
453 fprintf_filtered (gdb_stdlog,
454 _("Detaching after %s from child %s.\n"),
455 has_vforked ? "vfork" : "fork",
456 target_pid_to_str (child_ptid));
457 }
458 }
459 else
460 {
461 struct inferior *parent_inf, *child_inf;
462 struct cleanup *old_chain;
463
464 /* Add process to GDB's tables. */
465 child_inf = add_inferior (ptid_get_pid (child_ptid));
466
467 parent_inf = current_inferior ();
468 child_inf->attach_flag = parent_inf->attach_flag;
469 copy_terminal_info (child_inf, parent_inf);
470 child_inf->gdbarch = parent_inf->gdbarch;
471 copy_inferior_target_desc_info (child_inf, parent_inf);
472
473 old_chain = save_inferior_ptid ();
474 save_current_program_space ();
475
476 inferior_ptid = child_ptid;
477 add_thread (inferior_ptid);
478 child_inf->symfile_flags = SYMFILE_NO_READ;
479
480 /* If this is a vfork child, then the address-space is
481 shared with the parent. */
482 if (has_vforked)
483 {
484 child_inf->pspace = parent_inf->pspace;
485 child_inf->aspace = parent_inf->aspace;
486
487 /* The parent will be frozen until the child is done
488 with the shared region. Keep track of the
489 parent. */
490 child_inf->vfork_parent = parent_inf;
491 child_inf->pending_detach = 0;
492 parent_inf->vfork_child = child_inf;
493 parent_inf->pending_detach = 0;
494 }
495 else
496 {
497 child_inf->aspace = new_address_space ();
498 child_inf->pspace = add_program_space (child_inf->aspace);
499 child_inf->removable = 1;
500 set_current_program_space (child_inf->pspace);
501 clone_program_space (child_inf->pspace, parent_inf->pspace);
502
503 /* Let the shared library layer (e.g., solib-svr4) learn
504 about this new process, relocate the cloned exec, pull
505 in shared libraries, and install the solib event
506 breakpoint. If a "cloned-VM" event was propagated
507 better throughout the core, this wouldn't be
508 required. */
509 solib_create_inferior_hook (0);
510 }
511
512 do_cleanups (old_chain);
513 }
514
515 if (has_vforked)
516 {
517 struct inferior *parent_inf;
518
519 parent_inf = current_inferior ();
520
521 /* If we detached from the child, then we have to be careful
522 to not insert breakpoints in the parent until the child
523 is done with the shared memory region. However, if we're
524 staying attached to the child, then we can and should
525 insert breakpoints, so that we can debug it. A
526 subsequent child exec or exit is enough to know when does
527 the child stops using the parent's address space. */
528 parent_inf->waiting_for_vfork_done = detach_fork;
529 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
530 }
531 }
532 else
533 {
534 /* Follow the child. */
535 struct inferior *parent_inf, *child_inf;
536 struct program_space *parent_pspace;
537
538 if (info_verbose || debug_infrun)
539 {
540 target_terminal_ours_for_output ();
541 fprintf_filtered (gdb_stdlog,
542 _("Attaching after %s %s to child %s.\n"),
543 target_pid_to_str (parent_ptid),
544 has_vforked ? "vfork" : "fork",
545 target_pid_to_str (child_ptid));
546 }
547
548 /* Add the new inferior first, so that the target_detach below
549 doesn't unpush the target. */
550
551 child_inf = add_inferior (ptid_get_pid (child_ptid));
552
553 parent_inf = current_inferior ();
554 child_inf->attach_flag = parent_inf->attach_flag;
555 copy_terminal_info (child_inf, parent_inf);
556 child_inf->gdbarch = parent_inf->gdbarch;
557 copy_inferior_target_desc_info (child_inf, parent_inf);
558
559 parent_pspace = parent_inf->pspace;
560
561 /* If we're vforking, we want to hold on to the parent until the
562 child exits or execs. At child exec or exit time we can
563 remove the old breakpoints from the parent and detach or
564 resume debugging it. Otherwise, detach the parent now; we'll
565 want to reuse it's program/address spaces, but we can't set
566 them to the child before removing breakpoints from the
567 parent, otherwise, the breakpoints module could decide to
568 remove breakpoints from the wrong process (since they'd be
569 assigned to the same address space). */
570
571 if (has_vforked)
572 {
573 gdb_assert (child_inf->vfork_parent == NULL);
574 gdb_assert (parent_inf->vfork_child == NULL);
575 child_inf->vfork_parent = parent_inf;
576 child_inf->pending_detach = 0;
577 parent_inf->vfork_child = child_inf;
578 parent_inf->pending_detach = detach_fork;
579 parent_inf->waiting_for_vfork_done = 0;
580 }
581 else if (detach_fork)
582 {
583 if (info_verbose || debug_infrun)
584 {
585 target_terminal_ours_for_output ();
586 fprintf_filtered (gdb_stdlog,
587 _("Detaching after fork from "
588 "child %s.\n"),
589 target_pid_to_str (child_ptid));
590 }
591
592 target_detach (NULL, 0);
593 }
594
595 /* Note that the detach above makes PARENT_INF dangling. */
596
597 /* Add the child thread to the appropriate lists, and switch to
598 this new thread, before cloning the program space, and
599 informing the solib layer about this new process. */
600
601 inferior_ptid = child_ptid;
602 add_thread (inferior_ptid);
603
604 /* If this is a vfork child, then the address-space is shared
605 with the parent. If we detached from the parent, then we can
606 reuse the parent's program/address spaces. */
607 if (has_vforked || detach_fork)
608 {
609 child_inf->pspace = parent_pspace;
610 child_inf->aspace = child_inf->pspace->aspace;
611 }
612 else
613 {
614 child_inf->aspace = new_address_space ();
615 child_inf->pspace = add_program_space (child_inf->aspace);
616 child_inf->removable = 1;
617 child_inf->symfile_flags = SYMFILE_NO_READ;
618 set_current_program_space (child_inf->pspace);
619 clone_program_space (child_inf->pspace, parent_pspace);
620
621 /* Let the shared library layer (e.g., solib-svr4) learn
622 about this new process, relocate the cloned exec, pull in
623 shared libraries, and install the solib event breakpoint.
624 If a "cloned-VM" event was propagated better throughout
625 the core, this wouldn't be required. */
626 solib_create_inferior_hook (0);
627 }
628 }
629
630 return target_follow_fork (follow_child, detach_fork);
631 }
632
633 /* Tell the target to follow the fork we're stopped at. Returns true
634 if the inferior should be resumed; false, if the target for some
635 reason decided it's best not to resume. */
636
637 static int
638 follow_fork (void)
639 {
640 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
641 int should_resume = 1;
642 struct thread_info *tp;
643
644 /* Copy user stepping state to the new inferior thread. FIXME: the
645 followed fork child thread should have a copy of most of the
646 parent thread structure's run control related fields, not just these.
647 Initialized to avoid "may be used uninitialized" warnings from gcc. */
648 struct breakpoint *step_resume_breakpoint = NULL;
649 struct breakpoint *exception_resume_breakpoint = NULL;
650 CORE_ADDR step_range_start = 0;
651 CORE_ADDR step_range_end = 0;
652 struct frame_id step_frame_id = { 0 };
653 struct interp *command_interp = NULL;
654
655 if (!non_stop)
656 {
657 ptid_t wait_ptid;
658 struct target_waitstatus wait_status;
659
660 /* Get the last target status returned by target_wait(). */
661 get_last_target_status (&wait_ptid, &wait_status);
662
663 /* If not stopped at a fork event, then there's nothing else to
664 do. */
665 if (wait_status.kind != TARGET_WAITKIND_FORKED
666 && wait_status.kind != TARGET_WAITKIND_VFORKED)
667 return 1;
668
669 /* Check if we switched over from WAIT_PTID, since the event was
670 reported. */
671 if (!ptid_equal (wait_ptid, minus_one_ptid)
672 && !ptid_equal (inferior_ptid, wait_ptid))
673 {
674 /* We did. Switch back to WAIT_PTID thread, to tell the
675 target to follow it (in either direction). We'll
676 afterwards refuse to resume, and inform the user what
677 happened. */
678 switch_to_thread (wait_ptid);
679 should_resume = 0;
680 }
681 }
682
683 tp = inferior_thread ();
684
685 /* If there were any forks/vforks that were caught and are now to be
686 followed, then do so now. */
687 switch (tp->pending_follow.kind)
688 {
689 case TARGET_WAITKIND_FORKED:
690 case TARGET_WAITKIND_VFORKED:
691 {
692 ptid_t parent, child;
693
694 /* If the user did a next/step, etc, over a fork call,
695 preserve the stepping state in the fork child. */
696 if (follow_child && should_resume)
697 {
698 step_resume_breakpoint = clone_momentary_breakpoint
699 (tp->control.step_resume_breakpoint);
700 step_range_start = tp->control.step_range_start;
701 step_range_end = tp->control.step_range_end;
702 step_frame_id = tp->control.step_frame_id;
703 exception_resume_breakpoint
704 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
705 command_interp = tp->control.command_interp;
706
707 /* For now, delete the parent's sr breakpoint, otherwise,
708 parent/child sr breakpoints are considered duplicates,
709 and the child version will not be installed. Remove
710 this when the breakpoints module becomes aware of
711 inferiors and address spaces. */
712 delete_step_resume_breakpoint (tp);
713 tp->control.step_range_start = 0;
714 tp->control.step_range_end = 0;
715 tp->control.step_frame_id = null_frame_id;
716 delete_exception_resume_breakpoint (tp);
717 tp->control.command_interp = NULL;
718 }
719
720 parent = inferior_ptid;
721 child = tp->pending_follow.value.related_pid;
722
723 /* Set up inferior(s) as specified by the caller, and tell the
724 target to do whatever is necessary to follow either parent
725 or child. */
726 if (follow_fork_inferior (follow_child, detach_fork))
727 {
728 /* Target refused to follow, or there's some other reason
729 we shouldn't resume. */
730 should_resume = 0;
731 }
732 else
733 {
734 /* This pending follow fork event is now handled, one way
735 or another. The previous selected thread may be gone
736 from the lists by now, but if it is still around, need
737 to clear the pending follow request. */
738 tp = find_thread_ptid (parent);
739 if (tp)
740 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
741
742 /* This makes sure we don't try to apply the "Switched
743 over from WAIT_PID" logic above. */
744 nullify_last_target_wait_ptid ();
745
746 /* If we followed the child, switch to it... */
747 if (follow_child)
748 {
749 switch_to_thread (child);
750
751 /* ... and preserve the stepping state, in case the
752 user was stepping over the fork call. */
753 if (should_resume)
754 {
755 tp = inferior_thread ();
756 tp->control.step_resume_breakpoint
757 = step_resume_breakpoint;
758 tp->control.step_range_start = step_range_start;
759 tp->control.step_range_end = step_range_end;
760 tp->control.step_frame_id = step_frame_id;
761 tp->control.exception_resume_breakpoint
762 = exception_resume_breakpoint;
763 tp->control.command_interp = command_interp;
764 }
765 else
766 {
767 /* If we get here, it was because we're trying to
768 resume from a fork catchpoint, but, the user
769 has switched threads away from the thread that
770 forked. In that case, the resume command
771 issued is most likely not applicable to the
772 child, so just warn, and refuse to resume. */
773 warning (_("Not resuming: switched threads "
774 "before following fork child.\n"));
775 }
776
777 /* Reset breakpoints in the child as appropriate. */
778 follow_inferior_reset_breakpoints ();
779 }
780 else
781 switch_to_thread (parent);
782 }
783 }
784 break;
785 case TARGET_WAITKIND_SPURIOUS:
786 /* Nothing to follow. */
787 break;
788 default:
789 internal_error (__FILE__, __LINE__,
790 "Unexpected pending_follow.kind %d\n",
791 tp->pending_follow.kind);
792 break;
793 }
794
795 return should_resume;
796 }
797
798 static void
799 follow_inferior_reset_breakpoints (void)
800 {
801 struct thread_info *tp = inferior_thread ();
802
803 /* Was there a step_resume breakpoint? (There was if the user
804 did a "next" at the fork() call.) If so, explicitly reset its
805 thread number. Cloned step_resume breakpoints are disabled on
806 creation, so enable it here now that it is associated with the
807 correct thread.
808
809 step_resumes are a form of bp that are made to be per-thread.
810 Since we created the step_resume bp when the parent process
811 was being debugged, and now are switching to the child process,
812 from the breakpoint package's viewpoint, that's a switch of
813 "threads". We must update the bp's notion of which thread
814 it is for, or it'll be ignored when it triggers. */
815
816 if (tp->control.step_resume_breakpoint)
817 {
818 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
819 tp->control.step_resume_breakpoint->loc->enabled = 1;
820 }
821
822 /* Treat exception_resume breakpoints like step_resume breakpoints. */
823 if (tp->control.exception_resume_breakpoint)
824 {
825 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
826 tp->control.exception_resume_breakpoint->loc->enabled = 1;
827 }
828
829 /* Reinsert all breakpoints in the child. The user may have set
830 breakpoints after catching the fork, in which case those
831 were never set in the child, but only in the parent. This makes
832 sure the inserted breakpoints match the breakpoint list. */
833
834 breakpoint_re_set ();
835 insert_breakpoints ();
836 }
837
838 /* The child has exited or execed: resume threads of the parent the
839 user wanted to be executing. */
840
841 static int
842 proceed_after_vfork_done (struct thread_info *thread,
843 void *arg)
844 {
845 int pid = * (int *) arg;
846
847 if (ptid_get_pid (thread->ptid) == pid
848 && is_running (thread->ptid)
849 && !is_executing (thread->ptid)
850 && !thread->stop_requested
851 && thread->suspend.stop_signal == GDB_SIGNAL_0)
852 {
853 if (debug_infrun)
854 fprintf_unfiltered (gdb_stdlog,
855 "infrun: resuming vfork parent thread %s\n",
856 target_pid_to_str (thread->ptid));
857
858 switch_to_thread (thread->ptid);
859 clear_proceed_status (0);
860 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
861 }
862
863 return 0;
864 }
865
866 /* Called whenever we notice an exec or exit event, to handle
867 detaching or resuming a vfork parent. */
868
static void
handle_vfork_child_exec_or_exit (int exec)
{
  /* EXEC is nonzero when handling a child exec event, zero when
     handling a child exit (see the exec/exit messages below).  */
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Pid of the parent to resume afterwards, or -1 for none.  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  If the user wanted to
	 detach from the parent, now is the time.  */

      if (inf->vfork_parent->pending_detach)
	{
	  struct thread_info *tp;
	  struct cleanup *old_chain;
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  inf->vfork_parent->pending_detach = 0;

	  if (!exec)
	    {
	      /* If we're handling a child exit, then inferior_ptid
		 points at the inferior's pid, not to a thread.  */
	      old_chain = save_inferior_ptid ();
	      save_current_program_space ();
	      save_current_inferior ();
	    }
	  else
	    old_chain = save_current_space_and_thread ();

	  /* We're letting loose of the parent.  */
	  /* NOTE(review): any_live_thread_of_process can presumably
	     return NULL if the parent has no live threads, in which
	     case the dereference below would crash -- confirm callers
	     guarantee a live parent thread here.  */
	  tp = any_live_thread_of_process (inf->vfork_parent->pid);
	  switch_to_thread (tp->ptid);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (debug_infrun || info_verbose)
	    {
	      target_terminal_ours_for_output ();

	      if (exec)
		{
		  fprintf_filtered (gdb_stdlog,
				    _("Detaching vfork parent process "
				      "%d after child exec.\n"),
				    inf->vfork_parent->pid);
		}
	      else
		{
		  fprintf_filtered (gdb_stdlog,
				    _("Detaching vfork parent process "
				      "%d after child exit.\n"),
				    inf->vfork_parent->pid);
		}
	    }

	  target_detach (NULL, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;

	  do_cleanups (old_chain);
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = add_program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = inf->vfork_parent->pid;

	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}
      else
	{
	  struct cleanup *old_chain;
	  struct program_space *pspace;

	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to null_ptid, so that clone_program_space doesn't want
	     to read the selected frame of a dead process.  */
	  old_chain = save_inferior_ptid ();
	  inferior_ptid = null_ptid;

	  /* This inferior is dead, so avoid giving the breakpoints
	     module the option to write through to it (cloning a
	     program space resets breakpoints).  */
	  inf->aspace = NULL;
	  inf->pspace = NULL;
	  pspace = add_program_space (maybe_new_address_space ());
	  set_current_program_space (pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (pspace, inf->vfork_parent->pspace);
	  inf->pspace = pspace;
	  inf->aspace = pspace->aspace;

	  /* Put back inferior_ptid.  We'll continue mourning this
	     inferior.  */
	  do_cleanups (old_chain);

	  resume_parent = inf->vfork_parent->pid;
	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}

      inf->vfork_parent = NULL;

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  struct cleanup *old_chain = make_cleanup_restore_current_thread ();

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resuming vfork parent process %d\n",
				resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);

	  do_cleanups (old_chain);
	}
    }
}
1029
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
/* NULL-terminated list of the valid "follow-exec-mode" values.  */
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

/* The current "follow-exec-mode" setting; defaults to "same" (keep
   using the same inferior across an exec).  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Print the current "follow-exec-mode" setting VALUE to FILE
   ("show" command callback).  */
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}
1048
/* EXECD_PATHNAME is assumed to be non-NULL.  Follow an exec event
   reported for thread PTID: flush per-image state (threads,
   breakpoints, shared libraries), attach to the new executable
   EXECD_PATHNAME, read its symbols, and re-establish breakpoints.  */

static void
follow_exec (ptid_t ptid, char *execd_pathname)
{
  struct thread_info *th, *tmp;
  struct inferior *inf = current_inferior ();
  int pid = ptid_get_pid (ptid);

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  ALL_THREADS_SAFE (th, tmp)
    if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
      delete_thread (th->ptid);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (inferior_ptid),
		     execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  /* If a sysroot is in effect, prepend it to the reported pathname,
     so symbols below are read from the sysroot copy of the
     executable.  */
  if (gdb_sysroot && *gdb_sysroot)
    {
      char *name = alloca (strlen (gdb_sysroot)
			   + strlen (execd_pathname)
			   + 1);

      strcpy (name, gdb_sysroot);
      strcat (name, execd_pathname);
      execd_pathname = name;
    }

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      struct program_space *pspace;

      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      inf = add_inferior (current_inferior ()->pid);
      pspace = add_program_space (maybe_new_address_space ());
      inf->pspace = pspace;
      inf->aspace = pspace->aspace;

      exit_inferior_num_silent (current_inferior ()->num);

      set_current_inferior (inf);
      set_current_program_space (pspace);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* That a.out is now the one to use.  */
  exec_file_attach (execd_pathname, 0);

  /* SYMFILE_DEFER_BP_RESET is used as the proper displacement for PIE
     (Position Independent Executable) main symbol file will get applied by
     solib_create_inferior_hook below.  breakpoint_re_set would fail to insert
     the breakpoints with the zero displacement.  */

  symbol_file_add (execd_pathname,
		   (inf->symfile_flags
		    | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
		   NULL, 0);

  if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
    set_initial_language ();

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1223
/* Info about an instruction that is being stepped over.  Set with
   set_step_over_info and reset with clear_step_over_info.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  struct address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1265
1266 /* Record the address of the breakpoint/instruction we're currently
1267 stepping over. */
1268
1269 static void
1270 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1271 int nonsteppable_watchpoint_p)
1272 {
1273 step_over_info.aspace = aspace;
1274 step_over_info.address = address;
1275 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1276 }
1277
1278 /* Called when we're not longer stepping over a breakpoint / an
1279 instruction, so all breakpoints are free to be (re)inserted. */
1280
1281 static void
1282 clear_step_over_info (void)
1283 {
1284 step_over_info.aspace = NULL;
1285 step_over_info.address = 0;
1286 step_over_info.nonsteppable_watchpoint_p = 0;
1287 }
1288
1289 /* See infrun.h. */
1290
1291 int
1292 stepping_past_instruction_at (struct address_space *aspace,
1293 CORE_ADDR address)
1294 {
1295 return (step_over_info.aspace != NULL
1296 && breakpoint_address_match (aspace, address,
1297 step_over_info.aspace,
1298 step_over_info.address));
1299 }
1300
/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Set while stepping over an instruction that triggers a
     watchpoint the target cannot step over (see step_over_info).  */
  return step_over_info.nonsteppable_watchpoint_p;
}
1308
1309 /* Returns true if step-over info is valid. */
1310
1311 static int
1312 step_over_info_valid_p (void)
1313 {
1314 return (step_over_info.aspace != NULL
1315 || stepping_past_nonsteppable_watchpoint ());
1316 }
1317
1318 \f
1319 /* Displaced stepping. */
1320
1321 /* In non-stop debugging mode, we must take special care to manage
1322 breakpoints properly; in particular, the traditional strategy for
1323 stepping a thread past a breakpoint it has hit is unsuitable.
1324 'Displaced stepping' is a tactic for stepping one thread past a
1325 breakpoint it has hit while ensuring that other threads running
1326 concurrently will hit the breakpoint as they should.
1327
1328 The traditional way to step a thread T off a breakpoint in a
1329 multi-threaded program in all-stop mode is as follows:
1330
1331 a0) Initially, all threads are stopped, and breakpoints are not
1332 inserted.
1333 a1) We single-step T, leaving breakpoints uninserted.
1334 a2) We insert breakpoints, and resume all threads.
1335
1336 In non-stop debugging, however, this strategy is unsuitable: we
1337 don't want to have to stop all threads in the system in order to
1338 continue or step T past a breakpoint. Instead, we use displaced
1339 stepping:
1340
1341 n0) Initially, T is stopped, other threads are running, and
1342 breakpoints are inserted.
1343 n1) We copy the instruction "under" the breakpoint to a separate
1344 location, outside the main code stream, making any adjustments
1345 to the instruction, register, and memory state as directed by
1346 T's architecture.
1347 n2) We single-step T over the instruction at its new location.
1348 n3) We adjust the resulting register and memory state as directed
1349 by T's architecture. This includes resetting T's PC to point
1350 back into the main instruction stream.
1351 n4) We resume T.
1352
1353 This approach depends on the following gdbarch methods:
1354
1355 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1356 indicate where to copy the instruction, and how much space must
1357 be reserved there. We use these in step n1.
1358
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1360 address, and makes any necessary adjustments to the instruction,
1361 register contents, and memory. We use this in step n1.
1362
1363 - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
1365 same effect the instruction would have had if we had executed it
1366 at its original address. We use this in step n3.
1367
1368 - gdbarch_displaced_step_free_closure provides cleanup.
1369
1370 The gdbarch_displaced_step_copy_insn and
1371 gdbarch_displaced_step_fixup functions must be written so that
1372 copying an instruction with gdbarch_displaced_step_copy_insn,
1373 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1375 thread's memory and registers as stepping the instruction in place
1376 would have. Exactly which responsibilities fall to the copy and
1377 which fall to the fixup is up to the author of those functions.
1378
1379 See the comments in gdbarch.sh for details.
1380
1381 Note that displaced stepping and software single-step cannot
1382 currently be used in combination, although with some care I think
1383 they could be made to. Software single-step works by placing
1384 breakpoints on all possible subsequent instructions; if the
1385 displaced instruction is a PC-relative jump, those breakpoints
1386 could fall in very strange places --- on pages that aren't
1387 executable, or at addresses that are not proper instruction
1388 boundaries. (We do generally let other threads run while we wait
1389 to hit the software single-step breakpoint, and they might
1390 encounter such a corrupted instruction.) One way to work around
1391 this would be to have gdbarch_displaced_step_copy_insn fully
1392 simulate the effect of PC-relative instructions (and return NULL)
1393 on architectures that use software single-stepping.
1394
1395 In non-stop mode, we can have independent and simultaneous step
1396 requests, so more than one thread may need to simultaneously step
1397 over a breakpoint. The current implementation assumes there is
1398 only one scratch space per process. In this case, we have to
1399 serialize access to the scratch space. If thread A wants to step
1400 over a breakpoint, but we are currently waiting for some other
1401 thread to complete a displaced step, we leave thread A stopped and
1402 place it in the displaced_step_request_queue. Whenever a displaced
1403 step finishes, we pick the next thread in the queue and start a new
1404 displaced step operation on it. See displaced_step_prepare and
1405 displaced_step_fixup for details. */
1406
/* A pending displaced-stepping request, queued while another thread
   of the same process occupies the single scratch area.  */
struct displaced_step_request
{
  /* The thread waiting for its turn to displaced-step.  */
  ptid_t ptid;
  /* Next request in the queue, or NULL for the tail.  */
  struct displaced_step_request *next;
};
1412
/* Per-inferior displaced stepping state.  */
struct displaced_step_inferior_state
{
  /* Pointer to next in linked list.  */
  struct displaced_step_inferior_state *next;

  /* The process this displaced step state refers to.  */
  int pid;

  /* A queue of pending displaced stepping requests.  One entry per
     thread that needs to do a displaced step.  */
  struct displaced_step_request *step_request_queue;

  /* If this is not null_ptid, this is the thread carrying out a
     displaced single-step in process PID.  This thread's state will
     require fixing up once it has completed its step.  */
  ptid_t step_ptid;

  /* The architecture the thread had when we stepped it.  */
  struct gdbarch *step_gdbarch;

  /* The closure provided by gdbarch_displaced_step_copy_insn, to be
     used for post-step cleanup.  */
  struct displaced_step_closure *step_closure;

  /* The address of the original instruction, and the copy we
     made.  */
  CORE_ADDR step_original, step_copy;

  /* Saved contents of copy area.  */
  gdb_byte *step_saved_copy;
};

/* The list of states of processes involved in displaced stepping
   presently.  */
static struct displaced_step_inferior_state *displaced_step_inferior_states;
1449
1450 /* Get the displaced stepping state of process PID. */
1451
1452 static struct displaced_step_inferior_state *
1453 get_displaced_stepping_state (int pid)
1454 {
1455 struct displaced_step_inferior_state *state;
1456
1457 for (state = displaced_step_inferior_states;
1458 state != NULL;
1459 state = state->next)
1460 if (state->pid == pid)
1461 return state;
1462
1463 return NULL;
1464 }
1465
1466 /* Return true if process PID has a thread doing a displaced step. */
1467
1468 static int
1469 displaced_step_in_progress (int pid)
1470 {
1471 struct displaced_step_inferior_state *displaced;
1472
1473 displaced = get_displaced_stepping_state (pid);
1474 if (displaced != NULL && !ptid_equal (displaced->step_ptid, null_ptid))
1475 return 1;
1476
1477 return 0;
1478 }
1479
1480 /* Add a new displaced stepping state for process PID to the displaced
1481 stepping state list, or return a pointer to an already existing
1482 entry, if it already exists. Never returns NULL. */
1483
1484 static struct displaced_step_inferior_state *
1485 add_displaced_stepping_state (int pid)
1486 {
1487 struct displaced_step_inferior_state *state;
1488
1489 for (state = displaced_step_inferior_states;
1490 state != NULL;
1491 state = state->next)
1492 if (state->pid == pid)
1493 return state;
1494
1495 state = xcalloc (1, sizeof (*state));
1496 state->pid = pid;
1497 state->next = displaced_step_inferior_states;
1498 displaced_step_inferior_states = state;
1499
1500 return state;
1501 }
1502
1503 /* If inferior is in displaced stepping, and ADDR equals to starting address
1504 of copy area, return corresponding displaced_step_closure. Otherwise,
1505 return NULL. */
1506
1507 struct displaced_step_closure*
1508 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1509 {
1510 struct displaced_step_inferior_state *displaced
1511 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1512
1513 /* If checking the mode of displaced instruction in copy area. */
1514 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1515 && (displaced->step_copy == addr))
1516 return displaced->step_closure;
1517
1518 return NULL;
1519 }
1520
1521 /* Remove the displaced stepping state of process PID. */
1522
1523 static void
1524 remove_displaced_stepping_state (int pid)
1525 {
1526 struct displaced_step_inferior_state *it, **prev_next_p;
1527
1528 gdb_assert (pid != 0);
1529
1530 it = displaced_step_inferior_states;
1531 prev_next_p = &displaced_step_inferior_states;
1532 while (it)
1533 {
1534 if (it->pid == pid)
1535 {
1536 *prev_next_p = it->next;
1537 xfree (it);
1538 return;
1539 }
1540
1541 prev_next_p = &it->next;
1542 it = *prev_next_p;
1543 }
1544 }
1545
/* Discard any displaced-stepping state recorded for INF's process;
   called when the inferior exits.  */
static void
infrun_inferior_exit (struct inferior *inf)
{
  remove_displaced_stepping_state (inf->pid);
}
1551
/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   which of all-stop or non-stop mode is active --- displaced stepping
   in non-stop mode; hold-and-step in all-stop mode.  */

/* See the comment above for how each value is interpreted.  */
static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1561
1562 static void
1563 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1564 struct cmd_list_element *c,
1565 const char *value)
1566 {
1567 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1568 fprintf_filtered (file,
1569 _("Debugger's willingness to use displaced stepping "
1570 "to step over breakpoints is %s (currently %s).\n"),
1571 value, non_stop ? "on" : "off");
1572 else
1573 fprintf_filtered (file,
1574 _("Debugger's willingness to use displaced stepping "
1575 "to step over breakpoints is %s.\n"), value);
1576 }
1577
1578 /* Return non-zero if displaced stepping can/should be used to step
1579 over breakpoints. */
1580
1581 static int
1582 use_displaced_stepping (struct gdbarch *gdbarch)
1583 {
1584 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1585 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1586 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1587 && find_record_target () == NULL);
1588 }
1589
1590 /* Clean out any stray displaced stepping state. */
1591 static void
1592 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1593 {
1594 /* Indicate that there is no cleanup pending. */
1595 displaced->step_ptid = null_ptid;
1596
1597 if (displaced->step_closure)
1598 {
1599 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1600 displaced->step_closure);
1601 displaced->step_closure = NULL;
1602 }
1603 }
1604
/* Cleanup-callback wrapper around displaced_step_clear; ARG is the
   struct displaced_step_inferior_state to clear.  */
static void
displaced_step_clear_cleanup (void *arg)
{
  struct displaced_step_inferior_state *state = arg;

  displaced_step_clear (state);
}
1612
1613 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1614 void
1615 displaced_step_dump_bytes (struct ui_file *file,
1616 const gdb_byte *buf,
1617 size_t len)
1618 {
1619 int i;
1620
1621 for (i = 0; i < len; i++)
1622 fprintf_unfiltered (file, "%02x ", buf[i]);
1623 fputs_unfiltered ("\n", file);
1624 }
1625
/* Prepare to single-step thread PTID, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns 1 if preparing was successful -- this thread is going to be
   stepped now; or 0 if displaced stepping this thread got queued.  */
static int
displaced_step_prepare (ptid_t ptid)
{
  struct cleanup *old_cleanups, *ignore_cleanups;
  struct thread_info *tp = find_thread_ptid (ptid);
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original, copy;
  ULONGEST len;
  struct displaced_step_closure *closure;
  struct displaced_step_inferior_state *displaced;
  int status;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We have to displaced step one thread at a time, as we only have
     access to a single scratch space per inferior.  */

  displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

  if (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      /* Already waiting for a displaced step to finish.  Defer this
	 request and place in queue.  */
      struct displaced_step_request *req, *new_req;

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: defering step of %s\n",
			    target_pid_to_str (ptid));

      new_req = xmalloc (sizeof (*new_req));
      new_req->ptid = ptid;
      new_req->next = NULL;

      /* Append to the tail of the queue so requests are serviced in
	 arrival order.  */
      if (displaced->step_request_queue)
	{
	  for (req = displaced->step_request_queue;
	       req && req->next;
	       req = req->next)
	    ;
	  req->next = new_req;
	}
      else
	displaced->step_request_queue = new_req;

      return 0;
    }
  else
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: stepping %s now\n",
			    target_pid_to_str (ptid));
    }

  displaced_step_clear (displaced);

  /* Memory accesses below are relative to PTID's inferior.  */
  old_cleanups = save_inferior_ptid ();
  inferior_ptid = ptid;

  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  /* Save the original contents of the copy area.  */
  displaced->step_saved_copy = xmalloc (len);
  ignore_cleanups = make_cleanup (free_current_contents,
				  &displaced->step_saved_copy);
  status = target_read_memory (copy, displaced->step_saved_copy, len);
  if (status != 0)
    throw_error (MEMORY_ERROR,
		 _("Error accessing memory address %s (%s) for "
		   "displaced-stepping scratch space."),
		 paddress (gdbarch, copy), safe_strerror (status));
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
			  paddress (gdbarch, copy));
      displaced_step_dump_bytes (gdb_stdlog,
				 displaced->step_saved_copy,
				 len);
    };

  closure = gdbarch_displaced_step_copy_insn (gdbarch,
					      original, copy, regcache);

  /* We don't support the fully-simulated case at present.  */
  gdb_assert (closure);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced->step_ptid = ptid;
  displaced->step_gdbarch = gdbarch;
  displaced->step_closure = closure;
  displaced->step_original = original;
  displaced->step_copy = copy;

  make_cleanup (displaced_step_clear_cleanup, displaced);

  /* Resume execution at the copy.  */
  regcache_write_pc (regcache, copy);

  discard_cleanups (ignore_cleanups);

  do_cleanups (old_cleanups);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
			paddress (gdbarch, copy));

  return 1;
}
1762
/* Write LEN bytes from MYADDR to inferior memory at MEMADDR,
   temporarily switching inferior_ptid to PTID so the write targets
   that thread's process.  */
static void
write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
		   const gdb_byte *myaddr, int len)
{
  struct cleanup *ptid_cleanup = save_inferior_ptid ();

  inferior_ptid = ptid;
  write_memory (memaddr, myaddr, len);
  do_cleanups (ptid_cleanup);
}
1773
/* Restore the contents of the copy area for thread PTID (undoes what
   displaced_step_prepare wrote to the scratch space, using the bytes
   saved in DISPLACED->step_saved_copy).  */

static void
displaced_step_restore (struct displaced_step_inferior_state *displaced,
			ptid_t ptid)
{
  ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);

  write_memory_ptid (ptid, displaced->step_copy,
		     displaced->step_saved_copy, len);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
			target_pid_to_str (ptid),
			paddress (displaced->step_gdbarch,
				  displaced->step_copy));
}
1790
/* Finish a displaced step, if thread EVENT_PTID was performing one:
   restore the scratch area's original contents, fix up the thread's
   state so it looks as if the instruction ran at its original
   address (or just relocate the PC if the step didn't complete,
   judging by SIGNAL), and then start the next queued displaced-step
   request for this process, if any.  */
static void
displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
{
  struct cleanup *old_cleanups;
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (ptid_get_pid (event_ptid));

  /* Was any thread of this process doing a displaced step?  */
  if (displaced == NULL)
    return;

  /* Was this event for the pid we displaced?  */
  if (ptid_equal (displaced->step_ptid, null_ptid)
      || ! ptid_equal (displaced->step_ptid, event_ptid))
    return;

  old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);

  displaced_step_restore (displaced, displaced->step_ptid);

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread.  */
  switch_to_thread (event_ptid);

  /* Did the instruction complete successfully?  */
  if (signal == GDB_SIGNAL_TRAP
      && !(target_stopped_by_watchpoint ()
	   && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
	       || target_have_steppable_watchpoint)))
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
				    displaced->step_closure,
				    displaced->step_original,
				    displaced->step_copy,
				    get_thread_regcache (displaced->step_ptid));
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
	 relocate the PC.  */
      struct regcache *regcache = get_thread_regcache (event_ptid);
      CORE_ADDR pc = regcache_read_pc (regcache);

      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
    }

  do_cleanups (old_cleanups);

  displaced->step_ptid = null_ptid;

  /* Are there any pending displaced stepping requests?  If so, run
     one now.  Leave the state object around, since we're likely to
     need it again soon.  */
  while (displaced->step_request_queue)
    {
      struct displaced_step_request *head;
      ptid_t ptid;
      struct regcache *regcache;
      struct gdbarch *gdbarch;
      CORE_ADDR actual_pc;
      struct address_space *aspace;

      /* Pop the next request off the queue.  */
      head = displaced->step_request_queue;
      ptid = head->ptid;
      displaced->step_request_queue = head->next;
      xfree (head);

      context_switch (ptid);

      regcache = get_thread_regcache (ptid);
      actual_pc = regcache_read_pc (regcache);
      aspace = get_regcache_aspace (regcache);

      if (breakpoint_here_p (aspace, actual_pc))
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: stepping queued %s now\n",
				target_pid_to_str (ptid));

	  displaced_step_prepare (ptid);

	  gdbarch = get_regcache_arch (regcache);

	  if (debug_displaced)
	    {
	      CORE_ADDR actual_pc = regcache_read_pc (regcache);
	      gdb_byte buf[4];

	      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
				  paddress (gdbarch, actual_pc));
	      read_memory (actual_pc, buf, sizeof (buf));
	      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
	    }

	  if (gdbarch_displaced_step_hw_singlestep (gdbarch,
						    displaced->step_closure))
	    target_resume (ptid, 1, GDB_SIGNAL_0);
	  else
	    target_resume (ptid, 0, GDB_SIGNAL_0);

	  /* Done, we're stepping a thread.  */
	  break;
	}
      else
	{
	  int step;
	  struct thread_info *tp = inferior_thread ();

	  /* The breakpoint we were sitting under has since been
	     removed.  */
	  tp->control.trap_expected = 0;

	  /* Go back to what we were trying to do.  */
	  step = currently_stepping (tp);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: breakpoint is gone: %s, step(%d)\n",
				target_pid_to_str (tp->ptid), step);

	  target_resume (ptid, step, GDB_SIGNAL_0);
	  tp->suspend.stop_signal = GDB_SIGNAL_0;

	  /* This request was discarded.  See if there's any other
	     thread waiting for its turn.  */
	}
    }
}
1923
1924 /* Update global variables holding ptids to hold NEW_PTID if they were
1925 holding OLD_PTID. */
1926 static void
1927 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1928 {
1929 struct displaced_step_request *it;
1930 struct displaced_step_inferior_state *displaced;
1931
1932 if (ptid_equal (inferior_ptid, old_ptid))
1933 inferior_ptid = new_ptid;
1934
1935 for (displaced = displaced_step_inferior_states;
1936 displaced;
1937 displaced = displaced->next)
1938 {
1939 if (ptid_equal (displaced->step_ptid, old_ptid))
1940 displaced->step_ptid = new_ptid;
1941
1942 for (it = displaced->step_request_queue; it; it = it->next)
1943 if (ptid_equal (it->ptid, old_ptid))
1944 it->ptid = new_ptid;
1945 }
1946 }
1947
1948 \f
1949 /* Resuming. */
1950
1951 /* Things to clean up if we QUIT out of resume (). */
1952 static void
1953 resume_cleanups (void *ignore)
1954 {
1955 if (!ptid_equal (inferior_ptid, null_ptid))
1956 delete_single_step_breakpoints (inferior_thread ());
1957
1958 normal_stop ();
1959 }
1960
/* Possible values of the "scheduler-locking" setting, and the
   NULL-terminated enumeration handed to the "set scheduler-locking"
   command (see set_schedlock_func / show_scheduler_mode below).  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  NULL
};
/* Current scheduler-locking mode; compared by pointer against the
   schedlock_* constants above.  Defaults to "off".  */
static const char *scheduler_mode = schedlock_off;
/* Implement "show scheduler-locking": print VALUE, the current
   scheduler-locking mode, to FILE.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered
    (file,
     _("Mode for locking scheduler during execution is \"%s\".\n"),
     value);
}
1980
1981 static void
1982 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1983 {
1984 if (!target_can_lock_scheduler)
1985 {
1986 scheduler_mode = schedlock_off;
1987 error (_("Target '%s' cannot support this command."), target_shortname);
1988 }
1989 }
1990
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Read by user_visible_resume_ptid and
   find_thread_needs_step_over when deciding which threads an
   execution command applies to.  */
int sched_multi = 0;
1995
1996 /* Try to setup for software single stepping over the specified location.
1997 Return 1 if target_resume() should use hardware single step.
1998
1999 GDBARCH the current gdbarch.
2000 PC the location to step over. */
2001
2002 static int
2003 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2004 {
2005 int hw_step = 1;
2006
2007 if (execution_direction == EXEC_FORWARD
2008 && gdbarch_software_single_step_p (gdbarch)
2009 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
2010 {
2011 hw_step = 0;
2012 }
2013 return hw_step;
2014 }
2015
2016 /* See infrun.h. */
2017
2018 ptid_t
2019 user_visible_resume_ptid (int step)
2020 {
2021 ptid_t resume_ptid;
2022
2023 if (non_stop)
2024 {
2025 /* With non-stop mode on, threads are always handled
2026 individually. */
2027 resume_ptid = inferior_ptid;
2028 }
2029 else if ((scheduler_mode == schedlock_on)
2030 || (scheduler_mode == schedlock_step && step))
2031 {
2032 /* User-settable 'scheduler' mode requires solo thread
2033 resume. */
2034 resume_ptid = inferior_ptid;
2035 }
2036 else if (!sched_multi && target_supports_multi_process ())
2037 {
2038 /* Resume all threads of the current process (and none of other
2039 processes). */
2040 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2041 }
2042 else
2043 {
2044 /* Resume all threads of all processes. */
2045 resume_ptid = RESUME_ALL;
2046 }
2047
2048 return resume_ptid;
2049 }
2050
2051 /* Wrapper for target_resume, that handles infrun-specific
2052 bookkeeping. */
2053
2054 static void
2055 do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2056 {
2057 struct thread_info *tp = inferior_thread ();
2058
2059 /* Install inferior's terminal modes. */
2060 target_terminal_inferior ();
2061
2062 /* Avoid confusing the next resume, if the next stop/resume
2063 happens to apply to another thread. */
2064 tp->suspend.stop_signal = GDB_SIGNAL_0;
2065
2066 /* Advise target which signals may be handled silently.
2067
2068 If we have removed breakpoints because we are stepping over one
2069 in-line (in any thread), we need to receive all signals to avoid
2070 accidentally skipping a breakpoint during execution of a signal
2071 handler.
2072
2073 Likewise if we're displaced stepping, otherwise a trap for a
2074 breakpoint in a signal handler might be confused with the
2075 displaced step finishing. We don't make the displaced_step_fixup
2076 step distinguish the cases instead, because:
2077
2078 - a backtrace while stopped in the signal handler would show the
2079 scratch pad as frame older than the signal handler, instead of
2080 the real mainline code.
2081
2082 - when the thread is later resumed, the signal handler would
2083 return to the scratch pad area, which would no longer be
2084 valid. */
2085 if (step_over_info_valid_p ()
2086 || displaced_step_in_progress (ptid_get_pid (tp->ptid)))
2087 target_pass_signals (0, NULL);
2088 else
2089 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2090
2091 target_resume (resume_ptid, step, sig);
2092 }
2093
/* Resume the inferior, but allow a QUIT.  This is useful if the user
   wants to interrupt some lengthy single-stepping operation
   (for child processes, the SIGINT goes to the inferior, and so
   we get a SIGINT random_signal, but for remote debugging and perhaps
   other targets, that's not true).

   SIG is the signal to give the inferior (zero for none).

   Broad flow: decide whether to step or continue, handle the special
   case of sitting on a permanent breakpoint, possibly set up
   displaced stepping or software single-step, possibly convert a
   stepped-with-signal into a step-resume-breakpoint scheme, narrow
   the set of threads to resume, then hand off to do_target_resume.  */
void
resume (enum gdb_signal sig)
{
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct thread_info *tp = inferior_thread ();
  CORE_ADDR pc = regcache_read_pc (regcache);
  struct address_space *aspace = get_regcache_aspace (regcache);
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  int step;

  tp->stepped_breakpoint = 0;

  QUIT;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume : clear step\n");
      step = 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resume (step=%d, signal=%s), "
			"trap_expected=%d, current thread [%s] at %s\n",
			step, gdb_signal_to_symbol_string (sig),
			tp->control.trap_expected,
			target_pid_to_str (inferior_ptid),
			paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint, "
				"deliver signal first\n");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint\n");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = user_visible_resume_ptid (user_step);
	      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
	      discard_cleanups (old_cleanups);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If enabled, step over breakpoints by executing a copy of the
     instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (use_displaced_stepping (gdbarch)
      && tp->control.trap_expected
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      struct displaced_step_inferior_state *displaced;

      if (!displaced_step_prepare (inferior_ptid))
	{
	  /* Got placed in displaced stepping queue.  Will be resumed
	     later when all the currently queued displaced stepping
	     requests finish.  The thread is not executing at this
	     point, and the call to set_executing will be made later.
	     But we need to call set_running here, since from the
	     user/frontend's point of view, threads were set running.
	     Unless we're calling an inferior function, as in that
	     case we pretend the inferior doesn't run at all.  */
	  if (!tp->control.in_infcall)
	    set_running (user_visible_resume_ptid (user_step), 1);
	  discard_cleanups (old_cleanups);
	  return;
	}

      /* Update pc to reflect the new address from which we will execute
	 instructions due to displaced stepping.  */
      pc = regcache_read_pc (get_thread_regcache (inferior_ptid));

      displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
      step = gdbarch_displaced_step_hw_singlestep (gdbarch,
						   displaced->step_closure);
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, we might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  Start
     by assuming everything will be resumed, then narrow the set
     by applying increasingly restricting conditions.  */
  resume_ptid = user_visible_resume_ptid (user_step);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming less
     (e.g., we might need to step over a breakpoint), from the
     user/frontend's point of view, all threads in RESUME_PTID are now
     running.  Unless we're calling an inferior function, as in that
     case we pretend the inferior doesn't run at all.  */
  if (!tp->control.in_infcall)
    set_running (resume_ptid, 1);

  /* Maybe resume a single thread after all.  */
  if ((step || thread_has_single_step_breakpoints_set (tp))
      && tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, by single-stepping the thread with the breakpoint
	 removed.  In which case, we need to single-step only this
	 thread, and keep others stopped, as they can miss this
	 breakpoint if allowed to run.  */
      resume_ptid = inferior_ptid;
    }

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* The only case we currently need to step a breakpoint
	 instruction is when we have a signal to deliver.  See
	 handle_signal_stop where we handle random signals that could
	 take us out of the stepping range.  Normally, in that
	 case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would take us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.  */
      gdb_assert (sig != GDB_SIGNAL_0);

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = 0;
    }

  /* Debug trace: dump the first bytes of the displaced-step scratch
     copy we're about to run.  */
  if (debug_displaced
      && use_displaced_stepping (gdbarch)
      && tp->control.trap_expected
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
      struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
			  paddress (resume_gdbarch, actual_pc));
      read_memory (actual_pc, buf, sizeof (buf));
      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  discard_cleanups (old_cleanups);
}
2425 \f
2426 /* Proceeding. */
2427
2428 /* Clear out all variables saying what to do when inferior is continued.
2429 First do this, then set the ones you want, then call `proceed'. */
2430
2431 static void
2432 clear_proceed_status_thread (struct thread_info *tp)
2433 {
2434 if (debug_infrun)
2435 fprintf_unfiltered (gdb_stdlog,
2436 "infrun: clear_proceed_status_thread (%s)\n",
2437 target_pid_to_str (tp->ptid));
2438
2439 /* If this signal should not be seen by program, give it zero.
2440 Used for debugging signals. */
2441 if (!signal_pass_state (tp->suspend.stop_signal))
2442 tp->suspend.stop_signal = GDB_SIGNAL_0;
2443
2444 tp->control.trap_expected = 0;
2445 tp->control.step_range_start = 0;
2446 tp->control.step_range_end = 0;
2447 tp->control.may_range_step = 0;
2448 tp->control.step_frame_id = null_frame_id;
2449 tp->control.step_stack_frame_id = null_frame_id;
2450 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2451 tp->control.step_start_function = NULL;
2452 tp->stop_requested = 0;
2453
2454 tp->control.stop_step = 0;
2455
2456 tp->control.proceed_to_finish = 0;
2457
2458 tp->control.command_interp = NULL;
2459 tp->control.stepping_command = 0;
2460
2461 /* Discard any remaining commands or status from previous stop. */
2462 bpstat_clear (&tp->control.stop_bpstat);
2463 }
2464
2465 void
2466 clear_proceed_status (int step)
2467 {
2468 if (!non_stop)
2469 {
2470 struct thread_info *tp;
2471 ptid_t resume_ptid;
2472
2473 resume_ptid = user_visible_resume_ptid (step);
2474
2475 /* In all-stop mode, delete the per-thread status of all threads
2476 we're about to resume, implicitly and explicitly. */
2477 ALL_NON_EXITED_THREADS (tp)
2478 {
2479 if (!ptid_match (tp->ptid, resume_ptid))
2480 continue;
2481 clear_proceed_status_thread (tp);
2482 }
2483 }
2484
2485 if (!ptid_equal (inferior_ptid, null_ptid))
2486 {
2487 struct inferior *inferior;
2488
2489 if (non_stop)
2490 {
2491 /* If in non-stop mode, only delete the per-thread status of
2492 the current thread. */
2493 clear_proceed_status_thread (inferior_thread ());
2494 }
2495
2496 inferior = current_inferior ();
2497 inferior->control.stop_soon = NO_STOP_QUIETLY;
2498 }
2499
2500 stop_after_trap = 0;
2501
2502 clear_step_over_info ();
2503
2504 observer_notify_about_to_proceed ();
2505
2506 if (stop_registers)
2507 {
2508 regcache_xfree (stop_registers);
2509 stop_registers = NULL;
2510 }
2511 }
2512
2513 /* Returns true if TP is still stopped at a breakpoint that needs
2514 stepping-over in order to make progress. If the breakpoint is gone
2515 meanwhile, we can skip the whole step-over dance. */
2516
2517 static int
2518 thread_still_needs_step_over (struct thread_info *tp)
2519 {
2520 if (tp->stepping_over_breakpoint)
2521 {
2522 struct regcache *regcache = get_thread_regcache (tp->ptid);
2523
2524 if (breakpoint_here_p (get_regcache_aspace (regcache),
2525 regcache_read_pc (regcache))
2526 == ordinary_breakpoint_here)
2527 return 1;
2528
2529 tp->stepping_over_breakpoint = 0;
2530 }
2531
2532 return 0;
2533 }
2534
2535 /* Returns true if scheduler locking applies. STEP indicates whether
2536 we're about to do a step/next-like command to a thread. */
2537
2538 static int
2539 schedlock_applies (struct thread_info *tp)
2540 {
2541 return (scheduler_mode == schedlock_on
2542 || (scheduler_mode == schedlock_step
2543 && tp->control.stepping_command));
2544 }
2545
2546 /* Look a thread other than EXCEPT that has previously reported a
2547 breakpoint event, and thus needs a step-over in order to make
2548 progress. Returns NULL is none is found. */
2549
2550 static struct thread_info *
2551 find_thread_needs_step_over (struct thread_info *except)
2552 {
2553 struct thread_info *tp, *current;
2554
2555 /* With non-stop mode on, threads are always handled individually. */
2556 gdb_assert (! non_stop);
2557
2558 current = inferior_thread ();
2559
2560 /* If scheduler locking applies, we can avoid iterating over all
2561 threads. */
2562 if (schedlock_applies (except))
2563 {
2564 if (except != current
2565 && thread_still_needs_step_over (current))
2566 return current;
2567
2568 return NULL;
2569 }
2570
2571 ALL_NON_EXITED_THREADS (tp)
2572 {
2573 /* Ignore the EXCEPT thread. */
2574 if (tp == except)
2575 continue;
2576 /* Ignore threads of processes we're not resuming. */
2577 if (!sched_multi
2578 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2579 continue;
2580
2581 if (thread_still_needs_step_over (tp))
2582 return tp;
2583 }
2584
2585 return NULL;
2586 }
2587
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or 0 for none,
   or -1 for act according to how it stopped.

   If you are stepping, set the various step_... fields in the
   thread's control state before calling here.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct thread_info *tp;
  CORE_ADDR pc;
  struct address_space *aspace;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE, NULL);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = get_regcache_arch (regcache);
  aspace = get_regcache_aspace (regcache);
  pc = regcache_read_pc (regcache);
  tp = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (tp);

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == stop_pc
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	tp->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	tp->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  /* GDB_SIGNAL_DEFAULT means "act according to how the thread
     stopped", i.e. keep the signal recorded at the last stop.  */
  if (siggnal != GDB_SIGNAL_DEFAULT)
    tp->suspend.stop_signal = siggnal;

  /* Record the interpreter that issued the execution command that
     caused this thread to resume.  If the top level interpreter is
     MI/async, and the execution command was a CLI command
     (next/step/etc.), we'll want to print stop event output to the MI
     console channel (the stepped-to line, etc.), as if the user
     entered the execution command on a real GDB console.  */
  inferior_thread ()->control.command_interp = command_interp ();

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: proceed (addr=%s, signal=%s)\n",
			paddress (gdbarch, addr),
			gdb_signal_to_symbol_string (siggnal));

  if (non_stop)
    /* In non-stop, each thread is handled individually.  The context
       must already be set to the right thread here.  */
    ;
  else
    {
      struct thread_info *step_over;

      /* In a multi-threaded task we may select another thread and
	 then continue or step.

	 But if the old thread was stopped at a breakpoint, it will
	 immediately cause another breakpoint stop without any
	 execution (i.e. it will report a breakpoint hit incorrectly).
	 So we must step over it first.

	 Look for a thread other than the current (TP) that reported a
	 breakpoint hit and hasn't been resumed yet since.  */
      step_over = find_thread_needs_step_over (tp);
      if (step_over != NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: need to step-over [%s] first\n",
				target_pid_to_str (step_over->ptid));

	  /* Store the prev_pc for the stepping thread too, needed by
	     switch_back_to_stepped_thread.  */
	  tp->prev_pc = regcache_read_pc (get_current_regcache ());
	  switch_to_thread (step_over->ptid);
	  tp = step_over;
	}
    }

  /* If we need to step over a breakpoint, and we're not using
     displaced stepping to do so, insert all breakpoints (watchpoints,
     etc.) but the one we're stepping over, step one instruction, and
     then re-insert the breakpoint when that step is finished.  */
  if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
    {
      struct regcache *regcache = get_current_regcache ();

      set_step_over_info (get_regcache_aspace (regcache),
			  regcache_read_pc (regcache), 0);
    }
  else
    clear_step_over_info ();

  insert_breakpoints ();

  tp->control.trap_expected = tp->stepping_over_breakpoint;

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Refresh prev_pc value just prior to resuming.  This used to be
     done in stop_waiting, however, setting prev_pc there did not handle
     scenarios such as inferior function calls or returning from
     a function via the return command.  In those cases, the prev_pc
     value was not set properly for subsequent commands.  The prev_pc value
     is used to initialize the starting line number in the ecs.  With an
     invalid value, the gdb next command ends up stopping at the position
     represented by the next line table entry past our start position.
     On platforms that generate one line table entry per line, this
     is not a problem.  However, on the ia64, the compiler generates
     extraneous line table entries that do not increase the line number.
     When we issue the gdb next command on the ia64 after an inferior call
     or a return command, we often end up a few instructions forward, still
     within the original line we started.

     An attempt was made to refresh the prev_pc at the same time the
     execution_control_state is initialized (for instance, just before
     waiting for an inferior event).  But this approach did not work
     because of platforms that use ptrace, where the pc register cannot
     be read unless the inferior is stopped.  At that point, we are not
     guaranteed the inferior is stopped and so the regcache_read_pc() call
     can fail.  Setting the prev_pc value here ensures the value is updated
     correctly when the inferior is stopped.  */
  tp->prev_pc = regcache_read_pc (get_current_regcache ());

  /* Resume inferior.  */
  resume (tp->suspend.stop_signal);

  /* Wait for it to stop (if not standalone)
     and in any case decode why it stopped, and act accordingly.  */
  /* Do this only if we are not using the event loop, or if the target
     does not support asynchronous execution.  */
  if (!target_can_async_p ())
    {
      wait_for_inferior ();
      normal_stop ();
    }
}
2772 \f
2773
2774 /* Start remote-debugging of a machine over a serial link. */
2775
2776 void
2777 start_remote (int from_tty)
2778 {
2779 struct inferior *inferior;
2780
2781 inferior = current_inferior ();
2782 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2783
2784 /* Always go on waiting for the target, regardless of the mode. */
2785 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2786 indicate to wait_for_inferior that a target should timeout if
2787 nothing is returned (instead of just blocking). Because of this,
2788 targets expecting an immediate response need to, internally, set
2789 things up so that the target_wait() is forced to eventually
2790 timeout. */
2791 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2792 differentiate to its caller what the state of the target is after
2793 the initial open has been performed. Here we're assuming that
2794 the target has stopped. It should be possible to eventually have
2795 target_open() return to the caller an indication that the target
2796 is currently running and GDB state should be set to the same as
2797 for an async run. */
2798 wait_for_inferior ();
2799
2800 /* Now that the inferior has stopped, do any bookkeeping like
2801 loading shared libraries. We want to do this before normal_stop,
2802 so that the displayed frame is up to date. */
2803 post_create_inferior (&current_target, from_tty);
2804
2805 normal_stop ();
2806 }
2807
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  /* Reset per-inferior breakpoint bookkeeping for a starting inferior.  */
  breakpoint_init_inferior (inf_starting);

  clear_proceed_status (0);

  /* Forget the cached result of the last target_wait.  */
  target_last_wait_ptid = minus_one_ptid;

  previous_inferior_ptid = inferior_ptid;

  /* Discard any skipped inlined frames.  */
  clear_inline_frame_state (minus_one_ptid);
}
2826
2827 \f
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* The ptid the event was reported for.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The waitstatus returned by target_wait for this event.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in;
     see fill_in_stop_func.  */
  int stop_func_filled_in;
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if the caller should keep waiting for more events rather
     than stopping (see prepare_to_wait / the wait_for_inferior loop).  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
2850
2851 static void handle_inferior_event (struct execution_control_state *ecs);
2852
2853 static void handle_step_into_function (struct gdbarch *gdbarch,
2854 struct execution_control_state *ecs);
2855 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2856 struct execution_control_state *ecs);
2857 static void handle_signal_stop (struct execution_control_state *ecs);
2858 static void check_exception_resume (struct execution_control_state *,
2859 struct frame_info *);
2860
2861 static void end_stepping_range (struct execution_control_state *ecs);
2862 static void stop_waiting (struct execution_control_state *ecs);
2863 static void prepare_to_wait (struct execution_control_state *ecs);
2864 static void keep_going (struct execution_control_state *ecs);
2865 static void process_event_stop_test (struct execution_control_state *ecs);
2866 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2867
2868 /* Callback for iterate over threads. If the thread is stopped, but
2869 the user/frontend doesn't know about that yet, go through
2870 normal_stop, as if the thread had just stopped now. ARG points at
2871 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2872 ptid_is_pid(PTID) is true, applies to all threads of the process
2873 pointed at by PTID. Otherwise, apply only to the thread pointed by
2874 PTID. */
2875
2876 static int
2877 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2878 {
2879 ptid_t ptid = * (ptid_t *) arg;
2880
2881 if ((ptid_equal (info->ptid, ptid)
2882 || ptid_equal (minus_one_ptid, ptid)
2883 || (ptid_is_pid (ptid)
2884 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2885 && is_running (info->ptid)
2886 && !is_executing (info->ptid))
2887 {
2888 struct cleanup *old_chain;
2889 struct execution_control_state ecss;
2890 struct execution_control_state *ecs = &ecss;
2891
2892 memset (ecs, 0, sizeof (*ecs));
2893
2894 old_chain = make_cleanup_restore_current_thread ();
2895
2896 overlay_cache_invalid = 1;
2897 /* Flush target cache before starting to handle each event.
2898 Target was running and cache could be stale. This is just a
2899 heuristic. Running threads may modify target memory, but we
2900 don't get any event. */
2901 target_dcache_invalidate ();
2902
2903 /* Go through handle_inferior_event/normal_stop, so we always
2904 have consistent output as if the stop event had been
2905 reported. */
2906 ecs->ptid = info->ptid;
2907 ecs->event_thread = find_thread_ptid (info->ptid);
2908 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2909 ecs->ws.value.sig = GDB_SIGNAL_0;
2910
2911 handle_inferior_event (ecs);
2912
2913 if (!ecs->wait_some_more)
2914 {
2915 struct thread_info *tp;
2916
2917 normal_stop ();
2918
2919 /* Finish off the continuations. */
2920 tp = inferior_thread ();
2921 do_all_intermediate_continuations_thread (tp, 1);
2922 do_all_continuations_thread (tp, 1);
2923 }
2924
2925 do_cleanups (old_chain);
2926 }
2927
2928 return 0;
2929 }
2930
2931 /* This function is attached as a "thread_stop_requested" observer.
2932 Cleanup local state that assumed the PTID was to be resumed, and
2933 report the stop to the frontend. */
2934
2935 static void
2936 infrun_thread_stop_requested (ptid_t ptid)
2937 {
2938 struct displaced_step_inferior_state *displaced;
2939
2940 /* PTID was requested to stop. Remove it from the displaced
2941 stepping queue, so we don't try to resume it automatically. */
2942
2943 for (displaced = displaced_step_inferior_states;
2944 displaced;
2945 displaced = displaced->next)
2946 {
2947 struct displaced_step_request *it, **prev_next_p;
2948
2949 it = displaced->step_request_queue;
2950 prev_next_p = &displaced->step_request_queue;
2951 while (it)
2952 {
2953 if (ptid_match (it->ptid, ptid))
2954 {
2955 *prev_next_p = it->next;
2956 it->next = NULL;
2957 xfree (it);
2958 }
2959 else
2960 {
2961 prev_next_p = &it->next;
2962 }
2963
2964 it = *prev_next_p;
2965 }
2966 }
2967
2968 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2969 }
2970
2971 static void
2972 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2973 {
2974 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2975 nullify_last_target_wait_ptid ();
2976 }
2977
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are the internal breakpoints infrun
   plants on a thread's behalf while stepping.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
2988
2989 /* If the target still has execution, call FUNC for each thread that
2990 just stopped. In all-stop, that's all the non-exited threads; in
2991 non-stop, that's the current thread, only. */
2992
2993 typedef void (*for_each_just_stopped_thread_callback_func)
2994 (struct thread_info *tp);
2995
2996 static void
2997 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2998 {
2999 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
3000 return;
3001
3002 if (non_stop)
3003 {
3004 /* If in non-stop mode, only the current thread stopped. */
3005 func (inferior_thread ());
3006 }
3007 else
3008 {
3009 struct thread_info *tp;
3010
3011 /* In all-stop mode, all threads have stopped. */
3012 ALL_NON_EXITED_THREADS (tp)
3013 {
3014 func (tp);
3015 }
3016 }
3017 }
3018
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  Thin wrapper around
   for_each_just_stopped_thread.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3027
/* Delete the single-step breakpoints of the threads that just
   stopped.  Unlike the function above, this leaves step resume and
   longjmp/exception resume breakpoints in place.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3036
/* A cleanup wrapper.  ARG is unused; the parameter exists only to
   match the make_cleanup callback signature.  */

static void
delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
{
  delete_just_stopped_threads_infrun_breakpoints ();
}
3044
3045 /* Pretty print the results of target_wait, for debugging purposes. */
3046
3047 static void
3048 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3049 const struct target_waitstatus *ws)
3050 {
3051 char *status_string = target_waitstatus_to_string (ws);
3052 struct ui_file *tmp_stream = mem_fileopen ();
3053 char *text;
3054
3055 /* The text is split over several lines because it was getting too long.
3056 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3057 output as a unit; we want only one timestamp printed if debug_timestamp
3058 is set. */
3059
3060 fprintf_unfiltered (tmp_stream,
3061 "infrun: target_wait (%d.%ld.%ld",
3062 ptid_get_pid (waiton_ptid),
3063 ptid_get_lwp (waiton_ptid),
3064 ptid_get_tid (waiton_ptid));
3065 if (ptid_get_pid (waiton_ptid) != -1)
3066 fprintf_unfiltered (tmp_stream,
3067 " [%s]", target_pid_to_str (waiton_ptid));
3068 fprintf_unfiltered (tmp_stream, ", status) =\n");
3069 fprintf_unfiltered (tmp_stream,
3070 "infrun: %d.%ld.%ld [%s],\n",
3071 ptid_get_pid (result_ptid),
3072 ptid_get_lwp (result_ptid),
3073 ptid_get_tid (result_ptid),
3074 target_pid_to_str (result_ptid));
3075 fprintf_unfiltered (tmp_stream,
3076 "infrun: %s\n",
3077 status_string);
3078
3079 text = ui_file_xstrdup (tmp_stream, NULL);
3080
3081 /* This uses %s in part to handle %'s in the text, but also to avoid
3082 a gcc error: the format attribute requires a string literal. */
3083 fprintf_unfiltered (gdb_stdlog, "%s", text);
3084
3085 xfree (status_string);
3086 xfree (text);
3087 ui_file_delete (tmp_stream);
3088 }
3089
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  Pumps target events until no thread of the current inferior
   is displaced stepping anymore; errors out if the program exits
   meanwhile.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = pid_to_ptid (inf->pid);
  struct cleanup *old_chain_1;
  struct displaced_step_inferior_state *displaced;

  displaced = get_displaced_stepping_state (inf->pid);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Let the rest of infrun know we're detaching; restore the flag on
     error via the cleanup.  */
  old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
  inf->detaching = 1;

  /* Keep consuming events until the displaced step completes.  */
  while (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      struct cleanup *old_chain_2;
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      old_chain_2 = make_cleanup (finish_thread_state_cleanup,
				  &minus_one_ptid);

      /* Now figure out what to do with the result.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      discard_cleanups (old_chain_2);

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  discard_cleanups (old_chain_1);
	  error (_("Program exited while detaching"));
	}
    }

  discard_cleanups (old_chain_1);
}
3165
3166 /* Wait for control to return from inferior to debugger.
3167
3168 If inferior gets a signal, we may decide to start it up again
3169 instead of returning. That is why there is a loop in this function.
3170 When this function actually returns it means the inferior
3171 should be left stopped and GDB should read more commands. */
3172
3173 void
3174 wait_for_inferior (void)
3175 {
3176 struct cleanup *old_cleanups;
3177 struct cleanup *thread_state_chain;
3178
3179 if (debug_infrun)
3180 fprintf_unfiltered
3181 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3182
3183 old_cleanups
3184 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3185 NULL);
3186
3187 /* If an error happens while handling the event, propagate GDB's
3188 knowledge of the executing state to the frontend/user running
3189 state. */
3190 thread_state_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3191
3192 while (1)
3193 {
3194 struct execution_control_state ecss;
3195 struct execution_control_state *ecs = &ecss;
3196 ptid_t waiton_ptid = minus_one_ptid;
3197
3198 memset (ecs, 0, sizeof (*ecs));
3199
3200 overlay_cache_invalid = 1;
3201
3202 /* Flush target cache before starting to handle each event.
3203 Target was running and cache could be stale. This is just a
3204 heuristic. Running threads may modify target memory, but we
3205 don't get any event. */
3206 target_dcache_invalidate ();
3207
3208 if (deprecated_target_wait_hook)
3209 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3210 else
3211 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3212
3213 if (debug_infrun)
3214 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3215
3216 /* Now figure out what to do with the result of the result. */
3217 handle_inferior_event (ecs);
3218
3219 if (!ecs->wait_some_more)
3220 break;
3221 }
3222
3223 /* No error, don't finish the state yet. */
3224 discard_cleanups (thread_state_chain);
3225
3226 do_cleanups (old_cleanups);
3227 }
3228
3229 /* Cleanup that reinstalls the readline callback handler, if the
3230 target is running in the background. If while handling the target
3231 event something triggered a secondary prompt, like e.g., a
3232 pagination prompt, we'll have removed the callback handler (see
3233 gdb_readline_wrapper_line). Need to do this as we go back to the
3234 event loop, ready to process further input. Note this has no
3235 effect if the handler hasn't actually been removed, because calling
3236 rl_callback_handler_install resets the line buffer, thus losing
3237 input. */
3238
3239 static void
3240 reinstall_readline_callback_handler_cleanup (void *arg)
3241 {
3242 if (!interpreter_async)
3243 {
3244 /* We're not going back to the top level event loop yet. Don't
3245 install the readline callback, as it'd prep the terminal,
3246 readline-style (raw, noecho) (e.g., --batch). We'll install
3247 it the next time the prompt is displayed, when we're ready
3248 for input. */
3249 return;
3250 }
3251
3252 if (async_command_editing_p && !sync_execution)
3253 gdb_rl_callback_handler_reinstall ();
3254 }
3255
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  CLIENT_DATA is unused.  */

void
fetch_inferior_event (void *client_data)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
  struct cleanup *ts_old_chain;
  int was_sync = sync_execution;
  int cmd_done = 0;
  ptid_t waiton_ptid = minus_one_ptid;

  memset (ecs, 0, sizeof (*ecs));

  /* End up with readline processing input, if necessary.  */
  make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);

  /* We're handling a live event, so make sure we're doing live
     debugging.  If we're looking at traceframes while the target is
     running, we're going to need to get back to that mode after
     handling the event.  */
  if (non_stop)
    {
      make_cleanup_restore_current_traceframe ();
      set_current_traceframe (-1);
    }

  if (non_stop)
    /* In non-stop mode, the user/frontend should not notice a thread
       switch due to internal events.  Make sure we reverse to the
       user selected thread and frame after handling the event and
       running any breakpoint commands.  */
    make_cleanup_restore_current_thread ();

  overlay_cache_invalid = 1;
  /* Flush target cache before starting to handle each event.  Target
     was running and cache could be stale.  This is just a heuristic.
     Running threads may modify target memory, but we don't get any
     event.  */
  target_dcache_invalidate ();

  make_cleanup_restore_integer (&execution_direction);
  execution_direction = target_execution_direction ();

  if (deprecated_target_wait_hook)
    ecs->ptid =
      deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
  else
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);

  if (debug_infrun)
    print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  if (!non_stop)
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);

  /* Get executed before make_cleanup_restore_current_thread above to apply
     still for the thread which has thrown the exception.  */
  make_bpstat_clear_actions_cleanup ();

  make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);

  /* Now figure out what to do with the result.  */
  handle_inferior_event (ecs);

  if (!ecs->wait_some_more)
    {
      struct inferior *inf = find_inferior_ptid (ecs->ptid);

      delete_just_stopped_threads_infrun_breakpoints ();

      /* We may not find an inferior if this was a process exit.  */
      if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
	normal_stop ();

      /* If the event thread just finished one step of a multi-step
	 command (step_multi && stop_step), continue the command
	 instead of reporting completion.  */
      if (target_has_execution
	  && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
	  && ecs->ws.kind != TARGET_WAITKIND_EXITED
	  && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
	  && ecs->event_thread->step_multi
	  && ecs->event_thread->control.stop_step)
	inferior_event_handler (INF_EXEC_CONTINUE, NULL);
      else
	{
	  inferior_event_handler (INF_EXEC_COMPLETE, NULL);
	  cmd_done = 1;
	}
    }

  /* No error, don't finish the thread states yet.  */
  discard_cleanups (ts_old_chain);

  /* Revert thread and frame.  */
  do_cleanups (old_chain);

  /* If the inferior was in sync execution mode, and now isn't,
     restore the prompt (a synchronous execution command has finished,
     and we're ready for input).  */
  if (interpreter_async && was_sync && !sync_execution)
    observer_notify_sync_execution_done ();

  if (cmd_done
      && !was_sync
      && exec_done_display_p
      && (ptid_equal (inferior_ptid, null_ptid)
	  || !is_running (inferior_ptid)))
    printf_unfiltered (_("completed.\n"));
}
3377
3378 /* Record the frame and location we're currently stepping through. */
3379 void
3380 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3381 {
3382 struct thread_info *tp = inferior_thread ();
3383
3384 tp->control.step_frame_id = get_frame_id (frame);
3385 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3386
3387 tp->current_symtab = sal.symtab;
3388 tp->current_line = sal.line;
3389 }
3390
/* Clear context switchable stepping state.  Resets TSS's flags that
   track in-progress stepping over breakpoints/watchpoints.  */

void
init_thread_stepping_state (struct thread_info *tss)
{
  tss->stepped_breakpoint = 0;
  tss->stepping_over_breakpoint = 0;
  tss->stepping_over_watchpoint = 0;
  tss->step_after_step_resume_breakpoint = 0;
}
3401
/* Set the cached copy of the last ptid/waitstatus.  Counterpart of
   get_last_target_status below.  */

static void
set_last_target_status (ptid_t ptid, struct target_waitstatus status)
{
  target_last_wait_ptid = ptid;
  target_last_waitstatus = status;
}
3410
/* Return the cached copy of the last pid/waitstatus returned by
   target_wait()/deprecated_target_wait_hook().  The data is actually
   cached by handle_inferior_event(), which gets called immediately
   after target_wait()/deprecated_target_wait_hook().  Results are
   stored through PTIDP and STATUS.  */

void
get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
{
  *ptidp = target_last_wait_ptid;
  *status = target_last_waitstatus;
}
3422
/* Forget the cached ptid of the last target_wait, e.g. when the
   corresponding thread exits (see infrun_thread_thread_exit).  */

void
nullify_last_target_wait_ptid (void)
{
  target_last_wait_ptid = minus_one_ptid;
}
3428
/* Switch thread contexts.  Makes PTID the current thread, logging the
   switch if infrun debugging is on.  */

static void
context_switch (ptid_t ptid)
{
  if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
    {
      /* NOTE(review): kept as two separate fprintf calls on purpose
	 -- target_pid_to_str presumably returns a static buffer, so
	 merging these into one call would evaluate both conversions
	 first and print the same string twice.  Confirm before
	 "simplifying".  */
      fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
			  target_pid_to_str (inferior_ptid));
      fprintf_unfiltered (gdb_stdlog, "to %s\n",
			  target_pid_to_str (ptid));
    }

  switch_to_thread (ptid);
}
3444
/* On many targets, a software breakpoint trap leaves the PC pointing
   just past the breakpoint instruction (gdbarch_decr_pc_after_break).
   If the stop in ECS looks like a software breakpoint hit that needs
   such adjustment, move the event thread's PC back to the breakpoint
   address; otherwise do nothing.  */

static void
adjust_pc_after_break (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct address_space *aspace;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
	    PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (ecs->ptid);
  gdbarch = get_regcache_arch (regcache);

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  aspace = get_regcache_aspace (regcache);

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);

      /* Don't let the PC write below be treated as a recordable
	 operation while full record is in use.  */
      if (record_full_is_used ())
	record_full_gdb_operation_disable_set ();

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if 
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (ecs->event_thread)
	  || !currently_stepping (ecs->event_thread)
	  || (ecs->event_thread->stepped_breakpoint
	      && ecs->event_thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);

      do_cleanups (old_cleanups);
    }
}
3586
3587 static int
3588 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3589 {
3590 for (frame = get_prev_frame (frame);
3591 frame != NULL;
3592 frame = get_prev_frame (frame))
3593 {
3594 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3595 return 1;
3596 if (get_frame_type (frame) != INLINE_FRAME)
3597 break;
3598 }
3599
3600 return 0;
3601 }
3602
/* Auxiliary function that handles syscall entry/return events.
   It returns 1 if the inferior should keep going (and GDB
   should ignore the event), or 0 if the event deserves to be
   processed.  */

static int
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  /* Make the event thread the current thread.  */
  if (!ptid_equal (ecs->ptid, inferior_ptid))
    context_switch (ecs->ptid);

  regcache = get_thread_regcache (ecs->ptid);
  syscall_number = ecs->ws.value.syscall_number;
  stop_pc = regcache_read_pc (regcache);

  /* Only bother computing breakpoint status if some syscall
     catchpoint covers this syscall number.  */
  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
                            syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (regcache),
			      stop_pc, ecs->ptid, &ecs->ws);

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return 0;
	}
    }

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);
  return 1;
}
3643
3644 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3645
3646 static void
3647 fill_in_stop_func (struct gdbarch *gdbarch,
3648 struct execution_control_state *ecs)
3649 {
3650 if (!ecs->stop_func_filled_in)
3651 {
3652 /* Don't care about return value; stop_func_start and stop_func_name
3653 will both be 0 if it doesn't work. */
3654 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3655 &ecs->stop_func_start, &ecs->stop_func_end);
3656 ecs->stop_func_start
3657 += gdbarch_deprecated_function_start_offset (gdbarch);
3658
3659 if (gdbarch_skip_entrypoint_p (gdbarch))
3660 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3661 ecs->stop_func_start);
3662
3663 ecs->stop_func_filled_in = 1;
3664 }
3665 }
3666
3667
/* Return the STOP_SOON field of the inferior pointed at by PTID.
   PTID must map to a known inferior (asserted).  */

static enum stop_kind
get_inferior_stop_soon (ptid_t ptid)
{
  struct inferior *inf = find_inferior_ptid (ptid);

  gdb_assert (inf != NULL);
  return inf->control.stop_soon;
}
3678
3679 /* Given an execution control state that has been freshly filled in by
3680 an event from the inferior, figure out what it means and take
3681 appropriate action.
3682
3683 The alternatives are:
3684
3685 1) stop_waiting and return; to really stop and return to the
3686 debugger.
3687
3688 2) keep_going and return; to wait for the next event (set
3689 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3690 once). */
3691
static void
handle_inferior_event (struct execution_control_state *ecs)
{
  enum stop_kind stop_soon;

  if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
	 handling it at this level.  The lower layers have already
	 done what needs to be done, if anything.

	 One of the possible circumstances for this is when the
	 inferior produces output for the console.  The inferior has
	 not stopped, and we are ignoring the event.  Another possible
	 circumstance is any event which the lower level knows will be
	 reported multiple times without an intervening resume.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
      && target_can_async_p () && !sync_execution)
    {
      /* There were no unwaited-for children left in the target, but,
	 we're not synchronously waiting for events either.  Just
	 ignore.  Otherwise, if we were running a synchronous
	 execution command, we need to cancel it and give the user
	 back the terminal.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
      prepare_to_wait (ecs);
      return;
    }

  /* Cache the last pid/waitstatus.  */
  set_last_target_status (ecs->ptid, ecs->ws);

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      /* No unwaited-for children left.  IOW, all resumed children
	 have exited.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");

      stop_print_frame = 0;
      stop_waiting (ecs);
      return;
    }

  if (ecs->ws.kind != TARGET_WAITKIND_EXITED
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
    {
      ecs->event_thread = find_thread_ptid (ecs->ptid);
      /* If it's a new thread, add it to the thread database.  */
      if (ecs->event_thread == NULL)
	ecs->event_thread = add_thread (ecs->ptid);

      /* Disable range stepping.  If the next step request could use a
	 range, this will be end up re-enabled then.  */
      ecs->event_thread->control.may_range_step = 0;
    }

  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
      && (ecs->ws.value.sig == GDB_SIGNAL_ILL
	  || ecs->ws.value.sig == GDB_SIGNAL_SEGV
	  || ecs->ws.value.sig == GDB_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->ptid);

      if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
				      regcache_read_pc (regcache)))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: Treating signal as SIGTRAP\n");
	  ecs->ws.value.sig = GDB_SIGNAL_TRAP;
	}
    }

  /* Mark the non-executing threads accordingly.  In all-stop, all
     threads of all processes are stopped when we get any event
     reported.  In non-stop mode, only the event thread stops.  If
     we're handling a process exit in non-stop mode, there's nothing
     to do, as threads of the dead process are gone, and threads of
     any other process were left running.  */
  if (!non_stop)
    set_executing (minus_one_ptid, 0);
  else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
	   && ecs->ws.kind != TARGET_WAITKIND_EXITED)
    set_executing (ecs->ptid, 0);

  /* Every case below either stops (stop_waiting) or keeps the target
     running (resume/keep_going + prepare_to_wait), and then returns;
     control never falls out of the switch into common code.  */
  switch (ecs->ws.kind)
    {
    case TARGET_WAITKIND_LOADED:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      /* Ignore gracefully during startup of the inferior, as it might
	 be the shell which has just loaded some objects, otherwise
	 add the symbols for the newly loaded objects.  Also ignore at
	 the beginning of an attach or remote session; we will query
	 the full list of libraries once the connection is
	 established.  */

      stop_soon = get_inferior_stop_soon (ecs->ptid);
      if (stop_soon == NO_STOP_QUIETLY)
	{
	  struct regcache *regcache;

	  regcache = get_thread_regcache (ecs->ptid);

	  handle_solib_event ();

	  ecs->event_thread->control.stop_bpstat
	    = bpstat_stop_status (get_regcache_aspace (regcache),
				  stop_pc, ecs->ptid, &ecs->ws);

	  if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	    {
	      /* A catchpoint triggered.  */
	      process_event_stop_test (ecs);
	      return;
	    }

	  /* If requested, stop when the dynamic linker notifies
	     gdb of events.  This allows the user to get control
	     and place breakpoints in initializer routines for
	     dynamically loaded objects (among other things).  */
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  if (stop_on_solib_events)
	    {
	      /* Make sure we print "Stopped due to solib-event" in
		 normal_stop.  */
	      stop_print_frame = 1;

	      stop_waiting (ecs);
	      return;
	    }
	}

      /* If we are skipping through a shell, or through shared library
	 loading that we aren't interested in, resume the program.  If
	 we're running the program normally, also resume.  */
      if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
	{
	  /* Loading of shared libraries might have changed breakpoint
	     addresses.  Make sure new breakpoints are inserted.  */
	  if (stop_soon == NO_STOP_QUIETLY)
	    insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	  prepare_to_wait (ecs);
	  return;
	}

      /* But stop if we're attaching or setting up a remote
	 connection.  */
      if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
	  || stop_soon == STOP_QUIETLY_REMOTE)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
	  stop_waiting (ecs);
	  return;
	}

      /* All stop_soon values should have been handled above.  */
      internal_error (__FILE__, __LINE__,
		      _("unhandled stop_soon: %d"), (int) stop_soon);

    case TARGET_WAITKIND_SPURIOUS:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      resume (GDB_SIGNAL_0);
      prepare_to_wait (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      if (debug_infrun)
	{
	  if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_EXITED\n");
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_SIGNALLED\n");
	}

      inferior_ptid = ecs->ptid;
      set_current_inferior (find_inferior_ptid (ecs->ptid));
      set_current_program_space (current_inferior ()->pspace);
      handle_vfork_child_exec_or_exit (0);
      target_terminal_ours ();	/* Must do this before mourn anyway.  */

      /* Clearing any previous state of convenience variables.  */
      clear_exit_convenience_vars ();

      if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	{
	  /* Record the exit code in the convenience variable $_exitcode, so
	     that the user can inspect this again later.  */
	  set_internalvar_integer (lookup_internalvar ("_exitcode"),
				   (LONGEST) ecs->ws.value.integer);

	  /* Also record this in the inferior itself.  */
	  current_inferior ()->has_exit_code = 1;
	  current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;

	  /* Support the --return-child-result option.  */
	  return_child_result_value = ecs->ws.value.integer;

	  observer_notify_exited (ecs->ws.value.integer);
	}
      else
	{
	  struct regcache *regcache = get_thread_regcache (ecs->ptid);
	  struct gdbarch *gdbarch = get_regcache_arch (regcache);

	  if (gdbarch_gdb_signal_to_target_p (gdbarch))
	    {
	      /* Set the value of the internal variable $_exitsignal,
		 which holds the signal uncaught by the inferior.  */
	      set_internalvar_integer (lookup_internalvar ("_exitsignal"),
				       gdbarch_gdb_signal_to_target (gdbarch,
							  ecs->ws.value.sig));
	    }
	  else
	    {
	      /* We don't have access to the target's method used for
		 converting between signal numbers (GDB's internal
		 representation <-> target's representation).
		 Therefore, we cannot do a good job at displaying this
		 information to the user.  It's better to just warn
		 her about it (if infrun debugging is enabled), and
		 give up.  */
	      if (debug_infrun)
		fprintf_filtered (gdb_stdlog, _("\
Cannot fill $_exitsignal with the correct signal number.\n"));
	    }

	  observer_notify_signal_exited (ecs->ws.value.sig);
	}

      gdb_flush (gdb_stdout);
      target_mourn_inferior ();
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;

      /* The following are the only cases in which we keep going;
	 the above cases end in a continue or goto.  */
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      if (debug_infrun)
	{
	  if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	    fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
	  else
	    fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
	}

      /* Check whether the inferior is displaced stepping.  */
      {
	struct regcache *regcache = get_thread_regcache (ecs->ptid);
	struct gdbarch *gdbarch = get_regcache_arch (regcache);
	struct displaced_step_inferior_state *displaced
	  = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));

	/* If checking displaced stepping is supported, and thread
	   ecs->ptid is displaced stepping.  */
	if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
	  {
	    struct inferior *parent_inf
	      = find_inferior_ptid (ecs->ptid);
	    struct regcache *child_regcache;
	    CORE_ADDR parent_pc;

	    /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
	       indicating that the displaced stepping of syscall instruction
	       has been done.  Perform cleanup for parent process here.  Note
	       that this operation also cleans up the child process for vfork,
	       because their pages are shared.  */
	    displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);

	    if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	      {
		/* Restore scratch pad for child process.  */
		displaced_step_restore (displaced, ecs->ws.value.related_pid);
	      }

	    /* Since the vfork/fork syscall instruction was executed in the scratchpad,
	       the child's PC is also within the scratchpad.  Set the child's PC
	       to the parent's PC value, which has already been fixed up.
	       FIXME: we use the parent's aspace here, although we're touching
	       the child, because the child hasn't been added to the inferior
	       list yet at this point.  */

	    child_regcache
	      = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
						 gdbarch,
						 parent_inf->aspace);
	    /* Read PC value of parent process.  */
	    parent_pc = regcache_read_pc (regcache);

	    if (debug_displaced)
	      fprintf_unfiltered (gdb_stdlog,
				  "displaced: write child pc from %s to %s\n",
				  paddress (gdbarch,
					    regcache_read_pc (child_regcache)),
				  paddress (gdbarch, parent_pc));

	    regcache_write_pc (child_regcache, parent_pc);
	  }
      }

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      /* Immediately detach breakpoints from the child before there's
	 any chance of letting the user delete breakpoints from the
	 breakpoint lists.  If we don't do this early, it's easy to
	 leave left over traps in the child, vis: "break foo; catch
	 fork; c; <fork>; del; c; <child calls foo>".  We only follow
	 the fork on the last `continue', and by that time the
	 breakpoint at "foo" is long gone from the breakpoint table.
	 If we vforked, then we don't need to unpatch here, since both
	 parent and child are sharing the same memory pages; we'll
	 need to unpatch at follow/detach time instead to be certain
	 that new breakpoints added between catchpoint hit time and
	 vfork follow are detached.  */
      if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
	{
	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ecs->ws.value.related_pid);
	}

      delete_just_stopped_threads_single_step_breakpoints ();

      /* In case the event is caught by a catchpoint, remember that
	 the event is to be followed at the next resume of the thread,
	 and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
			      stop_pc, ecs->ptid, &ecs->ws);

      /* If no catchpoint triggered for this, then keep going.  Note
	 that we're interested in knowing the bpstat actually causes a
	 stop, not just if it may explain the signal.  Software
	 watchpoints, for example, always appear in the bpstat.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ptid_t parent;
	  ptid_t child;
	  int should_resume;
	  int follow_child
	    = (follow_fork_mode_string == follow_fork_mode_child);

	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

	  should_resume = follow_fork ();

	  parent = ecs->ptid;
	  child = ecs->ws.value.related_pid;

	  /* In non-stop mode, also resume the other branch.  */
	  if (non_stop && !detach_fork)
	    {
	      /* keep_going here resumes the branch we are NOT
		 following; the followed branch is resumed (or stopped)
		 below based on should_resume.  */
	      if (follow_child)
		switch_to_thread (parent);
	      else
		switch_to_thread (child);

	      ecs->event_thread = inferior_thread ();
	      ecs->ptid = inferior_ptid;
	      keep_going (ecs);
	    }

	  if (follow_child)
	    switch_to_thread (child);
	  else
	    switch_to_thread (parent);

	  ecs->event_thread = inferior_thread ();
	  ecs->ptid = inferior_ptid;

	  if (should_resume)
	    keep_going (ecs);
	  else
	    stop_waiting (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
	 the parent, and keep going.  */

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_VFORK_DONE\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      current_inferior ()->waiting_for_vfork_done = 0;
      current_inferior ()->pspace->breakpoints_not_allowed = 0;
      /* This also takes care of reinserting breakpoints in the
	 previously locked inferior.  */
      keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXECD:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
	 Must do this now, before trying to determine whether to
	 stop.  */
      follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
			      stop_pc, ecs->ptid, &ecs->ws);

      /* Note that this may be referenced from inside
	 bpstat_stop_status above, through inferior_has_execd.  */
      xfree (ecs->ws.value.execd_pathname);
      ecs->ws.value.execd_pathname = NULL;

      /* If no catchpoint triggered for this, then keep going.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  keep_going (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

      /* Be careful not to try to gather much state about a thread
	 that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
      /* Getting the current syscall number.  */
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

      /* Before examining the threads further, step this thread to
	 get it entirely out of the syscall.  (We get notice of the
	 event when the thread is just on the verge of exiting a
	 syscall.  Stepping one instruction seems to get it back
	 into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_STOPPED:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
      ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
      handle_signal_stop (ecs);
      return;

    case TARGET_WAITKIND_NO_HISTORY:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
      /* Reverse execution: target ran out of history info.  */

      delete_just_stopped_threads_single_step_breakpoints ();
      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
      observer_notify_no_history ();
      stop_waiting (ecs);
      return;
    }
}
4214
4215 /* Come here when the program has stopped with a signal. */
4216
4217 static void
4218 handle_signal_stop (struct execution_control_state *ecs)
4219 {
4220 struct frame_info *frame;
4221 struct gdbarch *gdbarch;
4222 int stopped_by_watchpoint;
4223 enum stop_kind stop_soon;
4224 int random_signal;
4225
4226 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4227
4228 /* Do we need to clean up the state of a thread that has
4229 completed a displaced single-step? (Doing so usually affects
4230 the PC, so do it here, before we set stop_pc.) */
4231 displaced_step_fixup (ecs->ptid,
4232 ecs->event_thread->suspend.stop_signal);
4233
4234 /* If we either finished a single-step or hit a breakpoint, but
4235 the user wanted this thread to be stopped, pretend we got a
4236 SIG0 (generic unsignaled stop). */
4237 if (ecs->event_thread->stop_requested
4238 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4239 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4240
4241 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4242
4243 if (debug_infrun)
4244 {
4245 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4246 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4247 struct cleanup *old_chain = save_inferior_ptid ();
4248
4249 inferior_ptid = ecs->ptid;
4250
4251 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4252 paddress (gdbarch, stop_pc));
4253 if (target_stopped_by_watchpoint ())
4254 {
4255 CORE_ADDR addr;
4256
4257 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4258
4259 if (target_stopped_data_address (&current_target, &addr))
4260 fprintf_unfiltered (gdb_stdlog,
4261 "infrun: stopped data address = %s\n",
4262 paddress (gdbarch, addr));
4263 else
4264 fprintf_unfiltered (gdb_stdlog,
4265 "infrun: (no data address available)\n");
4266 }
4267
4268 do_cleanups (old_chain);
4269 }
4270
4271 /* This is originated from start_remote(), start_inferior() and
4272 shared libraries hook functions. */
4273 stop_soon = get_inferior_stop_soon (ecs->ptid);
4274 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4275 {
4276 if (!ptid_equal (ecs->ptid, inferior_ptid))
4277 context_switch (ecs->ptid);
4278 if (debug_infrun)
4279 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4280 stop_print_frame = 1;
4281 stop_waiting (ecs);
4282 return;
4283 }
4284
4285 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4286 && stop_after_trap)
4287 {
4288 if (!ptid_equal (ecs->ptid, inferior_ptid))
4289 context_switch (ecs->ptid);
4290 if (debug_infrun)
4291 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4292 stop_print_frame = 0;
4293 stop_waiting (ecs);
4294 return;
4295 }
4296
4297 /* This originates from attach_command(). We need to overwrite
4298 the stop_signal here, because some kernels don't ignore a
4299 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4300 See more comments in inferior.h. On the other hand, if we
4301 get a non-SIGSTOP, report it to the user - assume the backend
4302 will handle the SIGSTOP if it should show up later.
4303
4304 Also consider that the attach is complete when we see a
4305 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4306 target extended-remote report it instead of a SIGSTOP
4307 (e.g. gdbserver). We already rely on SIGTRAP being our
4308 signal, so this is no exception.
4309
4310 Also consider that the attach is complete when we see a
4311 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4312 the target to stop all threads of the inferior, in case the
4313 low level attach operation doesn't stop them implicitly. If
4314 they weren't stopped implicitly, then the stub will report a
4315 GDB_SIGNAL_0, meaning: stopped for no particular reason
4316 other than GDB's request. */
4317 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4318 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4319 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4320 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4321 {
4322 stop_print_frame = 1;
4323 stop_waiting (ecs);
4324 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4325 return;
4326 }
4327
4328 /* See if something interesting happened to the non-current thread. If
4329 so, then switch to that thread. */
4330 if (!ptid_equal (ecs->ptid, inferior_ptid))
4331 {
4332 if (debug_infrun)
4333 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4334
4335 context_switch (ecs->ptid);
4336
4337 if (deprecated_context_hook)
4338 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4339 }
4340
4341 /* At this point, get hold of the now-current thread's frame. */
4342 frame = get_current_frame ();
4343 gdbarch = get_frame_arch (frame);
4344
4345 /* Pull the single step breakpoints out of the target. */
4346 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4347 {
4348 struct regcache *regcache;
4349 struct address_space *aspace;
4350 CORE_ADDR pc;
4351
4352 regcache = get_thread_regcache (ecs->ptid);
4353 aspace = get_regcache_aspace (regcache);
4354 pc = regcache_read_pc (regcache);
4355
4356 /* However, before doing so, if this single-step breakpoint was
4357 actually for another thread, set this thread up for moving
4358 past it. */
4359 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4360 aspace, pc))
4361 {
4362 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4363 {
4364 if (debug_infrun)
4365 {
4366 fprintf_unfiltered (gdb_stdlog,
4367 "infrun: [%s] hit another thread's "
4368 "single-step breakpoint\n",
4369 target_pid_to_str (ecs->ptid));
4370 }
4371 ecs->hit_singlestep_breakpoint = 1;
4372 }
4373 }
4374 else
4375 {
4376 if (debug_infrun)
4377 {
4378 fprintf_unfiltered (gdb_stdlog,
4379 "infrun: [%s] hit its "
4380 "single-step breakpoint\n",
4381 target_pid_to_str (ecs->ptid));
4382 }
4383 }
4384 }
4385 delete_just_stopped_threads_single_step_breakpoints ();
4386
4387 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4388 && ecs->event_thread->control.trap_expected
4389 && ecs->event_thread->stepping_over_watchpoint)
4390 stopped_by_watchpoint = 0;
4391 else
4392 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4393
4394 /* If necessary, step over this watchpoint. We'll be back to display
4395 it in a moment. */
4396 if (stopped_by_watchpoint
4397 && (target_have_steppable_watchpoint
4398 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4399 {
4400 /* At this point, we are stopped at an instruction which has
4401 attempted to write to a piece of memory under control of
4402 a watchpoint. The instruction hasn't actually executed
4403 yet. If we were to evaluate the watchpoint expression
4404 now, we would get the old value, and therefore no change
4405 would seem to have occurred.
4406
4407 In order to make watchpoints work `right', we really need
4408 to complete the memory write, and then evaluate the
4409 watchpoint expression. We do this by single-stepping the
4410 target.
4411
4412 It may not be necessary to disable the watchpoint to step over
4413 it. For example, the PA can (with some kernel cooperation)
4414 single step over a watchpoint without disabling the watchpoint.
4415
4416 It is far more common to need to disable a watchpoint to step
4417 the inferior over it. If we have non-steppable watchpoints,
4418 we must disable the current watchpoint; it's simplest to
4419 disable all watchpoints.
4420
4421 Any breakpoint at PC must also be stepped over -- if there's
4422 one, it will have already triggered before the watchpoint
4423 triggered, and we either already reported it to the user, or
4424 it didn't cause a stop and we called keep_going. In either
4425 case, if there was a breakpoint at PC, we must be trying to
4426 step past it. */
4427 ecs->event_thread->stepping_over_watchpoint = 1;
4428 keep_going (ecs);
4429 return;
4430 }
4431
4432 ecs->event_thread->stepping_over_breakpoint = 0;
4433 ecs->event_thread->stepping_over_watchpoint = 0;
4434 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4435 ecs->event_thread->control.stop_step = 0;
4436 stop_print_frame = 1;
4437 stopped_by_random_signal = 0;
4438
4439 /* Hide inlined functions starting here, unless we just performed stepi or
4440 nexti. After stepi and nexti, always show the innermost frame (not any
4441 inline function call sites). */
4442 if (ecs->event_thread->control.step_range_end != 1)
4443 {
4444 struct address_space *aspace =
4445 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4446
4447 /* skip_inline_frames is expensive, so we avoid it if we can
4448 determine that the address is one where functions cannot have
4449 been inlined. This improves performance with inferiors that
4450 load a lot of shared libraries, because the solib event
4451 breakpoint is defined as the address of a function (i.e. not
4452 inline). Note that we have to check the previous PC as well
4453 as the current one to catch cases when we have just
4454 single-stepped off a breakpoint prior to reinstating it.
4455 Note that we're assuming that the code we single-step to is
4456 not inline, but that's not definitive: there's nothing
4457 preventing the event breakpoint function from containing
4458 inlined code, and the single-step ending up there. If the
4459 user had set a breakpoint on that inlined code, the missing
4460 skip_inline_frames call would break things. Fortunately
4461 that's an extremely unlikely scenario. */
4462 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4463 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4464 && ecs->event_thread->control.trap_expected
4465 && pc_at_non_inline_function (aspace,
4466 ecs->event_thread->prev_pc,
4467 &ecs->ws)))
4468 {
4469 skip_inline_frames (ecs->ptid);
4470
4471 /* Re-fetch current thread's frame in case that invalidated
4472 the frame cache. */
4473 frame = get_current_frame ();
4474 gdbarch = get_frame_arch (frame);
4475 }
4476 }
4477
4478 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4479 && ecs->event_thread->control.trap_expected
4480 && gdbarch_single_step_through_delay_p (gdbarch)
4481 && currently_stepping (ecs->event_thread))
4482 {
4483 /* We're trying to step off a breakpoint. Turns out that we're
4484 also on an instruction that needs to be stepped multiple
4485 times before it's been fully executing. E.g., architectures
4486 with a delay slot. It needs to be stepped twice, once for
4487 the instruction and once for the delay slot. */
4488 int step_through_delay
4489 = gdbarch_single_step_through_delay (gdbarch, frame);
4490
4491 if (debug_infrun && step_through_delay)
4492 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4493 if (ecs->event_thread->control.step_range_end == 0
4494 && step_through_delay)
4495 {
4496 /* The user issued a continue when stopped at a breakpoint.
4497 Set up for another trap and get out of here. */
4498 ecs->event_thread->stepping_over_breakpoint = 1;
4499 keep_going (ecs);
4500 return;
4501 }
4502 else if (step_through_delay)
4503 {
4504 /* The user issued a step when stopped at a breakpoint.
4505 Maybe we should stop, maybe we should not - the delay
4506 slot *might* correspond to a line of source. In any
4507 case, don't decide that here, just set
4508 ecs->stepping_over_breakpoint, making sure we
4509 single-step again before breakpoints are re-inserted. */
4510 ecs->event_thread->stepping_over_breakpoint = 1;
4511 }
4512 }
4513
4514 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4515 handles this event. */
4516 ecs->event_thread->control.stop_bpstat
4517 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4518 stop_pc, ecs->ptid, &ecs->ws);
4519
4520 /* Following in case break condition called a
4521 function. */
4522 stop_print_frame = 1;
4523
4524 /* This is where we handle "moribund" watchpoints. Unlike
4525 software breakpoints traps, hardware watchpoint traps are
4526 always distinguishable from random traps. If no high-level
4527 watchpoint is associated with the reported stop data address
4528 anymore, then the bpstat does not explain the signal ---
4529 simply make sure to ignore it if `stopped_by_watchpoint' is
4530 set. */
4531
4532 if (debug_infrun
4533 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4534 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4535 GDB_SIGNAL_TRAP)
4536 && stopped_by_watchpoint)
4537 fprintf_unfiltered (gdb_stdlog,
4538 "infrun: no user watchpoint explains "
4539 "watchpoint SIGTRAP, ignoring\n");
4540
4541 /* NOTE: cagney/2003-03-29: These checks for a random signal
4542 at one stage in the past included checks for an inferior
4543 function call's call dummy's return breakpoint. The original
4544 comment, that went with the test, read:
4545
4546 ``End of a stack dummy. Some systems (e.g. Sony news) give
4547 another signal besides SIGTRAP, so check here as well as
4548 above.''
4549
4550 If someone ever tries to get call dummys on a
4551 non-executable stack to work (where the target would stop
4552 with something like a SIGSEGV), then those tests might need
4553 to be re-instated. Given, however, that the tests were only
4554 enabled when momentary breakpoints were not being used, I
4555 suspect that it won't be the case.
4556
4557 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4558 be necessary for call dummies on a non-executable stack on
4559 SPARC. */
4560
4561 /* See if the breakpoints module can explain the signal. */
4562 random_signal
4563 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4564 ecs->event_thread->suspend.stop_signal);
4565
4566 /* Maybe this was a trap for a software breakpoint that has since
4567 been removed. */
4568 if (random_signal && target_stopped_by_sw_breakpoint ())
4569 {
4570 if (program_breakpoint_here_p (gdbarch, stop_pc))
4571 {
4572 struct regcache *regcache;
4573 int decr_pc;
4574
4575 /* Re-adjust PC to what the program would see if GDB was not
4576 debugging it. */
4577 regcache = get_thread_regcache (ecs->event_thread->ptid);
4578 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4579 if (decr_pc != 0)
4580 {
4581 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
4582
4583 if (record_full_is_used ())
4584 record_full_gdb_operation_disable_set ();
4585
4586 regcache_write_pc (regcache, stop_pc + decr_pc);
4587
4588 do_cleanups (old_cleanups);
4589 }
4590 }
4591 else
4592 {
4593 /* A delayed software breakpoint event. Ignore the trap. */
4594 if (debug_infrun)
4595 fprintf_unfiltered (gdb_stdlog,
4596 "infrun: delayed software breakpoint "
4597 "trap, ignoring\n");
4598 random_signal = 0;
4599 }
4600 }
4601
4602 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
4603 has since been removed. */
4604 if (random_signal && target_stopped_by_hw_breakpoint ())
4605 {
4606 /* A delayed hardware breakpoint event. Ignore the trap. */
4607 if (debug_infrun)
4608 fprintf_unfiltered (gdb_stdlog,
4609 "infrun: delayed hardware breakpoint/watchpoint "
4610 "trap, ignoring\n");
4611 random_signal = 0;
4612 }
4613
4614 /* If not, perhaps stepping/nexting can. */
4615 if (random_signal)
4616 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4617 && currently_stepping (ecs->event_thread));
4618
4619 /* Perhaps the thread hit a single-step breakpoint of _another_
4620 thread. Single-step breakpoints are transparent to the
4621 breakpoints module. */
4622 if (random_signal)
4623 random_signal = !ecs->hit_singlestep_breakpoint;
4624
4625 /* No? Perhaps we got a moribund watchpoint. */
4626 if (random_signal)
4627 random_signal = !stopped_by_watchpoint;
4628
4629 /* For the program's own signals, act according to
4630 the signal handling tables. */
4631
4632 if (random_signal)
4633 {
4634 /* Signal not for debugging purposes. */
4635 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4636 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4637
4638 if (debug_infrun)
4639 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4640 gdb_signal_to_symbol_string (stop_signal));
4641
4642 stopped_by_random_signal = 1;
4643
4644 /* Always stop on signals if we're either just gaining control
4645 of the program, or the user explicitly requested this thread
4646 to remain stopped. */
4647 if (stop_soon != NO_STOP_QUIETLY
4648 || ecs->event_thread->stop_requested
4649 || (!inf->detaching
4650 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4651 {
4652 stop_waiting (ecs);
4653 return;
4654 }
4655
4656 /* Notify observers the signal has "handle print" set. Note we
4657 returned early above if stopping; normal_stop handles the
4658 printing in that case. */
4659 if (signal_print[ecs->event_thread->suspend.stop_signal])
4660 {
4661 /* The signal table tells us to print about this signal. */
4662 target_terminal_ours_for_output ();
4663 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4664 target_terminal_inferior ();
4665 }
4666
4667 /* Clear the signal if it should not be passed. */
4668 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4669 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4670
4671 if (ecs->event_thread->prev_pc == stop_pc
4672 && ecs->event_thread->control.trap_expected
4673 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4674 {
4675 /* We were just starting a new sequence, attempting to
4676 single-step off of a breakpoint and expecting a SIGTRAP.
4677 Instead this signal arrives. This signal will take us out
4678 of the stepping range so GDB needs to remember to, when
4679 the signal handler returns, resume stepping off that
4680 breakpoint. */
4681 /* To simplify things, "continue" is forced to use the same
4682 code paths as single-step - set a breakpoint at the
4683 signal return address and then, once hit, step off that
4684 breakpoint. */
4685 if (debug_infrun)
4686 fprintf_unfiltered (gdb_stdlog,
4687 "infrun: signal arrived while stepping over "
4688 "breakpoint\n");
4689
4690 insert_hp_step_resume_breakpoint_at_frame (frame);
4691 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4692 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4693 ecs->event_thread->control.trap_expected = 0;
4694
4695 /* If we were nexting/stepping some other thread, switch to
4696 it, so that we don't continue it, losing control. */
4697 if (!switch_back_to_stepped_thread (ecs))
4698 keep_going (ecs);
4699 return;
4700 }
4701
4702 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4703 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4704 || ecs->event_thread->control.step_range_end == 1)
4705 && frame_id_eq (get_stack_frame_id (frame),
4706 ecs->event_thread->control.step_stack_frame_id)
4707 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4708 {
4709 /* The inferior is about to take a signal that will take it
4710 out of the single step range. Set a breakpoint at the
4711 current PC (which is presumably where the signal handler
4712 will eventually return) and then allow the inferior to
4713 run free.
4714
4715 Note that this is only needed for a signal delivered
4716 while in the single-step range. Nested signals aren't a
4717 problem as they eventually all return. */
4718 if (debug_infrun)
4719 fprintf_unfiltered (gdb_stdlog,
4720 "infrun: signal may take us out of "
4721 "single-step range\n");
4722
4723 insert_hp_step_resume_breakpoint_at_frame (frame);
4724 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4725 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4726 ecs->event_thread->control.trap_expected = 0;
4727 keep_going (ecs);
4728 return;
4729 }
4730
4731      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4732 when either there's a nested signal, or when there's a
4733 pending signal enabled just as the signal handler returns
4734 (leaving the inferior at the step-resume-breakpoint without
4735 actually executing it). Either way continue until the
4736 breakpoint is really hit. */
4737
4738 if (!switch_back_to_stepped_thread (ecs))
4739 {
4740 if (debug_infrun)
4741 fprintf_unfiltered (gdb_stdlog,
4742 "infrun: random signal, keep going\n");
4743
4744 keep_going (ecs);
4745 }
4746 return;
4747 }
4748
4749 process_event_stop_test (ecs);
4750 }
4751
4752 /* Come here when we've got some debug event / signal we can explain
4753 (IOW, not a random signal), and test whether it should cause a
4754 stop, or whether we should resume the inferior (transparently).
4755 E.g., could be a breakpoint whose condition evaluates false; we
4756 could be still stepping within the line; etc. */
4757
4758 static void
4759 process_event_stop_test (struct execution_control_state *ecs)
4760 {
4761 struct symtab_and_line stop_pc_sal;
4762 struct frame_info *frame;
4763 struct gdbarch *gdbarch;
4764 CORE_ADDR jmp_buf_pc;
4765 struct bpstat_what what;
4766
4767 /* Handle cases caused by hitting a breakpoint. */
4768
4769 frame = get_current_frame ();
4770 gdbarch = get_frame_arch (frame);
4771
4772 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4773
4774 if (what.call_dummy)
4775 {
4776 stop_stack_dummy = what.call_dummy;
4777 }
4778
4779 /* If we hit an internal event that triggers symbol changes, the
4780 current frame will be invalidated within bpstat_what (e.g., if we
4781 hit an internal solib event). Re-fetch it. */
4782 frame = get_current_frame ();
4783 gdbarch = get_frame_arch (frame);
4784
4785 switch (what.main_action)
4786 {
4787 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4788 /* If we hit the breakpoint at longjmp while stepping, we
4789 install a momentary breakpoint at the target of the
4790 jmp_buf. */
4791
4792 if (debug_infrun)
4793 fprintf_unfiltered (gdb_stdlog,
4794 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4795
4796 ecs->event_thread->stepping_over_breakpoint = 1;
4797
4798 if (what.is_longjmp)
4799 {
4800 struct value *arg_value;
4801
4802 /* If we set the longjmp breakpoint via a SystemTap probe,
4803 then use it to extract the arguments. The destination PC
4804 is the third argument to the probe. */
4805 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4806 if (arg_value)
4807 {
4808 jmp_buf_pc = value_as_address (arg_value);
4809 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4810 }
4811 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4812 || !gdbarch_get_longjmp_target (gdbarch,
4813 frame, &jmp_buf_pc))
4814 {
4815 if (debug_infrun)
4816 fprintf_unfiltered (gdb_stdlog,
4817 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4818 "(!gdbarch_get_longjmp_target)\n");
4819 keep_going (ecs);
4820 return;
4821 }
4822
4823 /* Insert a breakpoint at resume address. */
4824 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4825 }
4826 else
4827 check_exception_resume (ecs, frame);
4828 keep_going (ecs);
4829 return;
4830
4831 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4832 {
4833 struct frame_info *init_frame;
4834
4835 /* There are several cases to consider.
4836
4837 1. The initiating frame no longer exists. In this case we
4838 must stop, because the exception or longjmp has gone too
4839 far.
4840
4841 2. The initiating frame exists, and is the same as the
4842 current frame. We stop, because the exception or longjmp
4843 has been caught.
4844
4845 3. The initiating frame exists and is different from the
4846 current frame. This means the exception or longjmp has
4847 been caught beneath the initiating frame, so keep going.
4848
4849 4. longjmp breakpoint has been placed just to protect
4850 against stale dummy frames and user is not interested in
4851 stopping around longjmps. */
4852
4853 if (debug_infrun)
4854 fprintf_unfiltered (gdb_stdlog,
4855 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4856
4857 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4858 != NULL);
4859 delete_exception_resume_breakpoint (ecs->event_thread);
4860
4861 if (what.is_longjmp)
4862 {
4863 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4864
4865 if (!frame_id_p (ecs->event_thread->initiating_frame))
4866 {
4867 /* Case 4. */
4868 keep_going (ecs);
4869 return;
4870 }
4871 }
4872
4873 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4874
4875 if (init_frame)
4876 {
4877 struct frame_id current_id
4878 = get_frame_id (get_current_frame ());
4879 if (frame_id_eq (current_id,
4880 ecs->event_thread->initiating_frame))
4881 {
4882 /* Case 2. Fall through. */
4883 }
4884 else
4885 {
4886 /* Case 3. */
4887 keep_going (ecs);
4888 return;
4889 }
4890 }
4891
4892 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4893 exists. */
4894 delete_step_resume_breakpoint (ecs->event_thread);
4895
4896 end_stepping_range (ecs);
4897 }
4898 return;
4899
4900 case BPSTAT_WHAT_SINGLE:
4901 if (debug_infrun)
4902 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4903 ecs->event_thread->stepping_over_breakpoint = 1;
4904 /* Still need to check other stuff, at least the case where we
4905 are stepping and step out of the right range. */
4906 break;
4907
4908 case BPSTAT_WHAT_STEP_RESUME:
4909 if (debug_infrun)
4910 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4911
4912 delete_step_resume_breakpoint (ecs->event_thread);
4913 if (ecs->event_thread->control.proceed_to_finish
4914 && execution_direction == EXEC_REVERSE)
4915 {
4916 struct thread_info *tp = ecs->event_thread;
4917
4918 /* We are finishing a function in reverse, and just hit the
4919 step-resume breakpoint at the start address of the
4920 function, and we're almost there -- just need to back up
4921 by one more single-step, which should take us back to the
4922 function call. */
4923 tp->control.step_range_start = tp->control.step_range_end = 1;
4924 keep_going (ecs);
4925 return;
4926 }
4927 fill_in_stop_func (gdbarch, ecs);
4928 if (stop_pc == ecs->stop_func_start
4929 && execution_direction == EXEC_REVERSE)
4930 {
4931 /* We are stepping over a function call in reverse, and just
4932 hit the step-resume breakpoint at the start address of
4933 the function. Go back to single-stepping, which should
4934 take us back to the function call. */
4935 ecs->event_thread->stepping_over_breakpoint = 1;
4936 keep_going (ecs);
4937 return;
4938 }
4939 break;
4940
4941 case BPSTAT_WHAT_STOP_NOISY:
4942 if (debug_infrun)
4943 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4944 stop_print_frame = 1;
4945
4946      /* Assume the thread stopped for a breakpoint.  We'll still check
4947 whether a/the breakpoint is there when the thread is next
4948 resumed. */
4949 ecs->event_thread->stepping_over_breakpoint = 1;
4950
4951 stop_waiting (ecs);
4952 return;
4953
4954 case BPSTAT_WHAT_STOP_SILENT:
4955 if (debug_infrun)
4956 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4957 stop_print_frame = 0;
4958
4959      /* Assume the thread stopped for a breakpoint.  We'll still check
4960 whether a/the breakpoint is there when the thread is next
4961 resumed. */
4962 ecs->event_thread->stepping_over_breakpoint = 1;
4963 stop_waiting (ecs);
4964 return;
4965
4966 case BPSTAT_WHAT_HP_STEP_RESUME:
4967 if (debug_infrun)
4968 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4969
4970 delete_step_resume_breakpoint (ecs->event_thread);
4971 if (ecs->event_thread->step_after_step_resume_breakpoint)
4972 {
4973 /* Back when the step-resume breakpoint was inserted, we
4974 were trying to single-step off a breakpoint. Go back to
4975 doing that. */
4976 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4977 ecs->event_thread->stepping_over_breakpoint = 1;
4978 keep_going (ecs);
4979 return;
4980 }
4981 break;
4982
4983 case BPSTAT_WHAT_KEEP_CHECKING:
4984 break;
4985 }
4986
4987 /* If we stepped a permanent breakpoint and we had a high priority
4988 step-resume breakpoint for the address we stepped, but we didn't
4989 hit it, then we must have stepped into the signal handler. The
4990 step-resume was only necessary to catch the case of _not_
4991 stepping into the handler, so delete it, and fall through to
4992 checking whether the step finished. */
4993 if (ecs->event_thread->stepped_breakpoint)
4994 {
4995 struct breakpoint *sr_bp
4996 = ecs->event_thread->control.step_resume_breakpoint;
4997
4998 if (sr_bp != NULL
4999 && sr_bp->loc->permanent
5000 && sr_bp->type == bp_hp_step_resume
5001 && sr_bp->loc->address == ecs->event_thread->prev_pc)
5002 {
5003 if (debug_infrun)
5004 fprintf_unfiltered (gdb_stdlog,
5005 "infrun: stepped permanent breakpoint, stopped in "
5006 "handler\n");
5007 delete_step_resume_breakpoint (ecs->event_thread);
5008 ecs->event_thread->step_after_step_resume_breakpoint = 0;
5009 }
5010 }
5011
5012 /* We come here if we hit a breakpoint but should not stop for it.
5013 Possibly we also were stepping and should stop for that. So fall
5014 through and test for stepping. But, if not stepping, do not
5015 stop. */
5016
5017 /* In all-stop mode, if we're currently stepping but have stopped in
5018 some other thread, we need to switch back to the stepped thread. */
5019 if (switch_back_to_stepped_thread (ecs))
5020 return;
5021
5022 if (ecs->event_thread->control.step_resume_breakpoint)
5023 {
5024 if (debug_infrun)
5025 fprintf_unfiltered (gdb_stdlog,
5026 "infrun: step-resume breakpoint is inserted\n");
5027
5028 /* Having a step-resume breakpoint overrides anything
5029 else having to do with stepping commands until
5030 that breakpoint is reached. */
5031 keep_going (ecs);
5032 return;
5033 }
5034
5035 if (ecs->event_thread->control.step_range_end == 0)
5036 {
5037 if (debug_infrun)
5038 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
5039 /* Likewise if we aren't even stepping. */
5040 keep_going (ecs);
5041 return;
5042 }
5043
5044 /* Re-fetch current thread's frame in case the code above caused
5045 the frame cache to be re-initialized, making our FRAME variable
5046 a dangling pointer. */
5047 frame = get_current_frame ();
5048 gdbarch = get_frame_arch (frame);
5049 fill_in_stop_func (gdbarch, ecs);
5050
5051 /* If stepping through a line, keep going if still within it.
5052
5053 Note that step_range_end is the address of the first instruction
5054 beyond the step range, and NOT the address of the last instruction
5055 within it!
5056
5057 Note also that during reverse execution, we may be stepping
5058 through a function epilogue and therefore must detect when
5059 the current-frame changes in the middle of a line. */
5060
5061 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
5062 && (execution_direction != EXEC_REVERSE
5063 || frame_id_eq (get_frame_id (frame),
5064 ecs->event_thread->control.step_frame_id)))
5065 {
5066 if (debug_infrun)
5067 fprintf_unfiltered
5068 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
5069 paddress (gdbarch, ecs->event_thread->control.step_range_start),
5070 paddress (gdbarch, ecs->event_thread->control.step_range_end));
5071
5072 /* Tentatively re-enable range stepping; `resume' disables it if
5073 necessary (e.g., if we're stepping over a breakpoint or we
5074 have software watchpoints). */
5075 ecs->event_thread->control.may_range_step = 1;
5076
5077 /* When stepping backward, stop at beginning of line range
5078 (unless it's the function entry point, in which case
5079 keep going back to the call point). */
5080 if (stop_pc == ecs->event_thread->control.step_range_start
5081 && stop_pc != ecs->stop_func_start
5082 && execution_direction == EXEC_REVERSE)
5083 end_stepping_range (ecs);
5084 else
5085 keep_going (ecs);
5086
5087 return;
5088 }
5089
5090 /* We stepped out of the stepping range. */
5091
5092 /* If we are stepping at the source level and entered the runtime
5093 loader dynamic symbol resolution code...
5094
5095 EXEC_FORWARD: we keep on single stepping until we exit the run
5096 time loader code and reach the callee's address.
5097
5098 EXEC_REVERSE: we've already executed the callee (backward), and
5099 the runtime loader code is handled just like any other
5100 undebuggable function call. Now we need only keep stepping
5101 backward through the trampoline code, and that's handled further
5102 down, so there is nothing for us to do here. */
5103
5104 if (execution_direction != EXEC_REVERSE
5105 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5106 && in_solib_dynsym_resolve_code (stop_pc))
5107 {
5108 CORE_ADDR pc_after_resolver =
5109 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
5110
5111 if (debug_infrun)
5112 fprintf_unfiltered (gdb_stdlog,
5113 "infrun: stepped into dynsym resolve code\n");
5114
5115 if (pc_after_resolver)
5116 {
5117 /* Set up a step-resume breakpoint at the address
5118 indicated by SKIP_SOLIB_RESOLVER. */
5119 struct symtab_and_line sr_sal;
5120
5121 init_sal (&sr_sal);
5122 sr_sal.pc = pc_after_resolver;
5123 sr_sal.pspace = get_frame_program_space (frame);
5124
5125 insert_step_resume_breakpoint_at_sal (gdbarch,
5126 sr_sal, null_frame_id);
5127 }
5128
5129 keep_going (ecs);
5130 return;
5131 }
5132
5133 if (ecs->event_thread->control.step_range_end != 1
5134 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5135 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5136 && get_frame_type (frame) == SIGTRAMP_FRAME)
5137 {
5138 if (debug_infrun)
5139 fprintf_unfiltered (gdb_stdlog,
5140 "infrun: stepped into signal trampoline\n");
5141 /* The inferior, while doing a "step" or "next", has ended up in
5142 a signal trampoline (either by a signal being delivered or by
5143 the signal handler returning). Just single-step until the
5144 inferior leaves the trampoline (either by calling the handler
5145 or returning). */
5146 keep_going (ecs);
5147 return;
5148 }
5149
5150 /* If we're in the return path from a shared library trampoline,
5151 we want to proceed through the trampoline when stepping. */
5152 /* macro/2012-04-25: This needs to come before the subroutine
5153 call check below as on some targets return trampolines look
5154 like subroutine calls (MIPS16 return thunks). */
5155 if (gdbarch_in_solib_return_trampoline (gdbarch,
5156 stop_pc, ecs->stop_func_name)
5157 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5158 {
5159 /* Determine where this trampoline returns. */
5160 CORE_ADDR real_stop_pc;
5161
5162 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5163
5164 if (debug_infrun)
5165 fprintf_unfiltered (gdb_stdlog,
5166 "infrun: stepped into solib return tramp\n");
5167
5168 /* Only proceed through if we know where it's going. */
5169 if (real_stop_pc)
5170 {
5171 /* And put the step-breakpoint there and go until there. */
5172 struct symtab_and_line sr_sal;
5173
5174 init_sal (&sr_sal); /* initialize to zeroes */
5175 sr_sal.pc = real_stop_pc;
5176 sr_sal.section = find_pc_overlay (sr_sal.pc);
5177 sr_sal.pspace = get_frame_program_space (frame);
5178
5179 /* Do not specify what the fp should be when we stop since
5180 on some machines the prologue is where the new fp value
5181 is established. */
5182 insert_step_resume_breakpoint_at_sal (gdbarch,
5183 sr_sal, null_frame_id);
5184
5185 /* Restart without fiddling with the step ranges or
5186 other state. */
5187 keep_going (ecs);
5188 return;
5189 }
5190 }
5191
5192 /* Check for subroutine calls. The check for the current frame
5193 equalling the step ID is not necessary - the check of the
5194 previous frame's ID is sufficient - but it is a common case and
5195 cheaper than checking the previous frame's ID.
5196
5197 NOTE: frame_id_eq will never report two invalid frame IDs as
5198 being equal, so to get into this block, both the current and
5199 previous frame must have valid frame IDs. */
5200 /* The outer_frame_id check is a heuristic to detect stepping
5201 through startup code. If we step over an instruction which
5202 sets the stack pointer from an invalid value to a valid value,
5203 we may detect that as a subroutine call from the mythical
5204 "outermost" function. This could be fixed by marking
5205 outermost frames as !stack_p,code_p,special_p. Then the
5206 initial outermost frame, before sp was valid, would
5207 have code_addr == &_start. See the comment in frame_id_eq
5208 for more. */
5209 if (!frame_id_eq (get_stack_frame_id (frame),
5210 ecs->event_thread->control.step_stack_frame_id)
5211 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
5212 ecs->event_thread->control.step_stack_frame_id)
5213 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
5214 outer_frame_id)
5215 || (ecs->event_thread->control.step_start_function
5216 != find_pc_function (stop_pc)))))
5217 {
5218 CORE_ADDR real_stop_pc;
5219
5220 if (debug_infrun)
5221 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
5222
5223 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
5224 {
5225 /* I presume that step_over_calls is only 0 when we're
5226 supposed to be stepping at the assembly language level
5227 ("stepi"). Just stop. */
5228 /* And this works the same backward as frontward. MVS */
5229 end_stepping_range (ecs);
5230 return;
5231 }
5232
5233 /* Reverse stepping through solib trampolines. */
5234
5235 if (execution_direction == EXEC_REVERSE
5236 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5237 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5238 || (ecs->stop_func_start == 0
5239 && in_solib_dynsym_resolve_code (stop_pc))))
5240 {
5241 /* Any solib trampoline code can be handled in reverse
5242 by simply continuing to single-step. We have already
5243 executed the solib function (backwards), and a few
5244 steps will take us back through the trampoline to the
5245 caller. */
5246 keep_going (ecs);
5247 return;
5248 }
5249
5250 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5251 {
5252 /* We're doing a "next".
5253
5254 Normal (forward) execution: set a breakpoint at the
5255 callee's return address (the address at which the caller
5256 will resume).
5257
5258 Reverse (backward) execution. set the step-resume
5259 breakpoint at the start of the function that we just
5260 stepped into (backwards), and continue to there. When we
5261 get there, we'll need to single-step back to the caller. */
5262
5263 if (execution_direction == EXEC_REVERSE)
5264 {
5265 /* If we're already at the start of the function, we've either
5266 just stepped backward into a single instruction function,
5267 or stepped back out of a signal handler to the first instruction
5268 of the function. Just keep going, which will single-step back
5269 to the caller. */
5270 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5271 {
5272 struct symtab_and_line sr_sal;
5273
5274 /* Normal function call return (static or dynamic). */
5275 init_sal (&sr_sal);
5276 sr_sal.pc = ecs->stop_func_start;
5277 sr_sal.pspace = get_frame_program_space (frame);
5278 insert_step_resume_breakpoint_at_sal (gdbarch,
5279 sr_sal, null_frame_id);
5280 }
5281 }
5282 else
5283 insert_step_resume_breakpoint_at_caller (frame);
5284
5285 keep_going (ecs);
5286 return;
5287 }
5288
5289 /* If we are in a function call trampoline (a stub between the
5290 calling routine and the real function), locate the real
5291 function. That's what tells us (a) whether we want to step
5292 into it at all, and (b) what prologue we want to run to the
5293 end of, if we do step into it. */
5294 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5295 if (real_stop_pc == 0)
5296 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5297 if (real_stop_pc != 0)
5298 ecs->stop_func_start = real_stop_pc;
5299
5300 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5301 {
5302 struct symtab_and_line sr_sal;
5303
5304 init_sal (&sr_sal);
5305 sr_sal.pc = ecs->stop_func_start;
5306 sr_sal.pspace = get_frame_program_space (frame);
5307
5308 insert_step_resume_breakpoint_at_sal (gdbarch,
5309 sr_sal, null_frame_id);
5310 keep_going (ecs);
5311 return;
5312 }
5313
5314 /* If we have line number information for the function we are
5315 thinking of stepping into and the function isn't on the skip
5316 list, step into it.
5317
5318 If there are several symtabs at that PC (e.g. with include
5319 files), just want to know whether *any* of them have line
5320 numbers. find_pc_line handles this. */
5321 {
5322 struct symtab_and_line tmp_sal;
5323
5324 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5325 if (tmp_sal.line != 0
5326 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5327 &tmp_sal))
5328 {
5329 if (execution_direction == EXEC_REVERSE)
5330 handle_step_into_function_backward (gdbarch, ecs);
5331 else
5332 handle_step_into_function (gdbarch, ecs);
5333 return;
5334 }
5335 }
5336
5337 /* If we have no line number and the step-stop-if-no-debug is
5338 set, we stop the step so that the user has a chance to switch
5339 in assembly mode. */
5340 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5341 && step_stop_if_no_debug)
5342 {
5343 end_stepping_range (ecs);
5344 return;
5345 }
5346
5347 if (execution_direction == EXEC_REVERSE)
5348 {
5349 /* If we're already at the start of the function, we've either just
5350 stepped backward into a single instruction function without line
5351 number info, or stepped back out of a signal handler to the first
5352 instruction of the function without line number info. Just keep
5353 going, which will single-step back to the caller. */
5354 if (ecs->stop_func_start != stop_pc)
5355 {
5356 /* Set a breakpoint at callee's start address.
5357 From there we can step once and be back in the caller. */
5358 struct symtab_and_line sr_sal;
5359
5360 init_sal (&sr_sal);
5361 sr_sal.pc = ecs->stop_func_start;
5362 sr_sal.pspace = get_frame_program_space (frame);
5363 insert_step_resume_breakpoint_at_sal (gdbarch,
5364 sr_sal, null_frame_id);
5365 }
5366 }
5367 else
5368 /* Set a breakpoint at callee's return address (the address
5369 at which the caller will resume). */
5370 insert_step_resume_breakpoint_at_caller (frame);
5371
5372 keep_going (ecs);
5373 return;
5374 }
5375
5376 /* Reverse stepping through solib trampolines. */
5377
5378 if (execution_direction == EXEC_REVERSE
5379 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5380 {
5381 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5382 || (ecs->stop_func_start == 0
5383 && in_solib_dynsym_resolve_code (stop_pc)))
5384 {
5385 /* Any solib trampoline code can be handled in reverse
5386 by simply continuing to single-step. We have already
5387 executed the solib function (backwards), and a few
5388 steps will take us back through the trampoline to the
5389 caller. */
5390 keep_going (ecs);
5391 return;
5392 }
5393 else if (in_solib_dynsym_resolve_code (stop_pc))
5394 {
5395 /* Stepped backward into the solib dynsym resolver.
5396 Set a breakpoint at its start and continue, then
5397 one more step will take us out. */
5398 struct symtab_and_line sr_sal;
5399
5400 init_sal (&sr_sal);
5401 sr_sal.pc = ecs->stop_func_start;
5402 sr_sal.pspace = get_frame_program_space (frame);
5403 insert_step_resume_breakpoint_at_sal (gdbarch,
5404 sr_sal, null_frame_id);
5405 keep_going (ecs);
5406 return;
5407 }
5408 }
5409
5410 stop_pc_sal = find_pc_line (stop_pc, 0);
5411
5412 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5413 the trampoline processing logic, however, there are some trampolines
5414 that have no names, so we should do trampoline handling first. */
5415 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5416 && ecs->stop_func_name == NULL
5417 && stop_pc_sal.line == 0)
5418 {
5419 if (debug_infrun)
5420 fprintf_unfiltered (gdb_stdlog,
5421 "infrun: stepped into undebuggable function\n");
5422
5423 /* The inferior just stepped into, or returned to, an
5424 undebuggable function (where there is no debugging information
5425 and no line number corresponding to the address where the
5426 inferior stopped). Since we want to skip this kind of code,
5427 we keep going until the inferior returns from this
5428 function - unless the user has asked us not to (via
5429 set step-mode) or we no longer know how to get back
5430 to the call site. */
5431 if (step_stop_if_no_debug
5432 || !frame_id_p (frame_unwind_caller_id (frame)))
5433 {
5434 /* If we have no line number and the step-stop-if-no-debug
5435 is set, we stop the step so that the user has a chance to
5436 switch in assembly mode. */
5437 end_stepping_range (ecs);
5438 return;
5439 }
5440 else
5441 {
5442 /* Set a breakpoint at callee's return address (the address
5443 at which the caller will resume). */
5444 insert_step_resume_breakpoint_at_caller (frame);
5445 keep_going (ecs);
5446 return;
5447 }
5448 }
5449
5450 if (ecs->event_thread->control.step_range_end == 1)
5451 {
5452 /* It is stepi or nexti. We always want to stop stepping after
5453 one instruction. */
5454 if (debug_infrun)
5455 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5456 end_stepping_range (ecs);
5457 return;
5458 }
5459
5460 if (stop_pc_sal.line == 0)
5461 {
5462 /* We have no line number information. That means to stop
5463 stepping (does this always happen right after one instruction,
5464 when we do "s" in a function with no line numbers,
5465 or can this happen as a result of a return or longjmp?). */
5466 if (debug_infrun)
5467 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5468 end_stepping_range (ecs);
5469 return;
5470 }
5471
5472 /* Look for "calls" to inlined functions, part one. If the inline
5473 frame machinery detected some skipped call sites, we have entered
5474 a new inline function. */
5475
5476 if (frame_id_eq (get_frame_id (get_current_frame ()),
5477 ecs->event_thread->control.step_frame_id)
5478 && inline_skipped_frames (ecs->ptid))
5479 {
5480 struct symtab_and_line call_sal;
5481
5482 if (debug_infrun)
5483 fprintf_unfiltered (gdb_stdlog,
5484 "infrun: stepped into inlined function\n");
5485
5486 find_frame_sal (get_current_frame (), &call_sal);
5487
5488 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5489 {
5490 /* For "step", we're going to stop. But if the call site
5491 for this inlined function is on the same source line as
5492 we were previously stepping, go down into the function
5493 first. Otherwise stop at the call site. */
5494
5495 if (call_sal.line == ecs->event_thread->current_line
5496 && call_sal.symtab == ecs->event_thread->current_symtab)
5497 step_into_inline_frame (ecs->ptid);
5498
5499 end_stepping_range (ecs);
5500 return;
5501 }
5502 else
5503 {
5504 /* For "next", we should stop at the call site if it is on a
5505 different source line. Otherwise continue through the
5506 inlined function. */
5507 if (call_sal.line == ecs->event_thread->current_line
5508 && call_sal.symtab == ecs->event_thread->current_symtab)
5509 keep_going (ecs);
5510 else
5511 end_stepping_range (ecs);
5512 return;
5513 }
5514 }
5515
5516 /* Look for "calls" to inlined functions, part two. If we are still
5517 in the same real function we were stepping through, but we have
5518 to go further up to find the exact frame ID, we are stepping
5519 through a more inlined call beyond its call site. */
5520
5521 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5522 && !frame_id_eq (get_frame_id (get_current_frame ()),
5523 ecs->event_thread->control.step_frame_id)
5524 && stepped_in_from (get_current_frame (),
5525 ecs->event_thread->control.step_frame_id))
5526 {
5527 if (debug_infrun)
5528 fprintf_unfiltered (gdb_stdlog,
5529 "infrun: stepping through inlined function\n");
5530
5531 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5532 keep_going (ecs);
5533 else
5534 end_stepping_range (ecs);
5535 return;
5536 }
5537
5538 if ((stop_pc == stop_pc_sal.pc)
5539 && (ecs->event_thread->current_line != stop_pc_sal.line
5540 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5541 {
5542 /* We are at the start of a different line. So stop. Note that
5543 we don't stop if we step into the middle of a different line.
5544 That is said to make things like for (;;) statements work
5545 better. */
5546 if (debug_infrun)
5547 fprintf_unfiltered (gdb_stdlog,
5548 "infrun: stepped to a different line\n");
5549 end_stepping_range (ecs);
5550 return;
5551 }
5552
5553 /* We aren't done stepping.
5554
5555 Optimize by setting the stepping range to the line.
5556 (We might not be in the original line, but if we entered a
5557 new line in mid-statement, we continue stepping. This makes
5558 things like for(;;) statements work better.) */
5559
5560 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5561 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5562 ecs->event_thread->control.may_range_step = 1;
5563 set_step_info (frame, stop_pc_sal);
5564
5565 if (debug_infrun)
5566 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5567 keep_going (ecs);
5568 }
5569
5570 /* In all-stop mode, if we're currently stepping but have stopped in
5571 some other thread, we may need to switch back to the stepped
5572 thread. Returns true we set the inferior running, false if we left
5573 it stopped (and the event needs further processing). */
5574
static int
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  if (!non_stop)
    {
      struct thread_info *tp;
      struct thread_info *stepping_thread;
      struct thread_info *step_over;

      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return 0;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to finish step-over of [%s]\n",
				  target_pid_to_str (ecs->event_thread->ptid));
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
       {
	 if (debug_infrun)
	   {
	     fprintf_unfiltered (gdb_stdlog,
				 "infrun: need to step [%s] over single-step "
				 "breakpoint\n",
				 target_pid_to_str (ecs->ptid));
	   }
	 keep_going (ecs);
	 return 1;
       }

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return 0;

      /* Look for the stepping/nexting thread, and check if any other
	 thread other than the stepping thread needs to start a
	 step-over.  Do all step-overs before actually proceeding with
	 step/next/etc.  */
      stepping_thread = NULL;
      step_over = NULL;
      ALL_NON_EXITED_THREADS (tp)
        {
	  /* Ignore threads of processes we're not resuming.  */
	  if (!sched_multi
	      && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
	    continue;

	  /* When stepping over a breakpoint, we lock all threads
	     except the one that needs to move past the breakpoint.
	     If a non-event thread has this set, the "incomplete
	     step-over" check above should have caught it earlier.  */
	  gdb_assert (!tp->control.trap_expected);

	  /* Did we find the stepping thread?  */
	  if (tp->control.step_range_end)
	    {
	      /* Yep.  There should only one though.  */
	      gdb_assert (stepping_thread == NULL);

	      /* The event thread is handled at the top, before we
		 enter this loop.  */
	      gdb_assert (tp != ecs->event_thread);

	      /* If some thread other than the event thread is
		 stepping, then scheduler locking can't be in effect,
		 otherwise we wouldn't have resumed the current event
		 thread in the first place.  */
	      gdb_assert (!schedlock_applies (tp));

	      stepping_thread = tp;
	    }
	  else if (thread_still_needs_step_over (tp))
	    {
	      step_over = tp;

	      /* At the top we've returned early if the event thread
		 is stepping.  If some other thread not the event
		 thread is stepping, then scheduler locking can't be
		 in effect, and we can resume this thread.  No need to
		 keep looking for the stepping thread then.  */
	      break;
	    }
	}

      /* Pending step-overs take priority: move that thread past its
	 breakpoint before resuming anyone else.  */
      if (step_over != NULL)
	{
	  tp = step_over;
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to step-over [%s]\n",
				  target_pid_to_str (tp->ptid));
	    }

	  /* Only the stepping thread should have this set.  */
	  gdb_assert (tp->control.step_range_end == 0);

	  /* Make the step-over thread the event thread and resume it;
	     the step/next continues only after all step-overs are
	     done.  */
	  ecs->ptid = tp->ptid;
	  ecs->event_thread = tp;
	  switch_to_thread (ecs->ptid);
	  keep_going (ecs);
	  return 1;
	}

      if (stepping_thread != NULL)
	{
	  struct frame_info *frame;
	  struct gdbarch *gdbarch;

	  tp = stepping_thread;

	  /* If the stepping thread exited, then don't try to switch
	     back and resume it, which could fail in several different
	     ways depending on the target.  Instead, just keep going.

	     We can find a stepping dead thread in the thread list in
	     two cases:

	     - The target supports thread exit events, and when the
	     target tries to delete the thread from the thread list,
	     inferior_ptid pointed at the exiting thread.  In such
	     case, calling delete_thread does not really remove the
	     thread from the list; instead, the thread is left listed,
	     with 'exited' state.

	     - The target's debug interface does not support thread
	     exit events, and so we have no idea whatsoever if the
	     previously stepping thread is still alive.  For that
	     reason, we need to synchronously query the target
	     now.  */
	  if (is_exited (tp->ptid)
	      || !target_thread_alive (tp->ptid))
	    {
	      if (debug_infrun)
		fprintf_unfiltered (gdb_stdlog,
				    "infrun: not switching back to "
				    "stepped thread, it has vanished\n");

	      delete_thread (tp->ptid);
	      keep_going (ecs);
	      return 1;
	    }

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: switching back to stepped thread\n");

	  ecs->event_thread = tp;
	  ecs->ptid = tp->ptid;
	  context_switch (ecs->ptid);

	  stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
	  frame = get_current_frame ();
	  gdbarch = get_frame_arch (frame);

	  /* If the PC of the thread we were trying to single-step has
	     changed, then that thread has trapped or been signaled,
	     but the event has not been reported to GDB yet.  Re-poll
	     the target looking for this particular thread's event
	     (i.e. temporarily enable schedlock) by:

	       - setting a break at the current PC
	       - resuming that particular thread, only (by setting
		 trap expected)

	     This prevents us continuously moving the single-step
	     breakpoint forward, one instruction at a time,
	     overstepping.  */

	  if (stop_pc != tp->prev_pc)
	    {
	      ptid_t resume_ptid;

	      if (debug_infrun)
		fprintf_unfiltered (gdb_stdlog,
				    "infrun: expected thread advanced also\n");

	      /* Clear the info of the previous step-over, as it's no
		 longer valid.  It's what keep_going would do too, if
		 we called it.  Must do this before trying to insert
		 the sss breakpoint, otherwise if we were previously
		 trying to step over this exact address in another
		 thread, the breakpoint ends up not installed.  */
	      clear_step_over_info ();

	      insert_single_step_breakpoint (get_frame_arch (frame),
					     get_frame_address_space (frame),
					     stop_pc);

	      resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
	      do_target_resume (resume_ptid,
				currently_stepping (tp), GDB_SIGNAL_0);
	      prepare_to_wait (ecs);
	    }
	  else
	    {
	      if (debug_infrun)
		fprintf_unfiltered (gdb_stdlog,
				    "infrun: expected thread still "
				    "hasn't advanced\n");
	      keep_going (ecs);
	    }

	  return 1;
	}
    }
  /* Nothing to switch to; let the caller process the event.  */
  return 0;
}
5816
5817 /* Is thread TP in the middle of single-stepping? */
5818
5819 static int
5820 currently_stepping (struct thread_info *tp)
5821 {
5822 return ((tp->control.step_range_end
5823 && tp->control.step_resume_breakpoint == NULL)
5824 || tp->control.trap_expected
5825 || tp->stepped_breakpoint
5826 || bpstat_should_step ());
5827 }
5828
5829 /* Inferior has stepped into a subroutine call with source code that
5830 we should not step over. Do step to the first line of code in
5831 it. */
5832
static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  struct compunit_symtab *cust;
  struct symtab_and_line stop_func_sal, sr_sal;

  fill_in_stop_func (gdbarch, ecs);

  /* Skip the prologue, unless the compilation unit is assembly, where
     the "prologue" notion does not apply.  */
  cust = find_pc_compunit_symtab (stop_pc);
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
						  ecs->stop_func_start);

  stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == stop_pc)
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      init_sal (&sr_sal);	/* initialize to zeroes */
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
         some machines the prologue is where the new fp value is
         established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then, by collapsing
	 the step range to a single address.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
5907
5908 /* Inferior has stepped backward into a subroutine call with source
5909 code that we should not step over. Do step to the beginning of the
5910 last line of code in it. */
5911
5912 static void
5913 handle_step_into_function_backward (struct gdbarch *gdbarch,
5914 struct execution_control_state *ecs)
5915 {
5916 struct compunit_symtab *cust;
5917 struct symtab_and_line stop_func_sal;
5918
5919 fill_in_stop_func (gdbarch, ecs);
5920
5921 cust = find_pc_compunit_symtab (stop_pc);
5922 if (cust != NULL && compunit_language (cust) != language_asm)
5923 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5924 ecs->stop_func_start);
5925
5926 stop_func_sal = find_pc_line (stop_pc, 0);
5927
5928 /* OK, we're just going to keep stepping here. */
5929 if (stop_func_sal.pc == stop_pc)
5930 {
5931 /* We're there already. Just stop stepping now. */
5932 end_stepping_range (ecs);
5933 }
5934 else
5935 {
5936 /* Else just reset the step range and keep going.
5937 No step-resume breakpoint, they don't work for
5938 epilogues, which can have multiple entry paths. */
5939 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5940 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5941 keep_going (ecs);
5942 }
5943 return;
5944 }
5945
5946 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5947 This is used to both functions and to skip over code. */
5948
5949 static void
5950 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5951 struct symtab_and_line sr_sal,
5952 struct frame_id sr_id,
5953 enum bptype sr_type)
5954 {
5955 /* There should never be more than one step-resume or longjmp-resume
5956 breakpoint per thread, so we should never be setting a new
5957 step_resume_breakpoint when one is already active. */
5958 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5959 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5960
5961 if (debug_infrun)
5962 fprintf_unfiltered (gdb_stdlog,
5963 "infrun: inserting step-resume breakpoint at %s\n",
5964 paddress (gdbarch, sr_sal.pc));
5965
5966 inferior_thread ()->control.step_resume_breakpoint
5967 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5968 }
5969
5970 void
5971 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5972 struct symtab_and_line sr_sal,
5973 struct frame_id sr_id)
5974 {
5975 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5976 sr_sal, sr_id,
5977 bp_step_resume);
5978 }
5979
5980 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5981 This is used to skip a potential signal handler.
5982
5983 This is called with the interrupted function's frame. The signal
5984 handler, when it returns, will resume the interrupted function at
5985 RETURN_FRAME.pc. */
5986
5987 static void
5988 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5989 {
5990 struct symtab_and_line sr_sal;
5991 struct gdbarch *gdbarch;
5992
5993 gdb_assert (return_frame != NULL);
5994 init_sal (&sr_sal); /* initialize to zeros */
5995
5996 gdbarch = get_frame_arch (return_frame);
5997 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5998 sr_sal.section = find_pc_overlay (sr_sal.pc);
5999 sr_sal.pspace = get_frame_program_space (return_frame);
6000
6001 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
6002 get_stack_frame_id (return_frame),
6003 bp_hp_step_resume);
6004 }
6005
6006 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
6007 is used to skip a function after stepping into it (for "next" or if
6008 the called function has no debugging information).
6009
6010 The current function has almost always been reached by single
6011 stepping a call or return instruction. NEXT_FRAME belongs to the
6012 current function, and the breakpoint will be set at the caller's
6013 resume address.
6014
6015 This is a separate function rather than reusing
6016 insert_hp_step_resume_breakpoint_at_frame in order to avoid
6017 get_prev_frame, which may stop prematurely (see the implementation
6018 of frame_unwind_caller_id for an example). */
6019
6020 static void
6021 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
6022 {
6023 struct symtab_and_line sr_sal;
6024 struct gdbarch *gdbarch;
6025
6026 /* We shouldn't have gotten here if we don't know where the call site
6027 is. */
6028 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
6029
6030 init_sal (&sr_sal); /* initialize to zeros */
6031
6032 gdbarch = frame_unwind_caller_arch (next_frame);
6033 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
6034 frame_unwind_caller_pc (next_frame));
6035 sr_sal.section = find_pc_overlay (sr_sal.pc);
6036 sr_sal.pspace = frame_unwind_program_space (next_frame);
6037
6038 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
6039 frame_unwind_caller_id (next_frame));
6040 }
6041
6042 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
6043 new breakpoint at the target of a jmp_buf. The handling of
6044 longjmp-resume uses the same mechanisms used for handling
6045 "step-resume" breakpoints. */
6046
6047 static void
6048 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
6049 {
6050 /* There should never be more than one longjmp-resume breakpoint per
6051 thread, so we should never be setting a new
6052 longjmp_resume_breakpoint when one is already active. */
6053 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
6054
6055 if (debug_infrun)
6056 fprintf_unfiltered (gdb_stdlog,
6057 "infrun: inserting longjmp-resume breakpoint at %s\n",
6058 paddress (gdbarch, pc));
6059
6060 inferior_thread ()->control.exception_resume_breakpoint =
6061 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
6062 }
6063
6064 /* Insert an exception resume breakpoint. TP is the thread throwing
6065 the exception. The block B is the block of the unwinder debug hook
6066 function. FRAME is the frame corresponding to the call to this
6067 function. SYM is the symbol of the function argument holding the
6068 target PC of the exception. */
6069
6070 static void
6071 insert_exception_resume_breakpoint (struct thread_info *tp,
6072 const struct block *b,
6073 struct frame_info *frame,
6074 struct symbol *sym)
6075 {
6076 TRY
6077 {
6078 struct symbol *vsym;
6079 struct value *value;
6080 CORE_ADDR handler;
6081 struct breakpoint *bp;
6082
6083 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
6084 value = read_var_value (vsym, frame);
6085 /* If the value was optimized out, revert to the old behavior. */
6086 if (! value_optimized_out (value))
6087 {
6088 handler = value_as_address (value);
6089
6090 if (debug_infrun)
6091 fprintf_unfiltered (gdb_stdlog,
6092 "infrun: exception resume at %lx\n",
6093 (unsigned long) handler);
6094
6095 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6096 handler, bp_exception_resume);
6097
6098 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
6099 frame = NULL;
6100
6101 bp->thread = tp->num;
6102 inferior_thread ()->control.exception_resume_breakpoint = bp;
6103 }
6104 }
6105 CATCH (e, RETURN_MASK_ERROR)
6106 {
6107 /* We want to ignore errors here. */
6108 }
6109 END_CATCH
6110 }
6111
6112 /* A helper for check_exception_resume that sets an
6113 exception-breakpoint based on a SystemTap probe. */
6114
6115 static void
6116 insert_exception_resume_from_probe (struct thread_info *tp,
6117 const struct bound_probe *probe,
6118 struct frame_info *frame)
6119 {
6120 struct value *arg_value;
6121 CORE_ADDR handler;
6122 struct breakpoint *bp;
6123
6124 arg_value = probe_safe_evaluate_at_pc (frame, 1);
6125 if (!arg_value)
6126 return;
6127
6128 handler = value_as_address (arg_value);
6129
6130 if (debug_infrun)
6131 fprintf_unfiltered (gdb_stdlog,
6132 "infrun: exception resume at %s\n",
6133 paddress (get_objfile_arch (probe->objfile),
6134 handler));
6135
6136 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6137 handler, bp_exception_resume);
6138 bp->thread = tp->num;
6139 inferior_thread ()->control.exception_resume_breakpoint = bp;
6140 }
6141
6142 /* This is called when an exception has been intercepted. Check to
6143 see whether the exception's destination is of interest, and if so,
6144 set an exception resume breakpoint there. */
6145
static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.probe)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe: fall back to inspecting the unwinder hook's arguments
     via debug info.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  TRY
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:
	 
	 void _Unwind_DebugHook (void *cfa, void *handler);
	 
	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.
	 
	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  /* Skip the first argument (the CFA); the second argument is
	     the handler we want.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  CATCH (e, RETURN_MASK_ERROR)
    {
      /* Errors are non-fatal here; we simply don't set the
	 exception-resume breakpoint.  */
    }
  END_CATCH
}
6210
/* Stop the wait-for-event loop: clear any pending step-over state and
   tell the caller not to wait for more inferior events.  */

static void
stop_waiting (struct execution_control_state *ecs)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");

  /* A step-over in progress is moot once we decide to stop.  */
  clear_step_over_info ();

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;
}
6222
6223 /* Called when we should continue running the inferior, because the
6224 current event doesn't cause a user visible stop. This does the
6225 resuming part; waiting for the next event is done elsewhere. */
6226
static void
keep_going (struct execution_control_state *ecs)
{
  /* Make sure normal_stop is called if we get a QUIT handled before
     reaching resume.  */
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc (get_thread_regcache (ecs->ptid));

  if (ecs->event_thread->control.trap_expected
      && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
    {
      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      discard_cleanups (old_cleanups);
      resume (ecs->event_thread->suspend.stop_signal);
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || thread_still_needs_step_over (ecs->event_thread));
      remove_wps = (ecs->event_thread->stepping_over_watchpoint
		    && !target_have_steppable_watchpoint);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps
	      || !use_displaced_stepping (get_regcache_arch (regcache))))
	{
	  set_step_over_info (get_regcache_aspace (regcache),
			      regcache_read_pc (regcache), remove_wps);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps);
      else
	clear_step_over_info ();

      /* Stop stepping if inserting breakpoints fails.  */
      TRY
	{
	  insert_breakpoints ();
	}
      CATCH (e, RETURN_MASK_ERROR)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  /* Drop the resume cleanup before returning, as we are not
	     going to resume the target.  */
	  discard_cleanups (old_cleanups);
	  return;
	}
      END_CATCH

      /* Expect a trap iff we're about to step over something.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      /* Do not deliver GDB_SIGNAL_TRAP (except when the user
	 explicitly specifies that such a signal should be delivered
	 to the target program).  Typically, that would occur when a
	 user is debugging a target monitor on a simulator: the target
	 monitor sets a breakpoint; the simulator encounters this
	 breakpoint and halts the simulation handing control to GDB;
	 GDB, noting that the stop address doesn't map to any known
	 breakpoint, returns control back to the simulator; the
	 simulator then delivers the hardware equivalent of a
	 GDB_SIGNAL_TRAP to the program being debugged.	 */
      if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	  && !signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      discard_cleanups (old_cleanups);
      resume (ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
6329
6330 /* This function normally comes after a resume, before
6331 handle_inferior_event exits. It takes care of any last bits of
6332 housekeeping, and sets the all-important wait_some_more flag. */
6333
6334 static void
6335 prepare_to_wait (struct execution_control_state *ecs)
6336 {
6337 if (debug_infrun)
6338 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6339
6340 /* This is the old end of the while loop. Let everybody know we
6341 want to wait for the inferior some more and get called again
6342 soon. */
6343 ecs->wait_some_more = 1;
6344 }
6345
6346 /* We are done with the step range of a step/next/si/ni command.
6347 Called once for each n of a "step n" operation. */
6348
6349 static void
6350 end_stepping_range (struct execution_control_state *ecs)
6351 {
6352 ecs->event_thread->control.stop_step = 1;
6353 stop_waiting (ecs);
6354 }
6355
6356 /* Several print_*_reason functions to print why the inferior has stopped.
6357 We always print something when the inferior exits, or receives a signal.
6358 The rest of the cases are dealt with later on in normal_stop and
6359 print_it_typical. Ideally there should be a call to one of these
6360 print_*_reason functions functions from handle_inferior_event each time
6361 stop_waiting is called.
6362
6363 Note that we don't call these directly, instead we delegate that to
6364 the interpreters, through observers. Interpreters then call these
6365 with whatever uiout is right. */
6366
6367 void
6368 print_end_stepping_range_reason (struct ui_out *uiout)
6369 {
6370 /* For CLI-like interpreters, print nothing. */
6371
6372 if (ui_out_is_mi_like_p (uiout))
6373 {
6374 ui_out_field_string (uiout, "reason",
6375 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6376 }
6377 }
6378
/* Report that the inferior was terminated by signal SIGGNAL, emitting
   both the MI reason field (for MI consumers) and the human-readable
   message, bracketed by annotations for annotate-mode front ends.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (ui_out_is_mi_like_p (uiout))
    ui_out_field_string
      (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  ui_out_text (uiout, "\nProgram terminated with signal ");
  annotate_signal_name ();
  /* E.g. "SIGSEGV".  */
  ui_out_field_string (uiout, "signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  ui_out_text (uiout, ", ");
  annotate_signal_string ();
  /* E.g. "Segmentation fault".  */
  ui_out_field_string (uiout, "signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  ui_out_text (uiout, ".\n");
  ui_out_text (uiout, "The program no longer exists.\n");
}
6399
6400 void
6401 print_exited_reason (struct ui_out *uiout, int exitstatus)
6402 {
6403 struct inferior *inf = current_inferior ();
6404 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6405
6406 annotate_exited (exitstatus);
6407 if (exitstatus)
6408 {
6409 if (ui_out_is_mi_like_p (uiout))
6410 ui_out_field_string (uiout, "reason",
6411 async_reason_lookup (EXEC_ASYNC_EXITED));
6412 ui_out_text (uiout, "[Inferior ");
6413 ui_out_text (uiout, plongest (inf->num));
6414 ui_out_text (uiout, " (");
6415 ui_out_text (uiout, pidstr);
6416 ui_out_text (uiout, ") exited with code ");
6417 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6418 ui_out_text (uiout, "]\n");
6419 }
6420 else
6421 {
6422 if (ui_out_is_mi_like_p (uiout))
6423 ui_out_field_string
6424 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6425 ui_out_text (uiout, "[Inferior ");
6426 ui_out_text (uiout, plongest (inf->num));
6427 ui_out_text (uiout, " (");
6428 ui_out_text (uiout, pidstr);
6429 ui_out_text (uiout, ") exited normally]\n");
6430 }
6431 }
6432
/* The inferior stopped because it received signal SIGGNAL (or, for
   GDB_SIGNAL_0 on a CLI, simply stopped).  Report it on UIOUT.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signal ();

  if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
    {
      /* No actual signal was delivered; just say which thread
	 stopped.  */
      struct thread_info *t = inferior_thread ();

      ui_out_text (uiout, "\n[");
      ui_out_field_string (uiout, "thread-name",
			   target_pid_to_str (t->ptid));
      ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
      ui_out_text (uiout, " stopped");
    }
  else
    {
      ui_out_text (uiout, "\nProgram received signal ");
      annotate_signal_name ();
      if (ui_out_is_mi_like_p (uiout))
	ui_out_field_string
	  (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      ui_out_field_string (uiout, "signal-name",
			   gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      ui_out_text (uiout, ", ");
      annotate_signal_string ();
      ui_out_field_string (uiout, "signal-meaning",
			   gdb_signal_to_string (siggnal));
      annotate_signal_string_end ();
    }
  /* Shared terminator for both branches.  */
  ui_out_text (uiout, ".\n");
}
6466
/* Reverse execution ran out of recorded history; tell the user.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  ui_out_text (uiout, "\nNo more reverse-execution history.\n");
}
6472
6473 /* Print current location without a level number, if we have changed
6474 functions or hit a breakpoint. Print source line if we have one.
6475 bpstat_print contains the logic deciding in detail what to print,
6476 based on the event(s) that just occurred. */
6477
void
print_stop_event (struct target_waitstatus *ws)
{
  int bpstat_ret;
  int source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery decide what (if anything) it printed
     about the stop; its answer drives how much location/source context
     we add below.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && tp->control.step_start_function == find_pc_function (stop_pc))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);

  /* Display the auto-display expressions.  */
  do_displays ();
}
6534
6535 /* Here to return control to GDB when the inferior stops for real.
6536 Print appropriate messages, remove breakpoints, give terminal our modes.
6537
6538 STOP_PRINT_FRAME nonzero means print the executing frame
6539 (pc, function, args, file, line number and line text).
6540 BREAKPOINTS_FAILED nonzero means stop was due to error
6541 attempting to insert breakpoints. */
6542
void
normal_stop (void)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);

  get_last_target_status (&last_ptid, &last);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */
  if (!non_stop)
    make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else if (last.kind != TARGET_WAITKIND_SIGNALLED
	   && last.kind != TARGET_WAITKIND_EXITED
	   && last.kind != TARGET_WAITKIND_NO_RESUMED)
    make_cleanup (finish_thread_state_cleanup, &inferior_ptid);

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && !ptid_equal (previous_inferior_ptid, inferior_ptid)
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      target_terminal_ours_for_output ();
      printf_filtered (_("[Switching to %s]\n"),
		       target_pid_to_str (inferior_ptid));
      annotate_thread_changed ();
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      gdb_assert (sync_execution || !target_can_async_p ());

      target_terminal_ours_for_output ();
      printf_filtered (_("No unwaited-for children left.\n"));
    }

  /* Note: this depends on the update_thread_list call above.  */
  if (!breakpoints_should_be_inserted_now () && target_has_execution)
    {
      if (remove_breakpoints ())
	{
	  target_terminal_ours_for_output ();
	  printf_filtered (_("Cannot remove breakpoints because "
			     "program is no longer writable.\nFurther "
			     "execution is probably impossible.\n"));
	}
    }

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  /* Notify observers if we finished a "step"-like command, etc.  */
  if (target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && inferior_thread ()->control.stop_step)
    {
      /* But not if in the middle of doing a "step n" operation for
	 n > 1.  */
      if (inferior_thread ()->step_multi)
	goto done;

      observer_notify_end_stepping_range ();
    }

  target_terminal_ours ();
  async_enable_stdin ();

  /* Set the current source location.  This will also happen if we
     display the frame below, but the current SAL will be incorrect
     during a user hook-stop function.  */
  if (has_stack_frames () && !stop_stack_dummy)
    set_current_sal_from_frame (get_current_frame ());

  /* Let the user/frontend see the threads as stopped, but do nothing
     if the thread was running an infcall.  We may be e.g., evaluating
     a breakpoint condition.  In that case, the thread had state
     THREAD_RUNNING before the infcall, and shall remain set to
     running, all without informing the user/frontend about state
     transition changes.  If this is actually a call command, then the
     thread was originally already stopped, so there's no state to
     finish either.  */
  if (target_has_execution && inferior_thread ()->control.in_infcall)
    discard_cleanups (old_chain);
  else
    do_cleanups (old_chain);

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command)
    catch_errors (hook_stop_stub, stop_command,
		  "Error while running hook_stop:\n", RETURN_MASK_ALL);

  if (!has_stack_frames ())
    goto done;

  if (last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED)
    goto done;

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.
     Don't do this on return from a stack dummy routine,
     or if the program has exited.  */

  if (!stop_stack_dummy)
    {
      select_frame (get_current_frame ());

      /* If --batch-silent is enabled then there's no need to print the current
	 source location, and to try risks causing an error message about
	 missing source files.  */
      if (stop_print_frame && !batch_silent)
	print_stop_event (&last);
    }

  /* Save the function value return registers, if we care.
     We might be about to restore their previous contents.  */
  if (inferior_thread ()->control.proceed_to_finish
      && execution_direction != EXEC_REVERSE)
    {
      /* This should not be necessary.  */
      if (stop_registers)
	regcache_xfree (stop_registers);

      /* NB: The copy goes through to the target picking up the value of
	 all the registers.  */
      stop_registers = regcache_dup (get_current_regcache ());
    }

  if (stop_stack_dummy == STOP_STACK_DUMMY)
    {
      /* Pop the empty frame that contains the stack dummy.
	 This also restores inferior state prior to the call
	 (struct infcall_suspend_state).  */
      struct frame_info *frame = get_current_frame ();

      gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
      frame_pop (frame);
      /* frame_pop() calls reinit_frame_cache as the last thing it
	 does which means there's currently no selected frame.  We
	 don't need to re-establish a selected frame if the dummy call
	 returns normally, that will be done by
	 restore_infcall_control_state.  However, we do have to handle
	 the case where the dummy call is returning after being
	 stopped (e.g. the dummy call previously hit a breakpoint).
	 We can't know which case we have so just always re-establish
	 a selected frame here.  */
      select_frame (get_current_frame ());
    }

done:
  annotate_stopped ();

  /* Suppress the stop observer if we're in the middle of:

     - a step n (n > 1), as there are still more steps to be done.

     - a "finish" command, as the observer will be called in
       finish_command_continuation, so it can include the inferior
       function's return value.

     - calling an inferior function, as we pretend the inferior didn't
       run at all.  The return value of the call is handled by the
       expression evaluator, through call_function_by_hand.  */

  if (!target_has_execution
      || last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED
      || last.kind == TARGET_WAITKIND_NO_RESUMED
      || (!(inferior_thread ()->step_multi
	    && inferior_thread ()->control.stop_step)
	  && !(inferior_thread ()->control.stop_bpstat
	       && inferior_thread ()->control.proceed_to_finish)
	  && !inferior_thread ()->control.in_infcall))
    {
      if (!ptid_equal (inferior_ptid, null_ptid))
	observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
				     stop_print_frame);
      else
	observer_notify_normal_stop (NULL, stop_print_frame);
    }

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();
}
6778
/* catch_errors helper: run the pre-hook of the "stop" command.  */

static int
hook_stop_stub (void *cmd)
{
  execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
  return 0;
}
6785 \f
/* Return the current "handle" setting for whether GDB stops when the
   inferior receives signal SIGNO.  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
6791
/* Return the current "handle" setting for whether GDB mentions signal
   SIGNO when the inferior receives it.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
6797
/* Return the current "handle" setting for whether signal SIGNO is
   passed on to the inferior program.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
6803
6804 static void
6805 signal_cache_update (int signo)
6806 {
6807 if (signo == -1)
6808 {
6809 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6810 signal_cache_update (signo);
6811
6812 return;
6813 }
6814
6815 signal_pass[signo] = (signal_stop[signo] == 0
6816 && signal_print[signo] == 0
6817 && signal_program[signo] == 1
6818 && signal_catch[signo] == 0);
6819 }
6820
6821 int
6822 signal_stop_update (int signo, int state)
6823 {
6824 int ret = signal_stop[signo];
6825
6826 signal_stop[signo] = state;
6827 signal_cache_update (signo);
6828 return ret;
6829 }
6830
6831 int
6832 signal_print_update (int signo, int state)
6833 {
6834 int ret = signal_print[signo];
6835
6836 signal_print[signo] = state;
6837 signal_cache_update (signo);
6838 return ret;
6839 }
6840
6841 int
6842 signal_pass_update (int signo, int state)
6843 {
6844 int ret = signal_program[signo];
6845
6846 signal_program[signo] = state;
6847 signal_cache_update (signo);
6848 return ret;
6849 }
6850
6851 /* Update the global 'signal_catch' from INFO and notify the
6852 target. */
6853
6854 void
6855 signal_catch_update (const unsigned int *info)
6856 {
6857 int i;
6858
6859 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6860 signal_catch[i] = info[i] > 0;
6861 signal_cache_update (-1);
6862 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6863 }
6864
/* Print the header row of the signal-disposition table shown by
   "info signals" and "handle".  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal Stop\tPrint\tPass "
		     "to program\tDescription\n"));
}
6871
6872 static void
6873 sig_print_info (enum gdb_signal oursig)
6874 {
6875 const char *name = gdb_signal_to_name (oursig);
6876 int name_padding = 13 - strlen (name);
6877
6878 if (name_padding <= 0)
6879 name_padding = 0;
6880
6881 printf_filtered ("%s", name);
6882 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6883 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6884 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6885 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6886 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6887 }
6888
6889 /* Specify how various signals in the inferior should be handled. */
6890
static void
handle_command (char *args, int from_tty)
{
  char **argv;
  int digits, wordlen;
  int sigfirst, signum, siglast;
  enum gdb_signal oursig;
  int allsigs;
  int nsigs;
  unsigned char *sigs;
  struct cleanup *old_chain;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  nsigs = (int) GDB_SIGNAL_LAST;
  sigs = (unsigned char *) alloca (nsigs);
  memset (sigs, 0, nsigs);

  /* Break the command line up into args.  */

  argv = gdb_buildargv (args);
  old_chain = make_cleanup_freeargv (argv);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  while (*argv != NULL)
    {
      wordlen = strlen (*argv);
      /* Count leading digits, to detect numeric signals and ranges.  */
      for (digits = 0; isdigit ((*argv)[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      /* Keywords may be abbreviated; each strncmp below requires at
	 least the minimum unambiguous prefix length.  */
      if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (*argv));
	  if ((*argv)[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi ((*argv) + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      signum = sigfirst;
	      sigfirst = siglast;
	      siglast = signum;
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (*argv);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
         which signals to apply actions to.  */

      for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* These are used by GDB itself; confirm before changing
		 them unless "all" was given (which skips them).  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    {
		      printf_unfiltered (_("Not confirmed, unchanged.\n"));
		      gdb_flush (gdb_stdout);
		    }
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}

      argv++;
    }

  /* If anything was actually changed, push the new dispositions down
     to the target once, and (interactively) show the affected rows.  */
  for (signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
	target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info (signum);
	  }

	break;
      }

  do_cleanups (old_chain);
}
7071
7072 /* Complete the "handle" command. */
7073
7074 static VEC (char_ptr) *
7075 handle_completer (struct cmd_list_element *ignore,
7076 const char *text, const char *word)
7077 {
7078 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
7079 static const char * const keywords[] =
7080 {
7081 "all",
7082 "stop",
7083 "ignore",
7084 "print",
7085 "pass",
7086 "nostop",
7087 "noignore",
7088 "noprint",
7089 "nopass",
7090 NULL,
7091 };
7092
7093 vec_signals = signal_completer (ignore, text, word);
7094 vec_keywords = complete_on_enum (keywords, word, word);
7095
7096 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
7097 VEC_free (char_ptr, vec_signals);
7098 VEC_free (char_ptr, vec_keywords);
7099 return return_val;
7100 }
7101
7102 enum gdb_signal
7103 gdb_signal_from_command (int num)
7104 {
7105 if (num >= 1 && num <= 15)
7106 return (enum gdb_signal) num;
7107 error (_("Only signals 1-15 are valid as numeric signals.\n\
7108 Use \"info signals\" for a list of symbolic signals."));
7109 }
7110
7111 /* Print current contents of the tables set by the handle command.
7112 It is possible we should just be printing signals actually used
7113 by the current target (but for things to work right when switching
7114 targets, all signals should be in the signal tables). */
7115
static void
signals_info (char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  if (signum_exp)
    {
      /* First see if this is a symbol name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
	{
	  /* No, try numeric.  */
	  oursig =
	    gdb_signal_from_command (parse_and_eval_long (signum_exp));
	}
      sig_print_info (oursig);
      return;
    }

  printf_filtered ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      QUIT;

      /* Skip the internal pseudo-signals, as "handle all" does.  */
      if (oursig != GDB_SIGNAL_UNKNOWN
	  && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
	sig_print_info (oursig);
    }

  printf_filtered (_("\nUse the \"handle\" command "
		     "to change these tables.\n"));
}
7153
7154 /* Check if it makes sense to read $_siginfo from the current thread
7155 at this point. If not, throw an error. */
7156
static void
validate_siginfo_access (void)
{
  /* The checks are ordered from most to least fundamental; the first
     failing condition determines the error message.  */

  /* No current inferior, no siginfo.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    error (_("No thread selected."));

  /* Don't try to read from a dead thread.  */
  if (is_exited (inferior_ptid))
    error (_("The current thread has terminated"));

  /* ... or from a spinning thread.  */
  if (is_running (inferior_ptid))
    error (_("Selected thread is running."));
}
7172
7173 /* The $_siginfo convenience variable is a bit special. We don't know
7174 for sure the type of the value until we actually have a chance to
7175 fetch the data. The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  This is handled
   by:

   1. making $_siginfo be an internalvar that creates a new value on
   access.

   2. making the value of $_siginfo be an lval_computed value.  */
7182
7183 /* This function implements the lval_computed support for reading a
7184 $_siginfo value. */
7185
static void
siginfo_value_read (struct value *v)
{
  LONGEST transferred;

  validate_siginfo_access ();

  /* Read the requested slice of the target's siginfo object straight
     into V's contents buffer.  */
  transferred =
    target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
		 NULL,
		 value_contents_all_raw (v),
		 value_offset (v),
		 TYPE_LENGTH (value_type (v)));

  /* A short read would leave V partially initialized; treat it as an
     error.  */
  if (transferred != TYPE_LENGTH (value_type (v)))
    error (_("Unable to read siginfo"));
}
7203
7204 /* This function implements the lval_computed support for writing a
7205 $_siginfo value. */
7206
static void
siginfo_value_write (struct value *v, struct value *fromval)
{
  LONGEST transferred;

  validate_siginfo_access ();

  /* Write FROMVAL's bytes into the target's siginfo object, at the
     offset V represents.  */
  transferred = target_write (&current_target,
			      TARGET_OBJECT_SIGNAL_INFO,
			      NULL,
			      value_contents_all_raw (fromval),
			      value_offset (v),
			      TYPE_LENGTH (value_type (fromval)));

  /* A partial write leaves the target's siginfo inconsistent; treat
     it as an error.  */
  if (transferred != TYPE_LENGTH (value_type (fromval)))
    error (_("Unable to write siginfo"));
}
7224
/* Virtual table that makes $_siginfo an lval_computed value: reads
   and writes go directly to the target's signal info object.  */

static const struct lval_funcs siginfo_value_funcs =
{
  siginfo_value_read,
  siginfo_value_write
};
7230
7231 /* Return a new value with the correct type for the siginfo object of
7232 the current thread using architecture GDBARCH. Return a void value
7233 if there's no object available. */
7234
7235 static struct value *
7236 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7237 void *ignore)
7238 {
7239 if (target_has_stack
7240 && !ptid_equal (inferior_ptid, null_ptid)
7241 && gdbarch_get_siginfo_type_p (gdbarch))
7242 {
7243 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7244
7245 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7246 }
7247
7248 return allocate_value (builtin_type (gdbarch)->builtin_void);
7249 }
7250
7251 \f
7252 /* infcall_suspend_state contains state about the program itself like its
7253 registers and any signal it received when it last stopped.
7254 This state must be restored regardless of how the inferior function call
7255 ends (either successfully, or after it hits a breakpoint or signal)
7256 if the program is to properly continue where it left off. */
7257
struct infcall_suspend_state
{
  /* The thread's suspend state (e.g. its pending stop signal) at the
     time the call was set up.  */
  struct thread_suspend_state thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  struct inferior_suspend_state inferior_suspend;
#endif

  /* Other fields:  */
  CORE_ADDR stop_pc;
  /* Full snapshot of the inferior's registers (regcache_dup).  */
  struct regcache *registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *siginfo_gdbarch;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb_byte *siginfo_data;
};
7277
/* Snapshot the program state (registers, siginfo if available, stop
   PC, and the thread's suspend state) so it can be restored after an
   inferior function call.  The caller owns the returned object and
   must eventually pass it to restore_infcall_suspend_state or
   discard_infcall_suspend_state.  */

struct infcall_suspend_state *
save_infcall_suspend_state (void)
{
  struct infcall_suspend_state *inf_state;
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  gdb_byte *siginfo_data = NULL;

  if (gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);
      size_t len = TYPE_LENGTH (type);
      struct cleanup *back_to;

      siginfo_data = xmalloc (len);
      back_to = make_cleanup (xfree, siginfo_data);

      /* Only keep the buffer if the read filled it completely.  */
      if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		       siginfo_data, 0, len) == len)
	discard_cleanups (back_to);
      else
	{
	  /* Errors ignored.  */
	  do_cleanups (back_to);
	  siginfo_data = NULL;
	}
    }

  inf_state = XCNEW (struct infcall_suspend_state);

  if (siginfo_data)
    {
      inf_state->siginfo_gdbarch = gdbarch;
      inf_state->siginfo_data = siginfo_data;
    }

  inf_state->thread_suspend = tp->suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf_state->inferior_suspend = inf->suspend;
#endif

  /* run_inferior_call will not use the signal due to its `proceed' call with
     GDB_SIGNAL_0 anyway.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  inf_state->stop_pc = stop_pc;

  inf_state->registers = regcache_dup (regcache);

  return inf_state;
}
7333
7334 /* Restore inferior session state to INF_STATE. */
7335
void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  tp->suspend = inf_state->thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf->suspend = inf_state->inferior_suspend;
#endif

  stop_pc = inf_state->stop_pc;

  /* Only restore siginfo if it was saved for this same gdbarch: the
     saved bytes are only meaningful for the gdbarch they came from
     (see the siginfo_data comment in struct infcall_suspend_state).  */
  if (inf_state->siginfo_gdbarch == gdbarch)
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Errors ignored.  */
      target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		    inf_state->siginfo_data, 0, TYPE_LENGTH (type));
    }

  /* The inferior can be gone if the user types "print exit(0)"
     (and perhaps other times).  */
  if (target_has_execution)
    /* NB: The register write goes through to the target.  */
    regcache_cpy (regcache, inf_state->registers);

  /* INF_STATE is consumed: free it and everything it owns.  */
  discard_infcall_suspend_state (inf_state);
}
7370
/* Cleanup wrapper around restore_infcall_suspend_state.  */

static void
do_restore_infcall_suspend_state_cleanup (void *state)
{
  restore_infcall_suspend_state (state);
}
7376
/* Arrange for INF_STATE to be restored (and freed) when the returned
   cleanup runs.  */

struct cleanup *
make_cleanup_restore_infcall_suspend_state
  (struct infcall_suspend_state *inf_state)
{
  return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
}
7383
/* Free INF_STATE without restoring anything from it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  /* Release the owned members before the container itself.  */
  regcache_xfree (inf_state->registers);
  xfree (inf_state->siginfo_data);
  xfree (inf_state);
}
7391
/* Return the register snapshot held by INF_STATE.  Ownership stays
   with INF_STATE.  */

struct regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers;
}
7397
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread run-control state (stepping, resume breakpoints, stop
     bpstat) saved from the thread at the time of the call.  */
  struct thread_control_state thread_control;
  /* Per-inferior run-control state saved from the current inferior.  */
  struct inferior_control_state inferior_control;

  /* Other fields: snapshots of the corresponding file-scope globals.  */
  enum stop_stack_kind stop_stack_dummy;
  int stopped_by_random_signal;
  int stop_after_trap;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id;
};
7415
/* Save all of the information associated with the inferior<==>gdb
   connection.  Returns heap-allocated state which the caller must
   eventually hand to restore_infcall_control_state or
   discard_infcall_control_state.  */

struct infcall_control_state *
save_infcall_control_state (void)
{
  struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* The saved copy above now owns these breakpoint pointers; clear the
     live ones so the thread doesn't keep using them during the call.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields: snapshot the file-scope globals.  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;
  inf_status->stop_after_trap = stop_after_trap;

  inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));

  return inf_status;
}
7447
7448 static int
7449 restore_selected_frame (void *args)
7450 {
7451 struct frame_id *fid = (struct frame_id *) args;
7452 struct frame_info *frame;
7453
7454 frame = frame_find_by_id (*fid);
7455
7456 /* If inf_status->selected_frame_id is NULL, there was no previously
7457 selected frame. */
7458 if (frame == NULL)
7459 {
7460 warning (_("Unable to restore previously selected frame."));
7461 return 0;
7462 }
7463
7464 select_frame (frame);
7465
7466 return (1);
7467 }
7468
/* Restore inferior session state to INF_STATUS.  Frees INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Any resume breakpoints installed while the call ran are stale now;
     mark them for deletion at the next stop instead of leaking them.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  /* Reinstate the saved control state (this hands back the original
     bpstat chain saved by save_infcall_control_state).  */
  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields: restore the file-scope globals.  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;
  stop_after_trap = inf_status->stop_after_trap;

  if (target_has_stack)
    {
      /* The point of catch_errors is that if the stack is clobbered,
	 walking the stack might encounter a garbage pointer and
	 error() trying to dereference it.  */
      if (catch_errors
	  (restore_selected_frame, &inf_status->selected_frame_id,
	   "Unable to restore previously selected frame:\n",
	   RETURN_MASK_ERROR) == 0)
	/* Error in restoring the selected frame.  Select the innermost
	   frame.  */
	select_frame (get_current_frame ());
    }

  xfree (inf_status);
}
7511
/* Cleanup callback with the void* signature make_cleanup expects;
   simply forwards to restore_infcall_control_state (which also frees
   STS).  */

static void
do_restore_infcall_control_state_cleanup (void *sts)
{
  restore_infcall_control_state ((struct infcall_control_state *) sts);
}
7517
7518 struct cleanup *
7519 make_cleanup_restore_infcall_control_state
7520 (struct infcall_control_state *inf_status)
7521 {
7522 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7523 }
7524
7525 void
7526 discard_infcall_control_state (struct infcall_control_state *inf_status)
7527 {
7528 if (inf_status->thread_control.step_resume_breakpoint)
7529 inf_status->thread_control.step_resume_breakpoint->disposition
7530 = disp_del_at_next_stop;
7531
7532 if (inf_status->thread_control.exception_resume_breakpoint)
7533 inf_status->thread_control.exception_resume_breakpoint->disposition
7534 = disp_del_at_next_stop;
7535
7536 /* See save_infcall_control_state for info on stop_bpstat. */
7537 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7538
7539 xfree (inf_status);
7540 }
7541 \f
7542 /* restore_inferior_ptid() will be used by the cleanup machinery
7543 to restore the inferior_ptid value saved in a call to
7544 save_inferior_ptid(). */
7545
7546 static void
7547 restore_inferior_ptid (void *arg)
7548 {
7549 ptid_t *saved_ptid_ptr = arg;
7550
7551 inferior_ptid = *saved_ptid_ptr;
7552 xfree (arg);
7553 }
7554
7555 /* Save the value of inferior_ptid so that it may be restored by a
7556 later call to do_cleanups(). Returns the struct cleanup pointer
7557 needed for later doing the cleanup. */
7558
7559 struct cleanup *
7560 save_inferior_ptid (void)
7561 {
7562 ptid_t *saved_ptid_ptr;
7563
7564 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7565 *saved_ptid_ptr = inferior_ptid;
7566 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7567 }
7568
/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  /* Reset both exit-related convenience variables.  */
  static const char *const names[] = { "_exitsignal", "_exitcode" };
  int i;

  for (i = 0; i < (int) (sizeof (names) / sizeof (names[0])); i++)
    clear_internalvar (lookup_internalvar (names[i]));
}
7577 \f
7578
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current execution direction; one of EXEC_FORWARD / EXEC_REVERSE.  */
int execution_direction = EXEC_FORWARD;
/* Keyword strings accepted by "set exec-direction".  */
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* Backing storage for the "set exec-direction" enum command; always
   points at one of the two keywords above.  */
static const char *exec_direction = exec_forward;
/* NULL-terminated list of valid keywords for the enum command.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
7592
7593 static void
7594 set_exec_direction_func (char *args, int from_tty,
7595 struct cmd_list_element *cmd)
7596 {
7597 if (target_can_execute_reverse)
7598 {
7599 if (!strcmp (exec_direction, exec_forward))
7600 execution_direction = EXEC_FORWARD;
7601 else if (!strcmp (exec_direction, exec_reverse))
7602 execution_direction = EXEC_REVERSE;
7603 }
7604 else
7605 {
7606 exec_direction = exec_forward;
7607 error (_("Target does not support this operation."));
7608 }
7609 }
7610
7611 static void
7612 show_exec_direction_func (struct ui_file *out, int from_tty,
7613 struct cmd_list_element *cmd, const char *value)
7614 {
7615 switch (execution_direction) {
7616 case EXEC_FORWARD:
7617 fprintf_filtered (out, _("Forward.\n"));
7618 break;
7619 case EXEC_REVERSE:
7620 fprintf_filtered (out, _("Reverse.\n"));
7621 break;
7622 default:
7623 internal_error (__FILE__, __LINE__,
7624 _("bogus execution_direction value: %d"),
7625 (int) execution_direction);
7626 }
7627 }
7628
/* "show schedule-multiple" callback: report whether execution commands
   resume threads of all processes (VALUE is the printable on/off
   string supplied by the command machinery).  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
			    "of all processes is %s.\n"), value);
}
7636
/* Implementation of `siginfo' variable.  Only the make-value hook is
   provided; the remaining internalvar_funcs callbacks are unused here
   (presumably the compile/destroy hooks — see the struct declaration
   for their exact meaning).  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
7645
/* Module initializer: register infrun's commands and settings, set up
   the per-signal disposition tables, and attach thread/inferior
   observers.  Called once by GDB's init machinery.  */

void
_initialize_infrun (void)
{
  int i;
  int numsigs;
  struct cmd_list_element *c;

  /* Signal-handling info and control commands.  */
  add_info ("signals", signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified. Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  /* "stop" exists only as a hook anchor, unless dbx compatibility
     mode supplies its own stop command.  */
  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
			    not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* Maintenance knobs for infrun / displaced-stepping debug output.  */
  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
			     NULL,
			     show_debug_infrun,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("displaced", class_maintenance,
			   &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
			   NULL,
			   show_debug_displaced,
			   &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest. When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely. You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Allocate the per-signal disposition tables, one slot per GDB
     signal number.  */
  numsigs = (int) GDB_SIGNAL_LAST;
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
  signal_print = (unsigned char *)
    xmalloc (sizeof (signal_print[0]) * numsigs);
  signal_program = (unsigned char *)
    xmalloc (sizeof (signal_program[0]) * numsigs);
  signal_catch = (unsigned char *)
    xmalloc (sizeof (signal_catch[0]) * numsigs);
  signal_pass = (unsigned char *)
    xmalloc (sizeof (signal_pass[0]) * numsigs);
  /* Default: stop on, print, and pass every signal; catch none.
     NOTE(review): signal_pass is not filled in by this loop —
     presumably signal_cache_update below derives it; confirm.  */
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions
     should not be given to the program afterwards.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events. The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
parent - the original process is debugged after a fork\n\
child - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off == no locking (threads may preempt at any time)\n\
on == full locking (no thread except the current thread may run)\n\
step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
In this mode, other threads may run during other commands."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled. Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Keep infrun's notion of threads/inferiors in sync with events
     announced elsewhere in GDB.  */
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution. Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);
}
This page took 0.214023 seconds and 4 git commands to generate.