gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2015 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105    over such functions.  */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126    Exactly which branch is detached depends on the 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136   fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
296 #define SET_SIGS(nsigs,sigs,flags) \
297 do { \
298 int signum = (nsigs); \
299 while (signum-- > 0) \
300 if ((sigs)[signum]) \
301 (flags)[signum] = 1; \
302 } while (0)
303
304 #define UNSET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 0; \
310 } while (0)
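/* A minimal usage sketch of the two macros above (illustrative only;
   "sigs" is a hypothetical scratch array sized like the signal
   tables, roughly mirroring what the "handle" command machinery
   does).  For something like "handle SIGINT stop print nopass" one
   would, roughly, do:

     int nsigs = (int) GDB_SIGNAL_LAST;
     unsigned char *sigs = xcalloc (nsigs, sizeof (unsigned char));

     sigs[GDB_SIGNAL_INT] = 1;
     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);
     UNSET_SIGS (nsigs, sigs, signal_program);
     xfree (sigs);  */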
311
312 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
313 this function is to avoid exporting `signal_program'. */
314
315 void
316 update_signals_program_target (void)
317 {
318 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
319 }
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
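/* For instance (a sketch only; the real calls are made by the resume
   machinery later in this file):

     target_resume (RESUME_ALL, 0, GDB_SIGNAL_0);

   resumes every thread, without single-stepping and with no signal.  */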
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Function inferior was in as of last step command. */
330
331 static struct symbol *step_start_function;
332
333 /* Nonzero if we want to give control to the user when we're notified
334 of shared library events by the dynamic linker. */
335 int stop_on_solib_events;
336
337 /* Enable or disable optional shared library event breakpoints
338 as appropriate when the above flag is changed. */
339
340 static void
341 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
342 {
343 update_solib_breakpoints ();
344 }
345
346 static void
347 show_stop_on_solib_events (struct ui_file *file, int from_tty,
348 struct cmd_list_element *c, const char *value)
349 {
350 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
351 value);
352 }
353
354 /* Nonzero means we are expecting a trace trap
355 and should stop the inferior and return silently when it happens. */
356
357 int stop_after_trap;
358
359 /* Save register contents here when executing a "finish" command or when
360 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
361 Thus this contains the return value from the called function (assuming
362 values are returned in a register). */
363
364 struct regcache *stop_registers;
365
366 /* Nonzero after stop if current stack frame should be printed. */
367
368 static int stop_print_frame;
369
370 /* This is a cached copy of the pid/waitstatus of the last event
371 returned by target_wait()/deprecated_target_wait_hook(). This
372 information is returned by get_last_target_status(). */
373 static ptid_t target_last_wait_ptid;
374 static struct target_waitstatus target_last_waitstatus;
375
376 static void context_switch (ptid_t ptid);
377
378 void init_thread_stepping_state (struct thread_info *tss);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
390 static void
391 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
393 {
394 fprintf_filtered (file,
395 _("Debugger response to a program "
396 "call of fork or vfork is \"%s\".\n"),
397 value);
398 }
399 \f
400
401 /* Handle changes to the inferior list based on the type of fork,
402 which process is being followed, and whether the other process
403 should be detached. On entry inferior_ptid must be the ptid of
404 the fork parent. At return inferior_ptid is the ptid of the
405 followed inferior. */
406
407 static int
408 follow_fork_inferior (int follow_child, int detach_fork)
409 {
410 int has_vforked;
411 ptid_t parent_ptid, child_ptid;
412
413 has_vforked = (inferior_thread ()->pending_follow.kind
414 == TARGET_WAITKIND_VFORKED);
415 parent_ptid = inferior_ptid;
416 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
417
418 if (has_vforked
419 && !non_stop /* Non-stop always resumes both branches. */
420 && (!target_is_async_p () || sync_execution)
421 && !(follow_child || detach_fork || sched_multi))
422 {
423 /* The parent stays blocked inside the vfork syscall until the
424 child execs or exits. If we don't let the child run, then
425 the parent stays blocked. If we're telling the parent to run
426 in the foreground, the user will not be able to ctrl-c to get
427 back the terminal, effectively hanging the debug session. */
428 fprintf_filtered (gdb_stderr, _("\
429 Can not resume the parent process over vfork in the foreground while\n\
430 holding the child stopped. Try \"set detach-on-fork\" or \
431 \"set schedule-multiple\".\n"));
432 /* FIXME output string > 80 columns. */
433 return 1;
434 }
435
436 if (!follow_child)
437 {
438 /* Detach new forked process? */
439 if (detach_fork)
440 {
441 struct cleanup *old_chain;
442
443 /* Before detaching from the child, remove all breakpoints
444 from it. If we forked, then this has already been taken
445 care of by infrun.c. If we vforked however, any
446 breakpoint inserted in the parent is visible in the
447 child, even those added while stopped in a vfork
448 catchpoint. This will remove the breakpoints from the
449 parent also, but they'll be reinserted below. */
450 if (has_vforked)
451 {
452 /* Keep breakpoints list in sync. */
453 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
454 }
455
456 if (info_verbose || debug_infrun)
457 {
458 target_terminal_ours_for_output ();
459 fprintf_filtered (gdb_stdlog,
460 _("Detaching after %s from child %s.\n"),
461 has_vforked ? "vfork" : "fork",
462 target_pid_to_str (child_ptid));
463 }
464 }
465 else
466 {
467 struct inferior *parent_inf, *child_inf;
468 struct cleanup *old_chain;
469
470 /* Add process to GDB's tables. */
471 child_inf = add_inferior (ptid_get_pid (child_ptid));
472
473 parent_inf = current_inferior ();
474 child_inf->attach_flag = parent_inf->attach_flag;
475 copy_terminal_info (child_inf, parent_inf);
476 child_inf->gdbarch = parent_inf->gdbarch;
477 copy_inferior_target_desc_info (child_inf, parent_inf);
478
479 old_chain = save_inferior_ptid ();
480 save_current_program_space ();
481
482 inferior_ptid = child_ptid;
483 add_thread (inferior_ptid);
484 child_inf->symfile_flags = SYMFILE_NO_READ;
485
486 /* If this is a vfork child, then the address-space is
487 shared with the parent. */
488 if (has_vforked)
489 {
490 child_inf->pspace = parent_inf->pspace;
491 child_inf->aspace = parent_inf->aspace;
492
493 /* The parent will be frozen until the child is done
494 with the shared region. Keep track of the
495 parent. */
496 child_inf->vfork_parent = parent_inf;
497 child_inf->pending_detach = 0;
498 parent_inf->vfork_child = child_inf;
499 parent_inf->pending_detach = 0;
500 }
501 else
502 {
503 child_inf->aspace = new_address_space ();
504 child_inf->pspace = add_program_space (child_inf->aspace);
505 child_inf->removable = 1;
506 set_current_program_space (child_inf->pspace);
507 clone_program_space (child_inf->pspace, parent_inf->pspace);
508
509 /* Let the shared library layer (e.g., solib-svr4) learn
510 about this new process, relocate the cloned exec, pull
511 in shared libraries, and install the solib event
512 breakpoint. If a "cloned-VM" event was propagated
513 better throughout the core, this wouldn't be
514 required. */
515 solib_create_inferior_hook (0);
516 }
517
518 do_cleanups (old_chain);
519 }
520
521 if (has_vforked)
522 {
523 struct inferior *parent_inf;
524
525 parent_inf = current_inferior ();
526
527 /* If we detached from the child, then we have to be careful
528 to not insert breakpoints in the parent until the child
529 is done with the shared memory region. However, if we're
530 staying attached to the child, then we can and should
531 insert breakpoints, so that we can debug it. A
532	     subsequent child exec or exit is enough to know when
533 the child stops using the parent's address space. */
534 parent_inf->waiting_for_vfork_done = detach_fork;
535 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
536 }
537 }
538 else
539 {
540 /* Follow the child. */
541 struct inferior *parent_inf, *child_inf;
542 struct program_space *parent_pspace;
543
544 if (info_verbose || debug_infrun)
545 {
546 target_terminal_ours_for_output ();
547 fprintf_filtered (gdb_stdlog,
548 _("Attaching after %s %s to child %s.\n"),
549 target_pid_to_str (parent_ptid),
550 has_vforked ? "vfork" : "fork",
551 target_pid_to_str (child_ptid));
552 }
553
554 /* Add the new inferior first, so that the target_detach below
555 doesn't unpush the target. */
556
557 child_inf = add_inferior (ptid_get_pid (child_ptid));
558
559 parent_inf = current_inferior ();
560 child_inf->attach_flag = parent_inf->attach_flag;
561 copy_terminal_info (child_inf, parent_inf);
562 child_inf->gdbarch = parent_inf->gdbarch;
563 copy_inferior_target_desc_info (child_inf, parent_inf);
564
565 parent_pspace = parent_inf->pspace;
566
567 /* If we're vforking, we want to hold on to the parent until the
568 child exits or execs. At child exec or exit time we can
569 remove the old breakpoints from the parent and detach or
570 resume debugging it. Otherwise, detach the parent now; we'll
571	 want to reuse its program/address spaces, but we can't set
572	 them to the child before removing breakpoints from the
573	 parent; otherwise, the breakpoints module could decide to
574 remove breakpoints from the wrong process (since they'd be
575 assigned to the same address space). */
576
577 if (has_vforked)
578 {
579 gdb_assert (child_inf->vfork_parent == NULL);
580 gdb_assert (parent_inf->vfork_child == NULL);
581 child_inf->vfork_parent = parent_inf;
582 child_inf->pending_detach = 0;
583 parent_inf->vfork_child = child_inf;
584 parent_inf->pending_detach = detach_fork;
585 parent_inf->waiting_for_vfork_done = 0;
586 }
587 else if (detach_fork)
588 {
589 if (info_verbose || debug_infrun)
590 {
591 target_terminal_ours_for_output ();
592 fprintf_filtered (gdb_stdlog,
593 _("Detaching after fork from "
594 "child %s.\n"),
595 target_pid_to_str (child_ptid));
596 }
597
598 target_detach (NULL, 0);
599 }
600
601 /* Note that the detach above makes PARENT_INF dangling. */
602
603 /* Add the child thread to the appropriate lists, and switch to
604 this new thread, before cloning the program space, and
605 informing the solib layer about this new process. */
606
607 inferior_ptid = child_ptid;
608 add_thread (inferior_ptid);
609
610 /* If this is a vfork child, then the address-space is shared
611 with the parent. If we detached from the parent, then we can
612 reuse the parent's program/address spaces. */
613 if (has_vforked || detach_fork)
614 {
615 child_inf->pspace = parent_pspace;
616 child_inf->aspace = child_inf->pspace->aspace;
617 }
618 else
619 {
620 child_inf->aspace = new_address_space ();
621 child_inf->pspace = add_program_space (child_inf->aspace);
622 child_inf->removable = 1;
623 child_inf->symfile_flags = SYMFILE_NO_READ;
624 set_current_program_space (child_inf->pspace);
625 clone_program_space (child_inf->pspace, parent_pspace);
626
627 /* Let the shared library layer (e.g., solib-svr4) learn
628 about this new process, relocate the cloned exec, pull in
629 shared libraries, and install the solib event breakpoint.
630 If a "cloned-VM" event was propagated better throughout
631 the core, this wouldn't be required. */
632 solib_create_inferior_hook (0);
633 }
634 }
635
636 return target_follow_fork (follow_child, detach_fork);
637 }
638
639 /* Tell the target to follow the fork we're stopped at. Returns true
640 if the inferior should be resumed; false, if the target for some
641 reason decided it's best not to resume. */
642
643 static int
644 follow_fork (void)
645 {
646 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
647 int should_resume = 1;
648 struct thread_info *tp;
649
650 /* Copy user stepping state to the new inferior thread. FIXME: the
651 followed fork child thread should have a copy of most of the
652 parent thread structure's run control related fields, not just these.
653 Initialized to avoid "may be used uninitialized" warnings from gcc. */
654 struct breakpoint *step_resume_breakpoint = NULL;
655 struct breakpoint *exception_resume_breakpoint = NULL;
656 CORE_ADDR step_range_start = 0;
657 CORE_ADDR step_range_end = 0;
658 struct frame_id step_frame_id = { 0 };
659 struct interp *command_interp = NULL;
660
661 if (!non_stop)
662 {
663 ptid_t wait_ptid;
664 struct target_waitstatus wait_status;
665
666 /* Get the last target status returned by target_wait(). */
667 get_last_target_status (&wait_ptid, &wait_status);
668
669 /* If not stopped at a fork event, then there's nothing else to
670 do. */
671 if (wait_status.kind != TARGET_WAITKIND_FORKED
672 && wait_status.kind != TARGET_WAITKIND_VFORKED)
673 return 1;
674
675 /* Check if we switched over from WAIT_PTID, since the event was
676 reported. */
677 if (!ptid_equal (wait_ptid, minus_one_ptid)
678 && !ptid_equal (inferior_ptid, wait_ptid))
679 {
680 /* We did. Switch back to WAIT_PTID thread, to tell the
681 target to follow it (in either direction). We'll
682 afterwards refuse to resume, and inform the user what
683 happened. */
684 switch_to_thread (wait_ptid);
685 should_resume = 0;
686 }
687 }
688
689 tp = inferior_thread ();
690
691 /* If there were any forks/vforks that were caught and are now to be
692 followed, then do so now. */
693 switch (tp->pending_follow.kind)
694 {
695 case TARGET_WAITKIND_FORKED:
696 case TARGET_WAITKIND_VFORKED:
697 {
698 ptid_t parent, child;
699
700 /* If the user did a next/step, etc, over a fork call,
701 preserve the stepping state in the fork child. */
702 if (follow_child && should_resume)
703 {
704 step_resume_breakpoint = clone_momentary_breakpoint
705 (tp->control.step_resume_breakpoint);
706 step_range_start = tp->control.step_range_start;
707 step_range_end = tp->control.step_range_end;
708 step_frame_id = tp->control.step_frame_id;
709 exception_resume_breakpoint
710 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
711 command_interp = tp->control.command_interp;
712
713	    /* For now, delete the parent's sr breakpoint; otherwise,
714 parent/child sr breakpoints are considered duplicates,
715 and the child version will not be installed. Remove
716 this when the breakpoints module becomes aware of
717 inferiors and address spaces. */
718 delete_step_resume_breakpoint (tp);
719 tp->control.step_range_start = 0;
720 tp->control.step_range_end = 0;
721 tp->control.step_frame_id = null_frame_id;
722 delete_exception_resume_breakpoint (tp);
723 tp->control.command_interp = NULL;
724 }
725
726 parent = inferior_ptid;
727 child = tp->pending_follow.value.related_pid;
728
729 /* Set up inferior(s) as specified by the caller, and tell the
730 target to do whatever is necessary to follow either parent
731 or child. */
732 if (follow_fork_inferior (follow_child, detach_fork))
733 {
734 /* Target refused to follow, or there's some other reason
735 we shouldn't resume. */
736 should_resume = 0;
737 }
738 else
739 {
740 /* This pending follow fork event is now handled, one way
741	       or another.  The previously selected thread may be gone
742	       from the lists by now, but if it is still around, we need
743	       to clear the pending follow request.  */
744 tp = find_thread_ptid (parent);
745 if (tp)
746 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
747
748 /* This makes sure we don't try to apply the "Switched
749	       over from WAIT_PTID" logic above.  */
750 nullify_last_target_wait_ptid ();
751
752 /* If we followed the child, switch to it... */
753 if (follow_child)
754 {
755 switch_to_thread (child);
756
757 /* ... and preserve the stepping state, in case the
758 user was stepping over the fork call. */
759 if (should_resume)
760 {
761 tp = inferior_thread ();
762 tp->control.step_resume_breakpoint
763 = step_resume_breakpoint;
764 tp->control.step_range_start = step_range_start;
765 tp->control.step_range_end = step_range_end;
766 tp->control.step_frame_id = step_frame_id;
767 tp->control.exception_resume_breakpoint
768 = exception_resume_breakpoint;
769 tp->control.command_interp = command_interp;
770 }
771 else
772 {
773 /* If we get here, it was because we're trying to
774		     resume from a fork catchpoint, but the user
775 has switched threads away from the thread that
776 forked. In that case, the resume command
777 issued is most likely not applicable to the
778 child, so just warn, and refuse to resume. */
779 warning (_("Not resuming: switched threads "
780 "before following fork child.\n"));
781 }
782
783 /* Reset breakpoints in the child as appropriate. */
784 follow_inferior_reset_breakpoints ();
785 }
786 else
787 switch_to_thread (parent);
788 }
789 }
790 break;
791 case TARGET_WAITKIND_SPURIOUS:
792 /* Nothing to follow. */
793 break;
794 default:
795 internal_error (__FILE__, __LINE__,
796 "Unexpected pending_follow.kind %d\n",
797 tp->pending_follow.kind);
798 break;
799 }
800
801 return should_resume;
802 }
803
804 static void
805 follow_inferior_reset_breakpoints (void)
806 {
807 struct thread_info *tp = inferior_thread ();
808
809 /* Was there a step_resume breakpoint? (There was if the user
810 did a "next" at the fork() call.) If so, explicitly reset its
811 thread number. Cloned step_resume breakpoints are disabled on
812 creation, so enable it here now that it is associated with the
813 correct thread.
814
815 step_resumes are a form of bp that are made to be per-thread.
816 Since we created the step_resume bp when the parent process
817 was being debugged, and now are switching to the child process,
818 from the breakpoint package's viewpoint, that's a switch of
819 "threads". We must update the bp's notion of which thread
820 it is for, or it'll be ignored when it triggers. */
821
822 if (tp->control.step_resume_breakpoint)
823 {
824 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
825 tp->control.step_resume_breakpoint->loc->enabled = 1;
826 }
827
828 /* Treat exception_resume breakpoints like step_resume breakpoints. */
829 if (tp->control.exception_resume_breakpoint)
830 {
831 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
832 tp->control.exception_resume_breakpoint->loc->enabled = 1;
833 }
834
835 /* Reinsert all breakpoints in the child. The user may have set
836 breakpoints after catching the fork, in which case those
837 were never set in the child, but only in the parent. This makes
838 sure the inserted breakpoints match the breakpoint list. */
839
840 breakpoint_re_set ();
841 insert_breakpoints ();
842 }
843
844 /* The child has exited or execed: resume threads of the parent the
845 user wanted to be executing. */
846
847 static int
848 proceed_after_vfork_done (struct thread_info *thread,
849 void *arg)
850 {
851 int pid = * (int *) arg;
852
853 if (ptid_get_pid (thread->ptid) == pid
854 && is_running (thread->ptid)
855 && !is_executing (thread->ptid)
856 && !thread->stop_requested
857 && thread->suspend.stop_signal == GDB_SIGNAL_0)
858 {
859 if (debug_infrun)
860 fprintf_unfiltered (gdb_stdlog,
861 "infrun: resuming vfork parent thread %s\n",
862 target_pid_to_str (thread->ptid));
863
864 switch_to_thread (thread->ptid);
865 clear_proceed_status (0);
866 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
867 }
868
869 return 0;
870 }
871
872 /* Called whenever we notice an exec or exit event, to handle
873 detaching or resuming a vfork parent. */
874
875 static void
876 handle_vfork_child_exec_or_exit (int exec)
877 {
878 struct inferior *inf = current_inferior ();
879
880 if (inf->vfork_parent)
881 {
882 int resume_parent = -1;
883
884 /* This exec or exit marks the end of the shared memory region
885 between the parent and the child. If the user wanted to
886 detach from the parent, now is the time. */
887
888 if (inf->vfork_parent->pending_detach)
889 {
890 struct thread_info *tp;
891 struct cleanup *old_chain;
892 struct program_space *pspace;
893 struct address_space *aspace;
894
895 /* follow-fork child, detach-on-fork on. */
896
897 inf->vfork_parent->pending_detach = 0;
898
899 if (!exec)
900 {
901 /* If we're handling a child exit, then inferior_ptid
902 points at the inferior's pid, not to a thread. */
903 old_chain = save_inferior_ptid ();
904 save_current_program_space ();
905 save_current_inferior ();
906 }
907 else
908 old_chain = save_current_space_and_thread ();
909
910	  /* We're letting go of the parent.  */
911 tp = any_live_thread_of_process (inf->vfork_parent->pid);
912 switch_to_thread (tp->ptid);
913
914 /* We're about to detach from the parent, which implicitly
915 removes breakpoints from its address space. There's a
916 catch here: we want to reuse the spaces for the child,
917	     catch here: we want to reuse the spaces for the child,
918	     but parent/child are still sharing the pspace at this
919	     point, although the exec in reality makes the kernel give
920	     the child a fresh set of new pages.  The problem here is
921	     that the breakpoints module, being unaware of this, would
922	     likely choose the child process to write to the parent
922 address space. Swapping the child temporarily away from
923 the spaces has the desired effect. Yes, this is "sort
924 of" a hack. */
925
926 pspace = inf->pspace;
927 aspace = inf->aspace;
928 inf->aspace = NULL;
929 inf->pspace = NULL;
930
931 if (debug_infrun || info_verbose)
932 {
933 target_terminal_ours_for_output ();
934
935 if (exec)
936 {
937 fprintf_filtered (gdb_stdlog,
938 _("Detaching vfork parent process "
939 "%d after child exec.\n"),
940 inf->vfork_parent->pid);
941 }
942 else
943 {
944 fprintf_filtered (gdb_stdlog,
945 _("Detaching vfork parent process "
946 "%d after child exit.\n"),
947 inf->vfork_parent->pid);
948 }
949 }
950
951 target_detach (NULL, 0);
952
953 /* Put it back. */
954 inf->pspace = pspace;
955 inf->aspace = aspace;
956
957 do_cleanups (old_chain);
958 }
959 else if (exec)
960 {
961	  /* We're staying attached to the parent, so really give the
962 child a new address space. */
963 inf->pspace = add_program_space (maybe_new_address_space ());
964 inf->aspace = inf->pspace->aspace;
965 inf->removable = 1;
966 set_current_program_space (inf->pspace);
967
968 resume_parent = inf->vfork_parent->pid;
969
970 /* Break the bonds. */
971 inf->vfork_parent->vfork_child = NULL;
972 }
973 else
974 {
975 struct cleanup *old_chain;
976 struct program_space *pspace;
977
978 /* If this is a vfork child exiting, then the pspace and
979 aspaces were shared with the parent. Since we're
980 reporting the process exit, we'll be mourning all that is
981 found in the address space, and switching to null_ptid,
982 preparing to start a new inferior. But, since we don't
983 want to clobber the parent's address/program spaces, we
984 go ahead and create a new one for this exiting
985 inferior. */
986
987	  /* Switch to null_ptid, so that clone_program_space doesn't try
988 to read the selected frame of a dead process. */
989 old_chain = save_inferior_ptid ();
990 inferior_ptid = null_ptid;
991
992 /* This inferior is dead, so avoid giving the breakpoints
993 module the option to write through to it (cloning a
994 program space resets breakpoints). */
995 inf->aspace = NULL;
996 inf->pspace = NULL;
997 pspace = add_program_space (maybe_new_address_space ());
998 set_current_program_space (pspace);
999 inf->removable = 1;
1000 inf->symfile_flags = SYMFILE_NO_READ;
1001 clone_program_space (pspace, inf->vfork_parent->pspace);
1002 inf->pspace = pspace;
1003 inf->aspace = pspace->aspace;
1004
1005 /* Put back inferior_ptid. We'll continue mourning this
1006 inferior. */
1007 do_cleanups (old_chain);
1008
1009 resume_parent = inf->vfork_parent->pid;
1010 /* Break the bonds. */
1011 inf->vfork_parent->vfork_child = NULL;
1012 }
1013
1014 inf->vfork_parent = NULL;
1015
1016 gdb_assert (current_program_space == inf->pspace);
1017
1018 if (non_stop && resume_parent != -1)
1019 {
1020 /* If the user wanted the parent to be running, let it go
1021 free now. */
1022 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1023
1024 if (debug_infrun)
1025 fprintf_unfiltered (gdb_stdlog,
1026 "infrun: resuming vfork parent process %d\n",
1027 resume_parent);
1028
1029 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1030
1031 do_cleanups (old_chain);
1032 }
1033 }
1034 }
1035
1036 /* Enum strings for "set|show follow-exec-mode". */
1037
1038 static const char follow_exec_mode_new[] = "new";
1039 static const char follow_exec_mode_same[] = "same";
1040 static const char *const follow_exec_mode_names[] =
1041 {
1042 follow_exec_mode_new,
1043 follow_exec_mode_same,
1044 NULL,
1045 };
1046
1047 static const char *follow_exec_mode_string = follow_exec_mode_same;
1048 static void
1049 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1050 struct cmd_list_element *c, const char *value)
1051 {
1052 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1053 }
1054
1055 /* EXECD_PATHNAME is assumed to be non-NULL. */
1056
1057 static void
1058 follow_exec (ptid_t ptid, char *execd_pathname)
1059 {
1060 struct thread_info *th, *tmp;
1061 struct inferior *inf = current_inferior ();
1062 int pid = ptid_get_pid (ptid);
1063
1064 /* This is an exec event that we actually wish to pay attention to.
1065 Refresh our symbol table to the newly exec'd program, remove any
1066 momentary bp's, etc.
1067
1068 If there are breakpoints, they aren't really inserted now,
1069 since the exec() transformed our inferior into a fresh set
1070 of instructions.
1071
1072 We want to preserve symbolic breakpoints on the list, since
1073 we have hopes that they can be reset after the new a.out's
1074 symbol table is read.
1075
1076 However, any "raw" breakpoints must be removed from the list
1077 (e.g., the solib bp's), since their address is probably invalid
1078 now.
1079
1080 And, we DON'T want to call delete_breakpoints() here, since
1081 that may write the bp's "shadow contents" (the instruction
1082     value that was overwritten with a TRAP instruction).  Since
1083 we now have a new a.out, those shadow contents aren't valid. */
1084
1085 mark_breakpoints_out ();
1086
1087 /* The target reports the exec event to the main thread, even if
1088 some other thread does the exec, and even if the main thread was
1089 stopped or already gone. We may still have non-leader threads of
1090 the process on our list. E.g., on targets that don't have thread
1091 exit events (like remote); or on native Linux in non-stop mode if
1092 there were only two threads in the inferior and the non-leader
1093 one is the one that execs (and nothing forces an update of the
1094 thread list up to here). When debugging remotely, it's best to
1095 avoid extra traffic, when possible, so avoid syncing the thread
1096 list with the target, and instead go ahead and delete all threads
1097     of the process but the one that reported the event.  Note this must
1098 be done before calling update_breakpoints_after_exec, as
1099 otherwise clearing the threads' resources would reference stale
1100 thread breakpoints -- it may have been one of these threads that
1101 stepped across the exec. We could just clear their stepping
1102 states, but as long as we're iterating, might as well delete
1103 them. Deleting them now rather than at the next user-visible
1104 stop provides a nicer sequence of events for user and MI
1105 notifications. */
1106 ALL_NON_EXITED_THREADS_SAFE (th, tmp)
1107 if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
1108 delete_thread (th->ptid);
1109
1110 /* We also need to clear any left over stale state for the
1111 leader/event thread. E.g., if there was any step-resume
1112 breakpoint or similar, it's gone now. We cannot truly
1113 step-to-next statement through an exec(). */
1114 th = inferior_thread ();
1115 th->control.step_resume_breakpoint = NULL;
1116 th->control.exception_resume_breakpoint = NULL;
1117 th->control.single_step_breakpoints = NULL;
1118 th->control.step_range_start = 0;
1119 th->control.step_range_end = 0;
1120
1121 /* The user may have had the main thread held stopped in the
1122 previous image (e.g., schedlock on, or non-stop). Release
1123 it now. */
1124 th->stop_requested = 0;
1125
1126 update_breakpoints_after_exec ();
1127
1128 /* What is this a.out's name? */
1129 printf_unfiltered (_("%s is executing new program: %s\n"),
1130 target_pid_to_str (inferior_ptid),
1131 execd_pathname);
1132
1133 /* We've followed the inferior through an exec. Therefore, the
1134 inferior has essentially been killed & reborn. */
1135
1136 gdb_flush (gdb_stdout);
1137
1138 breakpoint_init_inferior (inf_execd);
1139
1140 if (gdb_sysroot && *gdb_sysroot)
1141 {
1142 char *name = alloca (strlen (gdb_sysroot)
1143 + strlen (execd_pathname)
1144 + 1);
1145
1146 strcpy (name, gdb_sysroot);
1147 strcat (name, execd_pathname);
1148 execd_pathname = name;
1149 }
1150
1151 /* Reset the shared library package. This ensures that we get a
1152 shlib event when the child reaches "_start", at which point the
1153 dld will have had a chance to initialize the child. */
1154 /* Also, loading a symbol file below may trigger symbol lookups, and
1155 we don't want those to be satisfied by the libraries of the
1156 previous incarnation of this process. */
1157 no_shared_libraries (NULL, 0);
1158
1159 if (follow_exec_mode_string == follow_exec_mode_new)
1160 {
1161 struct program_space *pspace;
1162
1163 /* The user wants to keep the old inferior and program spaces
1164 around. Create a new fresh one, and switch to it. */
1165
1166 inf = add_inferior (current_inferior ()->pid);
1167 pspace = add_program_space (maybe_new_address_space ());
1168 inf->pspace = pspace;
1169 inf->aspace = pspace->aspace;
1170
1171 exit_inferior_num_silent (current_inferior ()->num);
1172
1173 set_current_inferior (inf);
1174 set_current_program_space (pspace);
1175 }
1176 else
1177 {
1178 /* The old description may no longer be fit for the new image.
1179	 E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
1180 old description; we'll read a new one below. No need to do
1181 this on "follow-exec-mode new", as the old inferior stays
1182 around (its description is later cleared/refetched on
1183 restart). */
1184 target_clear_description ();
1185 }
1186
1187 gdb_assert (current_program_space == inf->pspace);
1188
1189 /* That a.out is now the one to use. */
1190 exec_file_attach (execd_pathname, 0);
1191
1192   /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a PIE
1193      (Position Independent Executable) main symbol file will only get applied by
1194 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
1195 the breakpoints with the zero displacement. */
1196
1197 symbol_file_add (execd_pathname,
1198 (inf->symfile_flags
1199 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
1200 NULL, 0);
1201
1202 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
1203 set_initial_language ();
1204
1205 /* If the target can specify a description, read it. Must do this
1206 after flipping to the new executable (because the target supplied
1207 description must be compatible with the executable's
1208 architecture, and the old executable may e.g., be 32-bit, while
1209 the new one 64-bit), and before anything involving memory or
1210 registers. */
1211 target_find_description ();
1212
1213 solib_create_inferior_hook (0);
1214
1215 jit_inferior_created_hook ();
1216
1217 breakpoint_re_set ();
1218
1219 /* Reinsert all breakpoints. (Those which were symbolic have
1220 been reset to the proper address in the new a.out, thanks
1221 to symbol_file_command...). */
1222 insert_breakpoints ();
1223
1224 /* The next resume of this inferior should bring it to the shlib
1225 startup breakpoints. (If the user had also set bp's on
1226 "main" from the old (parent) process, then they'll auto-
1227 matically get reset there in the new process.). */
1228 }
1229
1230 /* Info about an instruction that is being stepped over. */
1231
1232 struct step_over_info
1233 {
1234 /* If we're stepping past a breakpoint, this is the address space
1235 and address of the instruction the breakpoint is set at. We'll
1236 skip inserting all breakpoints here. Valid iff ASPACE is
1237 non-NULL. */
1238 struct address_space *aspace;
1239 CORE_ADDR address;
1240
1241 /* The instruction being stepped over triggers a nonsteppable
1242 watchpoint. If true, we'll skip inserting watchpoints. */
1243 int nonsteppable_watchpoint_p;
1244 };
1245
1246 /* The step-over info of the location that is being stepped over.
1247
1248 Note that with async/breakpoint always-inserted mode, a user might
1249 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1250 being stepped over. As setting a new breakpoint inserts all
1251 breakpoints, we need to make sure the breakpoint being stepped over
1252 isn't inserted then. We do that by only clearing the step-over
1253 info when the step-over is actually finished (or aborted).
1254
1255 Presently GDB can only step over one breakpoint at any given time.
1256    Given that threads which can't run code in the same address space as the
1257 breakpoint's can't really miss the breakpoint, GDB could be taught
1258 to step-over at most one breakpoint per address space (so this info
1259 could move to the address space object if/when GDB is extended).
1260 The set of breakpoints being stepped over will normally be much
1261 smaller than the set of all breakpoints, so a flag in the
1262 breakpoint location structure would be wasteful. A separate list
1263 also saves complexity and run-time, as otherwise we'd have to go
1264 through all breakpoint locations clearing their flag whenever we
1265 start a new sequence. Similar considerations weigh against storing
1266 this info in the thread object. Plus, not all step overs actually
1267 have breakpoint locations -- e.g., stepping past a single-step
1268 breakpoint, or stepping to complete a non-continuable
1269 watchpoint. */
1270 static struct step_over_info step_over_info;
1271
1272 /* Record the address of the breakpoint/instruction we're currently
1273 stepping over. */
1274
1275 static void
1276 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1277 int nonsteppable_watchpoint_p)
1278 {
1279 step_over_info.aspace = aspace;
1280 step_over_info.address = address;
1281 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1282 }
1283
1284 /* Called when we're no longer stepping over a breakpoint / an
1285 instruction, so all breakpoints are free to be (re)inserted. */
1286
1287 static void
1288 clear_step_over_info (void)
1289 {
1290 step_over_info.aspace = NULL;
1291 step_over_info.address = 0;
1292 step_over_info.nonsteppable_watchpoint_p = 0;
1293 }
1294
1295 /* See infrun.h. */
1296
1297 int
1298 stepping_past_instruction_at (struct address_space *aspace,
1299 CORE_ADDR address)
1300 {
1301 return (step_over_info.aspace != NULL
1302 && breakpoint_address_match (aspace, address,
1303 step_over_info.aspace,
1304 step_over_info.address));
1305 }
1306
1307 /* See infrun.h. */
1308
1309 int
1310 stepping_past_nonsteppable_watchpoint (void)
1311 {
1312 return step_over_info.nonsteppable_watchpoint_p;
1313 }
1314
1315 /* Returns true if step-over info is valid. */
1316
1317 static int
1318 step_over_info_valid_p (void)
1319 {
1320 return (step_over_info.aspace != NULL
1321 || stepping_past_nonsteppable_watchpoint ());
1322 }
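/* A minimal sketch of how an insertion path might consult the
   predicates above; the real checks live in the breakpoint module,
   and "loc" is a hypothetical location with the usual
   program-space/address fields:

     static int
     should_skip_insertion (struct bp_location *loc)
     {
       return stepping_past_instruction_at (loc->pspace->aspace,
                                            loc->address);
     }

   A nonzero result means "this is the very spot being stepped over,
   leave it uninserted for now".  */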
1323
1324 \f
1325 /* Displaced stepping. */
1326
1327 /* In non-stop debugging mode, we must take special care to manage
1328 breakpoints properly; in particular, the traditional strategy for
1329 stepping a thread past a breakpoint it has hit is unsuitable.
1330 'Displaced stepping' is a tactic for stepping one thread past a
1331 breakpoint it has hit while ensuring that other threads running
1332 concurrently will hit the breakpoint as they should.
1333
1334 The traditional way to step a thread T off a breakpoint in a
1335 multi-threaded program in all-stop mode is as follows:
1336
1337 a0) Initially, all threads are stopped, and breakpoints are not
1338 inserted.
1339 a1) We single-step T, leaving breakpoints uninserted.
1340 a2) We insert breakpoints, and resume all threads.
1341
1342 In non-stop debugging, however, this strategy is unsuitable: we
1343 don't want to have to stop all threads in the system in order to
1344 continue or step T past a breakpoint. Instead, we use displaced
1345 stepping:
1346
1347 n0) Initially, T is stopped, other threads are running, and
1348 breakpoints are inserted.
1349 n1) We copy the instruction "under" the breakpoint to a separate
1350 location, outside the main code stream, making any adjustments
1351 to the instruction, register, and memory state as directed by
1352 T's architecture.
1353 n2) We single-step T over the instruction at its new location.
1354 n3) We adjust the resulting register and memory state as directed
1355 by T's architecture. This includes resetting T's PC to point
1356 back into the main instruction stream.
1357 n4) We resume T.
1358
1359 This approach depends on the following gdbarch methods:
1360
1361 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1362 indicate where to copy the instruction, and how much space must
1363 be reserved there. We use these in step n1.
1364
1365    - gdbarch_displaced_step_copy_insn copies an instruction to a new
1366 address, and makes any necessary adjustments to the instruction,
1367 register contents, and memory. We use this in step n1.
1368
1369 - gdbarch_displaced_step_fixup adjusts registers and memory after
1370      we have successfully single-stepped the instruction, to yield the
1371 same effect the instruction would have had if we had executed it
1372 at its original address. We use this in step n3.
1373
1374 - gdbarch_displaced_step_free_closure provides cleanup.
1375
1376 The gdbarch_displaced_step_copy_insn and
1377 gdbarch_displaced_step_fixup functions must be written so that
1378 copying an instruction with gdbarch_displaced_step_copy_insn,
1379 single-stepping across the copied instruction, and then applying
1380    gdbarch_displaced_step_fixup should have the same effects on the
1381 thread's memory and registers as stepping the instruction in place
1382 would have. Exactly which responsibilities fall to the copy and
1383 which fall to the fixup is up to the author of those functions.
1384
1385 See the comments in gdbarch.sh for details.
1386
1387 Note that displaced stepping and software single-step cannot
1388 currently be used in combination, although with some care I think
1389 they could be made to. Software single-step works by placing
1390 breakpoints on all possible subsequent instructions; if the
1391 displaced instruction is a PC-relative jump, those breakpoints
1392 could fall in very strange places --- on pages that aren't
1393 executable, or at addresses that are not proper instruction
1394 boundaries. (We do generally let other threads run while we wait
1395 to hit the software single-step breakpoint, and they might
1396 encounter such a corrupted instruction.) One way to work around
1397 this would be to have gdbarch_displaced_step_copy_insn fully
1398 simulate the effect of PC-relative instructions (and return NULL)
1399 on architectures that use software single-stepping.
1400
1401 In non-stop mode, we can have independent and simultaneous step
1402 requests, so more than one thread may need to simultaneously step
1403 over a breakpoint. The current implementation assumes there is
1404 only one scratch space per process. In this case, we have to
1405 serialize access to the scratch space. If thread A wants to step
1406 over a breakpoint, but we are currently waiting for some other
1407 thread to complete a displaced step, we leave thread A stopped and
1408 place it in the displaced_step_request_queue. Whenever a displaced
1409 step finishes, we pick the next thread in the queue and start a new
1410 displaced step operation on it. See displaced_step_prepare and
1411 displaced_step_fixup for details. */
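/* A condensed sketch of steps n1-n4 above in terms of the gdbarch
   methods just listed (error handling and the bookkeeping done by
   displaced_step_prepare and displaced_step_fixup below are omitted;
   "regcache" and the single-step itself are stand-ins):

     CORE_ADDR original = regcache_read_pc (regcache);
     CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
     struct displaced_step_closure *closure
       = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                           regcache);             (n1)
     regcache_write_pc (regcache, copy);
     ... single-step the thread at COPY ...                        (n2)
     gdbarch_displaced_step_fixup (gdbarch, closure, original,
                                   copy, regcache);                (n3)
     gdbarch_displaced_step_free_closure (gdbarch, closure);
     ... resume the thread normally ...                            (n4)  */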
1412
1413 struct displaced_step_request
1414 {
1415 ptid_t ptid;
1416 struct displaced_step_request *next;
1417 };
1418
1419 /* Per-inferior displaced stepping state. */
1420 struct displaced_step_inferior_state
1421 {
1422 /* Pointer to next in linked list. */
1423 struct displaced_step_inferior_state *next;
1424
1425 /* The process this displaced step state refers to. */
1426 int pid;
1427
1428 /* A queue of pending displaced stepping requests. One entry per
1429 thread that needs to do a displaced step. */
1430 struct displaced_step_request *step_request_queue;
1431
1432 /* If this is not null_ptid, this is the thread carrying out a
1433 displaced single-step in process PID. This thread's state will
1434 require fixing up once it has completed its step. */
1435 ptid_t step_ptid;
1436
1437 /* The architecture the thread had when we stepped it. */
1438 struct gdbarch *step_gdbarch;
1439
1440   /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1441 for post-step cleanup. */
1442 struct displaced_step_closure *step_closure;
1443
1444 /* The address of the original instruction, and the copy we
1445 made. */
1446 CORE_ADDR step_original, step_copy;
1447
1448 /* Saved contents of copy area. */
1449 gdb_byte *step_saved_copy;
1450 };
1451
1452 /* The list of states of processes involved in displaced stepping
1453 presently. */
1454 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1455
1456 /* Get the displaced stepping state of process PID. */
1457
1458 static struct displaced_step_inferior_state *
1459 get_displaced_stepping_state (int pid)
1460 {
1461 struct displaced_step_inferior_state *state;
1462
1463 for (state = displaced_step_inferior_states;
1464 state != NULL;
1465 state = state->next)
1466 if (state->pid == pid)
1467 return state;
1468
1469 return NULL;
1470 }
1471
1472 /* Add a new displaced stepping state for process PID to the displaced
1473    stepping state list, or return a pointer to the existing
1474    entry if one already exists.  Never returns NULL.  */
1475
1476 static struct displaced_step_inferior_state *
1477 add_displaced_stepping_state (int pid)
1478 {
1479 struct displaced_step_inferior_state *state;
1480
1481 for (state = displaced_step_inferior_states;
1482 state != NULL;
1483 state = state->next)
1484 if (state->pid == pid)
1485 return state;
1486
1487 state = xcalloc (1, sizeof (*state));
1488 state->pid = pid;
1489 state->next = displaced_step_inferior_states;
1490 displaced_step_inferior_states = state;
1491
1492 return state;
1493 }
1494
1495 /* If the inferior is displaced stepping and ADDR equals the starting address
1496    of the copy area, return the corresponding displaced_step_closure.  Otherwise,
1497 return NULL. */
1498
1499 struct displaced_step_closure*
1500 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1501 {
1502 struct displaced_step_inferior_state *displaced
1503 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1504
1505 /* If checking the mode of displaced instruction in copy area. */
1506 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1507 && (displaced->step_copy == addr))
1508 return displaced->step_closure;
1509
1510 return NULL;
1511 }
1512
1513 /* Remove the displaced stepping state of process PID. */
1514
1515 static void
1516 remove_displaced_stepping_state (int pid)
1517 {
1518 struct displaced_step_inferior_state *it, **prev_next_p;
1519
1520 gdb_assert (pid != 0);
1521
1522 it = displaced_step_inferior_states;
1523 prev_next_p = &displaced_step_inferior_states;
1524 while (it)
1525 {
1526 if (it->pid == pid)
1527 {
1528 *prev_next_p = it->next;
1529 xfree (it);
1530 return;
1531 }
1532
1533 prev_next_p = &it->next;
1534 it = *prev_next_p;
1535 }
1536 }
1537
1538 static void
1539 infrun_inferior_exit (struct inferior *inf)
1540 {
1541 remove_displaced_stepping_state (inf->pid);
1542 }
1543
1544 /* If ON, and the architecture supports it, GDB will use displaced
1545 stepping to step over breakpoints. If OFF, or if the architecture
1546 doesn't support it, GDB will instead use the traditional
1547 hold-and-step approach. If AUTO (which is the default), GDB will
1548 decide which technique to use to step over breakpoints depending on
1549 which of all-stop or non-stop mode is active --- displaced stepping
1550 in non-stop mode; hold-and-step in all-stop mode. */
1551
1552 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1553
1554 static void
1555 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1556 struct cmd_list_element *c,
1557 const char *value)
1558 {
1559 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1560 fprintf_filtered (file,
1561 _("Debugger's willingness to use displaced stepping "
1562 "to step over breakpoints is %s (currently %s).\n"),
1563 value, non_stop ? "on" : "off");
1564 else
1565 fprintf_filtered (file,
1566 _("Debugger's willingness to use displaced stepping "
1567 "to step over breakpoints is %s.\n"), value);
1568 }
1569
1570 /* Return non-zero if displaced stepping can/should be used to step
1571 over breakpoints. */
1572
1573 static int
1574 use_displaced_stepping (struct gdbarch *gdbarch)
1575 {
1576 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1577 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1578 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1579 && find_record_target () == NULL);
1580 }
1581
1582 /* Clean out any stray displaced stepping state. */
1583 static void
1584 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1585 {
1586 /* Indicate that there is no cleanup pending. */
1587 displaced->step_ptid = null_ptid;
1588
1589 if (displaced->step_closure)
1590 {
1591 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1592 displaced->step_closure);
1593 displaced->step_closure = NULL;
1594 }
1595 }
1596
1597 static void
1598 displaced_step_clear_cleanup (void *arg)
1599 {
1600 struct displaced_step_inferior_state *state = arg;
1601
1602 displaced_step_clear (state);
1603 }
1604
1605 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1606 void
1607 displaced_step_dump_bytes (struct ui_file *file,
1608 const gdb_byte *buf,
1609 size_t len)
1610 {
1611 int i;
1612
1613 for (i = 0; i < len; i++)
1614 fprintf_unfiltered (file, "%02x ", buf[i]);
1615 fputs_unfiltered ("\n", file);
1616 }
1617
1618 /* Prepare to single-step, using displaced stepping.
1619
1620 Note that we cannot use displaced stepping when we have a signal to
1621 deliver. If we have a signal to deliver and an instruction to step
1622 over, then after the step, there will be no indication from the
1623 target whether the thread entered a signal handler or ignored the
1624 signal and stepped over the instruction successfully --- both cases
1625 result in a simple SIGTRAP. In the first case we mustn't do a
1626 fixup, and in the second case we must --- but we can't tell which.
1627 Comments in the code for 'random signals' in handle_inferior_event
1628 explain how we handle this case instead.
1629
1630 Returns 1 if preparing was successful -- this thread is going to be
1631 stepped now; or 0 if displaced stepping this thread got queued. */
1632 static int
1633 displaced_step_prepare (ptid_t ptid)
1634 {
1635 struct cleanup *old_cleanups, *ignore_cleanups;
1636 struct thread_info *tp = find_thread_ptid (ptid);
1637 struct regcache *regcache = get_thread_regcache (ptid);
1638 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1639 CORE_ADDR original, copy;
1640 ULONGEST len;
1641 struct displaced_step_closure *closure;
1642 struct displaced_step_inferior_state *displaced;
1643 int status;
1644
1645 /* We should never reach this function if the architecture does not
1646 support displaced stepping. */
1647 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1648
1649 /* Disable range stepping while executing in the scratch pad. We
1650 want a single-step even if executing the displaced instruction in
1651 the scratch buffer lands within the stepping range (e.g., a
1652 jump/branch). */
1653 tp->control.may_range_step = 0;
1654
1655 /* We have to displaced step one thread at a time, as we only have
1656 access to a single scratch space per inferior. */
1657
1658 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1659
1660 if (!ptid_equal (displaced->step_ptid, null_ptid))
1661 {
1662 /* Already waiting for a displaced step to finish. Defer this
1663 request and place it in the queue. */

1664 struct displaced_step_request *req, *new_req;
1665
1666 if (debug_displaced)
1667 fprintf_unfiltered (gdb_stdlog,
1668 "displaced: defering step of %s\n",
1669 target_pid_to_str (ptid));
1670
1671 new_req = xmalloc (sizeof (*new_req));
1672 new_req->ptid = ptid;
1673 new_req->next = NULL;
1674
1675 if (displaced->step_request_queue)
1676 {
1677 for (req = displaced->step_request_queue;
1678 req && req->next;
1679 req = req->next)
1680 ;
1681 req->next = new_req;
1682 }
1683 else
1684 displaced->step_request_queue = new_req;
1685
1686 return 0;
1687 }
1688 else
1689 {
1690 if (debug_displaced)
1691 fprintf_unfiltered (gdb_stdlog,
1692 "displaced: stepping %s now\n",
1693 target_pid_to_str (ptid));
1694 }
1695
1696 displaced_step_clear (displaced);
1697
1698 old_cleanups = save_inferior_ptid ();
1699 inferior_ptid = ptid;
1700
1701 original = regcache_read_pc (regcache);
1702
1703 copy = gdbarch_displaced_step_location (gdbarch);
1704 len = gdbarch_max_insn_length (gdbarch);
1705
1706 /* Save the original contents of the copy area. */
1707 displaced->step_saved_copy = xmalloc (len);
1708 ignore_cleanups = make_cleanup (free_current_contents,
1709 &displaced->step_saved_copy);
1710 status = target_read_memory (copy, displaced->step_saved_copy, len);
1711 if (status != 0)
1712 throw_error (MEMORY_ERROR,
1713 _("Error accessing memory address %s (%s) for "
1714 "displaced-stepping scratch space."),
1715 paddress (gdbarch, copy), safe_strerror (status));
1716 if (debug_displaced)
1717 {
1718 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1719 paddress (gdbarch, copy));
1720 displaced_step_dump_bytes (gdb_stdlog,
1721 displaced->step_saved_copy,
1722 len);
1723 };
1724
1725 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1726 original, copy, regcache);
1727
1728 /* We don't support the fully-simulated case at present. */
1729 gdb_assert (closure);
1730
1731 /* Save the information we need to fix things up if the step
1732 succeeds. */
1733 displaced->step_ptid = ptid;
1734 displaced->step_gdbarch = gdbarch;
1735 displaced->step_closure = closure;
1736 displaced->step_original = original;
1737 displaced->step_copy = copy;
1738
1739 make_cleanup (displaced_step_clear_cleanup, displaced);
1740
1741 /* Resume execution at the copy. */
1742 regcache_write_pc (regcache, copy);
1743
1744 discard_cleanups (ignore_cleanups);
1745
1746 do_cleanups (old_cleanups);
1747
1748 if (debug_displaced)
1749 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1750 paddress (gdbarch, copy));
1751
1752 return 1;
1753 }
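
/* A minimal caller sketch, mirroring the use in resume () below: a
   caller that wants to step over a breakpoint with displaced stepping
   checks the return value, and when

     displaced_step_prepare (inferior_ptid)

   returns 0 it simply returns, because the request was queued behind
   another thread's displaced step and this thread will be resumed
   later, once the in-flight request finishes.  */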
1754
1755 static void
1756 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1757 const gdb_byte *myaddr, int len)
1758 {
1759 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1760
1761 inferior_ptid = ptid;
1762 write_memory (memaddr, myaddr, len);
1763 do_cleanups (ptid_cleanup);
1764 }
1765
1766 /* Restore the contents of the copy area for thread PTID. */
1767
1768 static void
1769 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1770 ptid_t ptid)
1771 {
1772 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1773
1774 write_memory_ptid (ptid, displaced->step_copy,
1775 displaced->step_saved_copy, len);
1776 if (debug_displaced)
1777 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1778 target_pid_to_str (ptid),
1779 paddress (displaced->step_gdbarch,
1780 displaced->step_copy));
1781 }
1782
1783 static void
1784 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1785 {
1786 struct cleanup *old_cleanups;
1787 struct displaced_step_inferior_state *displaced
1788 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1789
1790 /* Was any thread of this process doing a displaced step? */
1791 if (displaced == NULL)
1792 return;
1793
1794 /* Was this event for the pid we displaced? */
1795 if (ptid_equal (displaced->step_ptid, null_ptid)
1796 || ! ptid_equal (displaced->step_ptid, event_ptid))
1797 return;
1798
1799 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1800
1801 displaced_step_restore (displaced, displaced->step_ptid);
1802
1803 /* Did the instruction complete successfully? */
1804 if (signal == GDB_SIGNAL_TRAP)
1805 {
1806 /* Fixup may need to read memory/registers. Switch to the
1807 thread that we're fixing up. */
1808 switch_to_thread (event_ptid);
1809
1810 /* Fix up the resulting state. */
1811 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1812 displaced->step_closure,
1813 displaced->step_original,
1814 displaced->step_copy,
1815 get_thread_regcache (displaced->step_ptid));
1816 }
1817 else
1818 {
1819 /* Since the instruction didn't complete, all we can do is
1820 relocate the PC. */
1821 struct regcache *regcache = get_thread_regcache (event_ptid);
1822 CORE_ADDR pc = regcache_read_pc (regcache);
1823
1824 pc = displaced->step_original + (pc - displaced->step_copy);
1825 regcache_write_pc (regcache, pc);
1826 }
1827
1828 do_cleanups (old_cleanups);
1829
1830 displaced->step_ptid = null_ptid;
1831
1832 /* Are there any pending displaced stepping requests? If so, run
1833 one now. Leave the state object around, since we're likely to
1834 need it again soon. */
1835 while (displaced->step_request_queue)
1836 {
1837 struct displaced_step_request *head;
1838 ptid_t ptid;
1839 struct regcache *regcache;
1840 struct gdbarch *gdbarch;
1841 CORE_ADDR actual_pc;
1842 struct address_space *aspace;
1843
1844 head = displaced->step_request_queue;
1845 ptid = head->ptid;
1846 displaced->step_request_queue = head->next;
1847 xfree (head);
1848
1849 context_switch (ptid);
1850
1851 regcache = get_thread_regcache (ptid);
1852 actual_pc = regcache_read_pc (regcache);
1853 aspace = get_regcache_aspace (regcache);
1854
1855 if (breakpoint_here_p (aspace, actual_pc))
1856 {
1857 if (debug_displaced)
1858 fprintf_unfiltered (gdb_stdlog,
1859 "displaced: stepping queued %s now\n",
1860 target_pid_to_str (ptid));
1861
1862 displaced_step_prepare (ptid);
1863
1864 gdbarch = get_regcache_arch (regcache);
1865
1866 if (debug_displaced)
1867 {
1868 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1869 gdb_byte buf[4];
1870
1871 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1872 paddress (gdbarch, actual_pc));
1873 read_memory (actual_pc, buf, sizeof (buf));
1874 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1875 }
1876
1877 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1878 displaced->step_closure))
1879 target_resume (ptid, 1, GDB_SIGNAL_0);
1880 else
1881 target_resume (ptid, 0, GDB_SIGNAL_0);
1882
1883 /* Done, we're stepping a thread. */
1884 break;
1885 }
1886 else
1887 {
1888 int step;
1889 struct thread_info *tp = inferior_thread ();
1890
1891 /* The breakpoint we were sitting under has since been
1892 removed. */
1893 tp->control.trap_expected = 0;
1894
1895 /* Go back to what we were trying to do. */
1896 step = currently_stepping (tp);
1897
1898 if (debug_displaced)
1899 fprintf_unfiltered (gdb_stdlog,
1900 "displaced: breakpoint is gone: %s, step(%d)\n",
1901 target_pid_to_str (tp->ptid), step);
1902
1903 target_resume (ptid, step, GDB_SIGNAL_0);
1904 tp->suspend.stop_signal = GDB_SIGNAL_0;
1905
1906 /* This request was discarded. See if there's any other
1907 thread waiting for its turn. */
1908 }
1909 }
1910 }
1911
1912 /* Update global variables holding ptids to hold NEW_PTID if they were
1913 holding OLD_PTID. */
1914 static void
1915 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1916 {
1917 struct displaced_step_request *it;
1918 struct displaced_step_inferior_state *displaced;
1919
1920 if (ptid_equal (inferior_ptid, old_ptid))
1921 inferior_ptid = new_ptid;
1922
1923 for (displaced = displaced_step_inferior_states;
1924 displaced;
1925 displaced = displaced->next)
1926 {
1927 if (ptid_equal (displaced->step_ptid, old_ptid))
1928 displaced->step_ptid = new_ptid;
1929
1930 for (it = displaced->step_request_queue; it; it = it->next)
1931 if (ptid_equal (it->ptid, old_ptid))
1932 it->ptid = new_ptid;
1933 }
1934 }
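
/* The function above is meant to run as a "thread_ptid_changed"
   observer (presumably attached with observer_attach_thread_ptid_changed
   during this module's initialization), so that when a target changes a
   thread's ptid -- e.g., across an exec -- the displaced-stepping
   bookkeeping keeps pointing at the right thread.  */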
1935
1936 \f
1937 /* Resuming. */
1938
1939 /* Things to clean up if we QUIT out of resume (). */
1940 static void
1941 resume_cleanups (void *ignore)
1942 {
1943 if (!ptid_equal (inferior_ptid, null_ptid))
1944 delete_single_step_breakpoints (inferior_thread ());
1945
1946 normal_stop ();
1947 }
1948
1949 static const char schedlock_off[] = "off";
1950 static const char schedlock_on[] = "on";
1951 static const char schedlock_step[] = "step";
1952 static const char *const scheduler_enums[] = {
1953 schedlock_off,
1954 schedlock_on,
1955 schedlock_step,
1956 NULL
1957 };
1958 static const char *scheduler_mode = schedlock_off;
1959 static void
1960 show_scheduler_mode (struct ui_file *file, int from_tty,
1961 struct cmd_list_element *c, const char *value)
1962 {
1963 fprintf_filtered (file,
1964 _("Mode for locking scheduler "
1965 "during execution is \"%s\".\n"),
1966 value);
1967 }
1968
1969 static void
1970 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1971 {
1972 if (!target_can_lock_scheduler)
1973 {
1974 scheduler_mode = schedlock_off;
1975 error (_("Target '%s' cannot support this command."), target_shortname);
1976 }
1977 }
1978
1979 /* True if execution commands resume all threads of all processes by
1980 default; otherwise, resume only threads of the current inferior
1981 process. */
1982 int sched_multi = 0;
1983
1984 /* Try to set up for software single stepping over the specified location.
1985 Return 1 if target_resume() should use hardware single step.
1986
1987 GDBARCH the current gdbarch.
1988 PC the location to step over. */
1989
1990 static int
1991 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1992 {
1993 int hw_step = 1;
1994
1995 if (execution_direction == EXEC_FORWARD
1996 && gdbarch_software_single_step_p (gdbarch)
1997 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1998 {
1999 hw_step = 0;
2000 }
2001 return hw_step;
2002 }
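
/* Usage sketch, as in resume () below: when a hardware single-step was
   requested,

     step = maybe_software_singlestep (gdbarch, pc);

   leaves STEP set only if target_resume () should still hardware
   single-step; if software single-step breakpoints were planted
   instead, STEP decays to 0 and the thread is simply continued.  */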
2003
2004 ptid_t
2005 user_visible_resume_ptid (int step)
2006 {
2007 /* By default, resume all threads of all processes. */
2008 ptid_t resume_ptid = RESUME_ALL;
2009
2010 /* Maybe resume only all threads of the current process. */
2011 if (!sched_multi && target_supports_multi_process ())
2012 {
2013 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2014 }
2015
2016 /* Maybe resume a single thread after all. */
2017 if (non_stop)
2018 {
2019 /* With non-stop mode on, threads are always handled
2020 individually. */
2021 resume_ptid = inferior_ptid;
2022 }
2023 else if ((scheduler_mode == schedlock_on)
2024 || (scheduler_mode == schedlock_step && step))
2025 {
2026 /* User-settable 'scheduler' mode requires solo thread resume. */
2027 resume_ptid = inferior_ptid;
2028 }
2029
2030 /* We may actually resume fewer threads at first, e.g., if a thread
2031 is stopped at a breakpoint that needs stepping-off, but that
2032 should not be visible to the user/frontend, and neither should
2033 the frontend/user be allowed to proceed any of the threads that
2034 happen to be stopped for internal run control handling, if a
2035 previous command wanted them resumed. */
2036 return resume_ptid;
2037 }
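
/* For example: under "set non-stop on" or "set scheduler-locking on"
   this returns just inferior_ptid; with the defaults on a target that
   supports multiple processes it returns a ptid covering all threads
   of the current process; and with "set schedule-multiple on" it
   returns RESUME_ALL.  */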
2038
2039 /* Resume the inferior, but allow a QUIT. This is useful if the user
2040 wants to interrupt some lengthy single-stepping operation
2041 (for child processes, the SIGINT goes to the inferior, and so
2042 we get a SIGINT random_signal, but for remote debugging and perhaps
2043 other targets, that's not true).
2044
2045 STEP nonzero if we should step (zero to continue instead).
2046 SIG is the signal to give the inferior (zero for none). */
2047 void
2048 resume (int step, enum gdb_signal sig)
2049 {
2050 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
2051 struct regcache *regcache = get_current_regcache ();
2052 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2053 struct thread_info *tp = inferior_thread ();
2054 CORE_ADDR pc = regcache_read_pc (regcache);
2055 struct address_space *aspace = get_regcache_aspace (regcache);
2056 ptid_t resume_ptid;
2057 /* From here on, this represents the caller's step vs continue
2058 request, while STEP represents what we'll actually request the
2059 target to do. STEP can decay from a step to a continue, if e.g.,
2060 we need to implement single-stepping with breakpoints (software
2061 single-step). When deciding whether "set scheduler-locking step"
2062 applies, it's the caller's intention that counts. */
2063 const int entry_step = step;
2064
2065 tp->stepped_breakpoint = 0;
2066
2067 QUIT;
2068
2069 if (current_inferior ()->waiting_for_vfork_done)
2070 {
2071 /* Don't try to single-step a vfork parent that is waiting for
2072 the child to get out of the shared memory region (by exec'ing
2073 or exiting). This is particularly important on software
2074 single-step archs, as the child process would trip on the
2075 software single step breakpoint inserted for the parent
2076 process. Since the parent will not actually execute any
2077 instruction until the child is out of the shared region (such
2078 are vfork's semantics), it is safe to simply continue it.
2079 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2080 the parent, and tell it to `keep_going', which automatically
2081 re-sets it to stepping. */
2082 if (debug_infrun)
2083 fprintf_unfiltered (gdb_stdlog,
2084 "infrun: resume : clear step\n");
2085 step = 0;
2086 }
2087
2088 if (debug_infrun)
2089 fprintf_unfiltered (gdb_stdlog,
2090 "infrun: resume (step=%d, signal=%s), "
2091 "trap_expected=%d, current thread [%s] at %s\n",
2092 step, gdb_signal_to_symbol_string (sig),
2093 tp->control.trap_expected,
2094 target_pid_to_str (inferior_ptid),
2095 paddress (gdbarch, pc));
2096
2097 /* Normally, by the time we reach `resume', the breakpoints are either
2098 removed or inserted, as appropriate. The exception is if we're sitting
2099 at a permanent breakpoint; we need to step over it, but permanent
2100 breakpoints can't be removed. So we have to test for it here. */
2101 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2102 {
2103 if (sig != GDB_SIGNAL_0)
2104 {
2105 /* We have a signal to pass to the inferior. The resume
2106 may, or may not take us to the signal handler. If this
2107 is a step, we'll need to stop in the signal handler, if
2108 there's one, (if the target supports stepping into
2109 handlers), or in the next mainline instruction, if
2110 there's no handler. If this is a continue, we need to be
2111 sure to run the handler with all breakpoints inserted.
2112 In all cases, set a breakpoint at the current address
2113 (where the handler returns to), and once that breakpoint
2114 is hit, resume skipping the permanent breakpoint. If
2115 that breakpoint isn't hit, then we've stepped into the
2116 signal handler (or hit some other event). We'll delete
2117 the step-resume breakpoint then. */
2118
2119 if (debug_infrun)
2120 fprintf_unfiltered (gdb_stdlog,
2121 "infrun: resume: skipping permanent breakpoint, "
2122 "deliver signal first\n");
2123
2124 clear_step_over_info ();
2125 tp->control.trap_expected = 0;
2126
2127 if (tp->control.step_resume_breakpoint == NULL)
2128 {
2129 /* Set a "high-priority" step-resume, as we don't want
2130 user breakpoints at PC to trigger (again) when this
2131 hits. */
2132 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2133 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2134
2135 tp->step_after_step_resume_breakpoint = step;
2136 }
2137
2138 insert_breakpoints ();
2139 }
2140 else
2141 {
2142 /* There's no signal to pass, we can go ahead and skip the
2143 permanent breakpoint manually. */
2144 if (debug_infrun)
2145 fprintf_unfiltered (gdb_stdlog,
2146 "infrun: resume: skipping permanent breakpoint\n");
2147 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2148 /* Update pc to reflect the new address from which we will
2149 execute instructions. */
2150 pc = regcache_read_pc (regcache);
2151
2152 if (step)
2153 {
2154 /* We've already advanced the PC, so the stepping part
2155 is done. Now we need to arrange for a trap to be
2156 reported to handle_inferior_event. Set a breakpoint
2157 at the current PC, and run to it. Don't update
2158 prev_pc, because if we end in
2159 switch_back_to_stepped_thread, we want the "expected thread
2160 advanced also" branch to be taken. IOW, we don't
2161 want this thread to step further from PC
2162 (overstep). */
2163 insert_single_step_breakpoint (gdbarch, aspace, pc);
2164 insert_breakpoints ();
2165
2166 tp->suspend.stop_signal = GDB_SIGNAL_0;
2167 /* We're continuing with all breakpoints inserted. It's
2168 safe to let the target bypass signals. */
2169 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2170 /* ... and safe to let other threads run, according to
2171 schedlock. */
2172 resume_ptid = user_visible_resume_ptid (entry_step);
2173 target_resume (resume_ptid, 0, GDB_SIGNAL_0);
2174 discard_cleanups (old_cleanups);
2175 return;
2176 }
2177 }
2178 }
2179
2180 /* If we have a breakpoint to step over, make sure to do a single
2181 step only. Same if we have software watchpoints. */
2182 if (tp->control.trap_expected || bpstat_should_step ())
2183 tp->control.may_range_step = 0;
2184
2185 /* If enabled, step over breakpoints by executing a copy of the
2186 instruction at a different address.
2187
2188 We can't use displaced stepping when we have a signal to deliver;
2189 the comments for displaced_step_prepare explain why. The
2190 comments in the handle_inferior event for dealing with 'random
2191 signals' explain what we do instead.
2192
2193 We can't use displaced stepping when we are waiting for vfork_done
2194 event; displaced stepping breaks the vfork child in the same way a
2195 software single-step breakpoint would. */
2196 if (use_displaced_stepping (gdbarch)
2197 && tp->control.trap_expected
2198 && sig == GDB_SIGNAL_0
2199 && !current_inferior ()->waiting_for_vfork_done)
2200 {
2201 struct displaced_step_inferior_state *displaced;
2202
2203 if (!displaced_step_prepare (inferior_ptid))
2204 {
2205 /* Got placed in displaced stepping queue. Will be resumed
2206 later when all the currently queued displaced stepping
2207 requests finish. The thread is not executing at this
2208 point, and the call to set_executing will be made later.
2209 But we need to call set_running here, since from the
2210 user/frontend's point of view, threads were set running.
2211 Unless we're calling an inferior function, as in that
2212 case we pretend the inferior doesn't run at all. */
2213 if (!tp->control.in_infcall)
2214 set_running (user_visible_resume_ptid (entry_step), 1);
2215 discard_cleanups (old_cleanups);
2216 return;
2217 }
2218
2219 /* Update pc to reflect the new address from which we will execute
2220 instructions due to displaced stepping. */
2221 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
2222
2223 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2224 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2225 displaced->step_closure);
2226 }
2227
2228 /* Do we need to do it the hard way, w/temp breakpoints? */
2229 else if (step)
2230 step = maybe_software_singlestep (gdbarch, pc);
2231
2232 /* Currently, our software single-step implementation leads to different
2233 results than hardware single-stepping in one situation: when stepping
2234 into delivering a signal which has an associated signal handler,
2235 hardware single-step will stop at the first instruction of the handler,
2236 while software single-step will simply skip execution of the handler.
2237
2238 For now, this difference in behavior is accepted since there is no
2239 easy way to actually implement single-stepping into a signal handler
2240 without kernel support.
2241
2242 However, there is one scenario where this difference leads to follow-on
2243 problems: if we're stepping off a breakpoint by removing all breakpoints
2244 and then single-stepping. In this case, the software single-step
2245 behavior means that even if there is a *breakpoint* in the signal
2246 handler, GDB still would not stop.
2247
2248 Fortunately, we can at least fix this particular issue. We detect
2249 here the case where we are about to deliver a signal while software
2250 single-stepping with breakpoints removed. In this situation, we
2251 revert the decisions to remove all breakpoints and insert single-
2252 step breakpoints, and instead we install a step-resume breakpoint
2253 at the current address, deliver the signal without stepping, and
2254 once we arrive back at the step-resume breakpoint, actually step
2255 over the breakpoint we originally wanted to step over. */
2256 if (thread_has_single_step_breakpoints_set (tp)
2257 && sig != GDB_SIGNAL_0
2258 && step_over_info_valid_p ())
2259 {
2260 /* If we have nested signals or a pending signal is delivered
2261 immediately after a handler returns, we might already have
2262 a step-resume breakpoint set on the earlier handler. We cannot
2263 set another step-resume breakpoint; just continue on until the
2264 original breakpoint is hit. */
2265 if (tp->control.step_resume_breakpoint == NULL)
2266 {
2267 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2268 tp->step_after_step_resume_breakpoint = 1;
2269 }
2270
2271 delete_single_step_breakpoints (tp);
2272
2273 clear_step_over_info ();
2274 tp->control.trap_expected = 0;
2275
2276 insert_breakpoints ();
2277 }
2278
2279 /* If STEP is set, it's a request to use hardware stepping
2280 facilities. But in that case, we should never
2281 use singlestep breakpoint. */
2282 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2283
2284 /* Decide the set of threads to ask the target to resume. Start
2285 by assuming everything will be resumed, then narrow the set
2286 by applying increasingly restricting conditions. */
2287 resume_ptid = user_visible_resume_ptid (entry_step);
2288
2289 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
2290 (e.g., we might need to step over a breakpoint), from the
2291 user/frontend's point of view, all threads in RESUME_PTID are now
2292 running. Unless we're calling an inferior function, as in that
2293 case we pretend the inferior doesn't run at all. */
2294 if (!tp->control.in_infcall)
2295 set_running (resume_ptid, 1);
2296
2297 /* Maybe resume a single thread after all. */
2298 if ((step || thread_has_single_step_breakpoints_set (tp))
2299 && tp->control.trap_expected)
2300 {
2301 /* We're allowing a thread to run past a breakpoint it has
2302 hit, by single-stepping the thread with the breakpoint
2303 removed. In which case, we need to single-step only this
2304 thread, and keep others stopped, as they can miss this
2305 breakpoint if allowed to run. */
2306 resume_ptid = inferior_ptid;
2307 }
2308
2309 if (execution_direction != EXEC_REVERSE
2310 && step && breakpoint_inserted_here_p (aspace, pc))
2311 {
2312 /* The only case we currently need to step a breakpoint
2313 instruction is when we have a signal to deliver. See
2314 handle_signal_stop where we handle random signals that could
2315 take us out of the stepping range. Normally, in that
2316 case we end up continuing (instead of stepping) over the
2317 signal handler with a breakpoint at PC, but there are cases
2318 where we should _always_ single-step, even if we have a
2319 step-resume breakpoint, like when a software watchpoint is
2320 set. Assuming single-stepping and delivering a signal at the
2321 same time would take us to the signal handler, then we could
2322 have removed the breakpoint at PC to step over it. However,
2323 some hardware step targets (like e.g., Mac OS) can't step
2324 into signal handlers, and for those, we need to leave the
2325 breakpoint at PC inserted, as otherwise if the handler
2326 recurses and executes PC again, it'll miss the breakpoint.
2327 So we leave the breakpoint inserted anyway, but we need to
2328 record that we tried to step a breakpoint instruction, so
2329 that adjust_pc_after_break doesn't end up confused. */
2330 gdb_assert (sig != GDB_SIGNAL_0);
2331
2332 tp->stepped_breakpoint = 1;
2333
2334 /* Most targets can step a breakpoint instruction, thus
2335 executing it normally. But if this one cannot, just
2336 continue and we will hit it anyway. */
2337 if (gdbarch_cannot_step_breakpoint (gdbarch))
2338 step = 0;
2339 }
2340
2341 if (debug_displaced
2342 && use_displaced_stepping (gdbarch)
2343 && tp->control.trap_expected)
2344 {
2345 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
2346 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2347 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2348 gdb_byte buf[4];
2349
2350 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2351 paddress (resume_gdbarch, actual_pc));
2352 read_memory (actual_pc, buf, sizeof (buf));
2353 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2354 }
2355
2356 if (tp->control.may_range_step)
2357 {
2358 /* If we're resuming a thread with the PC out of the step
2359 range, then we're doing some nested/finer run control
2360 operation, like stepping the thread out of the dynamic
2361 linker or the displaced stepping scratch pad. We
2362 shouldn't have allowed a range step then. */
2363 gdb_assert (pc_in_thread_step_range (pc, tp));
2364 }
2365
2366 /* Install inferior's terminal modes. */
2367 target_terminal_inferior ();
2368
2369 /* Avoid confusing the next resume, if the next stop/resume
2370 happens to apply to another thread. */
2371 tp->suspend.stop_signal = GDB_SIGNAL_0;
2372
2373 /* Advise target which signals may be handled silently. If we have
2374 removed breakpoints because we are stepping over one (in any
2375 thread), we need to receive all signals to avoid accidentally
2376 skipping a breakpoint during execution of a signal handler. */
2377 if (step_over_info_valid_p ())
2378 target_pass_signals (0, NULL);
2379 else
2380 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2381
2382 target_resume (resume_ptid, step, sig);
2383
2384 discard_cleanups (old_cleanups);
2385 }
2386 \f
2387 /* Proceeding. */
2388
2389 /* Clear out all variables saying what to do when inferior is continued.
2390 First do this, then set the ones you want, then call `proceed'. */
2391
2392 static void
2393 clear_proceed_status_thread (struct thread_info *tp)
2394 {
2395 if (debug_infrun)
2396 fprintf_unfiltered (gdb_stdlog,
2397 "infrun: clear_proceed_status_thread (%s)\n",
2398 target_pid_to_str (tp->ptid));
2399
2400 /* If this signal should not be seen by the program, give it zero.
2401 Used for debugging signals. */
2402 if (!signal_pass_state (tp->suspend.stop_signal))
2403 tp->suspend.stop_signal = GDB_SIGNAL_0;
2404
2405 tp->control.trap_expected = 0;
2406 tp->control.step_range_start = 0;
2407 tp->control.step_range_end = 0;
2408 tp->control.may_range_step = 0;
2409 tp->control.step_frame_id = null_frame_id;
2410 tp->control.step_stack_frame_id = null_frame_id;
2411 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2412 tp->stop_requested = 0;
2413
2414 tp->control.stop_step = 0;
2415
2416 tp->control.proceed_to_finish = 0;
2417
2418 tp->control.command_interp = NULL;
2419
2420 /* Discard any remaining commands or status from previous stop. */
2421 bpstat_clear (&tp->control.stop_bpstat);
2422 }
2423
2424 void
2425 clear_proceed_status (int step)
2426 {
2427 if (!non_stop)
2428 {
2429 struct thread_info *tp;
2430 ptid_t resume_ptid;
2431
2432 resume_ptid = user_visible_resume_ptid (step);
2433
2434 /* In all-stop mode, delete the per-thread status of all threads
2435 we're about to resume, implicitly and explicitly. */
2436 ALL_NON_EXITED_THREADS (tp)
2437 {
2438 if (!ptid_match (tp->ptid, resume_ptid))
2439 continue;
2440 clear_proceed_status_thread (tp);
2441 }
2442 }
2443
2444 if (!ptid_equal (inferior_ptid, null_ptid))
2445 {
2446 struct inferior *inferior;
2447
2448 if (non_stop)
2449 {
2450 /* If in non-stop mode, only delete the per-thread status of
2451 the current thread. */
2452 clear_proceed_status_thread (inferior_thread ());
2453 }
2454
2455 inferior = current_inferior ();
2456 inferior->control.stop_soon = NO_STOP_QUIETLY;
2457 }
2458
2459 stop_after_trap = 0;
2460
2461 clear_step_over_info ();
2462
2463 observer_notify_about_to_proceed ();
2464
2465 if (stop_registers)
2466 {
2467 regcache_xfree (stop_registers);
2468 stop_registers = NULL;
2469 }
2470 }
2471
2472 /* Returns true if TP is still stopped at a breakpoint that needs
2473 stepping-over in order to make progress. If the breakpoint is gone
2474 meanwhile, we can skip the whole step-over dance. */
2475
2476 static int
2477 thread_still_needs_step_over (struct thread_info *tp)
2478 {
2479 if (tp->stepping_over_breakpoint)
2480 {
2481 struct regcache *regcache = get_thread_regcache (tp->ptid);
2482
2483 if (breakpoint_here_p (get_regcache_aspace (regcache),
2484 regcache_read_pc (regcache))
2485 == ordinary_breakpoint_here)
2486 return 1;
2487
2488 tp->stepping_over_breakpoint = 0;
2489 }
2490
2491 return 0;
2492 }
2493
2494 /* Returns true if scheduler locking applies. STEP indicates whether
2495 we're about to do a step/next-like command to a thread. */
2496
2497 static int
2498 schedlock_applies (int step)
2499 {
2500 return (scheduler_mode == schedlock_on
2501 || (scheduler_mode == schedlock_step
2502 && step));
2503 }
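
/* For instance, schedlock_applies (1) is true under both "set
   scheduler-locking on" and "set scheduler-locking step", while
   schedlock_applies (0) is true only under "on".  */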
2504
2505 /* Look for a thread other than EXCEPT that has previously reported a
2506 breakpoint event, and thus needs a step-over in order to make
2507 progress. Returns NULL if none is found. STEP indicates whether
2508 we're about to step the current thread, in order to decide whether
2509 "set scheduler-locking step" applies. */
2510
2511 static struct thread_info *
2512 find_thread_needs_step_over (int step, struct thread_info *except)
2513 {
2514 struct thread_info *tp, *current;
2515
2516 /* With non-stop mode on, threads are always handled individually. */
2517 gdb_assert (! non_stop);
2518
2519 current = inferior_thread ();
2520
2521 /* If scheduler locking applies, we can avoid iterating over all
2522 threads. */
2523 if (schedlock_applies (step))
2524 {
2525 if (except != current
2526 && thread_still_needs_step_over (current))
2527 return current;
2528
2529 return NULL;
2530 }
2531
2532 ALL_NON_EXITED_THREADS (tp)
2533 {
2534 /* Ignore the EXCEPT thread. */
2535 if (tp == except)
2536 continue;
2537 /* Ignore threads of processes we're not resuming. */
2538 if (!sched_multi
2539 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2540 continue;
2541
2542 if (thread_still_needs_step_over (tp))
2543 return tp;
2544 }
2545
2546 return NULL;
2547 }
2548
2549 /* Basic routine for continuing the program in various fashions.
2550
2551 ADDR is the address to resume at, or -1 for resume where stopped.
2552 SIGGNAL is the signal to give it, or 0 for none,
2553 or -1 for act according to how it stopped.
2554 STEP is nonzero if we should trap after one instruction.
2555 -1 means return after that and print nothing.
2556 You should probably set various step_... variables
2557 before calling here, if you are stepping.
2558
2559 You should call clear_proceed_status before calling proceed. */
2560
2561 void
2562 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2563 {
2564 struct regcache *regcache;
2565 struct gdbarch *gdbarch;
2566 struct thread_info *tp;
2567 CORE_ADDR pc;
2568 struct address_space *aspace;
2569
2570 /* If we're stopped at a fork/vfork, follow the branch set by the
2571 "set follow-fork-mode" command; otherwise, we'll just proceed
2572 resuming the current thread. */
2573 if (!follow_fork ())
2574 {
2575 /* The target for some reason decided not to resume. */
2576 normal_stop ();
2577 if (target_can_async_p ())
2578 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2579 return;
2580 }
2581
2582 /* We'll update this if & when we switch to a new thread. */
2583 previous_inferior_ptid = inferior_ptid;
2584
2585 regcache = get_current_regcache ();
2586 gdbarch = get_regcache_arch (regcache);
2587 aspace = get_regcache_aspace (regcache);
2588 pc = regcache_read_pc (regcache);
2589 tp = inferior_thread ();
2590
2591 if (step > 0)
2592 step_start_function = find_pc_function (pc);
2593 if (step < 0)
2594 stop_after_trap = 1;
2595
2596 /* Fill in with reasonable starting values. */
2597 init_thread_stepping_state (tp);
2598
2599 if (addr == (CORE_ADDR) -1)
2600 {
2601 if (pc == stop_pc
2602 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
2603 && execution_direction != EXEC_REVERSE)
2604 /* There is a breakpoint at the address we will resume at,
2605 step one instruction before inserting breakpoints so that
2606 we do not stop right away (and report a second hit at this
2607 breakpoint).
2608
2609 Note, we don't do this in reverse, because we won't
2610 actually be executing the breakpoint insn anyway.
2611 We'll be (un-)executing the previous instruction. */
2612 tp->stepping_over_breakpoint = 1;
2613 else if (gdbarch_single_step_through_delay_p (gdbarch)
2614 && gdbarch_single_step_through_delay (gdbarch,
2615 get_current_frame ()))
2616 /* We stepped onto an instruction that needs to be stepped
2617 again before re-inserting the breakpoint, do so. */
2618 tp->stepping_over_breakpoint = 1;
2619 }
2620 else
2621 {
2622 regcache_write_pc (regcache, addr);
2623 }
2624
2625 if (siggnal != GDB_SIGNAL_DEFAULT)
2626 tp->suspend.stop_signal = siggnal;
2627
2628 /* Record the interpreter that issued the execution command that
2629 caused this thread to resume. If the top level interpreter is
2630 MI/async, and the execution command was a CLI command
2631 (next/step/etc.), we'll want to print stop event output to the MI
2632 console channel (the stepped-to line, etc.), as if the user
2633 entered the execution command on a real GDB console. */
2634 inferior_thread ()->control.command_interp = command_interp ();
2635
2636 if (debug_infrun)
2637 fprintf_unfiltered (gdb_stdlog,
2638 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2639 paddress (gdbarch, addr),
2640 gdb_signal_to_symbol_string (siggnal), step);
2641
2642 if (non_stop)
2643 /* In non-stop, each thread is handled individually. The context
2644 must already be set to the right thread here. */
2645 ;
2646 else
2647 {
2648 struct thread_info *step_over;
2649
2650 /* In a multi-threaded task we may select another thread and
2651 then continue or step.
2652
2653 But if the old thread was stopped at a breakpoint, it will
2654 immediately cause another breakpoint stop without any
2655 execution (i.e. it will report a breakpoint hit incorrectly).
2656 So we must step over it first.
2657
2658 Look for a thread other than the current (TP) that reported a
2659 breakpoint hit and hasn't been resumed yet since. */
2660 step_over = find_thread_needs_step_over (step, tp);
2661 if (step_over != NULL)
2662 {
2663 if (debug_infrun)
2664 fprintf_unfiltered (gdb_stdlog,
2665 "infrun: need to step-over [%s] first\n",
2666 target_pid_to_str (step_over->ptid));
2667
2668 /* Store the prev_pc for the stepping thread too, needed by
2669 switch_back_to_stepped_thread. */
2670 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2671 switch_to_thread (step_over->ptid);
2672 tp = step_over;
2673 }
2674 }
2675
2676 /* If we need to step over a breakpoint, and we're not using
2677 displaced stepping to do so, insert all breakpoints (watchpoints,
2678 etc.) but the one we're stepping over, step one instruction, and
2679 then re-insert the breakpoint when that step is finished. */
2680 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2681 {
2682 struct regcache *regcache = get_current_regcache ();
2683
2684 set_step_over_info (get_regcache_aspace (regcache),
2685 regcache_read_pc (regcache), 0);
2686 }
2687 else
2688 clear_step_over_info ();
2689
2690 insert_breakpoints ();
2691
2692 tp->control.trap_expected = tp->stepping_over_breakpoint;
2693
2694 annotate_starting ();
2695
2696 /* Make sure that output from GDB appears before output from the
2697 inferior. */
2698 gdb_flush (gdb_stdout);
2699
2700 /* Refresh prev_pc value just prior to resuming. This used to be
2701 done in stop_waiting, however, setting prev_pc there did not handle
2702 scenarios such as inferior function calls or returning from
2703 a function via the return command. In those cases, the prev_pc
2704 value was not set properly for subsequent commands. The prev_pc value
2705 is used to initialize the starting line number in the ecs. With an
2706 invalid value, the gdb next command ends up stopping at the position
2707 represented by the next line table entry past our start position.
2708 On platforms that generate one line table entry per line, this
2709 is not a problem. However, on the ia64, the compiler generates
2710 extraneous line table entries that do not increase the line number.
2711 When we issue the gdb next command on the ia64 after an inferior call
2712 or a return command, we often end up a few instructions forward, still
2713 within the original line we started.
2714
2715 An attempt was made to refresh the prev_pc at the same time the
2716 execution_control_state is initialized (for instance, just before
2717 waiting for an inferior event). But this approach did not work
2718 because of platforms that use ptrace, where the pc register cannot
2719 be read unless the inferior is stopped. At that point, we are not
2720 guaranteed the inferior is stopped and so the regcache_read_pc() call
2721 can fail. Setting the prev_pc value here ensures the value is updated
2722 correctly when the inferior is stopped. */
2723 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2724
2725 /* Resume inferior. */
2726 resume (tp->control.trap_expected || step || bpstat_should_step (),
2727 tp->suspend.stop_signal);
2728
2729 /* Wait for it to stop (if not standalone)
2730 and in any case decode why it stopped, and act accordingly. */
2731 /* Do this only if we are not using the event loop, or if the target
2732 does not support asynchronous execution. */
2733 if (!target_can_async_p ())
2734 {
2735 wait_for_inferior ();
2736 normal_stop ();
2737 }
2738 }
2739 \f
2740
2741 /* Start remote-debugging of a machine over a serial link. */
2742
2743 void
2744 start_remote (int from_tty)
2745 {
2746 struct inferior *inferior;
2747
2748 inferior = current_inferior ();
2749 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2750
2751 /* Always go on waiting for the target, regardless of the mode. */
2752 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2753 indicate to wait_for_inferior that a target should timeout if
2754 nothing is returned (instead of just blocking). Because of this,
2755 targets expecting an immediate response need to, internally, set
2756 things up so that the target_wait() is forced to eventually
2757 timeout. */
2758 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2759 differentiate to its caller what the state of the target is after
2760 the initial open has been performed. Here we're assuming that
2761 the target has stopped. It should be possible to eventually have
2762 target_open() return to the caller an indication that the target
2763 is currently running and GDB state should be set to the same as
2764 for an async run. */
2765 wait_for_inferior ();
2766
2767 /* Now that the inferior has stopped, do any bookkeeping like
2768 loading shared libraries. We want to do this before normal_stop,
2769 so that the displayed frame is up to date. */
2770 post_create_inferior (&current_target, from_tty);
2771
2772 normal_stop ();
2773 }
2774
2775 /* Initialize static vars when a new inferior begins. */
2776
2777 void
2778 init_wait_for_inferior (void)
2779 {
2780 /* These are meaningless until the first time through wait_for_inferior. */
2781
2782 breakpoint_init_inferior (inf_starting);
2783
2784 clear_proceed_status (0);
2785
2786 target_last_wait_ptid = minus_one_ptid;
2787
2788 previous_inferior_ptid = inferior_ptid;
2789
2790 /* Discard any skipped inlined frames. */
2791 clear_inline_frame_state (minus_one_ptid);
2792 }
2793
2794 \f
2795 /* Data to be passed around while handling an event. This data is
2796 discarded between events. */
2797 struct execution_control_state
2798 {
2799 ptid_t ptid;
2800 /* The thread that got the event, if this was a thread event; NULL
2801 otherwise. */
2802 struct thread_info *event_thread;
2803
2804 struct target_waitstatus ws;
2805 int stop_func_filled_in;
2806 CORE_ADDR stop_func_start;
2807 CORE_ADDR stop_func_end;
2808 const char *stop_func_name;
2809 int wait_some_more;
2810
2811 /* True if the event thread hit the single-step breakpoint of
2812 another thread. Thus the event doesn't cause a stop, the thread
2813 needs to be single-stepped past the single-step breakpoint before
2814 we can switch back to the original stepping thread. */
2815 int hit_singlestep_breakpoint;
2816 };
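
/* Typical usage sketch, matching the wait loops later in this file:
   the state is a zero-initialized stack object that lives for a single
   target event:

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (minus_one_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);

   with ecs->wait_some_more then telling the caller whether another
   event must be processed before the command is complete.  */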
2817
2818 static void handle_inferior_event (struct execution_control_state *ecs);
2819
2820 static void handle_step_into_function (struct gdbarch *gdbarch,
2821 struct execution_control_state *ecs);
2822 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2823 struct execution_control_state *ecs);
2824 static void handle_signal_stop (struct execution_control_state *ecs);
2825 static void check_exception_resume (struct execution_control_state *,
2826 struct frame_info *);
2827
2828 static void end_stepping_range (struct execution_control_state *ecs);
2829 static void stop_waiting (struct execution_control_state *ecs);
2830 static void prepare_to_wait (struct execution_control_state *ecs);
2831 static void keep_going (struct execution_control_state *ecs);
2832 static void process_event_stop_test (struct execution_control_state *ecs);
2833 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2834
2835 /* Callback for iterate_over_threads. If the thread is stopped, but
2836 the user/frontend doesn't know about that yet, go through
2837 normal_stop, as if the thread had just stopped now. ARG points at
2838 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2839 ptid_is_pid(PTID) is true, applies to all threads of the process
2840 pointed at by PTID. Otherwise, apply only to the thread pointed by
2841 PTID. */
2842
2843 static int
2844 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2845 {
2846 ptid_t ptid = * (ptid_t *) arg;
2847
2848 if ((ptid_equal (info->ptid, ptid)
2849 || ptid_equal (minus_one_ptid, ptid)
2850 || (ptid_is_pid (ptid)
2851 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2852 && is_running (info->ptid)
2853 && !is_executing (info->ptid))
2854 {
2855 struct cleanup *old_chain;
2856 struct execution_control_state ecss;
2857 struct execution_control_state *ecs = &ecss;
2858
2859 memset (ecs, 0, sizeof (*ecs));
2860
2861 old_chain = make_cleanup_restore_current_thread ();
2862
2863 overlay_cache_invalid = 1;
2864 /* Flush target cache before starting to handle each event.
2865 Target was running and cache could be stale. This is just a
2866 heuristic. Running threads may modify target memory, but we
2867 don't get any event. */
2868 target_dcache_invalidate ();
2869
2870 /* Go through handle_inferior_event/normal_stop, so we always
2871 have consistent output as if the stop event had been
2872 reported. */
2873 ecs->ptid = info->ptid;
2874 ecs->event_thread = find_thread_ptid (info->ptid);
2875 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2876 ecs->ws.value.sig = GDB_SIGNAL_0;
2877
2878 handle_inferior_event (ecs);
2879
2880 if (!ecs->wait_some_more)
2881 {
2882 struct thread_info *tp;
2883
2884 normal_stop ();
2885
2886 /* Finish off the continuations. */
2887 tp = inferior_thread ();
2888 do_all_intermediate_continuations_thread (tp, 1);
2889 do_all_continuations_thread (tp, 1);
2890 }
2891
2892 do_cleanups (old_chain);
2893 }
2894
2895 return 0;
2896 }
2897
2898 /* This function is attached as a "thread_stop_requested" observer.
2899 Clean up local state that assumed the PTID was to be resumed, and
2900 report the stop to the frontend. */
2901
2902 static void
2903 infrun_thread_stop_requested (ptid_t ptid)
2904 {
2905 struct displaced_step_inferior_state *displaced;
2906
2907 /* PTID was requested to stop. Remove it from the displaced
2908 stepping queue, so we don't try to resume it automatically. */
2909
2910 for (displaced = displaced_step_inferior_states;
2911 displaced;
2912 displaced = displaced->next)
2913 {
2914 struct displaced_step_request *it, **prev_next_p;
2915
2916 it = displaced->step_request_queue;
2917 prev_next_p = &displaced->step_request_queue;
2918 while (it)
2919 {
2920 if (ptid_match (it->ptid, ptid))
2921 {
2922 *prev_next_p = it->next;
2923 it->next = NULL;
2924 xfree (it);
2925 }
2926 else
2927 {
2928 prev_next_p = &it->next;
2929 }
2930
2931 it = *prev_next_p;
2932 }
2933 }
2934
2935 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2936 }
2937
2938 static void
2939 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2940 {
2941 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2942 nullify_last_target_wait_ptid ();
2943 }
2944
2945 /* Delete the step resume, single-step and longjmp/exception resume
2946 breakpoints of TP. */
2947
2948 static void
2949 delete_thread_infrun_breakpoints (struct thread_info *tp)
2950 {
2951 delete_step_resume_breakpoint (tp);
2952 delete_exception_resume_breakpoint (tp);
2953 delete_single_step_breakpoints (tp);
2954 }
2955
2956 /* If the target still has execution, call FUNC for each thread that
2957 just stopped. In all-stop, that's all the non-exited threads; in
2958 non-stop, that's the current thread, only. */
2959
2960 typedef void (*for_each_just_stopped_thread_callback_func)
2961 (struct thread_info *tp);
2962
2963 static void
2964 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2965 {
2966 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
2967 return;
2968
2969 if (non_stop)
2970 {
2971 /* If in non-stop mode, only the current thread stopped. */
2972 func (inferior_thread ());
2973 }
2974 else
2975 {
2976 struct thread_info *tp;
2977
2978 /* In all-stop mode, all threads have stopped. */
2979 ALL_NON_EXITED_THREADS (tp)
2980 {
2981 func (tp);
2982 }
2983 }
2984 }
2985
2986 /* Delete the step resume and longjmp/exception resume breakpoints of
2987 the threads that just stopped. */
2988
2989 static void
2990 delete_just_stopped_threads_infrun_breakpoints (void)
2991 {
2992 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
2993 }
2994
2995 /* Delete the single-step breakpoints of the threads that just
2996 stopped. */
2997
2998 static void
2999 delete_just_stopped_threads_single_step_breakpoints (void)
3000 {
3001 for_each_just_stopped_thread (delete_single_step_breakpoints);
3002 }
3003
3004 /* A cleanup wrapper. */
3005
3006 static void
3007 delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
3008 {
3009 delete_just_stopped_threads_infrun_breakpoints ();
3010 }
3011
3012 /* Pretty print the results of target_wait, for debugging purposes. */
3013
3014 static void
3015 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3016 const struct target_waitstatus *ws)
3017 {
3018 char *status_string = target_waitstatus_to_string (ws);
3019 struct ui_file *tmp_stream = mem_fileopen ();
3020 char *text;
3021
3022 /* The text is split over several lines because it was getting too long.
3023 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3024 output as a unit; we want only one timestamp printed if debug_timestamp
3025 is set. */
3026
3027 fprintf_unfiltered (tmp_stream,
3028 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
3029 if (ptid_get_pid (waiton_ptid) != -1)
3030 fprintf_unfiltered (tmp_stream,
3031 " [%s]", target_pid_to_str (waiton_ptid));
3032 fprintf_unfiltered (tmp_stream, ", status) =\n");
3033 fprintf_unfiltered (tmp_stream,
3034 "infrun: %d [%s],\n",
3035 ptid_get_pid (result_ptid),
3036 target_pid_to_str (result_ptid));
3037 fprintf_unfiltered (tmp_stream,
3038 "infrun: %s\n",
3039 status_string);
3040
3041 text = ui_file_xstrdup (tmp_stream, NULL);
3042
3043 /* This uses %s in part to handle %'s in the text, but also to avoid
3044 a gcc error: the format attribute requires a string literal. */
3045 fprintf_unfiltered (gdb_stdlog, "%s", text);
3046
3047 xfree (status_string);
3048 xfree (text);
3049 ui_file_delete (tmp_stream);
3050 }
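
/* The resulting debug output looks roughly like this (the pid, thread
   string and decoded status shown are illustrative):

     infrun: target_wait (-1, status) =
     infrun: 4711 [Thread 4711],
     infrun: status->kind = stopped, signal = GDB_SIGNAL_TRAP

   i.e., the ptid waited on, then the resulting ptid and waitstatus,
   each line prefixed with "infrun:".  */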
3051
3052 /* Prepare and stabilize the inferior for detaching it. E.g.,
3053 detaching while a thread is displaced stepping is a recipe for
3054 crashing it, as nothing would readjust the PC out of the scratch
3055 pad. */
3056
3057 void
3058 prepare_for_detach (void)
3059 {
3060 struct inferior *inf = current_inferior ();
3061 ptid_t pid_ptid = pid_to_ptid (inf->pid);
3062 struct cleanup *old_chain_1;
3063 struct displaced_step_inferior_state *displaced;
3064
3065 displaced = get_displaced_stepping_state (inf->pid);
3066
3067 /* Is any thread of this process displaced stepping? If not,
3068 there's nothing else to do. */
3069 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
3070 return;
3071
3072 if (debug_infrun)
3073 fprintf_unfiltered (gdb_stdlog,
3074 "displaced-stepping in-process while detaching");
3075
3076 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
3077 inf->detaching = 1;
3078
3079 while (!ptid_equal (displaced->step_ptid, null_ptid))
3080 {
3081 struct cleanup *old_chain_2;
3082 struct execution_control_state ecss;
3083 struct execution_control_state *ecs;
3084
3085 ecs = &ecss;
3086 memset (ecs, 0, sizeof (*ecs));
3087
3088 overlay_cache_invalid = 1;
3089 /* Flush target cache before starting to handle each event.
3090 Target was running and cache could be stale. This is just a
3091 heuristic. Running threads may modify target memory, but we
3092 don't get any event. */
3093 target_dcache_invalidate ();
3094
3095 if (deprecated_target_wait_hook)
3096 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
3097 else
3098 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
3099
3100 if (debug_infrun)
3101 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3102
3103 /* If an error happens while handling the event, propagate GDB's
3104 knowledge of the executing state to the frontend/user running
3105 state. */
3106 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
3107 &minus_one_ptid);
3108
3109 /* Now figure out what to do with the result. */
3110 handle_inferior_event (ecs);
3111
3112 /* No error, don't finish the state yet. */
3113 discard_cleanups (old_chain_2);
3114
3115 /* Breakpoints and watchpoints are not installed on the target
3116 at this point, and signals are passed directly to the
3117 inferior, so this must mean the process is gone. */
3118 if (!ecs->wait_some_more)
3119 {
3120 discard_cleanups (old_chain_1);
3121 error (_("Program exited while detaching"));
3122 }
3123 }
3124
3125 discard_cleanups (old_chain_1);
3126 }
3127
3128 /* Wait for control to return from inferior to debugger.
3129
3130 If inferior gets a signal, we may decide to start it up again
3131 instead of returning. That is why there is a loop in this function.
3132 When this function actually returns it means the inferior
3133 should be left stopped and GDB should read more commands. */
3134
3135 void
3136 wait_for_inferior (void)
3137 {
3138 struct cleanup *old_cleanups;
3139
3140 if (debug_infrun)
3141 fprintf_unfiltered
3142 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3143
3144 old_cleanups
3145 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3146 NULL);
3147
3148 while (1)
3149 {
3150 struct execution_control_state ecss;
3151 struct execution_control_state *ecs = &ecss;
3152 struct cleanup *old_chain;
3153 ptid_t waiton_ptid = minus_one_ptid;
3154
3155 memset (ecs, 0, sizeof (*ecs));
3156
3157 overlay_cache_invalid = 1;
3158
3159 /* Flush target cache before starting to handle each event.
3160 Target was running and cache could be stale. This is just a
3161 heuristic. Running threads may modify target memory, but we
3162 don't get any event. */
3163 target_dcache_invalidate ();
3164
3165 if (deprecated_target_wait_hook)
3166 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3167 else
3168 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3169
3170 if (debug_infrun)
3171 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3172
3173 /* If an error happens while handling the event, propagate GDB's
3174 knowledge of the executing state to the frontend/user running
3175 state. */
3176 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3177
3178 /* Now figure out what to do with the result. */
3179 handle_inferior_event (ecs);
3180
3181 /* No error, don't finish the state yet. */
3182 discard_cleanups (old_chain);
3183
3184 if (!ecs->wait_some_more)
3185 break;
3186 }
3187
3188 do_cleanups (old_cleanups);
3189 }
3190
3191 /* Cleanup that reinstalls the readline callback handler, if the
3192 target is running in the background. If while handling the target
3193 event something triggered a secondary prompt, like e.g., a
3194 pagination prompt, we'll have removed the callback handler (see
3195 gdb_readline_wrapper_line). Need to do this as we go back to the
3196 event loop, ready to process further input. Note this has no
3197 effect if the handler hasn't actually been removed, because calling
3198 rl_callback_handler_install resets the line buffer, thus losing
3199 input. */
3200
3201 static void
3202 reinstall_readline_callback_handler_cleanup (void *arg)
3203 {
3204 if (!interpreter_async)
3205 {
3206 /* We're not going back to the top level event loop yet. Don't
3207 install the readline callback, as it'd prep the terminal,
3208 readline-style (raw, noecho) (e.g., --batch). We'll install
3209 it the next time the prompt is displayed, when we're ready
3210 for input. */
3211 return;
3212 }
3213
3214 if (async_command_editing_p && !sync_execution)
3215 gdb_rl_callback_handler_reinstall ();
3216 }
3217
3218 /* Asynchronous version of wait_for_inferior. It is called by the
3219 event loop whenever a change of state is detected on the file
3220 descriptor corresponding to the target. It can be called more than
3221 once to complete a single execution command. In such cases we need
3222 to keep the state in a global variable ECSS. If it is the last time
3223 that this function is called for a single execution command, then
3224 report to the user that the inferior has stopped, and do the
3225 necessary cleanups. */
3226
3227 void
3228 fetch_inferior_event (void *client_data)
3229 {
3230 struct execution_control_state ecss;
3231 struct execution_control_state *ecs = &ecss;
3232 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
3233 struct cleanup *ts_old_chain;
3234 int was_sync = sync_execution;
3235 int cmd_done = 0;
3236 ptid_t waiton_ptid = minus_one_ptid;
3237
3238 memset (ecs, 0, sizeof (*ecs));
3239
3240 /* End up with readline processing input, if necessary. */
3241 make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);
3242
3243 /* We're handling a live event, so make sure we're doing live
3244 debugging. If we're looking at traceframes while the target is
3245 running, we're going to need to get back to that mode after
3246 handling the event. */
3247 if (non_stop)
3248 {
3249 make_cleanup_restore_current_traceframe ();
3250 set_current_traceframe (-1);
3251 }
3252
3253 if (non_stop)
3254 /* In non-stop mode, the user/frontend should not notice a thread
3255 switch due to internal events. Make sure we reverse to the
3256 user selected thread and frame after handling the event and
3257 running any breakpoint commands. */
3258 make_cleanup_restore_current_thread ();
3259
3260 overlay_cache_invalid = 1;
3261 /* Flush target cache before starting to handle each event. Target
3262 was running and cache could be stale. This is just a heuristic.
3263 Running threads may modify target memory, but we don't get any
3264 event. */
3265 target_dcache_invalidate ();
3266
3267 make_cleanup_restore_integer (&execution_direction);
3268 execution_direction = target_execution_direction ();
3269
3270 if (deprecated_target_wait_hook)
3271 ecs->ptid =
3272 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3273 else
3274 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3275
3276 if (debug_infrun)
3277 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3278
3279 /* If an error happens while handling the event, propagate GDB's
3280 knowledge of the executing state to the frontend/user running
3281 state. */
3282 if (!non_stop)
3283 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3284 else
3285 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3286
3287 /* Executed before make_cleanup_restore_current_thread above, so it
3288 still applies to the thread which has thrown the exception. */
3289 make_bpstat_clear_actions_cleanup ();
3290
3291 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3292
3293 /* Now figure out what to do with the result. */
3294 handle_inferior_event (ecs);
3295
3296 if (!ecs->wait_some_more)
3297 {
3298 struct inferior *inf = find_inferior_ptid (ecs->ptid);
3299
3300 delete_just_stopped_threads_infrun_breakpoints ();
3301
3302 /* We may not find an inferior if this was a process exit. */
3303 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3304 normal_stop ();
3305
3306 if (target_has_execution
3307 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
3308 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3309 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3310 && ecs->event_thread->step_multi
3311 && ecs->event_thread->control.stop_step)
3312 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3313 else
3314 {
3315 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3316 cmd_done = 1;
3317 }
3318 }
3319
3320 /* No error, don't finish the thread states yet. */
3321 discard_cleanups (ts_old_chain);
3322
3323 /* Revert thread and frame. */
3324 do_cleanups (old_chain);
3325
3326 /* If the inferior was in sync execution mode, and now isn't,
3327 restore the prompt (a synchronous execution command has finished,
3328 and we're ready for input). */
3329 if (interpreter_async && was_sync && !sync_execution)
3330 observer_notify_sync_execution_done ();
3331
3332 if (cmd_done
3333 && !was_sync
3334 && exec_done_display_p
3335 && (ptid_equal (inferior_ptid, null_ptid)
3336 || !is_running (inferior_ptid)))
3337 printf_unfiltered (_("completed.\n"));
3338 }
3339
3340 /* Record the frame and location we're currently stepping through. */
3341 void
3342 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3343 {
3344 struct thread_info *tp = inferior_thread ();
3345
3346 tp->control.step_frame_id = get_frame_id (frame);
3347 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3348
3349 tp->current_symtab = sal.symtab;
3350 tp->current_line = sal.line;
3351 }
3352
3353 /* Clear context switchable stepping state. */
3354
3355 void
3356 init_thread_stepping_state (struct thread_info *tss)
3357 {
3358 tss->stepped_breakpoint = 0;
3359 tss->stepping_over_breakpoint = 0;
3360 tss->stepping_over_watchpoint = 0;
3361 tss->step_after_step_resume_breakpoint = 0;
3362 }
3363
3364 /* Set the cached copy of the last ptid/waitstatus. */
3365
3366 static void
3367 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3368 {
3369 target_last_wait_ptid = ptid;
3370 target_last_waitstatus = status;
3371 }
3372
3373 /* Return the cached copy of the last pid/waitstatus returned by
3374 target_wait()/deprecated_target_wait_hook(). The data is actually
3375 cached by handle_inferior_event(), which gets called immediately
3376 after target_wait()/deprecated_target_wait_hook(). */
3377
3378 void
3379 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3380 {
3381 *ptidp = target_last_wait_ptid;
3382 *status = target_last_waitstatus;
3383 }
3384
3385 void
3386 nullify_last_target_wait_ptid (void)
3387 {
3388 target_last_wait_ptid = minus_one_ptid;
3389 }
3390
3391 /* Switch thread contexts. */
3392
3393 static void
3394 context_switch (ptid_t ptid)
3395 {
3396 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3397 {
3398 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3399 target_pid_to_str (inferior_ptid));
3400 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3401 target_pid_to_str (ptid));
3402 }
3403
3404 switch_to_thread (ptid);
3405 }
3406
3407 static void
3408 adjust_pc_after_break (struct execution_control_state *ecs)
3409 {
3410 struct regcache *regcache;
3411 struct gdbarch *gdbarch;
3412 struct address_space *aspace;
3413 CORE_ADDR breakpoint_pc, decr_pc;
3414
3415 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3416 we aren't, just return.
3417
3418 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3419 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3420 implemented by software breakpoints should be handled through the normal
3421 breakpoint layer.
3422
3423 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3424 different signals (SIGILL or SIGEMT for instance), but it is less
3425 clear where the PC is pointing afterwards. It may not match
3426 gdbarch_decr_pc_after_break. I don't know any specific target that
3427 generates these signals at breakpoints (the code has been in GDB since at
3428 least 1992) so I can not guess how to handle them here.
3429
3430 In earlier versions of GDB, a target with
3431 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3432 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3433 target with both of these set in GDB history, and it seems unlikely to be
3434 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3435
3436 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3437 return;
3438
3439 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3440 return;
3441
3442 /* In reverse execution, when a breakpoint is hit, the instruction
3443 under it has already been de-executed. The reported PC always
3444 points at the breakpoint address, so adjusting it further would
3445 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3446 architecture:
3447
3448 B1 0x08000000 : INSN1
3449 B2 0x08000001 : INSN2
3450 0x08000002 : INSN3
3451 PC -> 0x08000003 : INSN4
3452
3453 Say you're stopped at 0x08000003 as above. Reverse continuing
3454 from that point should hit B2 as below. Reading the PC when the
3455 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3456 been de-executed already.
3457
3458 B1 0x08000000 : INSN1
3459 B2 PC -> 0x08000001 : INSN2
3460 0x08000002 : INSN3
3461 0x08000003 : INSN4
3462
3463 We can't apply the same logic as for forward execution, because
3464 we would wrongly adjust the PC to 0x08000000, since there's a
3465 breakpoint at PC - 1. We'd then report a hit on B1, although
3466 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3467 behaviour. */
3468 if (execution_direction == EXEC_REVERSE)
3469 return;
3470
3471 /* If the target can tell whether the thread hit a SW breakpoint,
3472 trust it. Targets that can tell also adjust the PC
3473 themselves. */
3474 if (target_supports_stopped_by_sw_breakpoint ())
3475 return;
3476
3477 /* Note that relying on whether a breakpoint is planted in memory to
3478 determine this can fail. E.g., the breakpoint could have been
3479 removed since. Or the thread could have been told to step an
3480 instruction the size of a breakpoint instruction, and only
3481 _after_ was a breakpoint inserted at its address. */
3482
3483 /* If this target does not decrement the PC after breakpoints, then
3484 we have nothing to do. */
3485 regcache = get_thread_regcache (ecs->ptid);
3486 gdbarch = get_regcache_arch (regcache);
3487
3488 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3489 if (decr_pc == 0)
3490 return;
3491
3492 aspace = get_regcache_aspace (regcache);
3493
3494 /* Find the location where (if we've hit a breakpoint) the
3495 breakpoint would be. */
3496 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
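  /* For example, on an architecture whose breakpoint instruction is
     one byte long and whose reported PC points just past it
     (decr_pc == 1, as on x86), a breakpoint planted at 0x08048455
     would be reported with PC == 0x08048456; BREAKPOINT_PC is then
     the planted address, 0x08048455.  (Addresses illustrative only.)  */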
3497
3498 /* If the target can't tell whether a software breakpoint triggered,
3499 fallback to figuring it out based on breakpoints we think were
3500 inserted in the target, and on whether the thread was stepped or
3501 continued. */
3502
3503 /* Check whether there actually is a software breakpoint inserted at
3504 that location.
3505
3506 If in non-stop mode, a race condition is possible where we've
3507 removed a breakpoint, but stop events for that breakpoint were
3508 already queued and arrive later. To suppress those spurious
3509 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3510 and retire them after a number of stop events are reported. Note
3511 this is a heuristic and can thus get confused. The real fix is
3512 to get the "stopped by SW BP and needs adjustment" info out of
3513 the target/kernel (and thus never reach here; see above). */
3514 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3515 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3516 {
3517 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3518
3519 if (record_full_is_used ())
3520 record_full_gdb_operation_disable_set ();
3521
3522 /* When using hardware single-step, a SIGTRAP is reported for both
3523 a completed single-step and a software breakpoint. Need to
3524 differentiate between the two, as the latter needs adjusting
3525 but the former does not.
3526
3527 The SIGTRAP can be due to a completed hardware single-step only if
3528 - we didn't insert software single-step breakpoints
3529 - this thread is currently being stepped
3530
3531 If either of these conditions does not hold, we must have stopped
3532 due to hitting a software breakpoint, and have to back up to the
3533 breakpoint address.
3534
3535 As a special case, we could have hardware single-stepped a
3536 software breakpoint. In this case (prev_pc == breakpoint_pc),
3537 we also need to back up to the breakpoint address. */
3538
3539 if (thread_has_single_step_breakpoints_set (ecs->event_thread)
3540 || !currently_stepping (ecs->event_thread)
3541 || (ecs->event_thread->stepped_breakpoint
3542 && ecs->event_thread->prev_pc == breakpoint_pc))
3543 regcache_write_pc (regcache, breakpoint_pc);
3544
3545 do_cleanups (old_cleanups);
3546 }
3547 }
3548
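/* Return non-zero if FRAME was entered (stepped into) from the frame
   identified by STEP_FRAME_ID, looking through any intervening inline
   frames; the walk up the stack stops at the first non-inline caller.  */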
3549 static int
3550 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3551 {
3552 for (frame = get_prev_frame (frame);
3553 frame != NULL;
3554 frame = get_prev_frame (frame))
3555 {
3556 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3557 return 1;
3558 if (get_frame_type (frame) != INLINE_FRAME)
3559 break;
3560 }
3561
3562 return 0;
3563 }
3564
3565 /* Auxiliary function that handles syscall entry/return events.
3566 It returns 1 if the inferior should keep going (and GDB
3567 should ignore the event), or 0 if the event deserves to be
3568 processed. */
3569
3570 static int
3571 handle_syscall_event (struct execution_control_state *ecs)
3572 {
3573 struct regcache *regcache;
3574 int syscall_number;
3575
3576 if (!ptid_equal (ecs->ptid, inferior_ptid))
3577 context_switch (ecs->ptid);
3578
3579 regcache = get_thread_regcache (ecs->ptid);
3580 syscall_number = ecs->ws.value.syscall_number;
3581 stop_pc = regcache_read_pc (regcache);
3582
3583 if (catch_syscall_enabled () > 0
3584 && catching_syscall_number (syscall_number) > 0)
3585 {
3586 if (debug_infrun)
3587 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3588 syscall_number);
3589
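      /* A "catch syscall" catchpoint covers this syscall number.  Let
	 the bpstat machinery below decide whether it really causes a
	 stop (the catchpoint may e.g. have a condition that evaluates
	 false).  */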
3590 ecs->event_thread->control.stop_bpstat
3591 = bpstat_stop_status (get_regcache_aspace (regcache),
3592 stop_pc, ecs->ptid, &ecs->ws);
3593
3594 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3595 {
3596 /* Catchpoint hit. */
3597 return 0;
3598 }
3599 }
3600
3601 /* If no catchpoint triggered for this, then keep going. */
3602 keep_going (ecs);
3603 return 1;
3604 }
3605
3606 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3607
3608 static void
3609 fill_in_stop_func (struct gdbarch *gdbarch,
3610 struct execution_control_state *ecs)
3611 {
3612 if (!ecs->stop_func_filled_in)
3613 {
3614 /* Don't care about return value; stop_func_start and stop_func_name
3615 will both be 0 if it doesn't work. */
3616 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3617 &ecs->stop_func_start, &ecs->stop_func_end);
3618 ecs->stop_func_start
3619 += gdbarch_deprecated_function_start_offset (gdbarch);
3620
3621 if (gdbarch_skip_entrypoint_p (gdbarch))
3622 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3623 ecs->stop_func_start);
3624
3625 ecs->stop_func_filled_in = 1;
3626 }
3627 }
3628
3629
3630 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3631
3632 static enum stop_kind
3633 get_inferior_stop_soon (ptid_t ptid)
3634 {
3635 struct inferior *inf = find_inferior_ptid (ptid);
3636
3637 gdb_assert (inf != NULL);
3638 return inf->control.stop_soon;
3639 }
3640
3641 /* Given an execution control state that has been freshly filled in by
3642 an event from the inferior, figure out what it means and take
3643 appropriate action.
3644
3645 The alternatives are:
3646
3647 1) stop_waiting and return; to really stop and return to the
3648 debugger.
3649
3650 2) keep_going and return; to wait for the next event (set
3651 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3652 once). */
3653
3654 static void
3655 handle_inferior_event (struct execution_control_state *ecs)
3656 {
3657 enum stop_kind stop_soon;
3658
3659 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3660 {
3661 /* We had an event in the inferior, but we are not interested in
3662 handling it at this level. The lower layers have already
3663 done what needs to be done, if anything.
3664
3665 One of the possible circumstances for this is when the
3666 inferior produces output for the console. The inferior has
3667 not stopped, and we are ignoring the event. Another possible
3668 circumstance is any event which the lower level knows will be
3669 reported multiple times without an intervening resume. */
3670 if (debug_infrun)
3671 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3672 prepare_to_wait (ecs);
3673 return;
3674 }
3675
3676 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3677 && target_can_async_p () && !sync_execution)
3678 {
3679 /* There were no unwaited-for children left in the target, but,
3680 we're not synchronously waiting for events either. Just
3681 ignore. Otherwise, if we were running a synchronous
3682 execution command, we need to cancel it and give the user
3683 back the terminal. */
3684 if (debug_infrun)
3685 fprintf_unfiltered (gdb_stdlog,
3686 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3687 prepare_to_wait (ecs);
3688 return;
3689 }
3690
3691 /* Cache the last pid/waitstatus. */
3692 set_last_target_status (ecs->ptid, ecs->ws);
3693
3694 /* Always clear state belonging to the previous time we stopped. */
3695 stop_stack_dummy = STOP_NONE;
3696
3697 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3698 {
3699 /* No unwaited-for children left. IOW, all resumed children
3700 have exited. */
3701 if (debug_infrun)
3702 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3703
3704 stop_print_frame = 0;
3705 stop_waiting (ecs);
3706 return;
3707 }
3708
3709 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3710 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3711 {
3712 ecs->event_thread = find_thread_ptid (ecs->ptid);
3713 /* If it's a new thread, add it to the thread database. */
3714 if (ecs->event_thread == NULL)
3715 ecs->event_thread = add_thread (ecs->ptid);
3716
3717 /* Disable range stepping. If the next step request could use a
3718 range, this will end up re-enabled then. */
3719 ecs->event_thread->control.may_range_step = 0;
3720 }
3721
3722 /* Dependent on valid ECS->EVENT_THREAD. */
3723 adjust_pc_after_break (ecs);
3724
3725 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3726 reinit_frame_cache ();
3727
3728 breakpoint_retire_moribund ();
3729
3730 /* First, distinguish signals caused by the debugger from signals
3731 that have to do with the program's own actions. Note that
3732 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3733 on the operating system version. Here we detect when a SIGILL or
3734 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3735 something similar for SIGSEGV, since a SIGSEGV will be generated
3736 when we're trying to execute a breakpoint instruction on a
3737 non-executable stack. This happens for call dummy breakpoints
3738 for architectures like SPARC that place call dummies on the
3739 stack. */
3740 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3741 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3742 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3743 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3744 {
3745 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3746
3747 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3748 regcache_read_pc (regcache)))
3749 {
3750 if (debug_infrun)
3751 fprintf_unfiltered (gdb_stdlog,
3752 "infrun: Treating signal as SIGTRAP\n");
3753 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3754 }
3755 }
3756
3757 /* Mark the non-executing threads accordingly. In all-stop, all
3758 threads of all processes are stopped when we get any event
3759 reported. In non-stop mode, only the event thread stops. If
3760 we're handling a process exit in non-stop mode, there's nothing
3761 to do, as threads of the dead process are gone, and threads of
3762 any other process were left running. */
3763 if (!non_stop)
3764 set_executing (minus_one_ptid, 0);
3765 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3766 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3767 set_executing (ecs->ptid, 0);
3768
3769 switch (ecs->ws.kind)
3770 {
3771 case TARGET_WAITKIND_LOADED:
3772 if (debug_infrun)
3773 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3774 if (!ptid_equal (ecs->ptid, inferior_ptid))
3775 context_switch (ecs->ptid);
3776 /* Ignore gracefully during startup of the inferior, as it might
3777 be the shell which has just loaded some objects; otherwise,
3778 add the symbols for the newly loaded objects. Also ignore at
3779 the beginning of an attach or remote session; we will query
3780 the full list of libraries once the connection is
3781 established. */
3782
3783 stop_soon = get_inferior_stop_soon (ecs->ptid);
3784 if (stop_soon == NO_STOP_QUIETLY)
3785 {
3786 struct regcache *regcache;
3787
3788 regcache = get_thread_regcache (ecs->ptid);
3789
3790 handle_solib_event ();
3791
3792 ecs->event_thread->control.stop_bpstat
3793 = bpstat_stop_status (get_regcache_aspace (regcache),
3794 stop_pc, ecs->ptid, &ecs->ws);
3795
3796 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3797 {
3798 /* A catchpoint triggered. */
3799 process_event_stop_test (ecs);
3800 return;
3801 }
3802
3803 /* If requested, stop when the dynamic linker notifies
3804 gdb of events. This allows the user to get control
3805 and place breakpoints in initializer routines for
3806 dynamically loaded objects (among other things). */
3807 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3808 if (stop_on_solib_events)
3809 {
3810 /* Make sure we print "Stopped due to solib-event" in
3811 normal_stop. */
3812 stop_print_frame = 1;
3813
3814 stop_waiting (ecs);
3815 return;
3816 }
3817 }
3818
3819 /* If we are skipping through a shell, or through shared library
3820 loading that we aren't interested in, resume the program. If
3821 we're running the program normally, also resume. */
3822 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3823 {
3824 /* Loading of shared libraries might have changed breakpoint
3825 addresses. Make sure new breakpoints are inserted. */
3826 if (stop_soon == NO_STOP_QUIETLY)
3827 insert_breakpoints ();
3828 resume (0, GDB_SIGNAL_0);
3829 prepare_to_wait (ecs);
3830 return;
3831 }
3832
3833 /* But stop if we're attaching or setting up a remote
3834 connection. */
3835 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3836 || stop_soon == STOP_QUIETLY_REMOTE)
3837 {
3838 if (debug_infrun)
3839 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3840 stop_waiting (ecs);
3841 return;
3842 }
3843
3844 internal_error (__FILE__, __LINE__,
3845 _("unhandled stop_soon: %d"), (int) stop_soon);
3846
3847 case TARGET_WAITKIND_SPURIOUS:
3848 if (debug_infrun)
3849 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3850 if (!ptid_equal (ecs->ptid, inferior_ptid))
3851 context_switch (ecs->ptid);
3852 resume (0, GDB_SIGNAL_0);
3853 prepare_to_wait (ecs);
3854 return;
3855
3856 case TARGET_WAITKIND_EXITED:
3857 case TARGET_WAITKIND_SIGNALLED:
3858 if (debug_infrun)
3859 {
3860 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3861 fprintf_unfiltered (gdb_stdlog,
3862 "infrun: TARGET_WAITKIND_EXITED\n");
3863 else
3864 fprintf_unfiltered (gdb_stdlog,
3865 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3866 }
3867
3868 inferior_ptid = ecs->ptid;
3869 set_current_inferior (find_inferior_ptid (ecs->ptid));
3870 set_current_program_space (current_inferior ()->pspace);
3871 handle_vfork_child_exec_or_exit (0);
3872 target_terminal_ours (); /* Must do this before mourn anyway. */
3873
3874 /* Clear any previous state of convenience variables. */
3875 clear_exit_convenience_vars ();
3876
3877 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3878 {
3879 /* Record the exit code in the convenience variable $_exitcode, so
3880 that the user can inspect this again later. */
3881 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3882 (LONGEST) ecs->ws.value.integer);
3883
3884 /* Also record this in the inferior itself. */
3885 current_inferior ()->has_exit_code = 1;
3886 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3887
3888 /* Support the --return-child-result option. */
3889 return_child_result_value = ecs->ws.value.integer;
3890
3891 observer_notify_exited (ecs->ws.value.integer);
3892 }
3893 else
3894 {
3895 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3896 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3897
3898 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3899 {
3900 /* Set the value of the internal variable $_exitsignal,
3901 which holds the signal uncaught by the inferior. */
3902 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3903 gdbarch_gdb_signal_to_target (gdbarch,
3904 ecs->ws.value.sig));
3905 }
3906 else
3907 {
3908 /* We don't have access to the target's method used for
3909 converting between signal numbers (GDB's internal
3910 representation <-> target's representation).
3911 Therefore, we cannot do a good job at displaying this
3912 information to the user. It's better to just warn
3913 her about it (if infrun debugging is enabled), and
3914 give up. */
3915 if (debug_infrun)
3916 fprintf_filtered (gdb_stdlog, _("\
3917 Cannot fill $_exitsignal with the correct signal number.\n"));
3918 }
3919
3920 observer_notify_signal_exited (ecs->ws.value.sig);
3921 }
3922
3923 gdb_flush (gdb_stdout);
3924 target_mourn_inferior ();
3925 stop_print_frame = 0;
3926 stop_waiting (ecs);
3927 return;
3928
3929 /* The following are the only cases in which we keep going;
3930 the above cases end in a continue or goto. */
3931 case TARGET_WAITKIND_FORKED:
3932 case TARGET_WAITKIND_VFORKED:
3933 if (debug_infrun)
3934 {
3935 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3936 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3937 else
3938 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3939 }
3940
3941 /* Check whether the inferior is displaced stepping. */
3942 {
3943 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3944 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3945 struct displaced_step_inferior_state *displaced
3946 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3947
3948 /* If displaced stepping is in use and thread ecs->ptid is the one
3949 performing the displaced step, fix things up now. */
3950 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3951 {
3952 struct inferior *parent_inf
3953 = find_inferior_ptid (ecs->ptid);
3954 struct regcache *child_regcache;
3955 CORE_ADDR parent_pc;
3956
3957 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3958 indicating that the displaced stepping of syscall instruction
3959 has been done. Perform cleanup for parent process here. Note
3960 that this operation also cleans up the child process for vfork,
3961 because their pages are shared. */
3962 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3963
3964 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3965 {
3966 /* Restore scratch pad for child process. */
3967 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3968 }
3969
3970 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3971 the child's PC is also within the scratchpad. Set the child's PC
3972 to the parent's PC value, which has already been fixed up.
3973 FIXME: we use the parent's aspace here, although we're touching
3974 the child, because the child hasn't been added to the inferior
3975 list yet at this point. */
3976
3977 child_regcache
3978 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3979 gdbarch,
3980 parent_inf->aspace);
3981 /* Read PC value of parent process. */
3982 parent_pc = regcache_read_pc (regcache);
3983
3984 if (debug_displaced)
3985 fprintf_unfiltered (gdb_stdlog,
3986 "displaced: write child pc from %s to %s\n",
3987 paddress (gdbarch,
3988 regcache_read_pc (child_regcache)),
3989 paddress (gdbarch, parent_pc));
3990
3991 regcache_write_pc (child_regcache, parent_pc);
3992 }
3993 }
3994
3995 if (!ptid_equal (ecs->ptid, inferior_ptid))
3996 context_switch (ecs->ptid);
3997
3998 /* Immediately detach breakpoints from the child before there's
3999 any chance of letting the user delete breakpoints from the
4000 breakpoint lists. If we don't do this early, it's easy to
4001 leave leftover traps in the child, viz.: "break foo; catch
4002 fork; c; <fork>; del; c; <child calls foo>". We only follow
4003 the fork on the last `continue', and by that time the
4004 breakpoint at "foo" is long gone from the breakpoint table.
4005 If we vforked, then we don't need to unpatch here, since both
4006 parent and child are sharing the same memory pages; we'll
4007 need to unpatch at follow/detach time instead to be certain
4008 that new breakpoints added between catchpoint hit time and
4009 vfork follow are detached. */
4010 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
4011 {
4012 /* This won't actually modify the breakpoint list, but will
4013 physically remove the breakpoints from the child. */
4014 detach_breakpoints (ecs->ws.value.related_pid);
4015 }
4016
4017 delete_just_stopped_threads_single_step_breakpoints ();
4018
4019 /* In case the event is caught by a catchpoint, remember that
4020 the event is to be followed at the next resume of the thread,
4021 and not immediately. */
4022 ecs->event_thread->pending_follow = ecs->ws;
4023
4024 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4025
4026 ecs->event_thread->control.stop_bpstat
4027 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4028 stop_pc, ecs->ptid, &ecs->ws);
4029
4030 /* If no catchpoint triggered for this, then keep going. Note
4031 that we're interested in knowing the bpstat actually causes a
4032 stop, not just if it may explain the signal. Software
4033 watchpoints, for example, always appear in the bpstat. */
4034 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4035 {
4036 ptid_t parent;
4037 ptid_t child;
4038 int should_resume;
4039 int follow_child
4040 = (follow_fork_mode_string == follow_fork_mode_child);
4041
4042 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4043
4044 should_resume = follow_fork ();
4045
4046 parent = ecs->ptid;
4047 child = ecs->ws.value.related_pid;
4048
4049 /* In non-stop mode, also resume the other branch. */
4050 if (non_stop && !detach_fork)
4051 {
4052 if (follow_child)
4053 switch_to_thread (parent);
4054 else
4055 switch_to_thread (child);
4056
4057 ecs->event_thread = inferior_thread ();
4058 ecs->ptid = inferior_ptid;
4059 keep_going (ecs);
4060 }
4061
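	  /* Now switch to whichever branch we are following (per "set
	     follow-fork-mode", reflected in FOLLOW_CHILD above), so the
	     resume or stop below applies to it.  */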
4062 if (follow_child)
4063 switch_to_thread (child);
4064 else
4065 switch_to_thread (parent);
4066
4067 ecs->event_thread = inferior_thread ();
4068 ecs->ptid = inferior_ptid;
4069
4070 if (should_resume)
4071 keep_going (ecs);
4072 else
4073 stop_waiting (ecs);
4074 return;
4075 }
4076 process_event_stop_test (ecs);
4077 return;
4078
4079 case TARGET_WAITKIND_VFORK_DONE:
4080 /* Done with the shared memory region. Re-insert breakpoints in
4081 the parent, and keep going. */
4082
4083 if (debug_infrun)
4084 fprintf_unfiltered (gdb_stdlog,
4085 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
4086
4087 if (!ptid_equal (ecs->ptid, inferior_ptid))
4088 context_switch (ecs->ptid);
4089
4090 current_inferior ()->waiting_for_vfork_done = 0;
4091 current_inferior ()->pspace->breakpoints_not_allowed = 0;
4092 /* This also takes care of reinserting breakpoints in the
4093 previously locked inferior. */
4094 keep_going (ecs);
4095 return;
4096
4097 case TARGET_WAITKIND_EXECD:
4098 if (debug_infrun)
4099 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
4100
4101 if (!ptid_equal (ecs->ptid, inferior_ptid))
4102 context_switch (ecs->ptid);
4103
4104 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4105
4106 /* Do whatever is necessary to the parent branch of the vfork. */
4107 handle_vfork_child_exec_or_exit (1);
4108
4109 /* This causes the eventpoints and symbol table to be reset.
4110 Must do this now, before trying to determine whether to
4111 stop. */
4112 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
4113
4114 ecs->event_thread->control.stop_bpstat
4115 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4116 stop_pc, ecs->ptid, &ecs->ws);
4117
4118 /* Note that this may be referenced from inside
4119 bpstat_stop_status above, through inferior_has_execd. */
4120 xfree (ecs->ws.value.execd_pathname);
4121 ecs->ws.value.execd_pathname = NULL;
4122
4123 /* If no catchpoint triggered for this, then keep going. */
4124 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4125 {
4126 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4127 keep_going (ecs);
4128 return;
4129 }
4130 process_event_stop_test (ecs);
4131 return;
4132
4133 /* Be careful not to try to gather much state about a thread
4134 that's in a syscall. It's frequently a losing proposition. */
4135 case TARGET_WAITKIND_SYSCALL_ENTRY:
4136 if (debug_infrun)
4137 fprintf_unfiltered (gdb_stdlog,
4138 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
4139 /* Get the current syscall number. */
4140 if (handle_syscall_event (ecs) == 0)
4141 process_event_stop_test (ecs);
4142 return;
4143
4144 /* Before examining the threads further, step this thread to
4145 get it entirely out of the syscall. (We get notice of the
4146 event when the thread is just on the verge of exiting a
4147 syscall. Stepping one instruction seems to get it back
4148 into user code.) */
4149 case TARGET_WAITKIND_SYSCALL_RETURN:
4150 if (debug_infrun)
4151 fprintf_unfiltered (gdb_stdlog,
4152 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
4153 if (handle_syscall_event (ecs) == 0)
4154 process_event_stop_test (ecs);
4155 return;
4156
4157 case TARGET_WAITKIND_STOPPED:
4158 if (debug_infrun)
4159 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4160 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4161 handle_signal_stop (ecs);
4162 return;
4163
4164 case TARGET_WAITKIND_NO_HISTORY:
4165 if (debug_infrun)
4166 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4167 /* Reverse execution: target ran out of history info. */
4168
4169 delete_just_stopped_threads_single_step_breakpoints ();
4170 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4171 observer_notify_no_history ();
4172 stop_waiting (ecs);
4173 return;
4174 }
4175 }
4176
4177 /* Come here when the program has stopped with a signal. */
4178
4179 static void
4180 handle_signal_stop (struct execution_control_state *ecs)
4181 {
4182 struct frame_info *frame;
4183 struct gdbarch *gdbarch;
4184 int stopped_by_watchpoint;
4185 enum stop_kind stop_soon;
4186 int random_signal;
4187
4188 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4189
4190 /* Do we need to clean up the state of a thread that has
4191 completed a displaced single-step? (Doing so usually affects
4192 the PC, so do it here, before we set stop_pc.) */
4193 displaced_step_fixup (ecs->ptid,
4194 ecs->event_thread->suspend.stop_signal);
4195
4196 /* If we either finished a single-step or hit a breakpoint, but
4197 the user wanted this thread to be stopped, pretend we got a
4198 SIG0 (generic unsignaled stop). */
4199 if (ecs->event_thread->stop_requested
4200 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4201 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4202
4203 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4204
4205 if (debug_infrun)
4206 {
4207 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4208 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4209 struct cleanup *old_chain = save_inferior_ptid ();
4210
4211 inferior_ptid = ecs->ptid;
4212
4213 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4214 paddress (gdbarch, stop_pc));
4215 if (target_stopped_by_watchpoint ())
4216 {
4217 CORE_ADDR addr;
4218
4219 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4220
4221 if (target_stopped_data_address (&current_target, &addr))
4222 fprintf_unfiltered (gdb_stdlog,
4223 "infrun: stopped data address = %s\n",
4224 paddress (gdbarch, addr));
4225 else
4226 fprintf_unfiltered (gdb_stdlog,
4227 "infrun: (no data address available)\n");
4228 }
4229
4230 do_cleanups (old_chain);
4231 }
4232
4233 /* This originates from start_remote(), start_inferior() and
4234 shared library hook functions. */
4235 stop_soon = get_inferior_stop_soon (ecs->ptid);
4236 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4237 {
4238 if (!ptid_equal (ecs->ptid, inferior_ptid))
4239 context_switch (ecs->ptid);
4240 if (debug_infrun)
4241 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4242 stop_print_frame = 1;
4243 stop_waiting (ecs);
4244 return;
4245 }
4246
4247 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4248 && stop_after_trap)
4249 {
4250 if (!ptid_equal (ecs->ptid, inferior_ptid))
4251 context_switch (ecs->ptid);
4252 if (debug_infrun)
4253 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4254 stop_print_frame = 0;
4255 stop_waiting (ecs);
4256 return;
4257 }
4258
4259 /* This originates from attach_command(). We need to overwrite
4260 the stop_signal here, because some kernels don't ignore a
4261 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4262 See more comments in inferior.h. On the other hand, if we
4263 get a non-SIGSTOP, report it to the user - assume the backend
4264 will handle the SIGSTOP if it should show up later.
4265
4266 Also consider that the attach is complete when we see a
4267 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4268 target extended-remote report it instead of a SIGSTOP
4269 (e.g. gdbserver). We already rely on SIGTRAP being our
4270 signal, so this is no exception.
4271
4272 Also consider that the attach is complete when we see a
4273 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4274 the target to stop all threads of the inferior, in case the
4275 low level attach operation doesn't stop them implicitly. If
4276 they weren't stopped implicitly, then the stub will report a
4277 GDB_SIGNAL_0, meaning: stopped for no particular reason
4278 other than GDB's request. */
4279 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4280 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4281 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4282 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4283 {
4284 stop_print_frame = 1;
4285 stop_waiting (ecs);
4286 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4287 return;
4288 }
4289
4290 /* See if something interesting happened to the non-current thread. If
4291 so, then switch to that thread. */
4292 if (!ptid_equal (ecs->ptid, inferior_ptid))
4293 {
4294 if (debug_infrun)
4295 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4296
4297 context_switch (ecs->ptid);
4298
4299 if (deprecated_context_hook)
4300 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4301 }
4302
4303 /* At this point, get hold of the now-current thread's frame. */
4304 frame = get_current_frame ();
4305 gdbarch = get_frame_arch (frame);
4306
4307 /* Pull the single step breakpoints out of the target. */
4308 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4309 {
4310 struct regcache *regcache;
4311 struct address_space *aspace;
4312 CORE_ADDR pc;
4313
4314 regcache = get_thread_regcache (ecs->ptid);
4315 aspace = get_regcache_aspace (regcache);
4316 pc = regcache_read_pc (regcache);
4317
4318 /* However, before doing so, if this single-step breakpoint was
4319 actually for another thread, set this thread up for moving
4320 past it. */
4321 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4322 aspace, pc))
4323 {
4324 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4325 {
4326 if (debug_infrun)
4327 {
4328 fprintf_unfiltered (gdb_stdlog,
4329 "infrun: [%s] hit another thread's "
4330 "single-step breakpoint\n",
4331 target_pid_to_str (ecs->ptid));
4332 }
4333 ecs->hit_singlestep_breakpoint = 1;
4334 }
4335 }
4336 else
4337 {
4338 if (debug_infrun)
4339 {
4340 fprintf_unfiltered (gdb_stdlog,
4341 "infrun: [%s] hit its "
4342 "single-step breakpoint\n",
4343 target_pid_to_str (ecs->ptid));
4344 }
4345 }
4346 }
4347 delete_just_stopped_threads_single_step_breakpoints ();
4348
4349 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4350 && ecs->event_thread->control.trap_expected
4351 && ecs->event_thread->stepping_over_watchpoint)
4352 stopped_by_watchpoint = 0;
4353 else
4354 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4355
4356 /* If necessary, step over this watchpoint. We'll be back to display
4357 it in a moment. */
4358 if (stopped_by_watchpoint
4359 && (target_have_steppable_watchpoint
4360 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4361 {
4362 /* At this point, we are stopped at an instruction which has
4363 attempted to write to a piece of memory under control of
4364 a watchpoint. The instruction hasn't actually executed
4365 yet. If we were to evaluate the watchpoint expression
4366 now, we would get the old value, and therefore no change
4367 would seem to have occurred.
4368
4369 In order to make watchpoints work `right', we really need
4370 to complete the memory write, and then evaluate the
4371 watchpoint expression. We do this by single-stepping the
4372 target.
4373
4374 It may not be necessary to disable the watchpoint to step over
4375 it. For example, the PA can (with some kernel cooperation)
4376 single step over a watchpoint without disabling the watchpoint.
4377
4378 It is far more common to need to disable a watchpoint to step
4379 the inferior over it. If we have non-steppable watchpoints,
4380 we must disable the current watchpoint; it's simplest to
4381 disable all watchpoints.
4382
4383 Any breakpoint at PC must also be stepped over -- if there's
4384 one, it will have already triggered before the watchpoint
4385 triggered, and we either already reported it to the user, or
4386 it didn't cause a stop and we called keep_going. In either
4387 case, if there was a breakpoint at PC, we must be trying to
4388 step past it. */
4389 ecs->event_thread->stepping_over_watchpoint = 1;
4390 keep_going (ecs);
4391 return;
4392 }
4393
4394 ecs->event_thread->stepping_over_breakpoint = 0;
4395 ecs->event_thread->stepping_over_watchpoint = 0;
4396 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4397 ecs->event_thread->control.stop_step = 0;
4398 stop_print_frame = 1;
4399 stopped_by_random_signal = 0;
4400
4401 /* Hide inlined functions starting here, unless we just performed stepi or
4402 nexti. After stepi and nexti, always show the innermost frame (not any
4403 inline function call sites). */
4404 if (ecs->event_thread->control.step_range_end != 1)
4405 {
4406 struct address_space *aspace =
4407 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4408
4409 /* skip_inline_frames is expensive, so we avoid it if we can
4410 determine that the address is one where functions cannot have
4411 been inlined. This improves performance with inferiors that
4412 load a lot of shared libraries, because the solib event
4413 breakpoint is defined as the address of a function (i.e. not
4414 inline). Note that we have to check the previous PC as well
4415 as the current one to catch cases when we have just
4416 single-stepped off a breakpoint prior to reinstating it.
4417 Note that we're assuming that the code we single-step to is
4418 not inline, but that's not definitive: there's nothing
4419 preventing the event breakpoint function from containing
4420 inlined code, and the single-step ending up there. If the
4421 user had set a breakpoint on that inlined code, the missing
4422 skip_inline_frames call would break things. Fortunately
4423 that's an extremely unlikely scenario. */
4424 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4425 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4426 && ecs->event_thread->control.trap_expected
4427 && pc_at_non_inline_function (aspace,
4428 ecs->event_thread->prev_pc,
4429 &ecs->ws)))
4430 {
4431 skip_inline_frames (ecs->ptid);
4432
4433 /* Re-fetch current thread's frame in case that invalidated
4434 the frame cache. */
4435 frame = get_current_frame ();
4436 gdbarch = get_frame_arch (frame);
4437 }
4438 }
4439
4440 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4441 && ecs->event_thread->control.trap_expected
4442 && gdbarch_single_step_through_delay_p (gdbarch)
4443 && currently_stepping (ecs->event_thread))
4444 {
4445 /* We're trying to step off a breakpoint. Turns out that we're
4446 also on an instruction that needs to be stepped multiple
4447 times before it's been fully executing. E.g., architectures
4448 with a delay slot. It needs to be stepped twice, once for
4449 the instruction and once for the delay slot. */
4450 int step_through_delay
4451 = gdbarch_single_step_through_delay (gdbarch, frame);
4452
4453 if (debug_infrun && step_through_delay)
4454 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4455 if (ecs->event_thread->control.step_range_end == 0
4456 && step_through_delay)
4457 {
4458 /* The user issued a continue when stopped at a breakpoint.
4459 Set up for another trap and get out of here. */
4460 ecs->event_thread->stepping_over_breakpoint = 1;
4461 keep_going (ecs);
4462 return;
4463 }
4464 else if (step_through_delay)
4465 {
4466 /* The user issued a step when stopped at a breakpoint.
4467 Maybe we should stop, maybe we should not - the delay
4468 slot *might* correspond to a line of source. In any
4469 case, don't decide that here, just set
4470 ecs->stepping_over_breakpoint, making sure we
4471 single-step again before breakpoints are re-inserted. */
4472 ecs->event_thread->stepping_over_breakpoint = 1;
4473 }
4474 }
4475
4476 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4477 handles this event. */
4478 ecs->event_thread->control.stop_bpstat
4479 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4480 stop_pc, ecs->ptid, &ecs->ws);
4481
4482 /* The following is needed in case a breakpoint condition called a
4483 function. */
4484 stop_print_frame = 1;
4485
4486 /* This is where we handle "moribund" watchpoints. Unlike
4487 software breakpoint traps, hardware watchpoint traps are
4488 always distinguishable from random traps. If no high-level
4489 watchpoint is associated with the reported stop data address
4490 anymore, then the bpstat does not explain the signal ---
4491 simply make sure to ignore it if `stopped_by_watchpoint' is
4492 set. */
4493
4494 if (debug_infrun
4495 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4496 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4497 GDB_SIGNAL_TRAP)
4498 && stopped_by_watchpoint)
4499 fprintf_unfiltered (gdb_stdlog,
4500 "infrun: no user watchpoint explains "
4501 "watchpoint SIGTRAP, ignoring\n");
4502
4503 /* NOTE: cagney/2003-03-29: These checks for a random signal
4504 at one stage in the past included checks for an inferior
4505 function call's call dummy's return breakpoint. The original
4506 comment, that went with the test, read:
4507
4508 ``End of a stack dummy. Some systems (e.g. Sony news) give
4509 another signal besides SIGTRAP, so check here as well as
4510 above.''
4511
4512 If someone ever tries to get call dummies on a
4513 non-executable stack to work (where the target would stop
4514 with something like a SIGSEGV), then those tests might need
4515 to be re-instated. Given, however, that the tests were only
4516 enabled when momentary breakpoints were not being used, I
4517 suspect that it won't be the case.
4518
4519 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4520 be necessary for call dummies on a non-executable stack on
4521 SPARC. */
4522
4523 /* See if the breakpoints module can explain the signal. */
4524 random_signal
4525 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4526 ecs->event_thread->suspend.stop_signal);
4527
4528 /* Maybe this was a trap for a software breakpoint that has since
4529 been removed. */
4530 if (random_signal && target_stopped_by_sw_breakpoint ())
4531 {
4532 if (program_breakpoint_here_p (gdbarch, stop_pc))
4533 {
4534 struct regcache *regcache;
4535 int decr_pc;
4536
4537 /* Re-adjust PC to what the program would see if GDB was not
4538 debugging it. */
4539 regcache = get_thread_regcache (ecs->event_thread->ptid);
4540 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4541 if (decr_pc != 0)
4542 {
4543 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
4544
4545 if (record_full_is_used ())
4546 record_full_gdb_operation_disable_set ();
4547
4548 regcache_write_pc (regcache, stop_pc + decr_pc);
4549
4550 do_cleanups (old_cleanups);
4551 }
4552 }
4553 else
4554 {
4555 /* A delayed software breakpoint event. Ignore the trap. */
4556 if (debug_infrun)
4557 fprintf_unfiltered (gdb_stdlog,
4558 "infrun: delayed software breakpoint "
4559 "trap, ignoring\n");
4560 random_signal = 0;
4561 }
4562 }
4563
4564 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
4565 has since been removed. */
4566 if (random_signal && target_stopped_by_hw_breakpoint ())
4567 {
4568 /* A delayed hardware breakpoint event. Ignore the trap. */
4569 if (debug_infrun)
4570 fprintf_unfiltered (gdb_stdlog,
4571 "infrun: delayed hardware breakpoint/watchpoint "
4572 "trap, ignoring\n");
4573 random_signal = 0;
4574 }
4575
4576 /* If not, perhaps stepping/nexting can. */
4577 if (random_signal)
4578 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4579 && currently_stepping (ecs->event_thread));
4580
4581 /* Perhaps the thread hit a single-step breakpoint of _another_
4582 thread. Single-step breakpoints are transparent to the
4583 breakpoints module. */
4584 if (random_signal)
4585 random_signal = !ecs->hit_singlestep_breakpoint;
4586
4587 /* No? Perhaps we got a moribund watchpoint. */
4588 if (random_signal)
4589 random_signal = !stopped_by_watchpoint;
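  /* At this point RANDOM_SIGNAL remains set only if none of the
     explanations checked above (breakpoints, stepping, another
     thread's single-step breakpoint, a moribund watchpoint) accounts
     for the trap.  */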
4590
4591 /* For the program's own signals, act according to
4592 the signal handling tables. */
4593
4594 if (random_signal)
4595 {
4596 /* Signal not for debugging purposes. */
4597 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4598 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4599
4600 if (debug_infrun)
4601 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4602 gdb_signal_to_symbol_string (stop_signal));
4603
4604 stopped_by_random_signal = 1;
4605
4606 /* Always stop on signals if we're either just gaining control
4607 of the program, or the user explicitly requested this thread
4608 to remain stopped. */
4609 if (stop_soon != NO_STOP_QUIETLY
4610 || ecs->event_thread->stop_requested
4611 || (!inf->detaching
4612 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4613 {
4614 stop_waiting (ecs);
4615 return;
4616 }
4617
4618 /* Notify observers the signal has "handle print" set. Note we
4619 returned early above if stopping; normal_stop handles the
4620 printing in that case. */
4621 if (signal_print[ecs->event_thread->suspend.stop_signal])
4622 {
4623 /* The signal table tells us to print about this signal. */
4624 target_terminal_ours_for_output ();
4625 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4626 target_terminal_inferior ();
4627 }
4628
4629 /* Clear the signal if it should not be passed. */
4630 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4631 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
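  /* E.g., after "handle SIGUSR1 nopass", the signal is discarded here
     instead of being delivered to the inferior on the next resume.  */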
4632
4633 if (ecs->event_thread->prev_pc == stop_pc
4634 && ecs->event_thread->control.trap_expected
4635 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4636 {
4637 /* We were just starting a new sequence, attempting to
4638 single-step off of a breakpoint and expecting a SIGTRAP.
4639 Instead this signal arrives. This signal will take us out
4640 of the stepping range so GDB needs to remember to, when
4641 the signal handler returns, resume stepping off that
4642 breakpoint. */
4643 /* To simplify things, "continue" is forced to use the same
4644 code paths as single-step - set a breakpoint at the
4645 signal return address and then, once hit, step off that
4646 breakpoint. */
4647 if (debug_infrun)
4648 fprintf_unfiltered (gdb_stdlog,
4649 "infrun: signal arrived while stepping over "
4650 "breakpoint\n");
4651
4652 insert_hp_step_resume_breakpoint_at_frame (frame);
4653 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4654 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4655 ecs->event_thread->control.trap_expected = 0;
4656
4657 /* If we were nexting/stepping some other thread, switch to
4658 it, so that we don't continue it, losing control. */
4659 if (!switch_back_to_stepped_thread (ecs))
4660 keep_going (ecs);
4661 return;
4662 }
4663
4664 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4665 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4666 || ecs->event_thread->control.step_range_end == 1)
4667 && frame_id_eq (get_stack_frame_id (frame),
4668 ecs->event_thread->control.step_stack_frame_id)
4669 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4670 {
4671 /* The inferior is about to take a signal that will take it
4672 out of the single step range. Set a breakpoint at the
4673 current PC (which is presumably where the signal handler
4674 will eventually return) and then allow the inferior to
4675 run free.
4676
4677 Note that this is only needed for a signal delivered
4678 while in the single-step range. Nested signals aren't a
4679 problem as they eventually all return. */
4680 if (debug_infrun)
4681 fprintf_unfiltered (gdb_stdlog,
4682 "infrun: signal may take us out of "
4683 "single-step range\n");
4684
4685 insert_hp_step_resume_breakpoint_at_frame (frame);
4686 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4687 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4688 ecs->event_thread->control.trap_expected = 0;
4689 keep_going (ecs);
4690 return;
4691 }
4692
4693 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4694 when either there's a nested signal, or when there's a
4695 pending signal enabled just as the signal handler returns
4696 (leaving the inferior at the step-resume-breakpoint without
4697 actually executing it). Either way continue until the
4698 breakpoint is really hit. */
4699
4700 if (!switch_back_to_stepped_thread (ecs))
4701 {
4702 if (debug_infrun)
4703 fprintf_unfiltered (gdb_stdlog,
4704 "infrun: random signal, keep going\n");
4705
4706 keep_going (ecs);
4707 }
4708 return;
4709 }
4710
4711 process_event_stop_test (ecs);
4712 }
4713
4714 /* Come here when we've got some debug event / signal we can explain
4715 (IOW, not a random signal), and test whether it should cause a
4716 stop, or whether we should resume the inferior (transparently).
4717 E.g., could be a breakpoint whose condition evaluates false; we
4718 could be still stepping within the line; etc. */
4719
4720 static void
4721 process_event_stop_test (struct execution_control_state *ecs)
4722 {
4723 struct symtab_and_line stop_pc_sal;
4724 struct frame_info *frame;
4725 struct gdbarch *gdbarch;
4726 CORE_ADDR jmp_buf_pc;
4727 struct bpstat_what what;
4728
4729 /* Handle cases caused by hitting a breakpoint. */
4730
4731 frame = get_current_frame ();
4732 gdbarch = get_frame_arch (frame);
4733
4734 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4735
4736 if (what.call_dummy)
4737 {
4738 stop_stack_dummy = what.call_dummy;
4739 }
4740
4741 /* If we hit an internal event that triggers symbol changes, the
4742 current frame will be invalidated within bpstat_what (e.g., if we
4743 hit an internal solib event). Re-fetch it. */
4744 frame = get_current_frame ();
4745 gdbarch = get_frame_arch (frame);
4746
4747 switch (what.main_action)
4748 {
4749 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4750 /* If we hit the breakpoint at longjmp while stepping, we
4751 install a momentary breakpoint at the target of the
4752 jmp_buf. */
4753
4754 if (debug_infrun)
4755 fprintf_unfiltered (gdb_stdlog,
4756 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4757
4758 ecs->event_thread->stepping_over_breakpoint = 1;
4759
4760 if (what.is_longjmp)
4761 {
4762 struct value *arg_value;
4763
4764 /* If we set the longjmp breakpoint via a SystemTap probe,
4765 then use it to extract the arguments. The destination PC
4766 is the third argument to the probe. */
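	  /* probe_safe_evaluate_at_pc's argument index is zero-based,
	     so index 2 below selects the probe's third argument,
	     mentioned above.  */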
4767 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4768 if (arg_value)
4769 {
4770 jmp_buf_pc = value_as_address (arg_value);
4771 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4772 }
4773 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4774 || !gdbarch_get_longjmp_target (gdbarch,
4775 frame, &jmp_buf_pc))
4776 {
4777 if (debug_infrun)
4778 fprintf_unfiltered (gdb_stdlog,
4779 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4780 "(!gdbarch_get_longjmp_target)\n");
4781 keep_going (ecs);
4782 return;
4783 }
4784
4785 /* Insert a breakpoint at resume address. */
4786 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4787 }
4788 else
4789 check_exception_resume (ecs, frame);
4790 keep_going (ecs);
4791 return;
4792
4793 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4794 {
4795 struct frame_info *init_frame;
4796
4797 /* There are several cases to consider.
4798
4799 1. The initiating frame no longer exists. In this case we
4800 must stop, because the exception or longjmp has gone too
4801 far.
4802
4803 2. The initiating frame exists, and is the same as the
4804 current frame. We stop, because the exception or longjmp
4805 has been caught.
4806
4807 3. The initiating frame exists and is different from the
4808 current frame. This means the exception or longjmp has
4809 been caught beneath the initiating frame, so keep going.
4810
4811 4. longjmp breakpoint has been placed just to protect
4812 against stale dummy frames and user is not interested in
4813 stopping around longjmps. */
4814
4815 if (debug_infrun)
4816 fprintf_unfiltered (gdb_stdlog,
4817 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4818
4819 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4820 != NULL);
4821 delete_exception_resume_breakpoint (ecs->event_thread);
4822
4823 if (what.is_longjmp)
4824 {
4825 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4826
4827 if (!frame_id_p (ecs->event_thread->initiating_frame))
4828 {
4829 /* Case 4. */
4830 keep_going (ecs);
4831 return;
4832 }
4833 }
4834
4835 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4836
4837 if (init_frame)
4838 {
4839 struct frame_id current_id
4840 = get_frame_id (get_current_frame ());
4841 if (frame_id_eq (current_id,
4842 ecs->event_thread->initiating_frame))
4843 {
4844 /* Case 2. Fall through. */
4845 }
4846 else
4847 {
4848 /* Case 3. */
4849 keep_going (ecs);
4850 return;
4851 }
4852 }
4853
4854 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4855 exists. */
4856 delete_step_resume_breakpoint (ecs->event_thread);
4857
4858 end_stepping_range (ecs);
4859 }
4860 return;
4861
4862 case BPSTAT_WHAT_SINGLE:
4863 if (debug_infrun)
4864 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4865 ecs->event_thread->stepping_over_breakpoint = 1;
4866 /* Still need to check other stuff, at least the case where we
4867 are stepping and step out of the right range. */
4868 break;
4869
4870 case BPSTAT_WHAT_STEP_RESUME:
4871 if (debug_infrun)
4872 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4873
4874 delete_step_resume_breakpoint (ecs->event_thread);
4875 if (ecs->event_thread->control.proceed_to_finish
4876 && execution_direction == EXEC_REVERSE)
4877 {
4878 struct thread_info *tp = ecs->event_thread;
4879
4880 /* We are finishing a function in reverse, and just hit the
4881 step-resume breakpoint at the start address of the
4882 function, and we're almost there -- just need to back up
4883 by one more single-step, which should take us back to the
4884 function call. */
4885 tp->control.step_range_start = tp->control.step_range_end = 1;
4886 keep_going (ecs);
4887 return;
4888 }
4889 fill_in_stop_func (gdbarch, ecs);
4890 if (stop_pc == ecs->stop_func_start
4891 && execution_direction == EXEC_REVERSE)
4892 {
4893 /* We are stepping over a function call in reverse, and just
4894 hit the step-resume breakpoint at the start address of
4895 the function. Go back to single-stepping, which should
4896 take us back to the function call. */
4897 ecs->event_thread->stepping_over_breakpoint = 1;
4898 keep_going (ecs);
4899 return;
4900 }
4901 break;
4902
4903 case BPSTAT_WHAT_STOP_NOISY:
4904 if (debug_infrun)
4905 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4906 stop_print_frame = 1;
4907
4908 /* Assume the thread stopped for a breakpoint. We'll still check
4909 whether a/the breakpoint is there when the thread is next
4910 resumed. */
4911 ecs->event_thread->stepping_over_breakpoint = 1;
4912
4913 stop_waiting (ecs);
4914 return;
4915
4916 case BPSTAT_WHAT_STOP_SILENT:
4917 if (debug_infrun)
4918 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4919 stop_print_frame = 0;
4920
4921 /* Assume the thread stopped for a breakpoint. We'll still check
4922 whether a/the breakpoint is there when the thread is next
4923 resumed. */
4924 ecs->event_thread->stepping_over_breakpoint = 1;
4925 stop_waiting (ecs);
4926 return;
4927
4928 case BPSTAT_WHAT_HP_STEP_RESUME:
4929 if (debug_infrun)
4930 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4931
4932 delete_step_resume_breakpoint (ecs->event_thread);
4933 if (ecs->event_thread->step_after_step_resume_breakpoint)
4934 {
4935 /* Back when the step-resume breakpoint was inserted, we
4936 were trying to single-step off a breakpoint. Go back to
4937 doing that. */
4938 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4939 ecs->event_thread->stepping_over_breakpoint = 1;
4940 keep_going (ecs);
4941 return;
4942 }
4943 break;
4944
4945 case BPSTAT_WHAT_KEEP_CHECKING:
4946 break;
4947 }
4948
4949 /* If we stepped a permanent breakpoint and we had a high priority
4950 step-resume breakpoint for the address we stepped, but we didn't
4951 hit it, then we must have stepped into the signal handler. The
4952 step-resume was only necessary to catch the case of _not_
4953 stepping into the handler, so delete it, and fall through to
4954 checking whether the step finished. */
4955 if (ecs->event_thread->stepped_breakpoint)
4956 {
4957 struct breakpoint *sr_bp
4958 = ecs->event_thread->control.step_resume_breakpoint;
4959
4960 if (sr_bp->loc->permanent
4961 && sr_bp->type == bp_hp_step_resume
4962 && sr_bp->loc->address == ecs->event_thread->prev_pc)
4963 {
4964 if (debug_infrun)
4965 fprintf_unfiltered (gdb_stdlog,
4966 "infrun: stepped permanent breakpoint, stopped in "
4967 "handler\n");
4968 delete_step_resume_breakpoint (ecs->event_thread);
4969 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4970 }
4971 }
4972
4973 /* We come here if we hit a breakpoint but should not stop for it.
4974 Possibly we also were stepping and should stop for that. So fall
4975 through and test for stepping. But, if not stepping, do not
4976 stop. */
4977
4978 /* In all-stop mode, if we're currently stepping but have stopped in
4979 some other thread, we need to switch back to the stepped thread. */
4980 if (switch_back_to_stepped_thread (ecs))
4981 return;
4982
4983 if (ecs->event_thread->control.step_resume_breakpoint)
4984 {
4985 if (debug_infrun)
4986 fprintf_unfiltered (gdb_stdlog,
4987 "infrun: step-resume breakpoint is inserted\n");
4988
4989 /* Having a step-resume breakpoint overrides anything
4990 else having to do with stepping commands until
4991 that breakpoint is reached. */
4992 keep_going (ecs);
4993 return;
4994 }
4995
4996 if (ecs->event_thread->control.step_range_end == 0)
4997 {
4998 if (debug_infrun)
4999 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
5000 /* Likewise if we aren't even stepping. */
5001 keep_going (ecs);
5002 return;
5003 }
5004
5005 /* Re-fetch current thread's frame in case the code above caused
5006 the frame cache to be re-initialized, making our FRAME variable
5007 a dangling pointer. */
5008 frame = get_current_frame ();
5009 gdbarch = get_frame_arch (frame);
5010 fill_in_stop_func (gdbarch, ecs);
5011
5012 /* If stepping through a line, keep going if still within it.
5013
5014 Note that step_range_end is the address of the first instruction
5015 beyond the step range, and NOT the address of the last instruction
5016 within it!
5017
5018 Note also that during reverse execution, we may be stepping
5019 through a function epilogue and therefore must detect when
5020 the current-frame changes in the middle of a line. */
5021
5022 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
5023 && (execution_direction != EXEC_REVERSE
5024 || frame_id_eq (get_frame_id (frame),
5025 ecs->event_thread->control.step_frame_id)))
5026 {
5027 if (debug_infrun)
5028 fprintf_unfiltered
5029 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
5030 paddress (gdbarch, ecs->event_thread->control.step_range_start),
5031 paddress (gdbarch, ecs->event_thread->control.step_range_end));
5032
5033 /* Tentatively re-enable range stepping; `resume' disables it if
5034 necessary (e.g., if we're stepping over a breakpoint or we
5035 have software watchpoints). */
5036 ecs->event_thread->control.may_range_step = 1;
5037
5038 /* When stepping backward, stop at beginning of line range
5039 (unless it's the function entry point, in which case
5040 keep going back to the call point). */
5041 if (stop_pc == ecs->event_thread->control.step_range_start
5042 && stop_pc != ecs->stop_func_start
5043 && execution_direction == EXEC_REVERSE)
5044 end_stepping_range (ecs);
5045 else
5046 keep_going (ecs);
5047
5048 return;
5049 }
5050
5051 /* We stepped out of the stepping range. */
5052
5053 /* If we are stepping at the source level and entered the runtime
5054 loader dynamic symbol resolution code...
5055
5056 EXEC_FORWARD: we keep on single stepping until we exit the run
5057 time loader code and reach the callee's address.
5058
5059 EXEC_REVERSE: we've already executed the callee (backward), and
5060 the runtime loader code is handled just like any other
5061 undebuggable function call. Now we need only keep stepping
5062 backward through the trampoline code, and that's handled further
5063 down, so there is nothing for us to do here. */
5064
5065 if (execution_direction != EXEC_REVERSE
5066 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5067 && in_solib_dynsym_resolve_code (stop_pc))
5068 {
5069 CORE_ADDR pc_after_resolver =
5070 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
5071
5072 if (debug_infrun)
5073 fprintf_unfiltered (gdb_stdlog,
5074 "infrun: stepped into dynsym resolve code\n");
5075
5076 if (pc_after_resolver)
5077 {
5078 /* Set up a step-resume breakpoint at the address
5079 indicated by SKIP_SOLIB_RESOLVER. */
5080 struct symtab_and_line sr_sal;
5081
5082 init_sal (&sr_sal);
5083 sr_sal.pc = pc_after_resolver;
5084 sr_sal.pspace = get_frame_program_space (frame);
5085
5086 insert_step_resume_breakpoint_at_sal (gdbarch,
5087 sr_sal, null_frame_id);
5088 }
5089
5090 keep_going (ecs);
5091 return;
5092 }
5093
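  /* Note that a step_range_end of 1 means this is a stepi/nexti; in that
     case we do not silently step through the signal trampoline below.  */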
5094 if (ecs->event_thread->control.step_range_end != 1
5095 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5096 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5097 && get_frame_type (frame) == SIGTRAMP_FRAME)
5098 {
5099 if (debug_infrun)
5100 fprintf_unfiltered (gdb_stdlog,
5101 "infrun: stepped into signal trampoline\n");
5102 /* The inferior, while doing a "step" or "next", has ended up in
5103 a signal trampoline (either by a signal being delivered or by
5104 the signal handler returning). Just single-step until the
5105 inferior leaves the trampoline (either by calling the handler
5106 or returning). */
5107 keep_going (ecs);
5108 return;
5109 }
5110
5111 /* If we're in the return path from a shared library trampoline,
5112 we want to proceed through the trampoline when stepping. */
5113 /* macro/2012-04-25: This needs to come before the subroutine
5114 call check below as on some targets return trampolines look
5115 like subroutine calls (MIPS16 return thunks). */
5116 if (gdbarch_in_solib_return_trampoline (gdbarch,
5117 stop_pc, ecs->stop_func_name)
5118 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5119 {
5120 /* Determine where this trampoline returns. */
5121 CORE_ADDR real_stop_pc;
5122
5123 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5124
5125 if (debug_infrun)
5126 fprintf_unfiltered (gdb_stdlog,
5127 "infrun: stepped into solib return tramp\n");
5128
5129 /* Only proceed through if we know where it's going. */
5130 if (real_stop_pc)
5131 {
5132 /* And put the step-breakpoint there and go until there. */
5133 struct symtab_and_line sr_sal;
5134
5135 init_sal (&sr_sal); /* initialize to zeroes */
5136 sr_sal.pc = real_stop_pc;
5137 sr_sal.section = find_pc_overlay (sr_sal.pc);
5138 sr_sal.pspace = get_frame_program_space (frame);
5139
5140 /* Do not specify what the fp should be when we stop since
5141 on some machines the prologue is where the new fp value
5142 is established. */
5143 insert_step_resume_breakpoint_at_sal (gdbarch,
5144 sr_sal, null_frame_id);
5145
5146 /* Restart without fiddling with the step ranges or
5147 other state. */
5148 keep_going (ecs);
5149 return;
5150 }
5151 }
5152
5153 /* Check for subroutine calls. The check for the current frame
5154 equalling the step ID is not necessary - the check of the
5155 previous frame's ID is sufficient - but it is a common case and
5156 cheaper than checking the previous frame's ID.
5157
5158 NOTE: frame_id_eq will never report two invalid frame IDs as
5159 being equal, so to get into this block, both the current and
5160 previous frame must have valid frame IDs. */
5161 /* The outer_frame_id check is a heuristic to detect stepping
5162 through startup code. If we step over an instruction which
5163 sets the stack pointer from an invalid value to a valid value,
5164 we may detect that as a subroutine call from the mythical
5165 "outermost" function. This could be fixed by marking
5166 outermost frames as !stack_p,code_p,special_p. Then the
5167 initial outermost frame, before sp was valid, would
5168 have code_addr == &_start. See the comment in frame_id_eq
5169 for more. */
5170 if (!frame_id_eq (get_stack_frame_id (frame),
5171 ecs->event_thread->control.step_stack_frame_id)
5172 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
5173 ecs->event_thread->control.step_stack_frame_id)
5174 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
5175 outer_frame_id)
5176 || step_start_function != find_pc_function (stop_pc))))
5177 {
5178 CORE_ADDR real_stop_pc;
5179
5180 if (debug_infrun)
5181 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
5182
5183 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
5184 {
5185 /* I presume that step_over_calls is only 0 when we're
5186 supposed to be stepping at the assembly language level
5187 ("stepi"). Just stop. */
5188 /* And this works the same backward as frontward. MVS */
5189 end_stepping_range (ecs);
5190 return;
5191 }
5192
5193 /* Reverse stepping through solib trampolines. */
5194
5195 if (execution_direction == EXEC_REVERSE
5196 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5197 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5198 || (ecs->stop_func_start == 0
5199 && in_solib_dynsym_resolve_code (stop_pc))))
5200 {
5201 /* Any solib trampoline code can be handled in reverse
5202 by simply continuing to single-step. We have already
5203 executed the solib function (backwards), and a few
5204 steps will take us back through the trampoline to the
5205 caller. */
5206 keep_going (ecs);
5207 return;
5208 }
5209
5210 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5211 {
5212 /* We're doing a "next".
5213
5214 Normal (forward) execution: set a breakpoint at the
5215 callee's return address (the address at which the caller
5216 will resume).
5217
5218 Reverse (backward) execution: set the step-resume
5219 breakpoint at the start of the function that we just
5220 stepped into (backwards), and continue to there. When we
5221 get there, we'll need to single-step back to the caller. */
5222
5223 if (execution_direction == EXEC_REVERSE)
5224 {
5225 /* If we're already at the start of the function, we've either
5226 just stepped backward into a single instruction function,
5227 or stepped back out of a signal handler to the first instruction
5228 of the function. Just keep going, which will single-step back
5229 to the caller. */
5230 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5231 {
5232 struct symtab_and_line sr_sal;
5233
5234 /* Normal function call return (static or dynamic). */
5235 init_sal (&sr_sal);
5236 sr_sal.pc = ecs->stop_func_start;
5237 sr_sal.pspace = get_frame_program_space (frame);
5238 insert_step_resume_breakpoint_at_sal (gdbarch,
5239 sr_sal, null_frame_id);
5240 }
5241 }
5242 else
5243 insert_step_resume_breakpoint_at_caller (frame);
5244
5245 keep_going (ecs);
5246 return;
5247 }
5248
5249 /* If we are in a function call trampoline (a stub between the
5250 calling routine and the real function), locate the real
5251 function. That's what tells us (a) whether we want to step
5252 into it at all, and (b) what prologue we want to run to the
5253 end of, if we do step into it. */
5254 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5255 if (real_stop_pc == 0)
5256 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5257 if (real_stop_pc != 0)
5258 ecs->stop_func_start = real_stop_pc;
5259
5260 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5261 {
5262 struct symtab_and_line sr_sal;
5263
5264 init_sal (&sr_sal);
5265 sr_sal.pc = ecs->stop_func_start;
5266 sr_sal.pspace = get_frame_program_space (frame);
5267
5268 insert_step_resume_breakpoint_at_sal (gdbarch,
5269 sr_sal, null_frame_id);
5270 keep_going (ecs);
5271 return;
5272 }
5273
5274 /* If we have line number information for the function we are
5275 thinking of stepping into and the function isn't on the skip
5276 list, step into it.
5277
5278 If there are several symtabs at that PC (e.g. with include
5279 files), just want to know whether *any* of them have line
5280 numbers. find_pc_line handles this. */
5281 {
5282 struct symtab_and_line tmp_sal;
5283
5284 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5285 if (tmp_sal.line != 0
5286 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5287 &tmp_sal))
5288 {
5289 if (execution_direction == EXEC_REVERSE)
5290 handle_step_into_function_backward (gdbarch, ecs);
5291 else
5292 handle_step_into_function (gdbarch, ecs);
5293 return;
5294 }
5295 }
5296
5297 /* If we have no line number and the step-stop-if-no-debug is
5298 set, we stop the step so that the user has a chance to switch
5299 to assembly mode. */
5300 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5301 && step_stop_if_no_debug)
5302 {
5303 end_stepping_range (ecs);
5304 return;
5305 }
5306
5307 if (execution_direction == EXEC_REVERSE)
5308 {
5309 /* If we're already at the start of the function, we've either just
5310 stepped backward into a single instruction function without line
5311 number info, or stepped back out of a signal handler to the first
5312 instruction of the function without line number info. Just keep
5313 going, which will single-step back to the caller. */
5314 if (ecs->stop_func_start != stop_pc)
5315 {
5316 /* Set a breakpoint at callee's start address.
5317 From there we can step once and be back in the caller. */
5318 struct symtab_and_line sr_sal;
5319
5320 init_sal (&sr_sal);
5321 sr_sal.pc = ecs->stop_func_start;
5322 sr_sal.pspace = get_frame_program_space (frame);
5323 insert_step_resume_breakpoint_at_sal (gdbarch,
5324 sr_sal, null_frame_id);
5325 }
5326 }
5327 else
5328 /* Set a breakpoint at callee's return address (the address
5329 at which the caller will resume). */
5330 insert_step_resume_breakpoint_at_caller (frame);
5331
5332 keep_going (ecs);
5333 return;
5334 }
5335
5336 /* Reverse stepping through solib trampolines. */
5337
5338 if (execution_direction == EXEC_REVERSE
5339 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5340 {
5341 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5342 || (ecs->stop_func_start == 0
5343 && in_solib_dynsym_resolve_code (stop_pc)))
5344 {
5345 /* Any solib trampoline code can be handled in reverse
5346 by simply continuing to single-step. We have already
5347 executed the solib function (backwards), and a few
5348 steps will take us back through the trampoline to the
5349 caller. */
5350 keep_going (ecs);
5351 return;
5352 }
5353 else if (in_solib_dynsym_resolve_code (stop_pc))
5354 {
5355 /* Stepped backward into the solib dynsym resolver.
5356 Set a breakpoint at its start and continue, then
5357 one more step will take us out. */
5358 struct symtab_and_line sr_sal;
5359
5360 init_sal (&sr_sal);
5361 sr_sal.pc = ecs->stop_func_start;
5362 sr_sal.pspace = get_frame_program_space (frame);
5363 insert_step_resume_breakpoint_at_sal (gdbarch,
5364 sr_sal, null_frame_id);
5365 keep_going (ecs);
5366 return;
5367 }
5368 }
5369
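  /* Look up the symtab and line for the stop PC; the line-stepping checks
     below are all expressed in terms of this sal.  */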
5370 stop_pc_sal = find_pc_line (stop_pc, 0);
5371
5372 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5373 the trampoline processing logic, however, there are some trampolines
5374 that have no names, so we should do trampoline handling first. */
5375 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5376 && ecs->stop_func_name == NULL
5377 && stop_pc_sal.line == 0)
5378 {
5379 if (debug_infrun)
5380 fprintf_unfiltered (gdb_stdlog,
5381 "infrun: stepped into undebuggable function\n");
5382
5383 /* The inferior just stepped into, or returned to, an
5384 undebuggable function (where there is no debugging information
5385 and no line number corresponding to the address where the
5386 inferior stopped). Since we want to skip this kind of code,
5387 we keep going until the inferior returns from this
5388 function - unless the user has asked us not to (via
5389 set step-mode) or we no longer know how to get back
5390 to the call site. */
5391 if (step_stop_if_no_debug
5392 || !frame_id_p (frame_unwind_caller_id (frame)))
5393 {
5394 /* If we have no line number and the step-stop-if-no-debug
5395 is set, we stop the step so that the user has a chance to
5396 switch to assembly mode. */
5397 end_stepping_range (ecs);
5398 return;
5399 }
5400 else
5401 {
5402 /* Set a breakpoint at callee's return address (the address
5403 at which the caller will resume). */
5404 insert_step_resume_breakpoint_at_caller (frame);
5405 keep_going (ecs);
5406 return;
5407 }
5408 }
5409
5410 if (ecs->event_thread->control.step_range_end == 1)
5411 {
5412 /* It is stepi or nexti. We always want to stop stepping after
5413 one instruction. */
5414 if (debug_infrun)
5415 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5416 end_stepping_range (ecs);
5417 return;
5418 }
5419
5420 if (stop_pc_sal.line == 0)
5421 {
5422 /* We have no line number information. That means to stop
5423 stepping (does this always happen right after one instruction,
5424 when we do "s" in a function with no line numbers,
5425 or can this happen as a result of a return or longjmp?). */
5426 if (debug_infrun)
5427 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5428 end_stepping_range (ecs);
5429 return;
5430 }
5431
5432 /* Look for "calls" to inlined functions, part one. If the inline
5433 frame machinery detected some skipped call sites, we have entered
5434 a new inline function. */
5435
5436 if (frame_id_eq (get_frame_id (get_current_frame ()),
5437 ecs->event_thread->control.step_frame_id)
5438 && inline_skipped_frames (ecs->ptid))
5439 {
5440 struct symtab_and_line call_sal;
5441
5442 if (debug_infrun)
5443 fprintf_unfiltered (gdb_stdlog,
5444 "infrun: stepped into inlined function\n");
5445
5446 find_frame_sal (get_current_frame (), &call_sal);
5447
5448 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5449 {
5450 /* For "step", we're going to stop. But if the call site
5451 for this inlined function is on the same source line as
5452 we were previously stepping, go down into the function
5453 first. Otherwise stop at the call site. */
5454
5455 if (call_sal.line == ecs->event_thread->current_line
5456 && call_sal.symtab == ecs->event_thread->current_symtab)
5457 step_into_inline_frame (ecs->ptid);
5458
5459 end_stepping_range (ecs);
5460 return;
5461 }
5462 else
5463 {
5464 /* For "next", we should stop at the call site if it is on a
5465 different source line. Otherwise continue through the
5466 inlined function. */
5467 if (call_sal.line == ecs->event_thread->current_line
5468 && call_sal.symtab == ecs->event_thread->current_symtab)
5469 keep_going (ecs);
5470 else
5471 end_stepping_range (ecs);
5472 return;
5473 }
5474 }
5475
5476 /* Look for "calls" to inlined functions, part two. If we are still
5477 in the same real function we were stepping through, but we have
5478 to go further up to find the exact frame ID, we are stepping
5479 through a more inlined call beyond its call site. */
5480
5481 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5482 && !frame_id_eq (get_frame_id (get_current_frame ()),
5483 ecs->event_thread->control.step_frame_id)
5484 && stepped_in_from (get_current_frame (),
5485 ecs->event_thread->control.step_frame_id))
5486 {
5487 if (debug_infrun)
5488 fprintf_unfiltered (gdb_stdlog,
5489 "infrun: stepping through inlined function\n");
5490
5491 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5492 keep_going (ecs);
5493 else
5494 end_stepping_range (ecs);
5495 return;
5496 }
5497
5498 if ((stop_pc == stop_pc_sal.pc)
5499 && (ecs->event_thread->current_line != stop_pc_sal.line
5500 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5501 {
5502 /* We are at the start of a different line. So stop. Note that
5503 we don't stop if we step into the middle of a different line.
5504 That is said to make things like for (;;) statements work
5505 better. */
5506 if (debug_infrun)
5507 fprintf_unfiltered (gdb_stdlog,
5508 "infrun: stepped to a different line\n");
5509 end_stepping_range (ecs);
5510 return;
5511 }
5512
5513 /* We aren't done stepping.
5514
5515 Optimize by setting the stepping range to the line.
5516 (We might not be in the original line, but if we entered a
5517 new line in mid-statement, we continue stepping. This makes
5518 things like for(;;) statements work better.) */
5519
5520 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5521 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5522 ecs->event_thread->control.may_range_step = 1;
5523 set_step_info (frame, stop_pc_sal);
5524
5525 if (debug_infrun)
5526 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5527 keep_going (ecs);
5528 }
5529
5530 /* In all-stop mode, if we're currently stepping but have stopped in
5531 some other thread, we may need to switch back to the stepped
5532 thread. Returns true we set the inferior running, false if we left
5533 it stopped (and the event needs further processing). */
5534
5535 static int
5536 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5537 {
5538 if (!non_stop)
5539 {
5540 struct thread_info *tp;
5541 struct thread_info *stepping_thread;
5542 struct thread_info *step_over;
5543
5544 /* If any thread is blocked on some internal breakpoint, and we
5545 simply need to step over that breakpoint to get it going
5546 again, do that first. */
5547
5548 /* However, if we see an event for the stepping thread, then we
5549 know all other threads have been moved past their breakpoints
5550 already. Let the caller check whether the step is finished,
5551 etc., before deciding to move it past a breakpoint. */
5552 if (ecs->event_thread->control.step_range_end != 0)
5553 return 0;
5554
5555 /* Check if the current thread is blocked on an incomplete
5556 step-over, interrupted by a random signal. */
5557 if (ecs->event_thread->control.trap_expected
5558 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5559 {
5560 if (debug_infrun)
5561 {
5562 fprintf_unfiltered (gdb_stdlog,
5563 "infrun: need to finish step-over of [%s]\n",
5564 target_pid_to_str (ecs->event_thread->ptid));
5565 }
5566 keep_going (ecs);
5567 return 1;
5568 }
5569
5570 /* Check if the current thread is blocked by a single-step
5571 breakpoint of another thread. */
5572 if (ecs->hit_singlestep_breakpoint)
5573 {
5574 if (debug_infrun)
5575 {
5576 fprintf_unfiltered (gdb_stdlog,
5577 "infrun: need to step [%s] over single-step "
5578 "breakpoint\n",
5579 target_pid_to_str (ecs->ptid));
5580 }
5581 keep_going (ecs);
5582 return 1;
5583 }
5584
5585 /* Otherwise, we no longer expect a trap in the current thread.
5586 Clear the trap_expected flag before switching back -- this is
5587 what keep_going does as well, if we call it. */
5588 ecs->event_thread->control.trap_expected = 0;
5589
5590 /* Likewise, clear the signal if it should not be passed. */
5591 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5592 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5593
5594 /* If scheduler locking applies even if not stepping, there's no
5595 need to walk over threads. Above we've checked whether the
5596 current thread is stepping. If some thread other than the
5597 event thread is stepping, then it must be that scheduler
5598 locking is not in effect. */
5599 if (schedlock_applies (0))
5600 return 0;
5601
5602 /* Look for the stepping/nexting thread, and check if any
5603 thread other than the stepping thread needs to start a
5604 step-over. Do all step-overs before actually proceeding with
5605 step/next/etc. */
5606 stepping_thread = NULL;
5607 step_over = NULL;
5608 ALL_NON_EXITED_THREADS (tp)
5609 {
5610 /* Ignore threads of processes we're not resuming. */
5611 if (!sched_multi
5612 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5613 continue;
5614
5615 /* When stepping over a breakpoint, we lock all threads
5616 except the one that needs to move past the breakpoint.
5617 If a non-event thread has this set, the "incomplete
5618 step-over" check above should have caught it earlier. */
5619 gdb_assert (!tp->control.trap_expected);
5620
5621 /* Did we find the stepping thread? */
5622 if (tp->control.step_range_end)
5623 {
5624 /* Yep. There should only be one, though. */
5625 gdb_assert (stepping_thread == NULL);
5626
5627 /* The event thread is handled at the top, before we
5628 enter this loop. */
5629 gdb_assert (tp != ecs->event_thread);
5630
5631 /* If some thread other than the event thread is
5632 stepping, then scheduler locking can't be in effect,
5633 otherwise we wouldn't have resumed the current event
5634 thread in the first place. */
5635 gdb_assert (!schedlock_applies (currently_stepping (tp)));
5636
5637 stepping_thread = tp;
5638 }
5639 else if (thread_still_needs_step_over (tp))
5640 {
5641 step_over = tp;
5642
5643 /* At the top we've returned early if the event thread
5644 is stepping. If some other thread not the event
5645 thread is stepping, then scheduler locking can't be
5646 in effect, and we can resume this thread. No need to
5647 keep looking for the stepping thread then. */
5648 break;
5649 }
5650 }
5651
5652 if (step_over != NULL)
5653 {
5654 tp = step_over;
5655 if (debug_infrun)
5656 {
5657 fprintf_unfiltered (gdb_stdlog,
5658 "infrun: need to step-over [%s]\n",
5659 target_pid_to_str (tp->ptid));
5660 }
5661
5662 /* Only the stepping thread should have this set. */
5663 gdb_assert (tp->control.step_range_end == 0);
5664
5665 ecs->ptid = tp->ptid;
5666 ecs->event_thread = tp;
5667 switch_to_thread (ecs->ptid);
5668 keep_going (ecs);
5669 return 1;
5670 }
5671
5672 if (stepping_thread != NULL)
5673 {
5674 struct frame_info *frame;
5675 struct gdbarch *gdbarch;
5676
5677 tp = stepping_thread;
5678
5679 /* If the stepping thread exited, then don't try to switch
5680 back and resume it, which could fail in several different
5681 ways depending on the target. Instead, just keep going.
5682
5683 We can find a stepping dead thread in the thread list in
5684 two cases:
5685
5686 - The target supports thread exit events, and when the
5687 target tries to delete the thread from the thread list,
5688 inferior_ptid pointed at the exiting thread. In such
5689 case, calling delete_thread does not really remove the
5690 thread from the list; instead, the thread is left listed,
5691 with 'exited' state.
5692
5693 - The target's debug interface does not support thread
5694 exit events, and so we have no idea whatsoever if the
5695 previously stepping thread is still alive. For that
5696 reason, we need to synchronously query the target
5697 now. */
5698 if (is_exited (tp->ptid)
5699 || !target_thread_alive (tp->ptid))
5700 {
5701 if (debug_infrun)
5702 fprintf_unfiltered (gdb_stdlog,
5703 "infrun: not switching back to "
5704 "stepped thread, it has vanished\n");
5705
5706 delete_thread (tp->ptid);
5707 keep_going (ecs);
5708 return 1;
5709 }
5710
5711 if (debug_infrun)
5712 fprintf_unfiltered (gdb_stdlog,
5713 "infrun: switching back to stepped thread\n");
5714
5715 ecs->event_thread = tp;
5716 ecs->ptid = tp->ptid;
5717 context_switch (ecs->ptid);
5718
5719 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5720 frame = get_current_frame ();
5721 gdbarch = get_frame_arch (frame);
5722
5723 /* If the PC of the thread we were trying to single-step has
5724 changed, then that thread has trapped or been signaled,
5725 but the event has not been reported to GDB yet. Re-poll
5726 the target looking for this particular thread's event
5727 (i.e. temporarily enable schedlock) by:
5728
5729 - setting a break at the current PC
5730 - resuming that particular thread, only (by setting
5731 trap expected)
5732
5733 This prevents us continuously moving the single-step
5734 breakpoint forward, one instruction at a time,
5735 overstepping. */
5736
5737 if (stop_pc != tp->prev_pc)
5738 {
5739 if (debug_infrun)
5740 fprintf_unfiltered (gdb_stdlog,
5741 "infrun: expected thread advanced also\n");
5742
5743 /* Clear the info of the previous step-over, as it's no
5744 longer valid. It's what keep_going would do too, if
5745 we called it. Must do this before trying to insert
5746 the sss breakpoint, otherwise if we were previously
5747 trying to step over this exact address in another
5748 thread, the breakpoint ends up not installed. */
5749 clear_step_over_info ();
5750
5751 insert_single_step_breakpoint (get_frame_arch (frame),
5752 get_frame_address_space (frame),
5753 stop_pc);
5754 ecs->event_thread->control.trap_expected = 1;
5755
5756 resume (0, GDB_SIGNAL_0);
5757 prepare_to_wait (ecs);
5758 }
5759 else
5760 {
5761 if (debug_infrun)
5762 fprintf_unfiltered (gdb_stdlog,
5763 "infrun: expected thread still "
5764 "hasn't advanced\n");
5765 keep_going (ecs);
5766 }
5767
5768 return 1;
5769 }
5770 }
5771 return 0;
5772 }
5773
5774 /* Is thread TP in the middle of single-stepping? */
5775
5776 static int
5777 currently_stepping (struct thread_info *tp)
5778 {
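  /* A thread is stepping if it has an active step range with no pending
     step-resume breakpoint, is in the middle of a step-over (a trap is
     expected), just stepped off a breakpoint instruction, or if
     single-stepping is forced by software watchpoints
     (bpstat_should_step).  */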
5779 return ((tp->control.step_range_end
5780 && tp->control.step_resume_breakpoint == NULL)
5781 || tp->control.trap_expected
5782 || tp->stepped_breakpoint
5783 || bpstat_should_step ());
5784 }
5785
5786 /* Inferior has stepped into a subroutine call with source code that
5787 we should not step over. Step to the first line of code in
5788 it. */
5789
5790 static void
5791 handle_step_into_function (struct gdbarch *gdbarch,
5792 struct execution_control_state *ecs)
5793 {
5794 struct compunit_symtab *cust;
5795 struct symtab_and_line stop_func_sal, sr_sal;
5796
5797 fill_in_stop_func (gdbarch, ecs);
5798
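  /* Unless the function is written in assembly, skip its prologue so the
     step lands at the first line of user code.  */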
5799 cust = find_pc_compunit_symtab (stop_pc);
5800 if (cust != NULL && compunit_language (cust) != language_asm)
5801 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5802 ecs->stop_func_start);
5803
5804 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5805 /* Use the step_resume_break to step until the end of the prologue,
5806 even if that involves jumps (as it seems to on the vax under
5807 4.2). */
5808 /* If the prologue ends in the middle of a source line, continue to
5809 the end of that source line (if it is still within the function).
5810 Otherwise, just go to end of prologue. */
5811 if (stop_func_sal.end
5812 && stop_func_sal.pc != ecs->stop_func_start
5813 && stop_func_sal.end < ecs->stop_func_end)
5814 ecs->stop_func_start = stop_func_sal.end;
5815
5816 /* Architectures which require breakpoint adjustment might not be able
5817 to place a breakpoint at the computed address. If so, the test
5818 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5819 ecs->stop_func_start to an address at which a breakpoint may be
5820 legitimately placed.
5821
5822 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5823 made, GDB will enter an infinite loop when stepping through
5824 optimized code consisting of VLIW instructions which contain
5825 subinstructions corresponding to different source lines. On
5826 FR-V, it's not permitted to place a breakpoint on any but the
5827 first subinstruction of a VLIW instruction. When a breakpoint is
5828 set, GDB will adjust the breakpoint address to the beginning of
5829 the VLIW instruction. Thus, we need to make the corresponding
5830 adjustment here when computing the stop address. */
5831
5832 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5833 {
5834 ecs->stop_func_start
5835 = gdbarch_adjust_breakpoint_address (gdbarch,
5836 ecs->stop_func_start);
5837 }
5838
5839 if (ecs->stop_func_start == stop_pc)
5840 {
5841 /* We are already there: stop now. */
5842 end_stepping_range (ecs);
5843 return;
5844 }
5845 else
5846 {
5847 /* Put the step-breakpoint there and go until there. */
5848 init_sal (&sr_sal); /* initialize to zeroes */
5849 sr_sal.pc = ecs->stop_func_start;
5850 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5851 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5852
5853 /* Do not specify what the fp should be when we stop since on
5854 some machines the prologue is where the new fp value is
5855 established. */
5856 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5857
5858 /* And make sure stepping stops right away then. */
5859 ecs->event_thread->control.step_range_end
5860 = ecs->event_thread->control.step_range_start;
5861 }
5862 keep_going (ecs);
5863 }
5864
5865 /* Inferior has stepped backward into a subroutine call with source
5866 code that we should not step over. Step to the beginning of the
5867 last line of code in it. */
5868
5869 static void
5870 handle_step_into_function_backward (struct gdbarch *gdbarch,
5871 struct execution_control_state *ecs)
5872 {
5873 struct compunit_symtab *cust;
5874 struct symtab_and_line stop_func_sal;
5875
5876 fill_in_stop_func (gdbarch, ecs);
5877
5878 cust = find_pc_compunit_symtab (stop_pc);
5879 if (cust != NULL && compunit_language (cust) != language_asm)
5880 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5881 ecs->stop_func_start);
5882
5883 stop_func_sal = find_pc_line (stop_pc, 0);
5884
5885 /* OK, we're just going to keep stepping here. */
5886 if (stop_func_sal.pc == stop_pc)
5887 {
5888 /* We're there already. Just stop stepping now. */
5889 end_stepping_range (ecs);
5890 }
5891 else
5892 {
5893 /* Else just reset the step range and keep going.
5894 No step-resume breakpoint, they don't work for
5895 epilogues, which can have multiple entry paths. */
5896 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5897 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5898 keep_going (ecs);
5899 }
5900 return;
5901 }
5902
5903 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5904 This is used both when stepping into functions and when skipping over code. */
5905
5906 static void
5907 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5908 struct symtab_and_line sr_sal,
5909 struct frame_id sr_id,
5910 enum bptype sr_type)
5911 {
5912 /* There should never be more than one step-resume or longjmp-resume
5913 breakpoint per thread, so we should never be setting a new
5914 step_resume_breakpoint when one is already active. */
5915 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5916 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5917
5918 if (debug_infrun)
5919 fprintf_unfiltered (gdb_stdlog,
5920 "infrun: inserting step-resume breakpoint at %s\n",
5921 paddress (gdbarch, sr_sal.pc));
5922
5923 inferior_thread ()->control.step_resume_breakpoint
5924 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5925 }
5926
5927 void
5928 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5929 struct symtab_and_line sr_sal,
5930 struct frame_id sr_id)
5931 {
5932 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5933 sr_sal, sr_id,
5934 bp_step_resume);
5935 }
5936
5937 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5938 This is used to skip a potential signal handler.
5939
5940 This is called with the interrupted function's frame. The signal
5941 handler, when it returns, will resume the interrupted function at
5942 RETURN_FRAME.pc. */
5943
5944 static void
5945 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5946 {
5947 struct symtab_and_line sr_sal;
5948 struct gdbarch *gdbarch;
5949
5950 gdb_assert (return_frame != NULL);
5951 init_sal (&sr_sal); /* initialize to zeros */
5952
5953 gdbarch = get_frame_arch (return_frame);
5954 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5955 sr_sal.section = find_pc_overlay (sr_sal.pc);
5956 sr_sal.pspace = get_frame_program_space (return_frame);
5957
5958 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5959 get_stack_frame_id (return_frame),
5960 bp_hp_step_resume);
5961 }
5962
5963 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5964 is used to skip a function after stepping into it (for "next" or if
5965 the called function has no debugging information).
5966
5967 The current function has almost always been reached by single
5968 stepping a call or return instruction. NEXT_FRAME belongs to the
5969 current function, and the breakpoint will be set at the caller's
5970 resume address.
5971
5972 This is a separate function rather than reusing
5973 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5974 get_prev_frame, which may stop prematurely (see the implementation
5975 of frame_unwind_caller_id for an example). */
5976
5977 static void
5978 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5979 {
5980 struct symtab_and_line sr_sal;
5981 struct gdbarch *gdbarch;
5982
5983 /* We shouldn't have gotten here if we don't know where the call site
5984 is. */
5985 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5986
5987 init_sal (&sr_sal); /* initialize to zeros */
5988
5989 gdbarch = frame_unwind_caller_arch (next_frame);
5990 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5991 frame_unwind_caller_pc (next_frame));
5992 sr_sal.section = find_pc_overlay (sr_sal.pc);
5993 sr_sal.pspace = frame_unwind_program_space (next_frame);
5994
5995 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5996 frame_unwind_caller_id (next_frame));
5997 }
5998
5999 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
6000 new breakpoint at the target of a jmp_buf. The handling of
6001 longjmp-resume uses the same mechanisms used for handling
6002 "step-resume" breakpoints. */
6003
6004 static void
6005 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
6006 {
6007 /* There should never be more than one longjmp-resume breakpoint per
6008 thread, so we should never be setting a new
6009 longjmp_resume_breakpoint when one is already active. */
6010 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
6011
6012 if (debug_infrun)
6013 fprintf_unfiltered (gdb_stdlog,
6014 "infrun: inserting longjmp-resume breakpoint at %s\n",
6015 paddress (gdbarch, pc));
6016
6017 inferior_thread ()->control.exception_resume_breakpoint =
6018 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
6019 }
6020
6021 /* Insert an exception resume breakpoint. TP is the thread throwing
6022 the exception. The block B is the block of the unwinder debug hook
6023 function. FRAME is the frame corresponding to the call to this
6024 function. SYM is the symbol of the function argument holding the
6025 target PC of the exception. */
6026
6027 static void
6028 insert_exception_resume_breakpoint (struct thread_info *tp,
6029 const struct block *b,
6030 struct frame_info *frame,
6031 struct symbol *sym)
6032 {
6033 TRY
6034 {
6035 struct symbol *vsym;
6036 struct value *value;
6037 CORE_ADDR handler;
6038 struct breakpoint *bp;
6039
6040 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
6041 value = read_var_value (vsym, frame);
6042 /* If the value was optimized out, revert to the old behavior. */
6043 if (! value_optimized_out (value))
6044 {
6045 handler = value_as_address (value);
6046
6047 if (debug_infrun)
6048 fprintf_unfiltered (gdb_stdlog,
6049 "infrun: exception resume at %lx\n",
6050 (unsigned long) handler);
6051
6052 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6053 handler, bp_exception_resume);
6054
6055 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
6056 frame = NULL;
6057
6058 bp->thread = tp->num;
6059 inferior_thread ()->control.exception_resume_breakpoint = bp;
6060 }
6061 }
6062 CATCH (e, RETURN_MASK_ERROR)
6063 {
6064 /* We want to ignore errors here. */
6065 }
6066 END_CATCH
6067 }
6068
6069 /* A helper for check_exception_resume that sets an
6070 exception-breakpoint based on a SystemTap probe. */
6071
6072 static void
6073 insert_exception_resume_from_probe (struct thread_info *tp,
6074 const struct bound_probe *probe,
6075 struct frame_info *frame)
6076 {
6077 struct value *arg_value;
6078 CORE_ADDR handler;
6079 struct breakpoint *bp;
6080
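  /* The exception-unwinding probe's second argument (index 1) is the
     handler address; the first, the CFA, is not needed here.  */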
6081 arg_value = probe_safe_evaluate_at_pc (frame, 1);
6082 if (!arg_value)
6083 return;
6084
6085 handler = value_as_address (arg_value);
6086
6087 if (debug_infrun)
6088 fprintf_unfiltered (gdb_stdlog,
6089 "infrun: exception resume at %s\n",
6090 paddress (get_objfile_arch (probe->objfile),
6091 handler));
6092
6093 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6094 handler, bp_exception_resume);
6095 bp->thread = tp->num;
6096 inferior_thread ()->control.exception_resume_breakpoint = bp;
6097 }
6098
6099 /* This is called when an exception has been intercepted. Check to
6100 see whether the exception's destination is of interest, and if so,
6101 set an exception resume breakpoint there. */
6102
6103 static void
6104 check_exception_resume (struct execution_control_state *ecs,
6105 struct frame_info *frame)
6106 {
6107 struct bound_probe probe;
6108 struct symbol *func;
6109
6110 /* First see if this exception unwinding breakpoint was set via a
6111 SystemTap probe point. If so, the probe has two arguments: the
6112 CFA and the HANDLER. We ignore the CFA, extract the handler, and
6113 set a breakpoint there. */
6114 probe = find_probe_by_pc (get_frame_pc (frame));
6115 if (probe.probe)
6116 {
6117 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
6118 return;
6119 }
6120
6121 func = get_frame_function (frame);
6122 if (!func)
6123 return;
6124
6125 TRY
6126 {
6127 const struct block *b;
6128 struct block_iterator iter;
6129 struct symbol *sym;
6130 int argno = 0;
6131
6132 /* The exception breakpoint is a thread-specific breakpoint on
6133 the unwinder's debug hook, declared as:
6134
6135 void _Unwind_DebugHook (void *cfa, void *handler);
6136
6137 The CFA argument indicates the frame to which control is
6138 about to be transferred. HANDLER is the destination PC.
6139
6140 We ignore the CFA and set a temporary breakpoint at HANDLER.
6141 This is not extremely efficient but it avoids issues in gdb
6142 with computing the DWARF CFA, and it also works even in weird
6143 cases such as throwing an exception from inside a signal
6144 handler. */
6145
6146 b = SYMBOL_BLOCK_VALUE (func);
6147 ALL_BLOCK_SYMBOLS (b, iter, sym)
6148 {
6149 if (!SYMBOL_IS_ARGUMENT (sym))
6150 continue;
6151
6152 if (argno == 0)
6153 ++argno;
6154 else
6155 {
6156 insert_exception_resume_breakpoint (ecs->event_thread,
6157 b, frame, sym);
6158 break;
6159 }
6160 }
6161 }
6162 CATCH (e, RETURN_MASK_ERROR)
6163 {
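      /* As in insert_exception_resume_breakpoint, errors are deliberately
	 ignored; at worst no exception-resume breakpoint gets set.  */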
6164 }
6165 END_CATCH
6166 }
6167
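/* Clear any leftover step-over state and let callers know, via the
   wait_some_more flag, that we no longer want to wait for the inferior.  */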
6168 static void
6169 stop_waiting (struct execution_control_state *ecs)
6170 {
6171 if (debug_infrun)
6172 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
6173
6174 clear_step_over_info ();
6175
6176 /* Let callers know we don't want to wait for the inferior anymore. */
6177 ecs->wait_some_more = 0;
6178 }
6179
6180 /* Called when we should continue running the inferior, because the
6181 current event doesn't cause a user visible stop. This does the
6182 resuming part; waiting for the next event is done elsewhere. */
6183
6184 static void
6185 keep_going (struct execution_control_state *ecs)
6186 {
6187 /* Make sure normal_stop is called if we get a QUIT handled before
6188 reaching resume. */
6189 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
6190
6191 /* Save the pc before execution, to compare with pc after stop. */
6192 ecs->event_thread->prev_pc
6193 = regcache_read_pc (get_thread_regcache (ecs->ptid));
6194
6195 if (ecs->event_thread->control.trap_expected
6196 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
6197 {
6198 /* We haven't yet gotten our trap, and either: intercepted a
6199 non-signal event (e.g., a fork); or took a signal which we
6200 are supposed to pass through to the inferior. Simply
6201 continue. */
6202 discard_cleanups (old_cleanups);
6203 resume (currently_stepping (ecs->event_thread),
6204 ecs->event_thread->suspend.stop_signal);
6205 }
6206 else
6207 {
6208 struct regcache *regcache = get_current_regcache ();
6209 int remove_bp;
6210 int remove_wps;
6211
6212 /* Either the trap was not expected, but we are continuing
6213 anyway (if we got a signal, the user asked it be passed to
6214 the child)
6215 -- or --
6216 We got our expected trap, but decided we should resume from
6217 it.
6218
6219 We're going to run this baby now!
6220
6221 Note that insert_breakpoints won't try to re-insert
6222 already inserted breakpoints. Therefore, we don't
6223 care if breakpoints were already inserted, or not. */
6224
6225 /* If we need to step over a breakpoint, and we're not using
6226 displaced stepping to do so, insert all breakpoints
6227 (watchpoints, etc.) but the one we're stepping over, step one
6228 instruction, and then re-insert the breakpoint when that step
6229 is finished. */
6230
6231 remove_bp = (ecs->hit_singlestep_breakpoint
6232 || thread_still_needs_step_over (ecs->event_thread));
6233 remove_wps = (ecs->event_thread->stepping_over_watchpoint
6234 && !target_have_steppable_watchpoint);
6235
6236 if (remove_bp && !use_displaced_stepping (get_regcache_arch (regcache)))
6237 {
6238 set_step_over_info (get_regcache_aspace (regcache),
6239 regcache_read_pc (regcache), remove_wps);
6240 }
6241 else if (remove_wps)
6242 set_step_over_info (NULL, 0, remove_wps);
6243 else
6244 clear_step_over_info ();
6245
6246 /* Stop stepping if inserting breakpoints fails. */
6247 TRY
6248 {
6249 insert_breakpoints ();
6250 }
6251 CATCH (e, RETURN_MASK_ERROR)
6252 {
6253 exception_print (gdb_stderr, e);
6254 stop_waiting (ecs);
6255 return;
6256 }
6257 END_CATCH
6258
6259 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
6260
6261 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
6262 explicitly specifies that such a signal should be delivered
6263 to the target program). Typically, that would occur when a
6264 user is debugging a target monitor on a simulator: the target
6265 monitor sets a breakpoint; the simulator encounters this
6266 breakpoint and halts the simulation handing control to GDB;
6267 GDB, noting that the stop address doesn't map to any known
6268 breakpoint, returns control back to the simulator; the
6269 simulator then delivers the hardware equivalent of a
6270 GDB_SIGNAL_TRAP to the program being debugged. */
6271 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6272 && !signal_program[ecs->event_thread->suspend.stop_signal])
6273 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6274
6275 discard_cleanups (old_cleanups);
6276 resume (currently_stepping (ecs->event_thread),
6277 ecs->event_thread->suspend.stop_signal);
6278 }
6279
6280 prepare_to_wait (ecs);
6281 }
6282
6283 /* This function normally comes after a resume, before
6284 handle_inferior_event exits. It takes care of any last bits of
6285 housekeeping, and sets the all-important wait_some_more flag. */
6286
6287 static void
6288 prepare_to_wait (struct execution_control_state *ecs)
6289 {
6290 if (debug_infrun)
6291 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6292
6293 /* This is the old end of the while loop. Let everybody know we
6294 want to wait for the inferior some more and get called again
6295 soon. */
6296 ecs->wait_some_more = 1;
6297 }
6298
6299 /* We are done with the step range of a step/next/si/ni command.
6300 Called once for each n of a "step n" operation. */
6301
6302 static void
6303 end_stepping_range (struct execution_control_state *ecs)
6304 {
6305 ecs->event_thread->control.stop_step = 1;
6306 stop_waiting (ecs);
6307 }
6308
6309 /* Several print_*_reason functions to print why the inferior has stopped.
6310 We always print something when the inferior exits, or receives a signal.
6311 The rest of the cases are dealt with later on in normal_stop and
6312 print_it_typical. Ideally there should be a call to one of these
6313 print_*_reason functions from handle_inferior_event each time
6314 stop_waiting is called.
6315
6316 Note that we don't call these directly, instead we delegate that to
6317 the interpreters, through observers. Interpreters then call these
6318 with whatever uiout is right. */
6319
6320 void
6321 print_end_stepping_range_reason (struct ui_out *uiout)
6322 {
6323 /* For CLI-like interpreters, print nothing. */
6324
6325 if (ui_out_is_mi_like_p (uiout))
6326 {
6327 ui_out_field_string (uiout, "reason",
6328 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6329 }
6330 }
6331
6332 void
6333 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6334 {
6335 annotate_signalled ();
6336 if (ui_out_is_mi_like_p (uiout))
6337 ui_out_field_string
6338 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
6339 ui_out_text (uiout, "\nProgram terminated with signal ");
6340 annotate_signal_name ();
6341 ui_out_field_string (uiout, "signal-name",
6342 gdb_signal_to_name (siggnal));
6343 annotate_signal_name_end ();
6344 ui_out_text (uiout, ", ");
6345 annotate_signal_string ();
6346 ui_out_field_string (uiout, "signal-meaning",
6347 gdb_signal_to_string (siggnal));
6348 annotate_signal_string_end ();
6349 ui_out_text (uiout, ".\n");
6350 ui_out_text (uiout, "The program no longer exists.\n");
6351 }
6352
6353 void
6354 print_exited_reason (struct ui_out *uiout, int exitstatus)
6355 {
6356 struct inferior *inf = current_inferior ();
6357 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6358
6359 annotate_exited (exitstatus);
6360 if (exitstatus)
6361 {
6362 if (ui_out_is_mi_like_p (uiout))
6363 ui_out_field_string (uiout, "reason",
6364 async_reason_lookup (EXEC_ASYNC_EXITED));
6365 ui_out_text (uiout, "[Inferior ");
6366 ui_out_text (uiout, plongest (inf->num));
6367 ui_out_text (uiout, " (");
6368 ui_out_text (uiout, pidstr);
6369 ui_out_text (uiout, ") exited with code ");
6370 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6371 ui_out_text (uiout, "]\n");
6372 }
6373 else
6374 {
6375 if (ui_out_is_mi_like_p (uiout))
6376 ui_out_field_string
6377 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6378 ui_out_text (uiout, "[Inferior ");
6379 ui_out_text (uiout, plongest (inf->num));
6380 ui_out_text (uiout, " (");
6381 ui_out_text (uiout, pidstr);
6382 ui_out_text (uiout, ") exited normally]\n");
6383 }
6384 }
6385
6386 void
6387 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6388 {
6389 annotate_signal ();
6390
6391 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
6392 {
6393 struct thread_info *t = inferior_thread ();
6394
6395 ui_out_text (uiout, "\n[");
6396 ui_out_field_string (uiout, "thread-name",
6397 target_pid_to_str (t->ptid));
6398 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6399 ui_out_text (uiout, " stopped");
6400 }
6401 else
6402 {
6403 ui_out_text (uiout, "\nProgram received signal ");
6404 annotate_signal_name ();
6405 if (ui_out_is_mi_like_p (uiout))
6406 ui_out_field_string
6407 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6408 ui_out_field_string (uiout, "signal-name",
6409 gdb_signal_to_name (siggnal));
6410 annotate_signal_name_end ();
6411 ui_out_text (uiout, ", ");
6412 annotate_signal_string ();
6413 ui_out_field_string (uiout, "signal-meaning",
6414 gdb_signal_to_string (siggnal));
6415 annotate_signal_string_end ();
6416 }
6417 ui_out_text (uiout, ".\n");
6418 }
6419
6420 void
6421 print_no_history_reason (struct ui_out *uiout)
6422 {
6423 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6424 }
6425
6426 /* Print current location without a level number, if we have changed
6427 functions or hit a breakpoint. Print source line if we have one.
6428 bpstat_print contains the logic deciding in detail what to print,
6429 based on the event(s) that just occurred. */
6430
6431 void
6432 print_stop_event (struct target_waitstatus *ws)
6433 {
6434 int bpstat_ret;
6435 int source_flag;
6436 int do_frame_printing = 1;
6437 struct thread_info *tp = inferior_thread ();
6438
6439 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6440 switch (bpstat_ret)
6441 {
6442 case PRINT_UNKNOWN:
6443 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6444 should) carry around the function and does (or should) use
6445 that when doing a frame comparison. */
6446 if (tp->control.stop_step
6447 && frame_id_eq (tp->control.step_frame_id,
6448 get_frame_id (get_current_frame ()))
6449 && step_start_function == find_pc_function (stop_pc))
6450 {
6451 /* Finished step, just print source line. */
6452 source_flag = SRC_LINE;
6453 }
6454 else
6455 {
6456 /* Print location and source line. */
6457 source_flag = SRC_AND_LOC;
6458 }
6459 break;
6460 case PRINT_SRC_AND_LOC:
6461 /* Print location and source line. */
6462 source_flag = SRC_AND_LOC;
6463 break;
6464 case PRINT_SRC_ONLY:
6465 source_flag = SRC_LINE;
6466 break;
6467 case PRINT_NOTHING:
6468 /* Something bogus. */
6469 source_flag = SRC_LINE;
6470 do_frame_printing = 0;
6471 break;
6472 default:
6473 internal_error (__FILE__, __LINE__, _("Unknown value."));
6474 }
6475
6476 /* The behavior of this routine with respect to the source
6477 flag is:
6478 SRC_LINE: Print only source line
6479 LOCATION: Print only location
6480 SRC_AND_LOC: Print location and source line. */
6481 if (do_frame_printing)
6482 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6483
6484 /* Display the auto-display expressions. */
6485 do_displays ();
6486 }
6487
6488 /* Here to return control to GDB when the inferior stops for real.
6489 Print appropriate messages, remove breakpoints, give terminal our modes.
6490
6491 STOP_PRINT_FRAME (a global flag) nonzero means print the executing
6492 frame (pc, function, args, file, line number and line text) as part
6493 of presenting the stop to the user; normal_stop itself takes no
6494 arguments. */
6495
6496 void
6497 normal_stop (void)
6498 {
6499 struct target_waitstatus last;
6500 ptid_t last_ptid;
6501 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6502
6503 get_last_target_status (&last_ptid, &last);
6504
6505 /* If an exception is thrown from this point on, make sure to
6506 propagate GDB's knowledge of the executing state to the
6507 frontend/user running state. A QUIT is an easy exception to see
6508 here, so do this before any filtered output. */
6509 if (!non_stop)
6510 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6511 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6512 && last.kind != TARGET_WAITKIND_EXITED
6513 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6514 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6515
6516 /* As we're presenting a stop, and potentially removing breakpoints,
6517 update the thread list so we can tell whether there are threads
6518 running on the target. With target remote, for example, we can
6519 only learn about new threads when we explicitly update the thread
6520 list. Do this before notifying the interpreters about signal
6521 stops, end of stepping ranges, etc., so that the "new thread"
6522 output is emitted before e.g., "Program received signal FOO",
6523 instead of after. */
6524 update_thread_list ();
6525
6526 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
6527 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
6528
6529 /* As with the notification of thread events, we want to delay
6530 notifying the user that we've switched thread context until
6531 the inferior actually stops.
6532
6533 There's no point in saying anything if the inferior has exited.
6534 Note that SIGNALLED here means "exited with a signal", not
6535 "received a signal".
6536
6537 Also skip saying anything in non-stop mode. In that mode, to avoid
6538 races where the user is typing a command to apply to thread x but
6539 GDB switches to thread y before the user finishes entering the
6540 command, fetch_inferior_event installs a cleanup that restores the
6541 current thread to the one the user had selected right after this
6542 event is handled. So we are not really switching threads behind
6543 the user's back here, only informing the user/frontend of a
6544 stop. */
6545 if (!non_stop
6546 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6547 && target_has_execution
6548 && last.kind != TARGET_WAITKIND_SIGNALLED
6549 && last.kind != TARGET_WAITKIND_EXITED
6550 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6551 {
6552 target_terminal_ours_for_output ();
6553 printf_filtered (_("[Switching to %s]\n"),
6554 target_pid_to_str (inferior_ptid));
6555 annotate_thread_changed ();
6556 previous_inferior_ptid = inferior_ptid;
6557 }
6558
6559 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6560 {
6561 gdb_assert (sync_execution || !target_can_async_p ());
6562
6563 target_terminal_ours_for_output ();
6564 printf_filtered (_("No unwaited-for children left.\n"));
6565 }
6566
6567 /* Note: this depends on the update_thread_list call above. */
6568 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6569 {
6570 if (remove_breakpoints ())
6571 {
6572 target_terminal_ours_for_output ();
6573 printf_filtered (_("Cannot remove breakpoints because "
6574 "program is no longer writable.\nFurther "
6575 "execution is probably impossible.\n"));
6576 }
6577 }
6578
6579 /* If an auto-display called a function and that got a signal,
6580 delete that auto-display to avoid an infinite recursion. */
6581
6582 if (stopped_by_random_signal)
6583 disable_current_display ();
6584
6585 /* Notify observers if we finished a "step"-like command, etc. */
6586 if (target_has_execution
6587 && last.kind != TARGET_WAITKIND_SIGNALLED
6588 && last.kind != TARGET_WAITKIND_EXITED
6589 && inferior_thread ()->control.stop_step)
6590 {
6591 /* But not if in the middle of doing a "step n" operation for
6592 n > 1 */
6593 if (inferior_thread ()->step_multi)
6594 goto done;
6595
6596 observer_notify_end_stepping_range ();
6597 }
6598
6599 target_terminal_ours ();
6600 async_enable_stdin ();
6601
6602 /* Set the current source location. This will also happen if we
6603 display the frame below, but the current SAL will be incorrect
6604 during a user hook-stop function. */
6605 if (has_stack_frames () && !stop_stack_dummy)
6606 set_current_sal_from_frame (get_current_frame ());
6607
6608 /* Let the user/frontend see the threads as stopped, but do nothing
6609 if the thread was running an infcall. We may be e.g., evaluating
6610 a breakpoint condition. In that case, the thread had state
6611 THREAD_RUNNING before the infcall, and shall remain set to
6612 running, all without informing the user/frontend about state
6613 transition changes. If this is actually a call command, then the
6614 thread was originally already stopped, so there's no state to
6615 finish either. */
6616 if (target_has_execution && inferior_thread ()->control.in_infcall)
6617 discard_cleanups (old_chain);
6618 else
6619 do_cleanups (old_chain);
6620
6621 /* Look up the hook_stop and run it (CLI internally handles problem
6622 of stop_command's pre-hook not existing). */
6623 if (stop_command)
6624 catch_errors (hook_stop_stub, stop_command,
6625 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6626
6627 if (!has_stack_frames ())
6628 goto done;
6629
6630 if (last.kind == TARGET_WAITKIND_SIGNALLED
6631 || last.kind == TARGET_WAITKIND_EXITED)
6632 goto done;
6633
6634 /* Select innermost stack frame - i.e., current frame is frame 0,
6635 and current location is based on that.
6636 Don't do this on return from a stack dummy routine,
6637 or if the program has exited. */
6638
6639 if (!stop_stack_dummy)
6640 {
6641 select_frame (get_current_frame ());
6642
6643 /* If --batch-silent is enabled then there's no need to print the current
6644 source location, and trying to do so risks causing an error message about
6645 missing source files. */
6646 if (stop_print_frame && !batch_silent)
6647 print_stop_event (&last);
6648 }
6649
6650 /* Save the function value return registers, if we care.
6651 We might be about to restore their previous contents. */
6652 if (inferior_thread ()->control.proceed_to_finish
6653 && execution_direction != EXEC_REVERSE)
6654 {
6655 /* This should not be necessary. */
6656 if (stop_registers)
6657 regcache_xfree (stop_registers);
6658
6659 /* NB: The copy goes through to the target picking up the value of
6660 all the registers. */
6661 stop_registers = regcache_dup (get_current_regcache ());
6662 }
6663
6664 if (stop_stack_dummy == STOP_STACK_DUMMY)
6665 {
6666 /* Pop the empty frame that contains the stack dummy.
6667 This also restores inferior state prior to the call
6668 (struct infcall_suspend_state). */
6669 struct frame_info *frame = get_current_frame ();
6670
6671 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6672 frame_pop (frame);
6673 /* frame_pop() calls reinit_frame_cache as the last thing it
6674 does which means there's currently no selected frame. We
6675 don't need to re-establish a selected frame if the dummy call
6676 returns normally, that will be done by
6677 restore_infcall_control_state. However, we do have to handle
6678 the case where the dummy call is returning after being
6679 stopped (e.g. the dummy call previously hit a breakpoint).
6680 We can't know which case we have so just always re-establish
6681 a selected frame here. */
6682 select_frame (get_current_frame ());
6683 }
6684
6685 done:
6686 annotate_stopped ();
6687
6688 /* Suppress the stop observer if we're in the middle of:
6689
6690 - a step n (n > 1), as there are still more steps to be done.
6691
6692 - a "finish" command, as the observer will be called in
6693 finish_command_continuation, so it can include the inferior
6694 function's return value.
6695
6696 - calling an inferior function, as we pretend the inferior didn't
6697 run at all. The return value of the call is handled by the
6698 expression evaluator, through call_function_by_hand. */
6699
6700 if (!target_has_execution
6701 || last.kind == TARGET_WAITKIND_SIGNALLED
6702 || last.kind == TARGET_WAITKIND_EXITED
6703 || last.kind == TARGET_WAITKIND_NO_RESUMED
6704 || (!(inferior_thread ()->step_multi
6705 && inferior_thread ()->control.stop_step)
6706 && !(inferior_thread ()->control.stop_bpstat
6707 && inferior_thread ()->control.proceed_to_finish)
6708 && !inferior_thread ()->control.in_infcall))
6709 {
6710 if (!ptid_equal (inferior_ptid, null_ptid))
6711 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6712 stop_print_frame);
6713 else
6714 observer_notify_normal_stop (NULL, stop_print_frame);
6715 }
6716
6717 if (target_has_execution)
6718 {
6719 if (last.kind != TARGET_WAITKIND_SIGNALLED
6720 && last.kind != TARGET_WAITKIND_EXITED)
6721 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6722 Delete any breakpoint that is to be deleted at the next stop. */
6723 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6724 }
6725
6726 /* Try to get rid of automatically added inferiors that are no
6727 longer needed. Keeping those around slows down things linearly.
6728 Note that this never removes the current inferior. */
6729 prune_inferiors ();
6730 }
6731
6732 static int
6733 hook_stop_stub (void *cmd)
6734 {
6735 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6736 return (0);
6737 }
6738 \f
6739 int
6740 signal_stop_state (int signo)
6741 {
6742 return signal_stop[signo];
6743 }
6744
6745 int
6746 signal_print_state (int signo)
6747 {
6748 return signal_print[signo];
6749 }
6750
6751 int
6752 signal_pass_state (int signo)
6753 {
6754 return signal_program[signo];
6755 }
6756
6757 static void
6758 signal_cache_update (int signo)
6759 {
6760 if (signo == -1)
6761 {
6762 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6763 signal_cache_update (signo);
6764
6765 return;
6766 }
6767
6768 signal_pass[signo] = (signal_stop[signo] == 0
6769 && signal_print[signo] == 0
6770 && signal_program[signo] == 1
6771 && signal_catch[signo] == 0);
6772 }
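
/* For example, after "handle SIGUSR1 nostop noprint pass" the entry for
   SIGUSR1 has signal_stop == 0, signal_print == 0, signal_program == 1
   and (absent a signal catchpoint) signal_catch == 0, so signal_pass
   becomes 1, allowing the target to deliver that signal without
   reporting it to GDB first.  */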
6773
6774 int
6775 signal_stop_update (int signo, int state)
6776 {
6777 int ret = signal_stop[signo];
6778
6779 signal_stop[signo] = state;
6780 signal_cache_update (signo);
6781 return ret;
6782 }
6783
6784 int
6785 signal_print_update (int signo, int state)
6786 {
6787 int ret = signal_print[signo];
6788
6789 signal_print[signo] = state;
6790 signal_cache_update (signo);
6791 return ret;
6792 }
6793
6794 int
6795 signal_pass_update (int signo, int state)
6796 {
6797 int ret = signal_program[signo];
6798
6799 signal_program[signo] = state;
6800 signal_cache_update (signo);
6801 return ret;
6802 }
6803
6804 /* Update the global 'signal_catch' from INFO and notify the
6805 target. */
6806
6807 void
6808 signal_catch_update (const unsigned int *info)
6809 {
6810 int i;
6811
6812 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6813 signal_catch[i] = info[i] > 0;
6814 signal_cache_update (-1);
6815 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6816 }
6817
6818 static void
6819 sig_print_header (void)
6820 {
6821 printf_filtered (_("Signal Stop\tPrint\tPass "
6822 "to program\tDescription\n"));
6823 }
6824
6825 static void
6826 sig_print_info (enum gdb_signal oursig)
6827 {
6828 const char *name = gdb_signal_to_name (oursig);
6829 int name_padding = 13 - strlen (name);
6830
6831 if (name_padding <= 0)
6832 name_padding = 0;
6833
6834 printf_filtered ("%s", name);
6835 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6836 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6837 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6838 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6839 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6840 }
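
/* For illustration, with the defaults established in _initialize_infrun
   below, the row printed for SIGALRM would look roughly like:

       SIGALRM       No        No      Yes             Alarm clock  */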
6841
6842 /* Specify how various signals in the inferior should be handled. */
6843
6844 static void
6845 handle_command (char *args, int from_tty)
6846 {
6847 char **argv;
6848 int digits, wordlen;
6849 int sigfirst, signum, siglast;
6850 enum gdb_signal oursig;
6851 int allsigs;
6852 int nsigs;
6853 unsigned char *sigs;
6854 struct cleanup *old_chain;
6855
6856 if (args == NULL)
6857 {
6858 error_no_arg (_("signal to handle"));
6859 }
6860
6861 /* Allocate and zero an array of flags for which signals to handle. */
6862
6863 nsigs = (int) GDB_SIGNAL_LAST;
6864 sigs = (unsigned char *) alloca (nsigs);
6865 memset (sigs, 0, nsigs);
6866
6867 /* Break the command line up into args. */
6868
6869 argv = gdb_buildargv (args);
6870 old_chain = make_cleanup_freeargv (argv);
6871
6872 /* Walk through the args, looking for signal oursigs, signal names, and
6873 actions. Signal numbers and signal names may be interspersed with
6874 actions, with the actions being performed for all signals cumulatively
6875 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6876
6877 while (*argv != NULL)
6878 {
6879 wordlen = strlen (*argv);
6880 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6881 {;
6882 }
6883 allsigs = 0;
6884 sigfirst = siglast = -1;
6885
6886 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6887 {
6888 /* Apply action to all signals except those used by the
6889 debugger. Silently skip those. */
6890 allsigs = 1;
6891 sigfirst = 0;
6892 siglast = nsigs - 1;
6893 }
6894 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6895 {
6896 SET_SIGS (nsigs, sigs, signal_stop);
6897 SET_SIGS (nsigs, sigs, signal_print);
6898 }
6899 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6900 {
6901 UNSET_SIGS (nsigs, sigs, signal_program);
6902 }
6903 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6904 {
6905 SET_SIGS (nsigs, sigs, signal_print);
6906 }
6907 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6908 {
6909 SET_SIGS (nsigs, sigs, signal_program);
6910 }
6911 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6912 {
6913 UNSET_SIGS (nsigs, sigs, signal_stop);
6914 }
6915 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6916 {
6917 SET_SIGS (nsigs, sigs, signal_program);
6918 }
6919 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6920 {
6921 UNSET_SIGS (nsigs, sigs, signal_print);
6922 UNSET_SIGS (nsigs, sigs, signal_stop);
6923 }
6924 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6925 {
6926 UNSET_SIGS (nsigs, sigs, signal_program);
6927 }
6928 else if (digits > 0)
6929 {
6930 /* It is numeric. The numeric signal refers to our own
6931 internal signal numbering from target.h, not to host/target
6932 signal number. This is a feature; users really should be
6933 using symbolic names anyway, and the common ones like
6934 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6935
6936 sigfirst = siglast = (int)
6937 gdb_signal_from_command (atoi (*argv));
6938 if ((*argv)[digits] == '-')
6939 {
6940 siglast = (int)
6941 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6942 }
6943 if (sigfirst > siglast)
6944 {
6945 /* Bet he didn't figure we'd think of this case... */
6946 signum = sigfirst;
6947 sigfirst = siglast;
6948 siglast = signum;
6949 }
6950 }
6951 else
6952 {
6953 oursig = gdb_signal_from_name (*argv);
6954 if (oursig != GDB_SIGNAL_UNKNOWN)
6955 {
6956 sigfirst = siglast = (int) oursig;
6957 }
6958 else
6959 {
6960 /* Not a number and not a recognized flag word => complain. */
6961 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6962 }
6963 }
6964
6965 /* If any signal numbers or symbol names were found, set flags for
6966 which signals to apply actions to. */
6967
6968 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6969 {
6970 switch ((enum gdb_signal) signum)
6971 {
6972 case GDB_SIGNAL_TRAP:
6973 case GDB_SIGNAL_INT:
6974 if (!allsigs && !sigs[signum])
6975 {
6976 if (query (_("%s is used by the debugger.\n\
6977 Are you sure you want to change it? "),
6978 gdb_signal_to_name ((enum gdb_signal) signum)))
6979 {
6980 sigs[signum] = 1;
6981 }
6982 else
6983 {
6984 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6985 gdb_flush (gdb_stdout);
6986 }
6987 }
6988 break;
6989 case GDB_SIGNAL_0:
6990 case GDB_SIGNAL_DEFAULT:
6991 case GDB_SIGNAL_UNKNOWN:
6992 /* Make sure that "all" doesn't print these. */
6993 break;
6994 default:
6995 sigs[signum] = 1;
6996 break;
6997 }
6998 }
6999
7000 argv++;
7001 }
7002
7003 for (signum = 0; signum < nsigs; signum++)
7004 if (sigs[signum])
7005 {
7006 signal_cache_update (-1);
7007 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
7008 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
7009
7010 if (from_tty)
7011 {
7012 /* Show the results. */
7013 sig_print_header ();
7014 for (; signum < nsigs; signum++)
7015 if (sigs[signum])
7016 sig_print_info (signum);
7017 }
7018
7019 break;
7020 }
7021
7022 do_cleanups (old_chain);
7023 }
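
/* Illustrative invocations of the command implemented above (signal
   names, numeric ranges and keywords are parsed as described in the
   help text in _initialize_infrun below):

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle 1-5 print
       (gdb) handle all ignore  */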
7024
7025 /* Complete the "handle" command. */
7026
7027 static VEC (char_ptr) *
7028 handle_completer (struct cmd_list_element *ignore,
7029 const char *text, const char *word)
7030 {
7031 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
7032 static const char * const keywords[] =
7033 {
7034 "all",
7035 "stop",
7036 "ignore",
7037 "print",
7038 "pass",
7039 "nostop",
7040 "noignore",
7041 "noprint",
7042 "nopass",
7043 NULL,
7044 };
7045
7046 vec_signals = signal_completer (ignore, text, word);
7047 vec_keywords = complete_on_enum (keywords, word, word);
7048
7049 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
7050 VEC_free (char_ptr, vec_signals);
7051 VEC_free (char_ptr, vec_keywords);
7052 return return_val;
7053 }
7054
7055 static void
7056 xdb_handle_command (char *args, int from_tty)
7057 {
7058 char **argv;
7059 struct cleanup *old_chain;
7060
7061 if (args == NULL)
7062 error_no_arg (_("xdb command"));
7063
7064 /* Break the command line up into args. */
7065
7066 argv = gdb_buildargv (args);
7067 old_chain = make_cleanup_freeargv (argv);
7068 if (argv[1] != (char *) NULL)
7069 {
7070 char *argBuf;
7071 int bufLen;
7072
7073 bufLen = strlen (argv[0]) + 20;
7074 argBuf = (char *) xmalloc (bufLen);
7075 if (argBuf)
7076 {
7077 int validFlag = 1;
7078 enum gdb_signal oursig;
7079
7080 oursig = gdb_signal_from_name (argv[0]);
7081 memset (argBuf, 0, bufLen);
7082 if (strcmp (argv[1], "Q") == 0)
7083 sprintf (argBuf, "%s %s", argv[0], "noprint");
7084 else
7085 {
7086 if (strcmp (argv[1], "s") == 0)
7087 {
7088 if (!signal_stop[oursig])
7089 sprintf (argBuf, "%s %s", argv[0], "stop");
7090 else
7091 sprintf (argBuf, "%s %s", argv[0], "nostop");
7092 }
7093 else if (strcmp (argv[1], "i") == 0)
7094 {
7095 if (!signal_program[oursig])
7096 sprintf (argBuf, "%s %s", argv[0], "pass");
7097 else
7098 sprintf (argBuf, "%s %s", argv[0], "nopass");
7099 }
7100 else if (strcmp (argv[1], "r") == 0)
7101 {
7102 if (!signal_print[oursig])
7103 sprintf (argBuf, "%s %s", argv[0], "print");
7104 else
7105 sprintf (argBuf, "%s %s", argv[0], "noprint");
7106 }
7107 else
7108 validFlag = 0;
7109 }
7110 if (validFlag)
7111 handle_command (argBuf, from_tty);
7112 else
7113 printf_filtered (_("Invalid signal handling flag.\n"));
7114 if (argBuf)
7115 xfree (argBuf);
7116 }
7117 }
7118 do_cleanups (old_chain);
7119 }
7120
7121 enum gdb_signal
7122 gdb_signal_from_command (int num)
7123 {
7124 if (num >= 1 && num <= 15)
7125 return (enum gdb_signal) num;
7126 error (_("Only signals 1-15 are valid as numeric signals.\n\
7127 Use \"info signals\" for a list of symbolic signals."));
7128 }
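
/* Note the number is read in GDB's own numbering: e.g. "handle 8
   noprint" acts on GDB_SIGNAL_FPE, which happens to match SIGFPE on
   most hosts but need not match every target's signal 8, as the
   comment in handle_command above explains.  */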
7129
7130 /* Print current contents of the tables set by the handle command.
7131 It is possible we should just be printing signals actually used
7132 by the current target (but for things to work right when switching
7133 targets, all signals should be in the signal tables). */
7134
7135 static void
7136 signals_info (char *signum_exp, int from_tty)
7137 {
7138 enum gdb_signal oursig;
7139
7140 sig_print_header ();
7141
7142 if (signum_exp)
7143 {
7144 /* First see if this is a symbol name. */
7145 oursig = gdb_signal_from_name (signum_exp);
7146 if (oursig == GDB_SIGNAL_UNKNOWN)
7147 {
7148 /* No, try numeric. */
7149 oursig =
7150 gdb_signal_from_command (parse_and_eval_long (signum_exp));
7151 }
7152 sig_print_info (oursig);
7153 return;
7154 }
7155
7156 printf_filtered ("\n");
7157 /* These ugly casts brought to you by the native VAX compiler. */
7158 for (oursig = GDB_SIGNAL_FIRST;
7159 (int) oursig < (int) GDB_SIGNAL_LAST;
7160 oursig = (enum gdb_signal) ((int) oursig + 1))
7161 {
7162 QUIT;
7163
7164 if (oursig != GDB_SIGNAL_UNKNOWN
7165 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
7166 sig_print_info (oursig);
7167 }
7168
7169 printf_filtered (_("\nUse the \"handle\" command "
7170 "to change these tables.\n"));
7171 }
7172
7173 /* Check if it makes sense to read $_siginfo from the current thread
7174 at this point. If not, throw an error. */
7175
7176 static void
7177 validate_siginfo_access (void)
7178 {
7179 /* No current inferior, no siginfo. */
7180 if (ptid_equal (inferior_ptid, null_ptid))
7181 error (_("No thread selected."));
7182
7183 /* Don't try to read from a dead thread. */
7184 if (is_exited (inferior_ptid))
7185 error (_("The current thread has terminated"));
7186
7187 /* ... or from a spinning thread. */
7188 if (is_running (inferior_ptid))
7189 error (_("Selected thread is running."));
7190 }
7191
7192 /* The $_siginfo convenience variable is a bit special. We don't know
7193 for sure the type of the value until we actually have a chance to
7194 fetch the data. The type can change depending on gdbarch, so it is
7195 also dependent on which thread you have selected. We handle this by:
7196
7197 1. making $_siginfo be an internalvar that creates a new value on
7198 access.
7199
7200 2. making the value of $_siginfo be an lval_computed value. */
7201
7202 /* This function implements the lval_computed support for reading a
7203 $_siginfo value. */
7204
7205 static void
7206 siginfo_value_read (struct value *v)
7207 {
7208 LONGEST transferred;
7209
7210 validate_siginfo_access ();
7211
7212 transferred =
7213 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
7214 NULL,
7215 value_contents_all_raw (v),
7216 value_offset (v),
7217 TYPE_LENGTH (value_type (v)));
7218
7219 if (transferred != TYPE_LENGTH (value_type (v)))
7220 error (_("Unable to read siginfo"));
7221 }
7222
7223 /* This function implements the lval_computed support for writing a
7224 $_siginfo value. */
7225
7226 static void
7227 siginfo_value_write (struct value *v, struct value *fromval)
7228 {
7229 LONGEST transferred;
7230
7231 validate_siginfo_access ();
7232
7233 transferred = target_write (&current_target,
7234 TARGET_OBJECT_SIGNAL_INFO,
7235 NULL,
7236 value_contents_all_raw (fromval),
7237 value_offset (v),
7238 TYPE_LENGTH (value_type (fromval)));
7239
7240 if (transferred != TYPE_LENGTH (value_type (fromval)))
7241 error (_("Unable to write siginfo"));
7242 }
7243
7244 static const struct lval_funcs siginfo_value_funcs =
7245 {
7246 siginfo_value_read,
7247 siginfo_value_write
7248 };
7249
7250 /* Return a new value with the correct type for the siginfo object of
7251 the current thread using architecture GDBARCH. Return a void value
7252 if there's no object available. */
7253
7254 static struct value *
7255 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7256 void *ignore)
7257 {
7258 if (target_has_stack
7259 && !ptid_equal (inferior_ptid, null_ptid)
7260 && gdbarch_get_siginfo_type_p (gdbarch))
7261 {
7262 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7263
7264 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7265 }
7266
7267 return allocate_value (builtin_type (gdbarch)->builtin_void);
7268 }
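
/* From the user's side the net effect is that, on a target providing
   TARGET_OBJECT_SIGNAL_INFO, the siginfo object can be read and written
   through the convenience variable, e.g. (illustrative session):

       (gdb) print $_siginfo.si_signo
       (gdb) set $_siginfo.si_errno = 0  */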
7269
7270 \f
7271 /* infcall_suspend_state contains state about the program itself like its
7272 registers and any signal it received when it last stopped.
7273 This state must be restored regardless of how the inferior function call
7274 ends (either successfully, or after it hits a breakpoint or signal)
7275 if the program is to properly continue where it left off. */
7276
7277 struct infcall_suspend_state
7278 {
7279 struct thread_suspend_state thread_suspend;
7280 #if 0 /* Currently unused and empty structures are not valid C. */
7281 struct inferior_suspend_state inferior_suspend;
7282 #endif
7283
7284 /* Other fields: */
7285 CORE_ADDR stop_pc;
7286 struct regcache *registers;
7287
7288 /* Format of SIGINFO_DATA or NULL if it is not present. */
7289 struct gdbarch *siginfo_gdbarch;
7290
7291 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
7292 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
7293 content would be invalid. */
7294 gdb_byte *siginfo_data;
7295 };
7296
7297 struct infcall_suspend_state *
7298 save_infcall_suspend_state (void)
7299 {
7300 struct infcall_suspend_state *inf_state;
7301 struct thread_info *tp = inferior_thread ();
7302 #if 0
7303 struct inferior *inf = current_inferior ();
7304 #endif
7305 struct regcache *regcache = get_current_regcache ();
7306 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7307 gdb_byte *siginfo_data = NULL;
7308
7309 if (gdbarch_get_siginfo_type_p (gdbarch))
7310 {
7311 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7312 size_t len = TYPE_LENGTH (type);
7313 struct cleanup *back_to;
7314
7315 siginfo_data = xmalloc (len);
7316 back_to = make_cleanup (xfree, siginfo_data);
7317
7318 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7319 siginfo_data, 0, len) == len)
7320 discard_cleanups (back_to);
7321 else
7322 {
7323 /* Errors ignored. */
7324 do_cleanups (back_to);
7325 siginfo_data = NULL;
7326 }
7327 }
7328
7329 inf_state = XCNEW (struct infcall_suspend_state);
7330
7331 if (siginfo_data)
7332 {
7333 inf_state->siginfo_gdbarch = gdbarch;
7334 inf_state->siginfo_data = siginfo_data;
7335 }
7336
7337 inf_state->thread_suspend = tp->suspend;
7338 #if 0 /* Currently unused and empty structures are not valid C. */
7339 inf_state->inferior_suspend = inf->suspend;
7340 #endif
7341
7342 /* run_inferior_call will not use the signal due to its `proceed' call with
7343 GDB_SIGNAL_0 anyway. */
7344 tp->suspend.stop_signal = GDB_SIGNAL_0;
7345
7346 inf_state->stop_pc = stop_pc;
7347
7348 inf_state->registers = regcache_dup (regcache);
7349
7350 return inf_state;
7351 }
7352
7353 /* Restore inferior session state to INF_STATE. */
7354
7355 void
7356 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7357 {
7358 struct thread_info *tp = inferior_thread ();
7359 #if 0
7360 struct inferior *inf = current_inferior ();
7361 #endif
7362 struct regcache *regcache = get_current_regcache ();
7363 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7364
7365 tp->suspend = inf_state->thread_suspend;
7366 #if 0 /* Currently unused and empty structures are not valid C. */
7367 inf->suspend = inf_state->inferior_suspend;
7368 #endif
7369
7370 stop_pc = inf_state->stop_pc;
7371
7372 if (inf_state->siginfo_gdbarch == gdbarch)
7373 {
7374 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7375
7376 /* Errors ignored. */
7377 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7378 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
7379 }
7380
7381 /* The inferior can be gone if the user types "print exit(0)"
7382 (and perhaps other times). */
7383 if (target_has_execution)
7384 /* NB: The register write goes through to the target. */
7385 regcache_cpy (regcache, inf_state->registers);
7386
7387 discard_infcall_suspend_state (inf_state);
7388 }
7389
7390 static void
7391 do_restore_infcall_suspend_state_cleanup (void *state)
7392 {
7393 restore_infcall_suspend_state (state);
7394 }
7395
7396 struct cleanup *
7397 make_cleanup_restore_infcall_suspend_state
7398 (struct infcall_suspend_state *inf_state)
7399 {
7400 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7401 }
7402
7403 void
7404 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7405 {
7406 regcache_xfree (inf_state->registers);
7407 xfree (inf_state->siginfo_data);
7408 xfree (inf_state);
7409 }
7410
7411 struct regcache *
7412 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7413 {
7414 return inf_state->registers;
7415 }
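
/* A caller making an inferior function call would typically use the
   functions above roughly as follows (a sketch only; the real sequence
   lives in callers such as call_function_by_hand):

       struct infcall_suspend_state *st = save_infcall_suspend_state ();
       struct cleanup *back_to
         = make_cleanup_restore_infcall_suspend_state (st);

       ... set up the dummy frame and run the call ...

       do_cleanups (back_to);  -- restores registers, $_siginfo, stop_pc  */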
7416
7417 /* infcall_control_state contains state regarding gdb's control of the
7418 inferior itself like stepping control. It also contains session state like
7419 the user's currently selected frame. */
7420
7421 struct infcall_control_state
7422 {
7423 struct thread_control_state thread_control;
7424 struct inferior_control_state inferior_control;
7425
7426 /* Other fields: */
7427 enum stop_stack_kind stop_stack_dummy;
7428 int stopped_by_random_signal;
7429 int stop_after_trap;
7430
7431 /* ID of the frame that was selected when the inferior function call was made. */
7432 struct frame_id selected_frame_id;
7433 };
7434
7435 /* Save all of the information associated with the inferior<==>gdb
7436 connection. */
7437
7438 struct infcall_control_state *
7439 save_infcall_control_state (void)
7440 {
7441 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7442 struct thread_info *tp = inferior_thread ();
7443 struct inferior *inf = current_inferior ();
7444
7445 inf_status->thread_control = tp->control;
7446 inf_status->inferior_control = inf->control;
7447
7448 tp->control.step_resume_breakpoint = NULL;
7449 tp->control.exception_resume_breakpoint = NULL;
7450
7451 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7452 chain. If caller's caller is walking the chain, they'll be happier if we
7453 hand them back the original chain when restore_infcall_control_state is
7454 called. */
7455 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7456
7457 /* Other fields: */
7458 inf_status->stop_stack_dummy = stop_stack_dummy;
7459 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7460 inf_status->stop_after_trap = stop_after_trap;
7461
7462 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7463
7464 return inf_status;
7465 }
7466
7467 static int
7468 restore_selected_frame (void *args)
7469 {
7470 struct frame_id *fid = (struct frame_id *) args;
7471 struct frame_info *frame;
7472
7473 frame = frame_find_by_id (*fid);
7474
7475 /* frame_find_by_id returns NULL if the previously selected frame
7476 can no longer be found. */
7477 if (frame == NULL)
7478 {
7479 warning (_("Unable to restore previously selected frame."));
7480 return 0;
7481 }
7482
7483 select_frame (frame);
7484
7485 return (1);
7486 }
7487
7488 /* Restore inferior session state to INF_STATUS. */
7489
7490 void
7491 restore_infcall_control_state (struct infcall_control_state *inf_status)
7492 {
7493 struct thread_info *tp = inferior_thread ();
7494 struct inferior *inf = current_inferior ();
7495
7496 if (tp->control.step_resume_breakpoint)
7497 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7498
7499 if (tp->control.exception_resume_breakpoint)
7500 tp->control.exception_resume_breakpoint->disposition
7501 = disp_del_at_next_stop;
7502
7503 /* Handle the bpstat_copy of the chain. */
7504 bpstat_clear (&tp->control.stop_bpstat);
7505
7506 tp->control = inf_status->thread_control;
7507 inf->control = inf_status->inferior_control;
7508
7509 /* Other fields: */
7510 stop_stack_dummy = inf_status->stop_stack_dummy;
7511 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7512 stop_after_trap = inf_status->stop_after_trap;
7513
7514 if (target_has_stack)
7515 {
7516 /* The point of catch_errors is that if the stack is clobbered,
7517 walking the stack might encounter a garbage pointer and
7518 error() trying to dereference it. */
7519 if (catch_errors
7520 (restore_selected_frame, &inf_status->selected_frame_id,
7521 "Unable to restore previously selected frame:\n",
7522 RETURN_MASK_ERROR) == 0)
7523 /* Error in restoring the selected frame. Select the innermost
7524 frame. */
7525 select_frame (get_current_frame ());
7526 }
7527
7528 xfree (inf_status);
7529 }
7530
7531 static void
7532 do_restore_infcall_control_state_cleanup (void *sts)
7533 {
7534 restore_infcall_control_state (sts);
7535 }
7536
7537 struct cleanup *
7538 make_cleanup_restore_infcall_control_state
7539 (struct infcall_control_state *inf_status)
7540 {
7541 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7542 }
7543
7544 void
7545 discard_infcall_control_state (struct infcall_control_state *inf_status)
7546 {
7547 if (inf_status->thread_control.step_resume_breakpoint)
7548 inf_status->thread_control.step_resume_breakpoint->disposition
7549 = disp_del_at_next_stop;
7550
7551 if (inf_status->thread_control.exception_resume_breakpoint)
7552 inf_status->thread_control.exception_resume_breakpoint->disposition
7553 = disp_del_at_next_stop;
7554
7555 /* See save_infcall_control_state for info on stop_bpstat. */
7556 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7557
7558 xfree (inf_status);
7559 }
7560 \f
7561 /* restore_inferior_ptid() will be used by the cleanup machinery
7562 to restore the inferior_ptid value saved in a call to
7563 save_inferior_ptid(). */
7564
7565 static void
7566 restore_inferior_ptid (void *arg)
7567 {
7568 ptid_t *saved_ptid_ptr = arg;
7569
7570 inferior_ptid = *saved_ptid_ptr;
7571 xfree (arg);
7572 }
7573
7574 /* Save the value of inferior_ptid so that it may be restored by a
7575 later call to do_cleanups(). Returns the struct cleanup pointer
7576 needed to perform the cleanup later. */
7577
7578 struct cleanup *
7579 save_inferior_ptid (void)
7580 {
7581 ptid_t *saved_ptid_ptr;
7582
7583 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7584 *saved_ptid_ptr = inferior_ptid;
7585 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7586 }
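
/* Typical usage, sketched (OTHER_PTID stands for whatever ptid the
   caller needs to operate on):

       struct cleanup *old_chain = save_inferior_ptid ();

       inferior_ptid = OTHER_PTID;
       ... work in the context of that thread ...

       do_cleanups (old_chain);  -- inferior_ptid is restored here  */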
7587
7588 /* See infrun.h. */
7589
7590 void
7591 clear_exit_convenience_vars (void)
7592 {
7593 clear_internalvar (lookup_internalvar ("_exitsignal"));
7594 clear_internalvar (lookup_internalvar ("_exitcode"));
7595 }
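
/* The variables cleared here are set again when an inferior exits, at
   which point a user can inspect them, e.g.:

       (gdb) print $_exitcode
       (gdb) print $_exitsignal  */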
7596 \f
7597
7598 /* User interface for reverse debugging:
7599 Set exec-direction / show exec-direction commands
7600 (returns error unless target implements to_set_exec_direction method). */
7601
7602 int execution_direction = EXEC_FORWARD;
7603 static const char exec_forward[] = "forward";
7604 static const char exec_reverse[] = "reverse";
7605 static const char *exec_direction = exec_forward;
7606 static const char *const exec_direction_names[] = {
7607 exec_forward,
7608 exec_reverse,
7609 NULL
7610 };
7611
7612 static void
7613 set_exec_direction_func (char *args, int from_tty,
7614 struct cmd_list_element *cmd)
7615 {
7616 if (target_can_execute_reverse)
7617 {
7618 if (!strcmp (exec_direction, exec_forward))
7619 execution_direction = EXEC_FORWARD;
7620 else if (!strcmp (exec_direction, exec_reverse))
7621 execution_direction = EXEC_REVERSE;
7622 }
7623 else
7624 {
7625 exec_direction = exec_forward;
7626 error (_("Target does not support this operation."));
7627 }
7628 }
7629
7630 static void
7631 show_exec_direction_func (struct ui_file *out, int from_tty,
7632 struct cmd_list_element *cmd, const char *value)
7633 {
7634 switch (execution_direction) {
7635 case EXEC_FORWARD:
7636 fprintf_filtered (out, _("Forward.\n"));
7637 break;
7638 case EXEC_REVERSE:
7639 fprintf_filtered (out, _("Reverse.\n"));
7640 break;
7641 default:
7642 internal_error (__FILE__, __LINE__,
7643 _("bogus execution_direction value: %d"),
7644 (int) execution_direction);
7645 }
7646 }
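
/* An illustrative session, assuming a target that supports reverse
   execution (for instance via "record full"):

       (gdb) record full
       (gdb) set exec-direction reverse
       (gdb) step
       (gdb) set exec-direction forward  */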
7647
7648 static void
7649 show_schedule_multiple (struct ui_file *file, int from_tty,
7650 struct cmd_list_element *c, const char *value)
7651 {
7652 fprintf_filtered (file, _("Resuming the execution of threads "
7653 "of all processes is %s.\n"), value);
7654 }
7655
7656 /* Implementation of `siginfo' variable. */
7657
7658 static const struct internalvar_funcs siginfo_funcs =
7659 {
7660 siginfo_make_value,
7661 NULL,
7662 NULL
7663 };
7664
7665 void
7666 _initialize_infrun (void)
7667 {
7668 int i;
7669 int numsigs;
7670 struct cmd_list_element *c;
7671
7672 add_info ("signals", signals_info, _("\
7673 What debugger does when program gets various signals.\n\
7674 Specify a signal as argument to print info on that signal only."));
7675 add_info_alias ("handle", "signals", 0);
7676
7677 c = add_com ("handle", class_run, handle_command, _("\
7678 Specify how to handle signals.\n\
7679 Usage: handle SIGNAL [ACTIONS]\n\
7680 Args are signals and actions to apply to those signals.\n\
7681 If no actions are specified, the current settings for the specified signals\n\
7682 will be displayed instead.\n\
7683 \n\
7684 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7685 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7686 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7687 The special arg \"all\" is recognized to mean all signals except those\n\
7688 used by the debugger, typically SIGTRAP and SIGINT.\n\
7689 \n\
7690 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7691 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7692 Stop means reenter debugger if this signal happens (implies print).\n\
7693 Print means print a message if this signal happens.\n\
7694 Pass means let program see this signal; otherwise program doesn't know.\n\
7695 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7696 Pass and Stop may be combined.\n\
7697 \n\
7698 Multiple signals may be specified. Signal numbers and signal names\n\
7699 may be interspersed with actions, with the actions being performed for\n\
7700 all signals cumulatively specified."));
7701 set_cmd_completer (c, handle_completer);
7702
7703 if (xdb_commands)
7704 {
7705 add_com ("lz", class_info, signals_info, _("\
7706 What debugger does when program gets various signals.\n\
7707 Specify a signal as argument to print info on that signal only."));
7708 add_com ("z", class_run, xdb_handle_command, _("\
7709 Specify how to handle a signal.\n\
7710 Args are signals and actions to apply to those signals.\n\
7711 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7712 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7713 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7714 The special arg \"all\" is recognized to mean all signals except those\n\
7715 used by the debugger, typically SIGTRAP and SIGINT.\n\
7716 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7717 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7718 nopass), \"Q\" (noprint)\n\
7719 Stop means reenter debugger if this signal happens (implies print).\n\
7720 Print means print a message if this signal happens.\n\
7721 Pass means let program see this signal; otherwise program doesn't know.\n\
7722 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7723 Pass and Stop may be combined."));
7724 }
7725
7726 if (!dbx_commands)
7727 stop_command = add_cmd ("stop", class_obscure,
7728 not_just_help_class_command, _("\
7729 There is no `stop' command, but you can set a hook on `stop'.\n\
7730 This allows you to set a list of commands to be run each time execution\n\
7731 of the program stops."), &cmdlist);
7732
7733 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7734 Set inferior debugging."), _("\
7735 Show inferior debugging."), _("\
7736 When non-zero, inferior specific debugging is enabled."),
7737 NULL,
7738 show_debug_infrun,
7739 &setdebuglist, &showdebuglist);
7740
7741 add_setshow_boolean_cmd ("displaced", class_maintenance,
7742 &debug_displaced, _("\
7743 Set displaced stepping debugging."), _("\
7744 Show displaced stepping debugging."), _("\
7745 When non-zero, displaced stepping specific debugging is enabled."),
7746 NULL,
7747 show_debug_displaced,
7748 &setdebuglist, &showdebuglist);
7749
7750 add_setshow_boolean_cmd ("non-stop", no_class,
7751 &non_stop_1, _("\
7752 Set whether gdb controls the inferior in non-stop mode."), _("\
7753 Show whether gdb controls the inferior in non-stop mode."), _("\
7754 When debugging a multi-threaded program and this setting is\n\
7755 off (the default, also called all-stop mode), when one thread stops\n\
7756 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7757 all other threads in the program while you interact with the thread of\n\
7758 interest. When you continue or step a thread, you can allow the other\n\
7759 threads to run, or have them remain stopped, but while you inspect any\n\
7760 thread's state, all threads stop.\n\
7761 \n\
7762 In non-stop mode, when one thread stops, other threads can continue\n\
7763 to run freely. You'll be able to step each thread independently,\n\
7764 leave it stopped or free to run as needed."),
7765 set_non_stop,
7766 show_non_stop,
7767 &setlist,
7768 &showlist);
7769
7770 numsigs = (int) GDB_SIGNAL_LAST;
7771 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7772 signal_print = (unsigned char *)
7773 xmalloc (sizeof (signal_print[0]) * numsigs);
7774 signal_program = (unsigned char *)
7775 xmalloc (sizeof (signal_program[0]) * numsigs);
7776 signal_catch = (unsigned char *)
7777 xmalloc (sizeof (signal_catch[0]) * numsigs);
7778 signal_pass = (unsigned char *)
7779 xmalloc (sizeof (signal_pass[0]) * numsigs);
7780 for (i = 0; i < numsigs; i++)
7781 {
7782 signal_stop[i] = 1;
7783 signal_print[i] = 1;
7784 signal_program[i] = 1;
7785 signal_catch[i] = 0;
7786 }
7787
7788 /* Signals caused by debugger's own actions
7789 should not be given to the program afterwards. */
7790 signal_program[GDB_SIGNAL_TRAP] = 0;
7791 signal_program[GDB_SIGNAL_INT] = 0;
7792
7793 /* Signals that are not errors should not normally enter the debugger. */
7794 signal_stop[GDB_SIGNAL_ALRM] = 0;
7795 signal_print[GDB_SIGNAL_ALRM] = 0;
7796 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7797 signal_print[GDB_SIGNAL_VTALRM] = 0;
7798 signal_stop[GDB_SIGNAL_PROF] = 0;
7799 signal_print[GDB_SIGNAL_PROF] = 0;
7800 signal_stop[GDB_SIGNAL_CHLD] = 0;
7801 signal_print[GDB_SIGNAL_CHLD] = 0;
7802 signal_stop[GDB_SIGNAL_IO] = 0;
7803 signal_print[GDB_SIGNAL_IO] = 0;
7804 signal_stop[GDB_SIGNAL_POLL] = 0;
7805 signal_print[GDB_SIGNAL_POLL] = 0;
7806 signal_stop[GDB_SIGNAL_URG] = 0;
7807 signal_print[GDB_SIGNAL_URG] = 0;
7808 signal_stop[GDB_SIGNAL_WINCH] = 0;
7809 signal_print[GDB_SIGNAL_WINCH] = 0;
7810 signal_stop[GDB_SIGNAL_PRIO] = 0;
7811 signal_print[GDB_SIGNAL_PRIO] = 0;
7812
7813 /* These signals are used internally by user-level thread
7814 implementations. (See signal(5) on Solaris.) Like the above
7815 signals, a healthy program receives and handles them as part of
7816 its normal operation. */
7817 signal_stop[GDB_SIGNAL_LWP] = 0;
7818 signal_print[GDB_SIGNAL_LWP] = 0;
7819 signal_stop[GDB_SIGNAL_WAITING] = 0;
7820 signal_print[GDB_SIGNAL_WAITING] = 0;
7821 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7822 signal_print[GDB_SIGNAL_CANCEL] = 0;
7823
7824 /* Update cached state. */
7825 signal_cache_update (-1);
7826
7827 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7828 &stop_on_solib_events, _("\
7829 Set stopping for shared library events."), _("\
7830 Show stopping for shared library events."), _("\
7831 If nonzero, gdb will give control to the user when the dynamic linker\n\
7832 notifies gdb of shared library events. The most common event of interest\n\
7833 to the user would be loading/unloading of a new library."),
7834 set_stop_on_solib_events,
7835 show_stop_on_solib_events,
7836 &setlist, &showlist);
7837
7838 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7839 follow_fork_mode_kind_names,
7840 &follow_fork_mode_string, _("\
7841 Set debugger response to a program call of fork or vfork."), _("\
7842 Show debugger response to a program call of fork or vfork."), _("\
7843 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7844 parent - the original process is debugged after a fork\n\
7845 child - the new process is debugged after a fork\n\
7846 The unfollowed process will continue to run.\n\
7847 By default, the debugger will follow the parent process."),
7848 NULL,
7849 show_follow_fork_mode_string,
7850 &setlist, &showlist);
7851
7852 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7853 follow_exec_mode_names,
7854 &follow_exec_mode_string, _("\
7855 Set debugger response to a program call of exec."), _("\
7856 Show debugger response to a program call of exec."), _("\
7857 An exec call replaces the program image of a process.\n\
7858 \n\
7859 follow-exec-mode can be:\n\
7860 \n\
7861 new - the debugger creates a new inferior and rebinds the process\n\
7862 to this new inferior. The program the process was running before\n\
7863 the exec call can be restarted afterwards by restarting the original\n\
7864 inferior.\n\
7865 \n\
7866 same - the debugger keeps the process bound to the same inferior.\n\
7867 The new executable image replaces the previous executable loaded in\n\
7868 the inferior. Restarting the inferior after the exec call restarts\n\
7869 the executable the process was running after the exec call.\n\
7870 \n\
7871 By default, the debugger will use the same inferior."),
7872 NULL,
7873 show_follow_exec_mode_string,
7874 &setlist, &showlist);
7875
7876 add_setshow_enum_cmd ("scheduler-locking", class_run,
7877 scheduler_enums, &scheduler_mode, _("\
7878 Set mode for locking scheduler during execution."), _("\
7879 Show mode for locking scheduler during execution."), _("\
7880 off == no locking (threads may preempt at any time)\n\
7881 on == full locking (no thread except the current thread may run)\n\
7882 step == scheduler locked during every single-step operation.\n\
7883 In this mode, no other thread may run during a step command.\n\
7884 Other threads may run while stepping over a function call ('next')."),
7885 set_schedlock_func, /* traps on target vector */
7886 show_scheduler_mode,
7887 &setlist, &showlist);
7888
7889 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7890 Set mode for resuming threads of all processes."), _("\
7891 Show mode for resuming threads of all processes."), _("\
7892 When on, execution commands (such as 'continue' or 'next') resume all\n\
7893 threads of all processes. When off (which is the default), execution\n\
7894 commands only resume the threads of the current process. The set of\n\
7895 threads that are resumed is further refined by the scheduler-locking\n\
7896 mode (see help set scheduler-locking)."),
7897 NULL,
7898 show_schedule_multiple,
7899 &setlist, &showlist);
7900
7901 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7902 Set mode of the step operation."), _("\
7903 Show mode of the step operation."), _("\
7904 When set, doing a step over a function without debug line information\n\
7905 will stop at the first instruction of that function. Otherwise, the\n\
7906 function is skipped and the step command stops at a different source line."),
7907 NULL,
7908 show_step_stop_if_no_debug,
7909 &setlist, &showlist);
7910
7911 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7912 &can_use_displaced_stepping, _("\
7913 Set debugger's willingness to use displaced stepping."), _("\
7914 Show debugger's willingness to use displaced stepping."), _("\
7915 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7916 supported by the target architecture. If off, gdb will not use displaced\n\
7917 stepping to step over breakpoints, even if such is supported by the target\n\
7918 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7919 if the target architecture supports it and non-stop mode is active, but will not\n\
7920 use it in all-stop mode (see help set non-stop)."),
7921 NULL,
7922 show_can_use_displaced_stepping,
7923 &setlist, &showlist);
7924
7925 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7926 &exec_direction, _("Set direction of execution.\n\
7927 Options are 'forward' or 'reverse'."),
7928 _("Show direction of execution (forward/reverse)."),
7929 _("Tells gdb whether to execute forward or backward."),
7930 set_exec_direction_func, show_exec_direction_func,
7931 &setlist, &showlist);
7932
7933 /* Set/show detach-on-fork: user-settable mode. */
7934
7935 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7936 Set whether gdb will detach the child of a fork."), _("\
7937 Show whether gdb will detach the child of a fork."), _("\
7938 Tells gdb whether to detach the child of a fork."),
7939 NULL, NULL, &setlist, &showlist);
7940
7941 /* Set/show disable address space randomization mode. */
7942
7943 add_setshow_boolean_cmd ("disable-randomization", class_support,
7944 &disable_randomization, _("\
7945 Set disabling of debuggee's virtual address space randomization."), _("\
7946 Show disabling of debuggee's virtual address space randomization."), _("\
7947 When this mode is on (which is the default), randomization of the virtual\n\
7948 address space is disabled. Standalone programs run with the randomization\n\
7949 enabled by default on some platforms."),
7950 &set_disable_randomization,
7951 &show_disable_randomization,
7952 &setlist, &showlist);
7953
7954 /* ptid initializations */
7955 inferior_ptid = null_ptid;
7956 target_last_wait_ptid = minus_one_ptid;
7957
7958 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7959 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7960 observer_attach_thread_exit (infrun_thread_thread_exit);
7961 observer_attach_inferior_exit (infrun_inferior_exit);
7962
7963 /* Explicitly create without lookup, since that tries to create a
7964 value with a void typed value, and when we get here, gdbarch
7965 isn't initialized yet. At this point, we're quite sure there
7966 isn't another convenience variable of the same name. */
7967 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7968
7969 add_setshow_boolean_cmd ("observer", no_class,
7970 &observer_mode_1, _("\
7971 Set whether gdb controls the inferior in observer mode."), _("\
7972 Show whether gdb controls the inferior in observer mode."), _("\
7973 In observer mode, GDB can get data from the inferior, but not\n\
7974 affect its execution. Registers and memory may not be changed,\n\
7975 breakpoints may not be set, and the program cannot be interrupted\n\
7976 or signalled."),
7977 set_observer_mode,
7978 show_observer_mode,
7979 &setlist,
7980 &showlist);
7981 }