dc7ff3e26cede639881fa3a34380cd588ebf782a
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2015 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63 #include "solist.h"
64
65 /* Prototypes for local functions */
66
67 static void signals_info (char *, int);
68
69 static void handle_command (char *, int);
70
71 static void sig_print_info (enum gdb_signal);
72
73 static void sig_print_header (void);
74
75 static void resume_cleanups (void *);
76
77 static int hook_stop_stub (void *);
78
79 static int restore_selected_frame (void *);
80
81 static int follow_fork (void);
82
83 static int follow_fork_inferior (int follow_child, int detach_fork);
84
85 static void follow_inferior_reset_breakpoints (void);
86
87 static void set_schedlock_func (char *args, int from_tty,
88 struct cmd_list_element *c);
89
90 static int currently_stepping (struct thread_info *tp);
91
92 void _initialize_infrun (void);
93
94 void nullify_last_target_wait_ptid (void);
95
96 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
97
98 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
99
100 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
101
102 static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
103
104 /* When set, stop the 'step' command if we enter a function which has
105 no line number information. The normal behavior is that we step
106 over such function. */
107 int step_stop_if_no_debug = 0;
/* Implement the "show step-mode" command callback: report whether the
   'step' command stops in functions without line number info.  */

static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}
114
115 /* In asynchronous mode, but simulating synchronous execution. */
116
117 int sync_execution = 0;
118
119 /* proceed and normal_stop use this to notify the user when the
120 inferior stopped in a different thread than it had been running
121 in. */
122
123 static ptid_t previous_inferior_ptid;
124
125 /* If set (default for legacy reasons), when following a fork, GDB
126 will detach from one of the fork branches, child or parent.
127 Exactly which branch is detached depends on 'set follow-fork-mode'
128 setting. */
129
130 static int detach_fork = 1;
131
132 int debug_displaced = 0;
/* Implement the "show debug displaced" command callback.  */

static void
show_debug_displaced (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  /* Fixed ungrammatical "Displace stepping" -> "Displaced stepping",
     matching the "debug displaced" setting this reports on.  */
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}
139
140 unsigned int debug_infrun = 0;
/* Implement the "show debug infrun" command callback.  */

static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}
147
148
149 /* Support for disabling address space randomization. */
150
151 int disable_randomization = 1;
152
/* Implement the "show disable-randomization" command callback.
   Reports the setting only when the target can actually honor it;
   otherwise tells the user the feature is unsupported here.  */

static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  if (!target_supports_disable_randomization ())
    {
      fputs_filtered (_("Disabling randomization of debuggee's "
			"virtual address space is unsupported on\n"
			"this platform.\n"), file);
      return;
    }

  fprintf_filtered (file,
		    _("Disabling randomization of debuggee's "
		      "virtual address space is %s.\n"),
		    value);
}
167
/* Implement the "set disable-randomization" command callback.  The
   setting is rejected outright when the target cannot honor it, so
   the stored value never silently diverges from reality.  */

static void
set_disable_randomization (char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
177
178 /* User interface for non-stop mode. */
179
180 int non_stop = 0;
181 static int non_stop_1 = 0;
182
183 static void
184 set_non_stop (char *args, int from_tty,
185 struct cmd_list_element *c)
186 {
187 if (target_has_execution)
188 {
189 non_stop_1 = non_stop;
190 error (_("Cannot change this setting while the inferior is running."));
191 }
192
193 non_stop = non_stop_1;
194 }
195
/* Implement the "show non-stop" command callback.  */

static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}
204
205 /* "Observer mode" is somewhat like a more extreme version of
206 non-stop, in which all GDB operations that might affect the
207 target's execution have been disabled. */
208
209 int observer_mode = 0;
210 static int observer_mode_1 = 0;
211
212 static void
213 set_observer_mode (char *args, int from_tty,
214 struct cmd_list_element *c)
215 {
216 if (target_has_execution)
217 {
218 observer_mode_1 = observer_mode;
219 error (_("Cannot change this setting while the inferior is running."));
220 }
221
222 observer_mode = observer_mode_1;
223
224 may_write_registers = !observer_mode;
225 may_write_memory = !observer_mode;
226 may_insert_breakpoints = !observer_mode;
227 may_insert_tracepoints = !observer_mode;
228 /* We can insert fast tracepoints in or out of observer mode,
229 but enable them if we're going into this mode. */
230 if (observer_mode)
231 may_insert_fast_tracepoints = 1;
232 may_stop = !observer_mode;
233 update_target_permissions ();
234
235 /* Going *into* observer mode we must force non-stop, then
236 going out we leave it that way. */
237 if (observer_mode)
238 {
239 pagination_enabled = 0;
240 non_stop = non_stop_1 = 1;
241 }
242
243 if (from_tty)
244 printf_filtered (_("Observer mode is now %s.\n"),
245 (observer_mode ? "on" : "off"));
246 }
247
/* Implement the "show observer" command callback.  */

static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that are registered with "catch signal". A
287 non-zero entry indicates that the signal is caught by some "catch
288 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
289 signals. */
290 static unsigned char *signal_catch;
291
292 /* Table of signals that the target may silently handle.
293 This is automatically determined from the flags above,
294 and simply cached here. */
295 static unsigned char *signal_pass;
296
/* For each of the NSIGS signals selected in the SIGS array, set the
   corresponding entry in FLAGS to 1.  */

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* For each of the NSIGS signals selected in the SIGS array, clear the
   corresponding entry in FLAGS.  */

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
312
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  /* GDB_SIGNAL_LAST bounds the table; signal_program has one entry
     per gdb_signal value.  */
  target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
}
321
322 /* Value to pass to target_resume() to cause all threads to resume. */
323
324 #define RESUME_ALL minus_one_ptid
325
326 /* Command list pointer for the "stop" placeholder. */
327
328 static struct cmd_list_element *stop_command;
329
330 /* Nonzero if we want to give control to the user when we're notified
331 of shared library events by the dynamic linker. */
332 int stop_on_solib_events;
333
334 /* Enable or disable optional shared library event breakpoints
335 as appropriate when the above flag is changed. */
336
/* Implement the "set stop-on-solib-events" command callback: sync the
   optional solib event breakpoints with the new flag value.  */

static void
set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}
342
/* Implement the "show stop-on-solib-events" command callback.  */

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}
350
351 /* Nonzero means expecting a trace trap
352 and should stop the inferior and return silently when it happens. */
353
354 int stop_after_trap;
355
356 /* Nonzero after stop if current stack frame should be printed. */
357
358 static int stop_print_frame;
359
360 /* This is a cached copy of the pid/waitstatus of the last event
361 returned by target_wait()/deprecated_target_wait_hook(). This
362 information is returned by get_last_target_status(). */
363 static ptid_t target_last_wait_ptid;
364 static struct target_waitstatus target_last_waitstatus;
365
366 static void context_switch (ptid_t ptid);
367
368 void init_thread_stepping_state (struct thread_info *tss);
369
/* Valid values for the "set follow-fork-mode" command.  The string
   pointers themselves serve as enum identities: code compares
   follow_fork_mode_string against these by pointer, not strcmp.  */
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

/* NULL-terminated table handed to the enum-command machinery.  */
static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

/* Currently selected follow-fork mode; defaults to following the
   parent.  */
static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* Implement the "show follow-fork-mode" command callback.  */

static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}
389 \f
390
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.

   Returns nonzero if the caller should NOT resume (either because we
   refused up front, or because target_follow_fork said so); zero
   otherwise.  (See the should_resume handling in follow_fork.)  */

static int
follow_fork_inferior (int follow_child, int detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  /* The pending_follow record distinguishes fork from vfork; vfork
     needs special care because parent and child share an address
     space until the child execs or exits.  */
  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (!follow_child)
    {
      /* We are staying with the parent.  */

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  struct cleanup *old_chain;

	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
	    }

	  if (info_verbose || debug_infrun)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = pid_to_ptid (ptid_get_pid (child_ptid));

	      target_terminal_ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("Detaching after %s from child %s.\n"),
				has_vforked ? "vfork" : "fork",
				target_pid_to_str (process_ptid));
	    }
	}
      else
	{
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Keeping the child: add process to GDB's tables.  */
	  child_inf = add_inferior (ptid_get_pid (child_ptid));

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  /* Temporarily switch to the child while wiring it up; the
	     cleanup restores the parent's ptid and program space.  */
	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = child_ptid;
	  add_thread (inferior_ptid);
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (e.g., solib-svr4) learn
		 about this new process, relocate the cloned exec, pull
		 in shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (info_verbose || debug_infrun)
	{
	  target_terminal_ours_for_output ();
	  fprintf_filtered (gdb_stdlog,
			    _("Attaching after %s %s to child %s.\n"),
			    target_pid_to_str (parent_ptid),
			    has_vforked ? "vfork" : "fork",
			    target_pid_to_str (child_ptid));
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (ptid_get_pid (child_ptid));

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse it's program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	{
	  if (info_verbose || debug_infrun)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = pid_to_ptid (ptid_get_pid (child_ptid));

	      target_terminal_ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("Detaching after fork from "
				  "child %s.\n"),
				target_pid_to_str (process_ptid));
	    }

	  target_detach (NULL, 0);
	}

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = child_ptid;
      add_thread (inferior_ptid);

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (e.g., solib-svr4) learn
	     about this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}
    }

  /* Give the target a chance to refuse (nonzero return) or do its
     own follow-up work.  */
  return target_follow_fork (follow_child, detach_fork);
}
634
/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  int should_resume = 1;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };
  struct interp *command_interp = NULL;

  if (!non_stop)
    {
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
	 do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
	  && wait_status.kind != TARGET_WAITKIND_VFORKED)
	return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
	 reported.  */
      if (!ptid_equal (wait_ptid, minus_one_ptid)
	  && !ptid_equal (inferior_ptid, wait_ptid))
	{
	  /* We did.  Switch back to WAIT_PTID thread, to tell the
	     target to follow it (in either direction).  We'll
	     afterwards refuse to resume, and inform the user what
	     happened.  */
	  switch_to_thread (wait_ptid);
	  should_resume = 0;
	}
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;

	/* If the user did a next/step, etc, over a fork call,
	   preserve the stepping state in the fork child.  */
	if (follow_child && should_resume)
	  {
	    /* Stash clones of the run-control state before
	       follow_fork_inferior rearranges the inferior list; they
	       are re-applied to the child thread below.  */
	    step_resume_breakpoint = clone_momentary_breakpoint
	      (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    command_interp = tp->control.command_interp;

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	    tp->control.command_interp = NULL;
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.value.related_pid;

	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = 0;
	  }
	else
	  {
	    /* This pending follow fork event is now handled, one way
	       or another.  The previous selected thread may be gone
	       from the lists by now, but if it is still around, need
	       to clear the pending follow request.  */
	    tp = find_thread_ptid (parent);
	    if (tp)
	      tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

	    /* This makes sure we don't try to apply the "Switched
	       over from WAIT_PID" logic above.  */
	    nullify_last_target_wait_ptid ();

	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		switch_to_thread (child);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp = inferior_thread ();
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->control.command_interp = command_interp;
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but, the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.  */
		    /* NOTE(review): the trailing "\n" here is
		       suspect -- warning() conventionally appends its
		       own newline; confirm against utils.c.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child.\n"));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	    else
	      switch_to_thread (parent);
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
		      "Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}
799
/* Re-associate per-thread breakpoints with the newly followed fork
   child, and re-sync the inserted breakpoints with the breakpoint
   list.  Assumes inferior_ptid already points at the child.  */

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
839
840 /* The child has exited or execed: resume threads of the parent the
841 user wanted to be executing. */
842
843 static int
844 proceed_after_vfork_done (struct thread_info *thread,
845 void *arg)
846 {
847 int pid = * (int *) arg;
848
849 if (ptid_get_pid (thread->ptid) == pid
850 && is_running (thread->ptid)
851 && !is_executing (thread->ptid)
852 && !thread->stop_requested
853 && thread->suspend.stop_signal == GDB_SIGNAL_0)
854 {
855 if (debug_infrun)
856 fprintf_unfiltered (gdb_stdlog,
857 "infrun: resuming vfork parent thread %s\n",
858 target_pid_to_str (thread->ptid));
859
860 switch_to_thread (thread->ptid);
861 clear_proceed_status (0);
862 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
863 }
864
865 return 0;
866 }
867
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is nonzero for an exec
   event, zero for an exit event.  No-op unless the current inferior
   is a vfork child still bonded to its parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Pid of the parent to resume afterwards, or -1 for none.  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  If the user wanted to
	 detach from the parent, now is the time.  */

      if (inf->vfork_parent->pending_detach)
	{
	  struct thread_info *tp;
	  struct cleanup *old_chain;
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  inf->vfork_parent->pending_detach = 0;

	  if (!exec)
	    {
	      /* If we're handling a child exit, then inferior_ptid
		 points at the inferior's pid, not to a thread.  */
	      old_chain = save_inferior_ptid ();
	      save_current_program_space ();
	      save_current_inferior ();
	    }
	  else
	    old_chain = save_current_space_and_thread ();

	  /* We're letting loose of the parent.  */
	  tp = any_live_thread_of_process (inf->vfork_parent->pid);
	  switch_to_thread (tp->ptid);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (debug_infrun || info_verbose)
	    {
	      target_terminal_ours_for_output ();

	      if (exec)
		{
		  fprintf_filtered (gdb_stdlog,
				    _("Detaching vfork parent process "
				      "%d after child exec.\n"),
				    inf->vfork_parent->pid);
		}
	      else
		{
		  fprintf_filtered (gdb_stdlog,
				    _("Detaching vfork parent process "
				      "%d after child exit.\n"),
				    inf->vfork_parent->pid);
		}
	    }

	  target_detach (NULL, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;

	  do_cleanups (old_chain);
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = add_program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = inf->vfork_parent->pid;

	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}
      else
	{
	  struct cleanup *old_chain;
	  struct program_space *pspace;

	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to null_ptid, so that clone_program_space doesn't want
	     to read the selected frame of a dead process.  */
	  old_chain = save_inferior_ptid ();
	  inferior_ptid = null_ptid;

	  /* This inferior is dead, so avoid giving the breakpoints
	     module the option to write through to it (cloning a
	     program space resets breakpoints).  */
	  inf->aspace = NULL;
	  inf->pspace = NULL;
	  pspace = add_program_space (maybe_new_address_space ());
	  set_current_program_space (pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (pspace, inf->vfork_parent->pspace);
	  inf->pspace = pspace;
	  inf->aspace = pspace->aspace;

	  /* Put back inferior_ptid.  We'll continue mourning this
	     inferior.  */
	  do_cleanups (old_chain);

	  resume_parent = inf->vfork_parent->pid;
	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}

      inf->vfork_parent = NULL;

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  struct cleanup *old_chain = make_cleanup_restore_current_thread ();

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resuming vfork parent process %d\n",
				resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);

	  do_cleanups (old_chain);
	}
    }
}
1031
1032 /* Enum strings for "set|show follow-exec-mode". */
1033
1034 static const char follow_exec_mode_new[] = "new";
1035 static const char follow_exec_mode_same[] = "same";
1036 static const char *const follow_exec_mode_names[] =
1037 {
1038 follow_exec_mode_new,
1039 follow_exec_mode_same,
1040 NULL,
1041 };
1042
1043 static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Implementation of "show follow-exec-mode".  Prints VALUE, the
   current setting ("new" or "same"), to FILE.  */
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}
1050
/* Follow the inferior through an exec call: refresh symbol tables,
   breakpoints, threads and shared-library state for the newly exec'd
   program.  PTID is the thread that reported the exec event;
   EXECD_PATHNAME is the path of the new executable and is assumed to
   be non-NULL.  */

static void
follow_exec (ptid_t ptid, char *execd_pathname)
{
  struct thread_info *th, *tmp;
  struct inferior *inf = current_inferior ();
  int pid = ptid_get_pid (ptid);

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  ALL_THREADS_SAFE (th, tmp)
    if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
      delete_thread (th->ptid);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (inferior_ptid),
		     execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  if (*gdb_sysroot != '\0')
    {
      /* Map EXECD_PATHNAME through the sysroot.  The result is copied
	 into stack storage so that NAME can be freed immediately.
	 NOTE(review): this assumes exec_file_find never returns NULL
	 here -- confirm; strlen would crash on a NULL result.  */
      char *name = exec_file_find (execd_pathname, NULL);

      execd_pathname = alloca (strlen (name) + 1);
      strcpy (execd_pathname, name);
      xfree (name);
    }

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      struct program_space *pspace;

      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      inf = add_inferior (current_inferior ()->pid);
      pspace = add_program_space (maybe_new_address_space ());
      inf->pspace = pspace;
      inf->aspace = pspace->aspace;

      exit_inferior_num_silent (current_inferior ()->num);

      set_current_inferior (inf);
      set_current_program_space (pspace);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* That a.out is now the one to use.  */
  exec_file_attach (execd_pathname, 0);

  /* SYMFILE_DEFER_BP_RESET is used as the proper displacement for PIE
     (Position Independent Executable) main symbol file will get applied by
     solib_create_inferior_hook below.  breakpoint_re_set would fail to insert
     the breakpoints with the zero displacement.  */

  symbol_file_add (execd_pathname,
		   (inf->symfile_flags
		    | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
		   NULL, 0);

  if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
    set_initial_language ();

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1223
1224 /* The queue of threads that need to do a step-over operation to get
1225 past e.g., a breakpoint. What technique is used to step over the
1226 breakpoint/watchpoint does not matter -- all threads end up in the
1227 same queue, to maintain rough temporal order of execution, in order
1228 to avoid starvation, otherwise, we could e.g., find ourselves
1229 constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
1231 struct thread_info *step_over_queue_head;
1232
/* Bit flags indicating what the thread needs to step over.  These can
   be combined, if the instruction both sits on a breakpoint and
   triggers a watchpoint.  */

enum step_over_what
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
1245
/* Info about an instruction that is being stepped over.  Written by
   set_step_over_info and reset by clear_step_over_info.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  struct address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;
};
1261
1262 /* The step-over info of the location that is being stepped over.
1263
1264 Note that with async/breakpoint always-inserted mode, a user might
1265 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1266 being stepped over. As setting a new breakpoint inserts all
1267 breakpoints, we need to make sure the breakpoint being stepped over
1268 isn't inserted then. We do that by only clearing the step-over
1269 info when the step-over is actually finished (or aborted).
1270
1271 Presently GDB can only step over one breakpoint at any given time.
1272 Given threads that can't run code in the same address space as the
1273 breakpoint's can't really miss the breakpoint, GDB could be taught
1274 to step-over at most one breakpoint per address space (so this info
1275 could move to the address space object if/when GDB is extended).
1276 The set of breakpoints being stepped over will normally be much
1277 smaller than the set of all breakpoints, so a flag in the
1278 breakpoint location structure would be wasteful. A separate list
1279 also saves complexity and run-time, as otherwise we'd have to go
1280 through all breakpoint locations clearing their flag whenever we
1281 start a new sequence. Similar considerations weigh against storing
1282 this info in the thread object. Plus, not all step overs actually
1283 have breakpoint locations -- e.g., stepping past a single-step
1284 breakpoint, or stepping to complete a non-continuable
1285 watchpoint. */
1286 static struct step_over_info step_over_info;
1287
1288 /* Record the address of the breakpoint/instruction we're currently
1289 stepping over. */
1290
1291 static void
1292 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1293 int nonsteppable_watchpoint_p)
1294 {
1295 step_over_info.aspace = aspace;
1296 step_over_info.address = address;
1297 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1298 }
1299
/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */
1302
1303 static void
1304 clear_step_over_info (void)
1305 {
1306 step_over_info.aspace = NULL;
1307 step_over_info.address = 0;
1308 step_over_info.nonsteppable_watchpoint_p = 0;
1309 }
1310
1311 /* See infrun.h. */
1312
1313 int
1314 stepping_past_instruction_at (struct address_space *aspace,
1315 CORE_ADDR address)
1316 {
1317 return (step_over_info.aspace != NULL
1318 && breakpoint_address_match (aspace, address,
1319 step_over_info.aspace,
1320 step_over_info.address));
1321 }
1322
1323 /* See infrun.h. */
1324
int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Set while stepping an instruction that triggers a nonsteppable
     watchpoint; watchpoint insertion is skipped meanwhile.  */
  return step_over_info.nonsteppable_watchpoint_p;
}
1330
1331 /* Returns true if step-over info is valid. */
1332
1333 static int
1334 step_over_info_valid_p (void)
1335 {
1336 return (step_over_info.aspace != NULL
1337 || stepping_past_nonsteppable_watchpoint ());
1338 }
1339
1340 \f
1341 /* Displaced stepping. */
1342
1343 /* In non-stop debugging mode, we must take special care to manage
1344 breakpoints properly; in particular, the traditional strategy for
1345 stepping a thread past a breakpoint it has hit is unsuitable.
1346 'Displaced stepping' is a tactic for stepping one thread past a
1347 breakpoint it has hit while ensuring that other threads running
1348 concurrently will hit the breakpoint as they should.
1349
1350 The traditional way to step a thread T off a breakpoint in a
1351 multi-threaded program in all-stop mode is as follows:
1352
1353 a0) Initially, all threads are stopped, and breakpoints are not
1354 inserted.
1355 a1) We single-step T, leaving breakpoints uninserted.
1356 a2) We insert breakpoints, and resume all threads.
1357
1358 In non-stop debugging, however, this strategy is unsuitable: we
1359 don't want to have to stop all threads in the system in order to
1360 continue or step T past a breakpoint. Instead, we use displaced
1361 stepping:
1362
1363 n0) Initially, T is stopped, other threads are running, and
1364 breakpoints are inserted.
1365 n1) We copy the instruction "under" the breakpoint to a separate
1366 location, outside the main code stream, making any adjustments
1367 to the instruction, register, and memory state as directed by
1368 T's architecture.
1369 n2) We single-step T over the instruction at its new location.
1370 n3) We adjust the resulting register and memory state as directed
1371 by T's architecture. This includes resetting T's PC to point
1372 back into the main instruction stream.
1373 n4) We resume T.
1374
1375 This approach depends on the following gdbarch methods:
1376
1377 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1378 indicate where to copy the instruction, and how much space must
1379 be reserved there. We use these in step n1.
1380
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1382 address, and makes any necessary adjustments to the instruction,
1383 register contents, and memory. We use this in step n1.
1384
1385 - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
1387 same effect the instruction would have had if we had executed it
1388 at its original address. We use this in step n3.
1389
1390 - gdbarch_displaced_step_free_closure provides cleanup.
1391
1392 The gdbarch_displaced_step_copy_insn and
1393 gdbarch_displaced_step_fixup functions must be written so that
1394 copying an instruction with gdbarch_displaced_step_copy_insn,
1395 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1397 thread's memory and registers as stepping the instruction in place
1398 would have. Exactly which responsibilities fall to the copy and
1399 which fall to the fixup is up to the author of those functions.
1400
1401 See the comments in gdbarch.sh for details.
1402
1403 Note that displaced stepping and software single-step cannot
1404 currently be used in combination, although with some care I think
1405 they could be made to. Software single-step works by placing
1406 breakpoints on all possible subsequent instructions; if the
1407 displaced instruction is a PC-relative jump, those breakpoints
1408 could fall in very strange places --- on pages that aren't
1409 executable, or at addresses that are not proper instruction
1410 boundaries. (We do generally let other threads run while we wait
1411 to hit the software single-step breakpoint, and they might
1412 encounter such a corrupted instruction.) One way to work around
1413 this would be to have gdbarch_displaced_step_copy_insn fully
1414 simulate the effect of PC-relative instructions (and return NULL)
1415 on architectures that use software single-stepping.
1416
1417 In non-stop mode, we can have independent and simultaneous step
1418 requests, so more than one thread may need to simultaneously step
1419 over a breakpoint. The current implementation assumes there is
1420 only one scratch space per process. In this case, we have to
1421 serialize access to the scratch space. If thread A wants to step
1422 over a breakpoint, but we are currently waiting for some other
1423 thread to complete a displaced step, we leave thread A stopped and
1424 place it in the displaced_step_request_queue. Whenever a displaced
1425 step finishes, we pick the next thread in the queue and start a new
1426 displaced step operation on it. See displaced_step_prepare and
1427 displaced_step_fixup for details. */
1428
/* Per-inferior displaced stepping state.  Kept on the
   displaced_step_inferior_states linked list.  */
struct displaced_step_inferior_state
{
  /* Pointer to next in linked list.  */
  struct displaced_step_inferior_state *next;

  /* The process this displaced step state refers to.  */
  int pid;

  /* If this is not null_ptid, this is the thread carrying out a
     displaced single-step in process PID.  This thread's state will
     require fixing up once it has completed its step.  */
  ptid_t step_ptid;

  /* The architecture the thread had when we stepped it.  */
  struct gdbarch *step_gdbarch;

  /* The closure provided by gdbarch_displaced_step_copy_insn, to be
     used for post-step cleanup.  */
  struct displaced_step_closure *step_closure;

  /* The address of the original instruction, and the copy we
     made.  */
  CORE_ADDR step_original, step_copy;

  /* Saved contents of copy area.  */
  gdb_byte *step_saved_copy;
};
1457
1458 /* The list of states of processes involved in displaced stepping
1459 presently. */
1460 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1461
1462 /* Get the displaced stepping state of process PID. */
1463
1464 static struct displaced_step_inferior_state *
1465 get_displaced_stepping_state (int pid)
1466 {
1467 struct displaced_step_inferior_state *state;
1468
1469 for (state = displaced_step_inferior_states;
1470 state != NULL;
1471 state = state->next)
1472 if (state->pid == pid)
1473 return state;
1474
1475 return NULL;
1476 }
1477
1478 /* Return true if process PID has a thread doing a displaced step. */
1479
1480 static int
1481 displaced_step_in_progress (int pid)
1482 {
1483 struct displaced_step_inferior_state *displaced;
1484
1485 displaced = get_displaced_stepping_state (pid);
1486 if (displaced != NULL && !ptid_equal (displaced->step_ptid, null_ptid))
1487 return 1;
1488
1489 return 0;
1490 }
1491
1492 /* Add a new displaced stepping state for process PID to the displaced
1493 stepping state list, or return a pointer to an already existing
1494 entry, if it already exists. Never returns NULL. */
1495
1496 static struct displaced_step_inferior_state *
1497 add_displaced_stepping_state (int pid)
1498 {
1499 struct displaced_step_inferior_state *state;
1500
1501 for (state = displaced_step_inferior_states;
1502 state != NULL;
1503 state = state->next)
1504 if (state->pid == pid)
1505 return state;
1506
1507 state = xcalloc (1, sizeof (*state));
1508 state->pid = pid;
1509 state->next = displaced_step_inferior_states;
1510 displaced_step_inferior_states = state;
1511
1512 return state;
1513 }
1514
1515 /* If inferior is in displaced stepping, and ADDR equals to starting address
1516 of copy area, return corresponding displaced_step_closure. Otherwise,
1517 return NULL. */
1518
1519 struct displaced_step_closure*
1520 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1521 {
1522 struct displaced_step_inferior_state *displaced
1523 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1524
1525 /* If checking the mode of displaced instruction in copy area. */
1526 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1527 && (displaced->step_copy == addr))
1528 return displaced->step_closure;
1529
1530 return NULL;
1531 }
1532
1533 /* Remove the displaced stepping state of process PID. */
1534
1535 static void
1536 remove_displaced_stepping_state (int pid)
1537 {
1538 struct displaced_step_inferior_state *it, **prev_next_p;
1539
1540 gdb_assert (pid != 0);
1541
1542 it = displaced_step_inferior_states;
1543 prev_next_p = &displaced_step_inferior_states;
1544 while (it)
1545 {
1546 if (it->pid == pid)
1547 {
1548 *prev_next_p = it->next;
1549 xfree (it);
1550 return;
1551 }
1552
1553 prev_next_p = &it->next;
1554 it = *prev_next_p;
1555 }
1556 }
1557
/* Called when inferior INF exits; discard any displaced stepping
   state tracked for its process.  */
static void
infrun_inferior_exit (struct inferior *inf)
{
  remove_displaced_stepping_state (inf->pid);
}
1563
1564 /* If ON, and the architecture supports it, GDB will use displaced
1565 stepping to step over breakpoints. If OFF, or if the architecture
1566 doesn't support it, GDB will instead use the traditional
1567 hold-and-step approach. If AUTO (which is the default), GDB will
1568 decide which technique to use to step over breakpoints depending on
1569 which of all-stop or non-stop mode is active --- displaced stepping
1570 in non-stop mode; hold-and-step in all-stop mode. */
1571
1572 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1573
/* Implementation of "show displaced-stepping".  Prints VALUE, the
   current auto-boolean setting; when it is "auto", also prints the
   effective behavior, which follows non-stop mode.  */
static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    fprintf_filtered (file,
		      _("Debugger's willingness to use displaced stepping "
			"to step over breakpoints is %s (currently %s).\n"),
		      value, non_stop ? "on" : "off");
  else
    fprintf_filtered (file,
		      _("Debugger's willingness to use displaced stepping "
			"to step over breakpoints is %s.\n"), value);
}
1589
1590 /* Return non-zero if displaced stepping can/should be used to step
1591 over breakpoints. */
1592
1593 static int
1594 use_displaced_stepping (struct gdbarch *gdbarch)
1595 {
1596 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1597 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1598 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1599 && find_record_target () == NULL);
1600 }
1601
1602 /* Clean out any stray displaced stepping state. */
1603 static void
1604 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1605 {
1606 /* Indicate that there is no cleanup pending. */
1607 displaced->step_ptid = null_ptid;
1608
1609 if (displaced->step_closure)
1610 {
1611 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1612 displaced->step_closure);
1613 displaced->step_closure = NULL;
1614 }
1615 }
1616
/* Cleanup wrapper around displaced_step_clear, for use with
   make_cleanup.  ARG is the displaced_step_inferior_state to
   clear.  */
static void
displaced_step_clear_cleanup (void *arg)
{
  struct displaced_step_inferior_state *state = arg;

  displaced_step_clear (state);
}
1624
1625 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1626 void
1627 displaced_step_dump_bytes (struct ui_file *file,
1628 const gdb_byte *buf,
1629 size_t len)
1630 {
1631 int i;
1632
1633 for (i = 0; i < len; i++)
1634 fprintf_unfiltered (file, "%02x ", buf[i]);
1635 fputs_unfiltered ("\n", file);
1636 }
1637
1638 /* Prepare to single-step, using displaced stepping.
1639
1640 Note that we cannot use displaced stepping when we have a signal to
1641 deliver. If we have a signal to deliver and an instruction to step
1642 over, then after the step, there will be no indication from the
1643 target whether the thread entered a signal handler or ignored the
1644 signal and stepped over the instruction successfully --- both cases
1645 result in a simple SIGTRAP. In the first case we mustn't do a
1646 fixup, and in the second case we must --- but we can't tell which.
1647 Comments in the code for 'random signals' in handle_inferior_event
1648 explain how we handle this case instead.
1649
1650 Returns 1 if preparing was successful -- this thread is going to be
1651 stepped now; or 0 if displaced stepping this thread got queued. */
static int
displaced_step_prepare (ptid_t ptid)
{
  struct cleanup *old_cleanups, *ignore_cleanups;
  struct thread_info *tp = find_thread_ptid (ptid);
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original, copy;
  ULONGEST len;
  struct displaced_step_closure *closure;
  struct displaced_step_inferior_state *displaced;
  int status;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We have to displaced step one thread at a time, as we only have
     access to a single scratch space per inferior.  */

  displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

  if (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      /* Already waiting for a displaced step to finish.  Defer this
	 request and place in queue.  */

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: deferring step of %s\n",
			    target_pid_to_str (ptid));

      thread_step_over_chain_enqueue (tp);
      return 0;
    }
  else
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: stepping %s now\n",
			    target_pid_to_str (ptid));
    }

  displaced_step_clear (displaced);

  /* Temporarily switch inferior_ptid to PTID: the memory reads/writes
     below must go to PTID's inferior.  */
  old_cleanups = save_inferior_ptid ();
  inferior_ptid = ptid;

  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  /* Save the original contents of the copy area.  IGNORE_CLEANUPS
     frees the buffer only on the error path; on success it is
     discarded below, once ownership passes to DISPLACED.  */
  displaced->step_saved_copy = xmalloc (len);
  ignore_cleanups = make_cleanup (free_current_contents,
				  &displaced->step_saved_copy);
  status = target_read_memory (copy, displaced->step_saved_copy, len);
  if (status != 0)
    throw_error (MEMORY_ERROR,
		 _("Error accessing memory address %s (%s) for "
		   "displaced-stepping scratch space."),
		 paddress (gdbarch, copy), safe_strerror (status));
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
			  paddress (gdbarch, copy));
      displaced_step_dump_bytes (gdb_stdlog,
				 displaced->step_saved_copy,
				 len);
    };

  closure = gdbarch_displaced_step_copy_insn (gdbarch,
					      original, copy, regcache);

  /* We don't support the fully-simulated case at present.  */
  gdb_assert (closure);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced->step_ptid = ptid;
  displaced->step_gdbarch = gdbarch;
  displaced->step_closure = closure;
  displaced->step_original = original;
  displaced->step_copy = copy;

  make_cleanup (displaced_step_clear_cleanup, displaced);

  /* Resume execution at the copy.  */
  regcache_write_pc (regcache, copy);

  discard_cleanups (ignore_cleanups);

  do_cleanups (old_cleanups);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
			paddress (gdbarch, copy));

  return 1;
}
1762
/* Write LEN bytes from MYADDR to target address MEMADDR in the
   context of thread PTID, temporarily switching inferior_ptid so the
   write is directed at that thread's inferior.  */
static void
write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
		   const gdb_byte *myaddr, int len)
{
  struct cleanup *ptid_cleanup = save_inferior_ptid ();

  inferior_ptid = ptid;
  write_memory (memaddr, myaddr, len);
  do_cleanups (ptid_cleanup);
}
1773
/* Restore the original contents of DISPLACED's copy (scratch) area,
   in the context of thread PTID.  */

static void
displaced_step_restore (struct displaced_step_inferior_state *displaced,
			ptid_t ptid)
{
  ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);

  write_memory_ptid (ptid, displaced->step_copy,
		     displaced->step_saved_copy, len);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
			target_pid_to_str (ptid),
			paddress (displaced->step_gdbarch,
				  displaced->step_copy));
}
1790
/* Finish a displaced step: if EVENT_PTID is the thread that was
   displaced-stepping in its process, restore the scratch area and
   either apply the architecture fixup (when the copied instruction
   completed) or just relocate the PC back into the original
   instruction stream.  SIGNAL is the signal EVENT_PTID stopped with.
   A no-op for any other thread.  */

static void
displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
{
  struct cleanup *old_cleanups;
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (ptid_get_pid (event_ptid));

  /* Was any thread of this process doing a displaced step?  */
  if (displaced == NULL)
    return;

  /* Was this event for the pid we displaced?  */
  if (ptid_equal (displaced->step_ptid, null_ptid)
      || ! ptid_equal (displaced->step_ptid, event_ptid))
    return;

  /* Make sure the state is cleared (and the closure freed) even if
     something below throws.  */
  old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);

  displaced_step_restore (displaced, displaced->step_ptid);

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread.  */
  switch_to_thread (event_ptid);

  /* Did the instruction complete successfully?  */
  if (signal == GDB_SIGNAL_TRAP
      && !(target_stopped_by_watchpoint ()
	   && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
	       || target_have_steppable_watchpoint)))
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
				    displaced->step_closure,
				    displaced->step_original,
				    displaced->step_copy,
				    get_thread_regcache (displaced->step_ptid));
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
	 relocate the PC.  */
      struct regcache *regcache = get_thread_regcache (event_ptid);
      CORE_ADDR pc = regcache_read_pc (regcache);

      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
    }

  do_cleanups (old_cleanups);

  displaced->step_ptid = null_ptid;
}
1844
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* The ptid the event was reported for.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The waitstatus the target reported for this event.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in
     (presumably lazily -- TODO(review): confirm against the code
     that sets it).  */
  int stop_func_filled_in;
  /* Bounds and name of the function the inferior stopped in, valid
     when STOP_FUNC_FILLED_IN is set.  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero to request waiting for further target events (see
     prepare_to_wait -- TODO(review): confirm).  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
1867
1868 /* Clear ECS and set it to point at TP. */
1869
1870 static void
1871 reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1872 {
1873 memset (ecs, 0, sizeof (*ecs));
1874 ecs->event_thread = tp;
1875 ecs->ptid = tp->ptid;
1876 }
1877
1878 static void keep_going_pass_signal (struct execution_control_state *ecs);
1879 static void prepare_to_wait (struct execution_control_state *ecs);
1880 static int keep_going_stepped_thread (struct thread_info *tp);
1881 static int thread_still_needs_step_over (struct thread_info *tp);
1882
1883 /* Are there any pending step-over requests? If so, run all we can
1884 now and return true. Otherwise, return false. */
1885
1886 static int
1887 start_step_over (void)
1888 {
1889 struct thread_info *tp, *next;
1890
1891 for (tp = step_over_queue_head; tp != NULL; tp = next)
1892 {
1893 struct execution_control_state ecss;
1894 struct execution_control_state *ecs = &ecss;
1895
1896 next = thread_step_over_chain_next (tp);
1897
1898 /* If this inferior already has a displaced step in process,
1899 don't start a new one. */
1900 if (displaced_step_in_progress (ptid_get_pid (tp->ptid)))
1901 continue;
1902
1903 thread_step_over_chain_remove (tp);
1904
1905 if (step_over_queue_head == NULL)
1906 {
1907 if (debug_infrun)
1908 fprintf_unfiltered (gdb_stdlog,
1909 "infrun: step-over queue now empty\n");
1910 }
1911
1912 if (tp->control.trap_expected || tp->executing)
1913 {
1914 internal_error (__FILE__, __LINE__,
1915 "[%s] has inconsistent state: "
1916 "trap_expected=%d, executing=%d\n",
1917 target_pid_to_str (tp->ptid),
1918 tp->control.trap_expected,
1919 tp->executing);
1920 }
1921
1922 if (debug_infrun)
1923 fprintf_unfiltered (gdb_stdlog,
1924 "infrun: resuming [%s] for step-over\n",
1925 target_pid_to_str (tp->ptid));
1926
1927 /* keep_going_pass_signal skips the step-over if the breakpoint
1928 is no longer inserted. In all-stop, we want to keep looking
1929 for a thread that needs a step-over instead of resuming TP,
1930 because we wouldn't be able to resume anything else until the
1931 target stops again. In non-stop, the resume always resumes
1932 only TP, so it's OK to let the thread resume freely. */
1933 if (!non_stop && !thread_still_needs_step_over (tp))
1934 continue;
1935
1936 switch_to_thread (tp->ptid);
1937 reset_ecs (ecs, tp);
1938 keep_going_pass_signal (ecs);
1939
1940 if (!ecs->wait_some_more)
1941 error (_("Command aborted."));
1942
1943 if (!non_stop)
1944 {
1945 /* On all-stop, shouldn't have resumed unless we needed a
1946 step over. */
1947 gdb_assert (tp->control.trap_expected
1948 || tp->step_after_step_resume_breakpoint);
1949
1950 /* With remote targets (at least), in all-stop, we can't
1951 issue any further remote commands until the program stops
1952 again. */
1953 return 1;
1954 }
1955
1956 /* Either the thread no longer needed a step-over, or a new
1957 displaced stepping sequence started. Even in the latter
1958 case, continue looking. Maybe we can also start another
1959 displaced step on a thread of other process. */
1960 }
1961
1962 return 0;
1963 }
1964
1965 /* Update global variables holding ptids to hold NEW_PTID if they were
1966 holding OLD_PTID. */
1967 static void
1968 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1969 {
1970 struct displaced_step_request *it;
1971 struct displaced_step_inferior_state *displaced;
1972
1973 if (ptid_equal (inferior_ptid, old_ptid))
1974 inferior_ptid = new_ptid;
1975
1976 for (displaced = displaced_step_inferior_states;
1977 displaced;
1978 displaced = displaced->next)
1979 {
1980 if (ptid_equal (displaced->step_ptid, old_ptid))
1981 displaced->step_ptid = new_ptid;
1982 }
1983 }
1984
1985 \f
1986 /* Resuming. */
1987
1988 /* Things to clean up if we QUIT out of resume (). */
1989 static void
1990 resume_cleanups (void *ignore)
1991 {
1992 if (!ptid_equal (inferior_ptid, null_ptid))
1993 delete_single_step_breakpoints (inferior_thread ());
1994
1995 normal_stop ();
1996 }
1997
/* Possible values of the "scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
/* NULL-terminated list of the valid scheduler-locking modes, for the
   enum-style "set scheduler-locking" command.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  NULL
};
/* The current scheduler-locking mode.  Always one of the schedlock_*
   arrays above; code compares against them by pointer identity, not
   with strcmp.  */
static const char *scheduler_mode = schedlock_off;
/* "show scheduler-locking" callback: print the current mode.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered
    (file,
     _("Mode for locking scheduler during execution is \"%s\".\n"),
     value);
}
2017
2018 static void
2019 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
2020 {
2021 if (!target_can_lock_scheduler)
2022 {
2023 scheduler_mode = schedlock_off;
2024 error (_("Target '%s' cannot support this command."), target_shortname);
2025 }
2026 }
2027
2028 /* True if execution commands resume all threads of all processes by
2029 default; otherwise, resume only threads of the current inferior
2030 process. */
2031 int sched_multi = 0;
2032
2033 /* Try to setup for software single stepping over the specified location.
2034 Return 1 if target_resume() should use hardware single step.
2035
2036 GDBARCH the current gdbarch.
2037 PC the location to step over. */
2038
2039 static int
2040 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2041 {
2042 int hw_step = 1;
2043
2044 if (execution_direction == EXEC_FORWARD
2045 && gdbarch_software_single_step_p (gdbarch)
2046 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
2047 {
2048 hw_step = 0;
2049 }
2050 return hw_step;
2051 }
2052
2053 /* See infrun.h. */
2054
2055 ptid_t
2056 user_visible_resume_ptid (int step)
2057 {
2058 ptid_t resume_ptid;
2059
2060 if (non_stop)
2061 {
2062 /* With non-stop mode on, threads are always handled
2063 individually. */
2064 resume_ptid = inferior_ptid;
2065 }
2066 else if ((scheduler_mode == schedlock_on)
2067 || (scheduler_mode == schedlock_step && step))
2068 {
2069 /* User-settable 'scheduler' mode requires solo thread
2070 resume. */
2071 resume_ptid = inferior_ptid;
2072 }
2073 else if (!sched_multi && target_supports_multi_process ())
2074 {
2075 /* Resume all threads of the current process (and none of other
2076 processes). */
2077 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2078 }
2079 else
2080 {
2081 /* Resume all threads of all processes. */
2082 resume_ptid = RESUME_ALL;
2083 }
2084
2085 return resume_ptid;
2086 }
2087
2088 /* Wrapper for target_resume, that handles infrun-specific
2089 bookkeeping. */
2090
2091 static void
2092 do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2093 {
2094 struct thread_info *tp = inferior_thread ();
2095
2096 /* Install inferior's terminal modes. */
2097 target_terminal_inferior ();
2098
2099 /* Avoid confusing the next resume, if the next stop/resume
2100 happens to apply to another thread. */
2101 tp->suspend.stop_signal = GDB_SIGNAL_0;
2102
2103 /* Advise target which signals may be handled silently.
2104
2105 If we have removed breakpoints because we are stepping over one
2106 in-line (in any thread), we need to receive all signals to avoid
2107 accidentally skipping a breakpoint during execution of a signal
2108 handler.
2109
2110 Likewise if we're displaced stepping, otherwise a trap for a
2111 breakpoint in a signal handler might be confused with the
2112 displaced step finishing. We don't make the displaced_step_fixup
2113 step distinguish the cases instead, because:
2114
2115 - a backtrace while stopped in the signal handler would show the
2116 scratch pad as frame older than the signal handler, instead of
2117 the real mainline code.
2118
2119 - when the thread is later resumed, the signal handler would
2120 return to the scratch pad area, which would no longer be
2121 valid. */
2122 if (step_over_info_valid_p ()
2123 || displaced_step_in_progress (ptid_get_pid (tp->ptid)))
2124 target_pass_signals (0, NULL);
2125 else
2126 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2127
2128 target_resume (resume_ptid, step, sig);
2129 }
2130
/* Resume the inferior, but allow a QUIT.  This is useful if the user
   wants to interrupt some lengthy single-stepping operation
   (for child processes, the SIGINT goes to the inferior, and so
   we get a SIGINT random_signal, but for remote debugging and perhaps
   other targets, that's not true).

   SIG is the signal to give the inferior (zero for none).  */
void
resume (enum gdb_signal sig)
{
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct thread_info *tp = inferior_thread ();
  CORE_ADDR pc = regcache_read_pc (regcache);
  struct address_space *aspace = get_regcache_aspace (regcache);
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  int step;

  tp->stepped_breakpoint = 0;

  /* A thread being resumed here must not be waiting in the step-over
     queue; start_step_over handles those.  */
  gdb_assert (!thread_is_in_step_over_chain (tp));

  QUIT;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume : clear step\n");
      step = 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resume (step=%d, signal=%s), "
			"trap_expected=%d, current thread [%s] at %s\n",
			step, gdb_signal_to_symbol_string (sig),
			tp->control.trap_expected,
			target_pid_to_str (inferior_ptid),
			paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint, "
				"deliver signal first\n");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint\n");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = user_visible_resume_ptid (user_step);
	      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
	      discard_cleanups (old_cleanups);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If enabled, step over breakpoints by executing a copy of the
     instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (use_displaced_stepping (gdbarch)
      && tp->control.trap_expected
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      struct displaced_step_inferior_state *displaced;

      if (!displaced_step_prepare (inferior_ptid))
	{
	  /* The scratch pad is busy; this thread was queued to run
	     its step-over later.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"Got placed in step-over queue\n");

	  tp->control.trap_expected = 0;
	  discard_cleanups (old_cleanups);
	  return;
	}

      /* Update pc to reflect the new address from which we will execute
	 instructions due to displaced stepping.  */
      pc = regcache_read_pc (get_thread_regcache (inferior_ptid));

      displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
      step = gdbarch_displaced_step_hw_singlestep (gdbarch,
						   displaced->step_closure);
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, we might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  Start
     by assuming everything will be resumed, then narrow the set
     by applying increasingly restricting conditions.  */
  resume_ptid = user_visible_resume_ptid (user_step);

  /* Maybe resume a single thread after all.  */
  if ((step || thread_has_single_step_breakpoints_set (tp))
      && tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, by single-stepping the thread with the breakpoint
	 removed.  In which case, we need to single-step only this
	 thread, and keep others stopped, as they can miss this
	 breakpoint if allowed to run.  */
      resume_ptid = inferior_ptid;
    }

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* The only case we currently need to step a breakpoint
	 instruction is when we have a signal to deliver.  See
	 handle_signal_stop where we handle random signals that could
	 take us out of the stepping range.  Normally, in that
	 case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would take us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.  */
      gdb_assert (sig != GDB_SIGNAL_0);

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = 0;
    }

  if (debug_displaced
      && use_displaced_stepping (gdbarch)
      && tp->control.trap_expected
      && !step_over_info_valid_p ())
    {
      /* For debugging, dump the first few bytes at the address the
	 thread is about to execute (the displaced-step scratch
	 copy).  */
      struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
      struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
			  paddress (resume_gdbarch, actual_pc));
      read_memory (actual_pc, buf, sizeof (buf));
      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  discard_cleanups (old_cleanups);
}
2451 \f
2452 /* Proceeding. */
2453
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: clear_proceed_status_thread (%s)\n",
			target_pid_to_str (tp->ptid));

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->suspend.stop_signal))
    tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Reset all per-thread stepping/run-control state to its neutral
     values.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  /* Forget which interpreter issued the previous execution command.  */
  tp->control.command_interp = NULL;
  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2490
/* Clear the proceed status of every thread that is about to be
   resumed — all matching threads in all-stop mode, or just the
   current thread in non-stop mode.  STEP indicates whether the coming
   resumption is for a stepping command (it affects which threads the
   user-visible resume covers).  */

void
clear_proceed_status (int step)
{
  if (!non_stop)
    {
      struct thread_info *tp;
      ptid_t resume_ptid;

      resume_ptid = user_visible_resume_ptid (step);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      ALL_NON_EXITED_THREADS (tp)
        {
	  if (!ptid_match (tp->ptid, resume_ptid))
	    continue;
	  clear_proceed_status_thread (tp);
	}
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      /* Reset the per-inferior "stop quietly" state too.  */
      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  stop_after_trap = 0;

  clear_step_over_info ();

  /* Let interested parties (e.g., front ends) know we're about to
     resume.  */
  observer_notify_about_to_proceed ();
}
2532
2533 /* Returns true if TP is still stopped at a breakpoint that needs
2534 stepping-over in order to make progress. If the breakpoint is gone
2535 meanwhile, we can skip the whole step-over dance. */
2536
2537 static int
2538 thread_still_needs_step_over_bp (struct thread_info *tp)
2539 {
2540 if (tp->stepping_over_breakpoint)
2541 {
2542 struct regcache *regcache = get_thread_regcache (tp->ptid);
2543
2544 if (breakpoint_here_p (get_regcache_aspace (regcache),
2545 regcache_read_pc (regcache))
2546 == ordinary_breakpoint_here)
2547 return 1;
2548
2549 tp->stepping_over_breakpoint = 0;
2550 }
2551
2552 return 0;
2553 }
2554
2555 /* Check whether thread TP still needs to start a step-over in order
2556 to make progress when resumed. Returns an bitwise or of enum
2557 step_over_what bits, indicating what needs to be stepped over. */
2558
2559 static int
2560 thread_still_needs_step_over (struct thread_info *tp)
2561 {
2562 struct inferior *inf = find_inferior_ptid (tp->ptid);
2563 int what = 0;
2564
2565 if (thread_still_needs_step_over_bp (tp))
2566 what |= STEP_OVER_BREAKPOINT;
2567
2568 if (tp->stepping_over_watchpoint
2569 && !target_have_steppable_watchpoint)
2570 what |= STEP_OVER_WATCHPOINT;
2571
2572 return what;
2573 }
2574
2575 /* Returns true if scheduler locking applies. STEP indicates whether
2576 we're about to do a step/next-like command to a thread. */
2577
2578 static int
2579 schedlock_applies (struct thread_info *tp)
2580 {
2581 return (scheduler_mode == schedlock_on
2582 || (scheduler_mode == schedlock_step
2583 && tp->control.stepping_command));
2584 }
2585
2586 /* Basic routine for continuing the program in various fashions.
2587
2588 ADDR is the address to resume at, or -1 for resume where stopped.
2589 SIGGNAL is the signal to give it, or 0 for none,
2590 or -1 for act according to how it stopped.
2591 STEP is nonzero if should trap after one instruction.
2592 -1 means return after that and print nothing.
2593 You should probably set various step_... variables
2594 before calling here, if you are stepping.
2595
2596 You should call clear_proceed_status before calling proceed. */
2597
2598 void
2599 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
2600 {
2601 struct regcache *regcache;
2602 struct gdbarch *gdbarch;
2603 struct thread_info *tp;
2604 CORE_ADDR pc;
2605 struct address_space *aspace;
2606 ptid_t resume_ptid;
2607 struct execution_control_state ecss;
2608 struct execution_control_state *ecs = &ecss;
2609 struct cleanup *old_chain;
2610 int started;
2611
2612 /* If we're stopped at a fork/vfork, follow the branch set by the
2613 "set follow-fork-mode" command; otherwise, we'll just proceed
2614 resuming the current thread. */
2615 if (!follow_fork ())
2616 {
2617 /* The target for some reason decided not to resume. */
2618 normal_stop ();
2619 if (target_can_async_p ())
2620 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2621 return;
2622 }
2623
2624 /* We'll update this if & when we switch to a new thread. */
2625 previous_inferior_ptid = inferior_ptid;
2626
2627 regcache = get_current_regcache ();
2628 gdbarch = get_regcache_arch (regcache);
2629 aspace = get_regcache_aspace (regcache);
2630 pc = regcache_read_pc (regcache);
2631 tp = inferior_thread ();
2632
2633 /* Fill in with reasonable starting values. */
2634 init_thread_stepping_state (tp);
2635
2636 gdb_assert (!thread_is_in_step_over_chain (tp));
2637
2638 if (addr == (CORE_ADDR) -1)
2639 {
2640 if (pc == stop_pc
2641 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
2642 && execution_direction != EXEC_REVERSE)
2643 /* There is a breakpoint at the address we will resume at,
2644 step one instruction before inserting breakpoints so that
2645 we do not stop right away (and report a second hit at this
2646 breakpoint).
2647
2648 Note, we don't do this in reverse, because we won't
2649 actually be executing the breakpoint insn anyway.
2650 We'll be (un-)executing the previous instruction. */
2651 tp->stepping_over_breakpoint = 1;
2652 else if (gdbarch_single_step_through_delay_p (gdbarch)
2653 && gdbarch_single_step_through_delay (gdbarch,
2654 get_current_frame ()))
2655 /* We stepped onto an instruction that needs to be stepped
2656 again before re-inserting the breakpoint, do so. */
2657 tp->stepping_over_breakpoint = 1;
2658 }
2659 else
2660 {
2661 regcache_write_pc (regcache, addr);
2662 }
2663
2664 if (siggnal != GDB_SIGNAL_DEFAULT)
2665 tp->suspend.stop_signal = siggnal;
2666
2667 /* Record the interpreter that issued the execution command that
2668 caused this thread to resume. If the top level interpreter is
2669 MI/async, and the execution command was a CLI command
2670 (next/step/etc.), we'll want to print stop event output to the MI
2671 console channel (the stepped-to line, etc.), as if the user
2672 entered the execution command on a real GDB console. */
2673 tp->control.command_interp = command_interp ();
2674
2675 resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
2676
2677 /* If an exception is thrown from this point on, make sure to
2678 propagate GDB's knowledge of the executing state to the
2679 frontend/user running state. */
2680 old_chain = make_cleanup (finish_thread_state_cleanup, &resume_ptid);
2681
2682 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2683 threads (e.g., we might need to set threads stepping over
2684 breakpoints first), from the user/frontend's point of view, all
2685 threads in RESUME_PTID are now running. Unless we're calling an
2686 inferior function, as in that case we pretend the inferior
2687 doesn't run at all. */
2688 if (!tp->control.in_infcall)
2689 set_running (resume_ptid, 1);
2690
2691 if (debug_infrun)
2692 fprintf_unfiltered (gdb_stdlog,
2693 "infrun: proceed (addr=%s, signal=%s)\n",
2694 paddress (gdbarch, addr),
2695 gdb_signal_to_symbol_string (siggnal));
2696
2697 annotate_starting ();
2698
2699 /* Make sure that output from GDB appears before output from the
2700 inferior. */
2701 gdb_flush (gdb_stdout);
2702
2703 /* In a multi-threaded task we may select another thread and
2704 then continue or step.
2705
2706 But if a thread that we're resuming had stopped at a breakpoint,
2707 it will immediately cause another breakpoint stop without any
2708 execution (i.e. it will report a breakpoint hit incorrectly). So
2709 we must step over it first.
2710
2711 Look for threads other than the current (TP) that reported a
2712 breakpoint hit and haven't been resumed yet since. */
2713
2714 /* If scheduler locking applies, we can avoid iterating over all
2715 threads. */
2716 if (!non_stop && !schedlock_applies (tp))
2717 {
2718 struct thread_info *current = tp;
2719
2720 ALL_NON_EXITED_THREADS (tp)
2721 {
2722 /* Ignore the current thread here. It's handled
2723 afterwards. */
2724 if (tp == current)
2725 continue;
2726
2727 /* Ignore threads of processes we're not resuming. */
2728 if (!ptid_match (tp->ptid, resume_ptid))
2729 continue;
2730
2731 if (!thread_still_needs_step_over (tp))
2732 continue;
2733
2734 gdb_assert (!thread_is_in_step_over_chain (tp));
2735
2736 if (debug_infrun)
2737 fprintf_unfiltered (gdb_stdlog,
2738 "infrun: need to step-over [%s] first\n",
2739 target_pid_to_str (tp->ptid));
2740
2741 thread_step_over_chain_enqueue (tp);
2742 }
2743
2744 tp = current;
2745 }
2746
2747 /* Enqueue the current thread last, so that we move all other
2748 threads over their breakpoints first. */
2749 if (tp->stepping_over_breakpoint)
2750 thread_step_over_chain_enqueue (tp);
2751
2752 /* If the thread isn't started, we'll still need to set its prev_pc,
2753 so that switch_back_to_stepped_thread knows the thread hasn't
2754 advanced. Must do this before resuming any thread, as in
2755 all-stop/remote, once we resume we can't send any other packet
2756 until the target stops again. */
2757 tp->prev_pc = regcache_read_pc (regcache);
2758
2759 started = start_step_over ();
2760
2761 if (step_over_info_valid_p ())
2762 {
2763 /* Either this thread started a new in-line step over, or some
2764 other thread was already doing one. In either case, don't
2765 resume anything else until the step-over is finished. */
2766 }
2767 else if (started && !non_stop)
2768 {
2769 /* A new displaced stepping sequence was started. In all-stop,
2770 we can't talk to the target anymore until it next stops. */
2771 }
2772 else if (!tp->executing && !thread_is_in_step_over_chain (tp))
2773 {
2774 /* The thread wasn't started, and isn't queued, run it now. */
2775 reset_ecs (ecs, tp);
2776 switch_to_thread (tp->ptid);
2777 keep_going_pass_signal (ecs);
2778 if (!ecs->wait_some_more)
2779 error ("Command aborted.");
2780 }
2781
2782 discard_cleanups (old_chain);
2783
2784 /* Wait for it to stop (if not standalone)
2785 and in any case decode why it stopped, and act accordingly. */
2786 /* Do this only if we are not using the event loop, or if the target
2787 does not support asynchronous execution. */
2788 if (!target_can_async_p ())
2789 {
2790 wait_for_inferior ();
2791 normal_stop ();
2792 }
2793 }
2794 \f
2795
/* Start remote-debugging of a machine over a serial link.
   FROM_TTY is forwarded to post_create_inferior to control
   user-visible announcements.  */

void
start_remote (int from_tty)
{
  struct inferior *inferior;

  inferior = current_inferior ();
  /* The target was attached/connected to, not launched by a "run"
     command, so report the initial stop quietly.  */
  inferior->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior ();

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (&current_target, from_tty);

  normal_stop ();
}
2829
/* Initialize static vars when a new inferior begins.  Resets
   breakpoint, proceed-status, and last-wait bookkeeping so state from
   a previous inferior cannot leak into the new one.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  breakpoint_init_inferior (inf_starting);

  clear_proceed_status (0);

  /* Forget which thread reported the last wait event.  */
  target_last_wait_ptid = minus_one_ptid;

  previous_inferior_ptid = inferior_ptid;

  /* Discard any skipped inlined frames.  */
  clear_inline_frame_state (minus_one_ptid);
}
2848
2849 \f
2850
/* Forward declarations of event-handling helpers defined later in
   this file.  */

static void handle_inferior_event (struct execution_control_state *ecs);

static void handle_step_into_function (struct gdbarch *gdbarch,
				       struct execution_control_state *ecs);
static void handle_step_into_function_backward (struct gdbarch *gdbarch,
						struct execution_control_state *ecs);
static void handle_signal_stop (struct execution_control_state *ecs);
static void check_exception_resume (struct execution_control_state *,
				    struct frame_info *);

static void end_stepping_range (struct execution_control_state *ecs);
static void stop_waiting (struct execution_control_state *ecs);
static void keep_going (struct execution_control_state *ecs);
static void process_event_stop_test (struct execution_control_state *ecs);
static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2866
/* Callback for iterate over threads.  If the thread is stopped, but
   the user/frontend doesn't know about that yet, go through
   normal_stop, as if the thread had just stopped now.  ARG points at
   a ptid.  If PTID is MINUS_ONE_PTID, applies to all threads.  If
   ptid_is_pid(PTID) is true, applies to all threads of the process
   pointed at by PTID.  Otherwise, apply only to the thread pointed by
   PTID.  Always returns 0 so iteration visits every thread.  */

static int
infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
{
  ptid_t ptid = * (ptid_t *) arg;

  /* Act only on threads that match PTID and that GDB still considers
     running, but that the target is no longer executing (i.e., the
     thread has stopped but the stop hasn't been reported yet).  */
  if ((ptid_equal (info->ptid, ptid)
       || ptid_equal (minus_one_ptid, ptid)
       || (ptid_is_pid (ptid)
	   && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
      && is_running (info->ptid)
      && !is_executing (info->ptid))
    {
      struct cleanup *old_chain;
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;

      memset (ecs, 0, sizeof (*ecs));

      old_chain = make_cleanup_restore_current_thread ();

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      /* Go through handle_inferior_event/normal_stop, so we always
	 have consistent output as if the stop event had been
	 reported.  Synthesize a plain "stopped, no signal" event for
	 this thread.  */
      ecs->ptid = info->ptid;
      ecs->event_thread = find_thread_ptid (info->ptid);
      ecs->ws.kind = TARGET_WAITKIND_STOPPED;
      ecs->ws.value.sig = GDB_SIGNAL_0;

      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
	{
	  struct thread_info *tp;

	  normal_stop ();

	  /* Finish off the continuations.  */
	  tp = inferior_thread ();
	  do_all_intermediate_continuations_thread (tp, 1);
	  do_all_continuations_thread (tp, 1);
	}

      do_cleanups (old_chain);
    }

  return 0;
}
2929
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  struct thread_info *tp;

  /* PTID was requested to stop.  Remove matching threads from the
     step-over queue, so we don't try to resume them
     automatically.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid))
      {
	if (thread_is_in_step_over_chain (tp))
	  thread_step_over_chain_remove (tp);
      }

  /* For each matching thread that already stopped internally but
     whose stop wasn't reported yet, report it now.  */
  iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
}
2951
/* Observer for the "thread_exit" event: if the cached last-wait ptid
   refers to the exiting thread TP, invalidate it so stale state isn't
   reused.  SILENT is part of the observer signature and unused.  */

static void
infrun_thread_thread_exit (struct thread_info *tp, int silent)
{
  if (ptid_equal (target_last_wait_ptid, tp->ptid))
    nullify_last_target_wait_ptid ();
}
2958
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are all internal breakpoints owned by
   infrun on behalf of a single thread.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
2969
2970 /* If the target still has execution, call FUNC for each thread that
2971 just stopped. In all-stop, that's all the non-exited threads; in
2972 non-stop, that's the current thread, only. */
2973
2974 typedef void (*for_each_just_stopped_thread_callback_func)
2975 (struct thread_info *tp);
2976
2977 static void
2978 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2979 {
2980 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
2981 return;
2982
2983 if (non_stop)
2984 {
2985 /* If in non-stop mode, only the current thread stopped. */
2986 func (inferior_thread ());
2987 }
2988 else
2989 {
2990 struct thread_info *tp;
2991
2992 /* In all-stop mode, all threads have stopped. */
2993 ALL_NON_EXITED_THREADS (tp)
2994 {
2995 func (tp);
2996 }
2997 }
2998 }
2999
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  Thin wrapper applying
   delete_thread_infrun_breakpoints to each just-stopped thread.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3008
/* Delete the single-step breakpoints of the threads that just
   stopped.  Unlike the function above, this leaves the step-resume
   and exception-resume breakpoints alone.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3017
/* A cleanup wrapper.  ARG is unused; it exists only to satisfy the
   make_cleanup callback signature.  */

static void
delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
{
  delete_just_stopped_threads_infrun_breakpoints ();
}
3025
/* Pretty print the results of target_wait, for debugging purposes.
   WAITON_PTID is what was passed to target_wait, RESULT_PTID and WS
   are what it returned.  Output goes to gdb_stdlog.  */

static void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus *ws)
{
  char *status_string = target_waitstatus_to_string (ws);
  struct ui_file *tmp_stream = mem_fileopen ();
  char *text;

  /* The text is split over several lines because it was getting too long.
     Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
     output as a unit; we want only one timestamp printed if debug_timestamp
     is set.  */

  fprintf_unfiltered (tmp_stream,
		      "infrun: target_wait (%d.%ld.%ld",
		      ptid_get_pid (waiton_ptid),
		      ptid_get_lwp (waiton_ptid),
		      ptid_get_tid (waiton_ptid));
  /* -1 means "wait for any"; only name the thread when it's a real
     one.  */
  if (ptid_get_pid (waiton_ptid) != -1)
    fprintf_unfiltered (tmp_stream,
			" [%s]", target_pid_to_str (waiton_ptid));
  fprintf_unfiltered (tmp_stream, ", status) =\n");
  fprintf_unfiltered (tmp_stream,
		      "infrun: %d.%ld.%ld [%s],\n",
		      ptid_get_pid (result_ptid),
		      ptid_get_lwp (result_ptid),
		      ptid_get_tid (result_ptid),
		      target_pid_to_str (result_ptid));
  fprintf_unfiltered (tmp_stream,
		      "infrun: %s\n",
		      status_string);

  text = ui_file_xstrdup (tmp_stream, NULL);

  /* This uses %s in part to handle %'s in the text, but also to avoid
     a gcc error: the format attribute requires a string literal.  */
  fprintf_unfiltered (gdb_stdlog, "%s", text);

  /* All temporaries are owned and released here.  */
  xfree (status_string);
  xfree (text);
  ui_file_delete (tmp_stream);
}
3070
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  Pumps target events until no thread of the current inferior
   is displaced stepping anymore.  Throws an error if the program
   exits while we are waiting.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = pid_to_ptid (inf->pid);
  struct cleanup *old_chain_1;
  struct displaced_step_inferior_state *displaced;

  displaced = get_displaced_stepping_state (inf->pid);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Flag the inferior as detaching; restored on error unwind by the
     cleanup.  */
  old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
  inf->detaching = 1;

  /* Keep handling events until the displaced step has finished
     (step_ptid goes back to null_ptid).  */
  while (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      struct cleanup *old_chain_2;
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      old_chain_2 = make_cleanup (finish_thread_state_cleanup,
				  &minus_one_ptid);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      discard_cleanups (old_chain_2);

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  discard_cleanups (old_chain_1);
	  error (_("Program exited while detaching"));
	}
    }

  discard_cleanups (old_chain_1);
}
3146
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands.  */

void
wait_for_inferior (void)
{
  struct cleanup *old_cleanups;
  struct cleanup *thread_state_chain;

  if (debug_infrun)
    fprintf_unfiltered
      (gdb_stdlog, "infrun: wait_for_inferior ()\n");

  /* On exit (normal return or error unwind), delete the infrun
     breakpoints of the threads that just stopped.  */
  old_cleanups
    = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
		    NULL);

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  thread_state_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);

  while (1)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      /* Wait for any thread of any inferior.  */
      ptid_t waiton_ptid = minus_one_ptid;

      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
	break;
    }

  /* No error, don't finish the state yet.  */
  discard_cleanups (thread_state_chain);

  do_cleanups (old_cleanups);
}
3209
3210 /* Cleanup that reinstalls the readline callback handler, if the
3211 target is running in the background. If while handling the target
3212 event something triggered a secondary prompt, like e.g., a
3213 pagination prompt, we'll have removed the callback handler (see
3214 gdb_readline_wrapper_line). Need to do this as we go back to the
3215 event loop, ready to process further input. Note this has no
3216 effect if the handler hasn't actually been removed, because calling
3217 rl_callback_handler_install resets the line buffer, thus losing
3218 input. */
3219
3220 static void
3221 reinstall_readline_callback_handler_cleanup (void *arg)
3222 {
3223 if (!interpreter_async)
3224 {
3225 /* We're not going back to the top level event loop yet. Don't
3226 install the readline callback, as it'd prep the terminal,
3227 readline-style (raw, noecho) (e.g., --batch). We'll install
3228 it the next time the prompt is displayed, when we're ready
3229 for input. */
3230 return;
3231 }
3232
3233 if (async_command_editing_p && !sync_execution)
3234 gdb_rl_callback_handler_reinstall ();
3235 }
3236
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  CLIENT_DATA is unused (event-loop callback
   signature).  */

void
fetch_inferior_event (void *client_data)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
  struct cleanup *ts_old_chain;
  /* Remember whether we started in sync execution, to detect the
     sync -> async transition at the end.  */
  int was_sync = sync_execution;
  int cmd_done = 0;
  ptid_t waiton_ptid = minus_one_ptid;

  memset (ecs, 0, sizeof (*ecs));

  /* End up with readline processing input, if necessary.  */
  make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);

  /* We're handling a live event, so make sure we're doing live
     debugging.  If we're looking at traceframes while the target is
     running, we're going to need to get back to that mode after
     handling the event.  */
  if (non_stop)
    {
      make_cleanup_restore_current_traceframe ();
      set_current_traceframe (-1);
    }

  if (non_stop)
    /* In non-stop mode, the user/frontend should not notice a thread
       switch due to internal events.  Make sure we reverse to the
       user selected thread and frame after handling the event and
       running any breakpoint commands.  */
    make_cleanup_restore_current_thread ();

  overlay_cache_invalid = 1;
  /* Flush target cache before starting to handle each event.  Target
     was running and cache could be stale.  This is just a heuristic.
     Running threads may modify target memory, but we don't get any
     event.  */
  target_dcache_invalidate ();

  make_cleanup_restore_integer (&execution_direction);
  execution_direction = target_execution_direction ();

  /* Poll for an event; TARGET_WNOHANG means don't block.  */
  if (deprecated_target_wait_hook)
    ecs->ptid =
      deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
  else
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);

  if (debug_infrun)
    print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  if (!non_stop)
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);

  /* Get executed before make_cleanup_restore_current_thread above to apply
     still for the thread which has thrown the exception.  */
  make_bpstat_clear_actions_cleanup ();

  make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);

  /* Now figure out what to do with the result of the result.  */
  handle_inferior_event (ecs);

  if (!ecs->wait_some_more)
    {
      struct inferior *inf = find_inferior_ptid (ecs->ptid);

      delete_just_stopped_threads_infrun_breakpoints ();

      /* We may not find an inferior if this was a process exit.  */
      if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
	normal_stop ();

      /* If a multi-step command (e.g. "next N") finished one step but
	 has more to do, keep executing; otherwise the command is
	 complete.  */
      if (target_has_execution
	  && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
	  && ecs->ws.kind != TARGET_WAITKIND_EXITED
	  && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
	  && ecs->event_thread->step_multi
	  && ecs->event_thread->control.stop_step)
	inferior_event_handler (INF_EXEC_CONTINUE, NULL);
      else
	{
	  inferior_event_handler (INF_EXEC_COMPLETE, NULL);
	  cmd_done = 1;
	}
    }

  /* No error, don't finish the thread states yet.  */
  discard_cleanups (ts_old_chain);

  /* Revert thread and frame.  */
  do_cleanups (old_chain);

  /* If the inferior was in sync execution mode, and now isn't,
     restore the prompt (a synchronous execution command has finished,
     and we're ready for input).  */
  if (interpreter_async && was_sync && !sync_execution)
    observer_notify_sync_execution_done ();

  if (cmd_done
      && !was_sync
      && exec_done_display_p
      && (ptid_equal (inferior_ptid, null_ptid)
	  || !is_running (inferior_ptid)))
    printf_unfiltered (_("completed.\n"));
}
3358
/* Record the frame and location we're currently stepping through.
   Stores FRAME's ids and SAL's symtab/line in the current thread's
   stepping state.  */
void
set_step_info (struct frame_info *frame, struct symtab_and_line sal)
{
  struct thread_info *tp = inferior_thread ();

  tp->control.step_frame_id = get_frame_id (frame);
  tp->control.step_stack_frame_id = get_stack_frame_id (frame);

  /* Remember the source position so stepping knows when we've left
     the current line.  */
  tp->current_symtab = sal.symtab;
  tp->current_line = sal.line;
}
3371
3372 /* Clear context switchable stepping state. */
3373
3374 void
3375 init_thread_stepping_state (struct thread_info *tss)
3376 {
3377 tss->stepped_breakpoint = 0;
3378 tss->stepping_over_breakpoint = 0;
3379 tss->stepping_over_watchpoint = 0;
3380 tss->step_after_step_resume_breakpoint = 0;
3381 }
3382
/* Set the cached copy of the last ptid/waitstatus.  PTID and STATUS
   are what the most recent target_wait returned.  */

static void
set_last_target_status (ptid_t ptid, struct target_waitstatus status)
{
  target_last_wait_ptid = ptid;
  target_last_waitstatus = status;
}
3391
/* Return the cached copy of the last pid/waitstatus returned by
   target_wait()/deprecated_target_wait_hook().  The data is actually
   cached by handle_inferior_event(), which gets called immediately
   after target_wait()/deprecated_target_wait_hook().  Results are
   written through PTIDP and STATUS.  */

void
get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
{
  *ptidp = target_last_wait_ptid;
  *status = target_last_waitstatus;
}
3403
/* Invalidate the cached last-wait ptid (e.g. because the thread it
   named has exited).  */

void
nullify_last_target_wait_ptid (void)
{
  target_last_wait_ptid = minus_one_ptid;
}
3409
/* Switch thread contexts.  Makes PTID the current thread, logging
   the transition when infrun debugging is enabled.  */

static void
context_switch (ptid_t ptid)
{
  if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
    {
      /* NOTE(review): two separate fprintf calls -- presumably
	 because target_pid_to_str reuses a static buffer, so both
	 conversions can't appear in one format string; confirm
	 before ever merging these.  */
      fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
			  target_pid_to_str (inferior_ptid));
      fprintf_unfiltered (gdb_stdlog, "to %s\n",
			  target_pid_to_str (ptid));
    }

  switch_to_thread (ptid);
}
3425
/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint.  If so,
   adjust the PC, per gdbarch_decr_pc_after_break.  THREAD is the
   thread that reported WS.  */

static void
adjust_pc_after_break (struct thread_info *thread,
		       struct target_waitstatus *ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct address_space *aspace;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws->kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ws->value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

     B1         0x08000000 :   INSN1
     B2         0x08000001 :   INSN2
		0x08000002 :   INSN3
     PC ->      0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

     B1         0x08000000 :   INSN1
     B2   PC -> 0x08000001 :   INSN2
		0x08000002 :   INSN3
		0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread->ptid);
  gdbarch = get_regcache_arch (regcache);

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  aspace = get_regcache_aspace (regcache);

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);

      /* Don't let the PC write below be recorded by process record;
	 it is an internal adjustment, not inferior execution.  */
      if (record_full_is_used ())
	record_full_gdb_operation_disable_set ();

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);

      do_cleanups (old_cleanups);
    }
}
3573
3574 static int
3575 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3576 {
3577 for (frame = get_prev_frame (frame);
3578 frame != NULL;
3579 frame = get_prev_frame (frame))
3580 {
3581 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3582 return 1;
3583 if (get_frame_type (frame) != INLINE_FRAME)
3584 break;
3585 }
3586
3587 return 0;
3588 }
3589
/* Auxiliary function that handles syscall entry/return events.
   It returns 1 if the inferior should keep going (and GDB
   should ignore the event), or 0 if the event deserves to be
   processed.  */

static int
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  /* Make the event thread current before inspecting its state.  */
  if (!ptid_equal (ecs->ptid, inferior_ptid))
    context_switch (ecs->ptid);

  regcache = get_thread_regcache (ecs->ptid);
  syscall_number = ecs->ws.value.syscall_number;
  stop_pc = regcache_read_pc (regcache);

  /* Only consult the breakpoint layer if there is at least one
     syscall catchpoint, and it covers this syscall number.  */
  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
			    syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (regcache),
			      stop_pc, ecs->ptid, &ecs->ws);

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return 0;
	}
    }

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);
  return 1;
}
3630
/* Lazily fill in the execution_control_state's stop_func_* fields.
   Uses the global stop_pc; caches the result so repeated calls for
   the same ECS are cheap.  */

static void
fill_in_stop_func (struct gdbarch *gdbarch,
		   struct execution_control_state *ecs)
{
  if (!ecs->stop_func_filled_in)
    {
      /* Don't care about return value; stop_func_start and stop_func_name
	 will both be 0 if it doesn't work.  */
      find_pc_partial_function (stop_pc, &ecs->stop_func_name,
				&ecs->stop_func_start, &ecs->stop_func_end);
      ecs->stop_func_start
	+= gdbarch_deprecated_function_start_offset (gdbarch);

      /* Some architectures want the reported start past the function
	 entry point.  */
      if (gdbarch_skip_entrypoint_p (gdbarch))
	ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
							ecs->stop_func_start);

      ecs->stop_func_filled_in = 1;
    }
}
3653
3654
/* Return the STOP_SOON field of the inferior pointed at by PTID.
   Asserts that PTID maps to a known inferior.  */

static enum stop_kind
get_inferior_stop_soon (ptid_t ptid)
{
  struct inferior *inf = find_inferior_ptid (ptid);

  gdb_assert (inf != NULL);
  return inf->control.stop_soon;
}
3665
/* Given an execution control state that has been freshly filled in by
   an event from the inferior, figure out what it means and take
   appropriate action.

   The alternatives are:

   1) stop_waiting and return; to really stop and return to the
   debugger.

   2) keep_going and return; to wait for the next event (set
   ecs->event_thread->stepping_over_breakpoint to 1 to single step
   once).  */

static void
handle_inferior_event_1 (struct execution_control_state *ecs)
{
  /* Cached inferior->control.stop_soon, fetched where needed below.  */
  enum stop_kind stop_soon;

  if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
	 handling it at this level.  The lower layers have already
	 done what needs to be done, if anything.

	 One of the possible circumstances for this is when the
	 inferior produces output for the console.  The inferior has
	 not stopped, and we are ignoring the event.  Another possible
	 circumstance is any event which the lower level knows will be
	 reported multiple times without an intervening resume.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
      && target_can_async_p () && !sync_execution)
    {
      /* There were no unwaited-for children left in the target, but,
	 we're not synchronously waiting for events either.  Just
	 ignore.  Otherwise, if we were running a synchronous
	 execution command, we need to cancel it and give the user
	 back the terminal.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
      prepare_to_wait (ecs);
      return;
    }

  /* Cache the last pid/waitstatus.  */
  set_last_target_status (ecs->ptid, ecs->ws);

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      /* No unwaited-for children left.  IOW, all resumed children
	 have exited.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");

      stop_print_frame = 0;
      stop_waiting (ecs);
      return;
    }

  /* For every event kind except whole-process exits, there is an
     event thread to attribute the event to; look it up (creating it
     if this is the first report from a new thread).  */
  if (ecs->ws.kind != TARGET_WAITKIND_EXITED
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
    {
      ecs->event_thread = find_thread_ptid (ecs->ptid);
      /* If it's a new thread, add it to the thread database.  */
      if (ecs->event_thread == NULL)
	ecs->event_thread = add_thread (ecs->ptid);

      /* Disable range stepping.  If the next step request could use a
	 range, this will be end up re-enabled then.  */
      ecs->event_thread->control.may_range_step = 0;
    }

  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs->event_thread, &ecs->ws);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
      && (ecs->ws.value.sig == GDB_SIGNAL_ILL
	  || ecs->ws.value.sig == GDB_SIGNAL_SEGV
	  || ecs->ws.value.sig == GDB_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->ptid);

      if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
				      regcache_read_pc (regcache)))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: Treating signal as SIGTRAP\n");
	  ecs->ws.value.sig = GDB_SIGNAL_TRAP;
	}
    }

  /* Mark the non-executing threads accordingly.  In all-stop, all
     threads of all processes are stopped when we get any event
     reported.  In non-stop mode, only the event thread stops.  */
  if (!non_stop)
    set_executing (minus_one_ptid, 0);
  else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
	   || ecs->ws.kind == TARGET_WAITKIND_EXITED)
    {
      ptid_t pid_ptid;

      /* If we're handling a process exit in non-stop mode, even
	 though threads haven't been deleted yet, one would think that
	 there is nothing to do, as threads of the dead process will
	 be soon deleted, and threads of any other process were left
	 running.  However, on some targets, threads survive a process
	 exit event.  E.g., for the "checkpoint" command, when the
	 current checkpoint/fork exits, linux-fork.c automatically
	 switches to another fork from within target_mourn_inferior,
	 by associating the same inferior/thread to another fork.  We
	 haven't mourned yet at this point, but we must mark any
	 threads left in the process as not-executing so that
	 finish_thread_state marks them stopped (in the user's
	 perspective) if/when we present the stop to the user.  */
      pid_ptid = pid_to_ptid (ptid_get_pid (ecs->ptid));
      set_executing (pid_ptid, 0);
    }
  else
    set_executing (ecs->ptid, 0);

  /* Dispatch on the event kind.  Every arm below ends by either
     stopping (stop_waiting), resuming (keep_going / resume +
     prepare_to_wait), or deferring to a more specific handler.  */
  switch (ecs->ws.kind)
    {
    case TARGET_WAITKIND_LOADED:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      /* Ignore gracefully during startup of the inferior, as it might
	 be the shell which has just loaded some objects, otherwise
	 add the symbols for the newly loaded objects.  Also ignore at
	 the beginning of an attach or remote session; we will query
	 the full list of libraries once the connection is
	 established.  */

      stop_soon = get_inferior_stop_soon (ecs->ptid);
      if (stop_soon == NO_STOP_QUIETLY)
	{
	  struct regcache *regcache;

	  regcache = get_thread_regcache (ecs->ptid);

	  handle_solib_event ();

	  ecs->event_thread->control.stop_bpstat
	    = bpstat_stop_status (get_regcache_aspace (regcache),
				  stop_pc, ecs->ptid, &ecs->ws);

	  if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	    {
	      /* A catchpoint triggered.  */
	      process_event_stop_test (ecs);
	      return;
	    }

	  /* If requested, stop when the dynamic linker notifies
	     gdb of events.  This allows the user to get control
	     and place breakpoints in initializer routines for
	     dynamically loaded objects (among other things).  */
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  if (stop_on_solib_events)
	    {
	      /* Make sure we print "Stopped due to solib-event" in
		 normal_stop.  */
	      stop_print_frame = 1;

	      stop_waiting (ecs);
	      return;
	    }
	}

      /* If we are skipping through a shell, or through shared library
	 loading that we aren't interested in, resume the program.  If
	 we're running the program normally, also resume.  */
      if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
	{
	  /* Loading of shared libraries might have changed breakpoint
	     addresses.  Make sure new breakpoints are inserted.  */
	  if (stop_soon == NO_STOP_QUIETLY)
	    insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	  prepare_to_wait (ecs);
	  return;
	}

      /* But stop if we're attaching or setting up a remote
	 connection.  */
      if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
	  || stop_soon == STOP_QUIETLY_REMOTE)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
	  stop_waiting (ecs);
	  return;
	}

      internal_error (__FILE__, __LINE__,
		      _("unhandled stop_soon: %d"), (int) stop_soon);

    case TARGET_WAITKIND_SPURIOUS:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      resume (GDB_SIGNAL_0);
      prepare_to_wait (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      if (debug_infrun)
	{
	  if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_EXITED\n");
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_SIGNALLED\n");
	}

      inferior_ptid = ecs->ptid;
      set_current_inferior (find_inferior_ptid (ecs->ptid));
      set_current_program_space (current_inferior ()->pspace);
      handle_vfork_child_exec_or_exit (0);
      target_terminal_ours ();	/* Must do this before mourn anyway.  */

      /* Clearing any previous state of convenience variables.  */
      clear_exit_convenience_vars ();

      if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	{
	  /* Record the exit code in the convenience variable $_exitcode, so
	     that the user can inspect this again later.  */
	  set_internalvar_integer (lookup_internalvar ("_exitcode"),
				   (LONGEST) ecs->ws.value.integer);

	  /* Also record this in the inferior itself.  */
	  current_inferior ()->has_exit_code = 1;
	  current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;

	  /* Support the --return-child-result option.  */
	  return_child_result_value = ecs->ws.value.integer;

	  observer_notify_exited (ecs->ws.value.integer);
	}
      else
	{
	  struct regcache *regcache = get_thread_regcache (ecs->ptid);
	  struct gdbarch *gdbarch = get_regcache_arch (regcache);

	  if (gdbarch_gdb_signal_to_target_p (gdbarch))
	    {
	      /* Set the value of the internal variable $_exitsignal,
		 which holds the signal uncaught by the inferior.  */
	      set_internalvar_integer (lookup_internalvar ("_exitsignal"),
				       gdbarch_gdb_signal_to_target (gdbarch,
							  ecs->ws.value.sig));
	    }
	  else
	    {
	      /* We don't have access to the target's method used for
		 converting between signal numbers (GDB's internal
		 representation <-> target's representation).
		 Therefore, we cannot do a good job at displaying this
		 information to the user.  It's better to just warn
		 her about it (if infrun debugging is enabled), and
		 give up.  */
	      if (debug_infrun)
		fprintf_filtered (gdb_stdlog, _("\
Cannot fill $_exitsignal with the correct signal number.\n"));
	    }

	  observer_notify_signal_exited (ecs->ws.value.sig);
	}

      gdb_flush (gdb_stdout);
      target_mourn_inferior ();
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;

      /* The following are the only cases in which we keep going;
         the above cases end in a continue or goto.  */
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      if (debug_infrun)
	{
	  if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	    fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
	  else
	    fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
	}

      /* Check whether the inferior is displaced stepping.  */
      {
	struct regcache *regcache = get_thread_regcache (ecs->ptid);
	struct gdbarch *gdbarch = get_regcache_arch (regcache);
	struct displaced_step_inferior_state *displaced
	  = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));

	/* If checking displaced stepping is supported, and thread
	   ecs->ptid is displaced stepping.  */
	if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
	  {
	    struct inferior *parent_inf
	      = find_inferior_ptid (ecs->ptid);
	    struct regcache *child_regcache;
	    CORE_ADDR parent_pc;

	    /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
	       indicating that the displaced stepping of syscall instruction
	       has been done.  Perform cleanup for parent process here.  Note
	       that this operation also cleans up the child process for vfork,
	       because their pages are shared.  */
	    displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
	    /* Start a new step-over in another thread if there's one
	       that needs it.  */
	    start_step_over ();

	    if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	      {
		/* Restore scratch pad for child process.  */
		displaced_step_restore (displaced, ecs->ws.value.related_pid);
	      }

	    /* Since the vfork/fork syscall instruction was executed in the scratchpad,
	       the child's PC is also within the scratchpad.  Set the child's PC
	       to the parent's PC value, which has already been fixed up.
	       FIXME: we use the parent's aspace here, although we're touching
	       the child, because the child hasn't been added to the inferior
	       list yet at this point.  */

	    child_regcache
	      = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
						 gdbarch,
						 parent_inf->aspace);
	    /* Read PC value of parent process.  */
	    parent_pc = regcache_read_pc (regcache);

	    if (debug_displaced)
	      fprintf_unfiltered (gdb_stdlog,
				  "displaced: write child pc from %s to %s\n",
				  paddress (gdbarch,
					    regcache_read_pc (child_regcache)),
				  paddress (gdbarch, parent_pc));

	    regcache_write_pc (child_regcache, parent_pc);
	  }
      }

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      /* Immediately detach breakpoints from the child before there's
	 any chance of letting the user delete breakpoints from the
	 breakpoint lists.  If we don't do this early, it's easy to
	 leave left over traps in the child, vis: "break foo; catch
	 fork; c; <fork>; del; c; <child calls foo>".  We only follow
	 the fork on the last `continue', and by that time the
	 breakpoint at "foo" is long gone from the breakpoint table.
	 If we vforked, then we don't need to unpatch here, since both
	 parent and child are sharing the same memory pages; we'll
	 need to unpatch at follow/detach time instead to be certain
	 that new breakpoints added between catchpoint hit time and
	 vfork follow are detached.  */
      if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
	{
	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ecs->ws.value.related_pid);
	}

      /* Drop the just-stopped threads' single-step breakpoints.  */
      delete_just_stopped_threads_single_step_breakpoints ();

      /* In case the event is caught by a catchpoint, remember that
	 the event is to be followed at the next resume of the thread,
	 and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
			      stop_pc, ecs->ptid, &ecs->ws);

      /* If no catchpoint triggered for this, then keep going.  Note
	 that we're interested in knowing the bpstat actually causes a
	 stop, not just if it may explain the signal.  Software
	 watchpoints, for example, always appear in the bpstat.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ptid_t parent;
	  ptid_t child;
	  int should_resume;
	  int follow_child
	    = (follow_fork_mode_string == follow_fork_mode_child);

	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

	  should_resume = follow_fork ();

	  parent = ecs->ptid;
	  child = ecs->ws.value.related_pid;

	  /* In non-stop mode, also resume the other branch.  */
	  if (non_stop && !detach_fork)
	    {
	      /* NOTE: keep_going here resumes the branch we are NOT
		 following; the branch being followed is resumed (or
		 stopped) by the keep_going/stop_waiting below.  */
	      if (follow_child)
		switch_to_thread (parent);
	      else
		switch_to_thread (child);

	      ecs->event_thread = inferior_thread ();
	      ecs->ptid = inferior_ptid;
	      keep_going (ecs);
	    }

	  if (follow_child)
	    switch_to_thread (child);
	  else
	    switch_to_thread (parent);

	  ecs->event_thread = inferior_thread ();
	  ecs->ptid = inferior_ptid;

	  if (should_resume)
	    keep_going (ecs);
	  else
	    stop_waiting (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
	 the parent, and keep going.  */

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_VFORK_DONE\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      current_inferior ()->waiting_for_vfork_done = 0;
      current_inferior ()->pspace->breakpoints_not_allowed = 0;
      /* This also takes care of reinserting breakpoints in the
	 previously locked inferior.  */
      keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXECD:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
         Must do this now, before trying to determine whether to
         stop.  */
      follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
			      stop_pc, ecs->ptid, &ecs->ws);

      /* Note that this may be referenced from inside
	 bpstat_stop_status above, through inferior_has_execd.  */
      xfree (ecs->ws.value.execd_pathname);
      ecs->ws.value.execd_pathname = NULL;

      /* If no catchpoint triggered for this, then keep going.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  keep_going (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

      /* Be careful not to try to gather much state about a thread
         that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
      /* Getting the current syscall number.  */
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

      /* Before examining the threads further, step this thread to
         get it entirely out of the syscall.  (We get notice of the
         event when the thread is just on the verge of exiting a
         syscall.  Stepping one instruction seems to get it back
         into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_STOPPED:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
      ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
      handle_signal_stop (ecs);
      return;

    case TARGET_WAITKIND_NO_HISTORY:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
      /* Reverse execution: target ran out of history info.  */

      delete_just_stopped_threads_single_step_breakpoints ();
      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
      observer_notify_no_history ();
      stop_waiting (ecs);
      return;
    }
}
4221
/* A wrapper around handle_inferior_event_1, which also makes sure
   that all temporary struct value objects that were created during
   the handling of the event get deleted at the end.  */

static void
handle_inferior_event (struct execution_control_state *ecs)
{
  /* Remember the current position in the value chain, so that every
     value allocated while handling the event can be released below.  */
  struct value *free_point = value_mark ();

  handle_inferior_event_1 (ecs);

  /* Purge all temporary values created during the event handling, as
     it could be a long time before we return to the command level
     where such values would otherwise be purged.  */
  value_free_to_mark (free_point);
}
4237
4238 /* Called when we get an event that may finish an in-line or
4239 out-of-line (displaced stepping) step-over started previously. */
4240
4241 static void
4242 finish_step_over (struct execution_control_state *ecs)
4243 {
4244 displaced_step_fixup (ecs->ptid,
4245 ecs->event_thread->suspend.stop_signal);
4246
4247 if (step_over_info_valid_p ())
4248 {
4249 /* If we're stepping over a breakpoint with all threads locked,
4250 then only the thread that was stepped should be reporting
4251 back an event. */
4252 gdb_assert (ecs->event_thread->control.trap_expected);
4253
4254 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4255 clear_step_over_info ();
4256 }
4257
4258 if (!non_stop)
4259 return;
4260
4261 /* Start a new step-over in another thread if there's one that
4262 needs it. */
4263 start_step_over ();
4264 }
4265
4266 /* Come here when the program has stopped with a signal. */
4267
4268 static void
4269 handle_signal_stop (struct execution_control_state *ecs)
4270 {
4271 struct frame_info *frame;
4272 struct gdbarch *gdbarch;
4273 int stopped_by_watchpoint;
4274 enum stop_kind stop_soon;
4275 int random_signal;
4276
4277 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4278
4279 /* Do we need to clean up the state of a thread that has
4280 completed a displaced single-step? (Doing so usually affects
4281 the PC, so do it here, before we set stop_pc.) */
4282 finish_step_over (ecs);
4283
4284 /* If we either finished a single-step or hit a breakpoint, but
4285 the user wanted this thread to be stopped, pretend we got a
4286 SIG0 (generic unsignaled stop). */
4287 if (ecs->event_thread->stop_requested
4288 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4289 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4290
4291 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4292
4293 if (debug_infrun)
4294 {
4295 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4296 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4297 struct cleanup *old_chain = save_inferior_ptid ();
4298
4299 inferior_ptid = ecs->ptid;
4300
4301 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4302 paddress (gdbarch, stop_pc));
4303 if (target_stopped_by_watchpoint ())
4304 {
4305 CORE_ADDR addr;
4306
4307 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4308
4309 if (target_stopped_data_address (&current_target, &addr))
4310 fprintf_unfiltered (gdb_stdlog,
4311 "infrun: stopped data address = %s\n",
4312 paddress (gdbarch, addr));
4313 else
4314 fprintf_unfiltered (gdb_stdlog,
4315 "infrun: (no data address available)\n");
4316 }
4317
4318 do_cleanups (old_chain);
4319 }
4320
4321 /* This is originated from start_remote(), start_inferior() and
4322 shared libraries hook functions. */
4323 stop_soon = get_inferior_stop_soon (ecs->ptid);
4324 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4325 {
4326 if (!ptid_equal (ecs->ptid, inferior_ptid))
4327 context_switch (ecs->ptid);
4328 if (debug_infrun)
4329 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4330 stop_print_frame = 1;
4331 stop_waiting (ecs);
4332 return;
4333 }
4334
4335 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4336 && stop_after_trap)
4337 {
4338 if (!ptid_equal (ecs->ptid, inferior_ptid))
4339 context_switch (ecs->ptid);
4340 if (debug_infrun)
4341 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4342 stop_print_frame = 0;
4343 stop_waiting (ecs);
4344 return;
4345 }
4346
4347 /* This originates from attach_command(). We need to overwrite
4348 the stop_signal here, because some kernels don't ignore a
4349 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4350 See more comments in inferior.h. On the other hand, if we
4351 get a non-SIGSTOP, report it to the user - assume the backend
4352 will handle the SIGSTOP if it should show up later.
4353
4354 Also consider that the attach is complete when we see a
4355 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4356 target extended-remote report it instead of a SIGSTOP
4357 (e.g. gdbserver). We already rely on SIGTRAP being our
4358 signal, so this is no exception.
4359
4360 Also consider that the attach is complete when we see a
4361 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4362 the target to stop all threads of the inferior, in case the
4363 low level attach operation doesn't stop them implicitly. If
4364 they weren't stopped implicitly, then the stub will report a
4365 GDB_SIGNAL_0, meaning: stopped for no particular reason
4366 other than GDB's request. */
4367 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4368 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4369 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4370 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4371 {
4372 stop_print_frame = 1;
4373 stop_waiting (ecs);
4374 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4375 return;
4376 }
4377
4378 /* See if something interesting happened to the non-current thread. If
4379 so, then switch to that thread. */
4380 if (!ptid_equal (ecs->ptid, inferior_ptid))
4381 {
4382 if (debug_infrun)
4383 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4384
4385 context_switch (ecs->ptid);
4386
4387 if (deprecated_context_hook)
4388 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4389 }
4390
4391 /* At this point, get hold of the now-current thread's frame. */
4392 frame = get_current_frame ();
4393 gdbarch = get_frame_arch (frame);
4394
4395 /* Pull the single step breakpoints out of the target. */
4396 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4397 {
4398 struct regcache *regcache;
4399 struct address_space *aspace;
4400 CORE_ADDR pc;
4401
4402 regcache = get_thread_regcache (ecs->ptid);
4403 aspace = get_regcache_aspace (regcache);
4404 pc = regcache_read_pc (regcache);
4405
4406 /* However, before doing so, if this single-step breakpoint was
4407 actually for another thread, set this thread up for moving
4408 past it. */
4409 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4410 aspace, pc))
4411 {
4412 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4413 {
4414 if (debug_infrun)
4415 {
4416 fprintf_unfiltered (gdb_stdlog,
4417 "infrun: [%s] hit another thread's "
4418 "single-step breakpoint\n",
4419 target_pid_to_str (ecs->ptid));
4420 }
4421 ecs->hit_singlestep_breakpoint = 1;
4422 }
4423 }
4424 else
4425 {
4426 if (debug_infrun)
4427 {
4428 fprintf_unfiltered (gdb_stdlog,
4429 "infrun: [%s] hit its "
4430 "single-step breakpoint\n",
4431 target_pid_to_str (ecs->ptid));
4432 }
4433 }
4434 }
4435 delete_just_stopped_threads_single_step_breakpoints ();
4436
4437 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4438 && ecs->event_thread->control.trap_expected
4439 && ecs->event_thread->stepping_over_watchpoint)
4440 stopped_by_watchpoint = 0;
4441 else
4442 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4443
4444 /* If necessary, step over this watchpoint. We'll be back to display
4445 it in a moment. */
4446 if (stopped_by_watchpoint
4447 && (target_have_steppable_watchpoint
4448 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4449 {
4450 /* At this point, we are stopped at an instruction which has
4451 attempted to write to a piece of memory under control of
4452 a watchpoint. The instruction hasn't actually executed
4453 yet. If we were to evaluate the watchpoint expression
4454 now, we would get the old value, and therefore no change
4455 would seem to have occurred.
4456
4457 In order to make watchpoints work `right', we really need
4458 to complete the memory write, and then evaluate the
4459 watchpoint expression. We do this by single-stepping the
4460 target.
4461
4462 It may not be necessary to disable the watchpoint to step over
4463 it. For example, the PA can (with some kernel cooperation)
4464 single step over a watchpoint without disabling the watchpoint.
4465
4466 It is far more common to need to disable a watchpoint to step
4467 the inferior over it. If we have non-steppable watchpoints,
4468 we must disable the current watchpoint; it's simplest to
4469 disable all watchpoints.
4470
4471 Any breakpoint at PC must also be stepped over -- if there's
4472 one, it will have already triggered before the watchpoint
4473 triggered, and we either already reported it to the user, or
4474 it didn't cause a stop and we called keep_going. In either
4475 case, if there was a breakpoint at PC, we must be trying to
4476 step past it. */
4477 ecs->event_thread->stepping_over_watchpoint = 1;
4478 keep_going (ecs);
4479 return;
4480 }
4481
4482 ecs->event_thread->stepping_over_breakpoint = 0;
4483 ecs->event_thread->stepping_over_watchpoint = 0;
4484 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4485 ecs->event_thread->control.stop_step = 0;
4486 stop_print_frame = 1;
4487 stopped_by_random_signal = 0;
4488
4489 /* Hide inlined functions starting here, unless we just performed stepi or
4490 nexti. After stepi and nexti, always show the innermost frame (not any
4491 inline function call sites). */
4492 if (ecs->event_thread->control.step_range_end != 1)
4493 {
4494 struct address_space *aspace =
4495 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4496
4497 /* skip_inline_frames is expensive, so we avoid it if we can
4498 determine that the address is one where functions cannot have
4499 been inlined. This improves performance with inferiors that
4500 load a lot of shared libraries, because the solib event
4501 breakpoint is defined as the address of a function (i.e. not
4502 inline). Note that we have to check the previous PC as well
4503 as the current one to catch cases when we have just
4504 single-stepped off a breakpoint prior to reinstating it.
4505 Note that we're assuming that the code we single-step to is
4506 not inline, but that's not definitive: there's nothing
4507 preventing the event breakpoint function from containing
4508 inlined code, and the single-step ending up there. If the
4509 user had set a breakpoint on that inlined code, the missing
4510 skip_inline_frames call would break things. Fortunately
4511 that's an extremely unlikely scenario. */
4512 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4513 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4514 && ecs->event_thread->control.trap_expected
4515 && pc_at_non_inline_function (aspace,
4516 ecs->event_thread->prev_pc,
4517 &ecs->ws)))
4518 {
4519 skip_inline_frames (ecs->ptid);
4520
4521 /* Re-fetch current thread's frame in case that invalidated
4522 the frame cache. */
4523 frame = get_current_frame ();
4524 gdbarch = get_frame_arch (frame);
4525 }
4526 }
4527
4528 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4529 && ecs->event_thread->control.trap_expected
4530 && gdbarch_single_step_through_delay_p (gdbarch)
4531 && currently_stepping (ecs->event_thread))
4532 {
4533 /* We're trying to step off a breakpoint. Turns out that we're
4534 also on an instruction that needs to be stepped multiple
4535 times before it's been fully executing. E.g., architectures
4536 with a delay slot. It needs to be stepped twice, once for
4537 the instruction and once for the delay slot. */
4538 int step_through_delay
4539 = gdbarch_single_step_through_delay (gdbarch, frame);
4540
4541 if (debug_infrun && step_through_delay)
4542 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4543 if (ecs->event_thread->control.step_range_end == 0
4544 && step_through_delay)
4545 {
4546 /* The user issued a continue when stopped at a breakpoint.
4547 Set up for another trap and get out of here. */
4548 ecs->event_thread->stepping_over_breakpoint = 1;
4549 keep_going (ecs);
4550 return;
4551 }
4552 else if (step_through_delay)
4553 {
4554 /* The user issued a step when stopped at a breakpoint.
4555 Maybe we should stop, maybe we should not - the delay
4556 slot *might* correspond to a line of source. In any
4557 case, don't decide that here, just set
4558 ecs->stepping_over_breakpoint, making sure we
4559 single-step again before breakpoints are re-inserted. */
4560 ecs->event_thread->stepping_over_breakpoint = 1;
4561 }
4562 }
4563
4564 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4565 handles this event. */
4566 ecs->event_thread->control.stop_bpstat
4567 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4568 stop_pc, ecs->ptid, &ecs->ws);
4569
4570 /* Following in case break condition called a
4571 function. */
4572 stop_print_frame = 1;
4573
4574 /* This is where we handle "moribund" watchpoints. Unlike
4575 software breakpoints traps, hardware watchpoint traps are
4576 always distinguishable from random traps. If no high-level
4577 watchpoint is associated with the reported stop data address
4578 anymore, then the bpstat does not explain the signal ---
4579 simply make sure to ignore it if `stopped_by_watchpoint' is
4580 set. */
4581
4582 if (debug_infrun
4583 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4584 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4585 GDB_SIGNAL_TRAP)
4586 && stopped_by_watchpoint)
4587 fprintf_unfiltered (gdb_stdlog,
4588 "infrun: no user watchpoint explains "
4589 "watchpoint SIGTRAP, ignoring\n");
4590
4591 /* NOTE: cagney/2003-03-29: These checks for a random signal
4592 at one stage in the past included checks for an inferior
4593 function call's call dummy's return breakpoint. The original
4594 comment, that went with the test, read:
4595
4596 ``End of a stack dummy. Some systems (e.g. Sony news) give
4597 another signal besides SIGTRAP, so check here as well as
4598 above.''
4599
4600 If someone ever tries to get call dummys on a
4601 non-executable stack to work (where the target would stop
4602 with something like a SIGSEGV), then those tests might need
4603 to be re-instated. Given, however, that the tests were only
4604 enabled when momentary breakpoints were not being used, I
4605 suspect that it won't be the case.
4606
4607 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4608 be necessary for call dummies on a non-executable stack on
4609 SPARC. */
4610
4611 /* See if the breakpoints module can explain the signal. */
4612 random_signal
4613 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4614 ecs->event_thread->suspend.stop_signal);
4615
4616 /* Maybe this was a trap for a software breakpoint that has since
4617 been removed. */
4618 if (random_signal && target_stopped_by_sw_breakpoint ())
4619 {
4620 if (program_breakpoint_here_p (gdbarch, stop_pc))
4621 {
4622 struct regcache *regcache;
4623 int decr_pc;
4624
4625 /* Re-adjust PC to what the program would see if GDB was not
4626 debugging it. */
4627 regcache = get_thread_regcache (ecs->event_thread->ptid);
4628 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4629 if (decr_pc != 0)
4630 {
4631 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
4632
4633 if (record_full_is_used ())
4634 record_full_gdb_operation_disable_set ();
4635
4636 regcache_write_pc (regcache, stop_pc + decr_pc);
4637
4638 do_cleanups (old_cleanups);
4639 }
4640 }
4641 else
4642 {
4643 /* A delayed software breakpoint event. Ignore the trap. */
4644 if (debug_infrun)
4645 fprintf_unfiltered (gdb_stdlog,
4646 "infrun: delayed software breakpoint "
4647 "trap, ignoring\n");
4648 random_signal = 0;
4649 }
4650 }
4651
4652 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
4653 has since been removed. */
4654 if (random_signal && target_stopped_by_hw_breakpoint ())
4655 {
4656 /* A delayed hardware breakpoint event. Ignore the trap. */
4657 if (debug_infrun)
4658 fprintf_unfiltered (gdb_stdlog,
4659 "infrun: delayed hardware breakpoint/watchpoint "
4660 "trap, ignoring\n");
4661 random_signal = 0;
4662 }
4663
4664 /* If not, perhaps stepping/nexting can. */
4665 if (random_signal)
4666 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4667 && currently_stepping (ecs->event_thread));
4668
4669 /* Perhaps the thread hit a single-step breakpoint of _another_
4670 thread. Single-step breakpoints are transparent to the
4671 breakpoints module. */
4672 if (random_signal)
4673 random_signal = !ecs->hit_singlestep_breakpoint;
4674
4675 /* No? Perhaps we got a moribund watchpoint. */
4676 if (random_signal)
4677 random_signal = !stopped_by_watchpoint;
4678
4679 /* For the program's own signals, act according to
4680 the signal handling tables. */
4681
4682 if (random_signal)
4683 {
4684 /* Signal not for debugging purposes. */
4685 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4686 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4687
4688 if (debug_infrun)
4689 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4690 gdb_signal_to_symbol_string (stop_signal));
4691
4692 stopped_by_random_signal = 1;
4693
4694 /* Always stop on signals if we're either just gaining control
4695 of the program, or the user explicitly requested this thread
4696 to remain stopped. */
4697 if (stop_soon != NO_STOP_QUIETLY
4698 || ecs->event_thread->stop_requested
4699 || (!inf->detaching
4700 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4701 {
4702 stop_waiting (ecs);
4703 return;
4704 }
4705
4706 /* Notify observers the signal has "handle print" set. Note we
4707 returned early above if stopping; normal_stop handles the
4708 printing in that case. */
4709 if (signal_print[ecs->event_thread->suspend.stop_signal])
4710 {
4711 /* The signal table tells us to print about this signal. */
4712 target_terminal_ours_for_output ();
4713 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4714 target_terminal_inferior ();
4715 }
4716
4717 /* Clear the signal if it should not be passed. */
4718 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4719 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4720
4721 if (ecs->event_thread->prev_pc == stop_pc
4722 && ecs->event_thread->control.trap_expected
4723 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4724 {
4725 /* We were just starting a new sequence, attempting to
4726 single-step off of a breakpoint and expecting a SIGTRAP.
4727 Instead this signal arrives. This signal will take us out
4728 of the stepping range so GDB needs to remember to, when
4729 the signal handler returns, resume stepping off that
4730 breakpoint. */
4731 /* To simplify things, "continue" is forced to use the same
4732 code paths as single-step - set a breakpoint at the
4733 signal return address and then, once hit, step off that
4734 breakpoint. */
4735 if (debug_infrun)
4736 fprintf_unfiltered (gdb_stdlog,
4737 "infrun: signal arrived while stepping over "
4738 "breakpoint\n");
4739
4740 insert_hp_step_resume_breakpoint_at_frame (frame);
4741 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4742 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4743 ecs->event_thread->control.trap_expected = 0;
4744
4745 /* If we were nexting/stepping some other thread, switch to
4746 it, so that we don't continue it, losing control. */
4747 if (!switch_back_to_stepped_thread (ecs))
4748 keep_going (ecs);
4749 return;
4750 }
4751
4752 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4753 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4754 || ecs->event_thread->control.step_range_end == 1)
4755 && frame_id_eq (get_stack_frame_id (frame),
4756 ecs->event_thread->control.step_stack_frame_id)
4757 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4758 {
4759 /* The inferior is about to take a signal that will take it
4760 out of the single step range. Set a breakpoint at the
4761 current PC (which is presumably where the signal handler
4762 will eventually return) and then allow the inferior to
4763 run free.
4764
4765 Note that this is only needed for a signal delivered
4766 while in the single-step range. Nested signals aren't a
4767 problem as they eventually all return. */
4768 if (debug_infrun)
4769 fprintf_unfiltered (gdb_stdlog,
4770 "infrun: signal may take us out of "
4771 "single-step range\n");
4772
4773 insert_hp_step_resume_breakpoint_at_frame (frame);
4774 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4775 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4776 ecs->event_thread->control.trap_expected = 0;
4777 keep_going (ecs);
4778 return;
4779 }
4780
4781 /* Note: step_resume_breakpoint may be non-NULL. This occures
4782 when either there's a nested signal, or when there's a
4783 pending signal enabled just as the signal handler returns
4784 (leaving the inferior at the step-resume-breakpoint without
4785 actually executing it). Either way continue until the
4786 breakpoint is really hit. */
4787
4788 if (!switch_back_to_stepped_thread (ecs))
4789 {
4790 if (debug_infrun)
4791 fprintf_unfiltered (gdb_stdlog,
4792 "infrun: random signal, keep going\n");
4793
4794 keep_going (ecs);
4795 }
4796 return;
4797 }
4798
4799 process_event_stop_test (ecs);
4800 }
4801
4802 /* Come here when we've got some debug event / signal we can explain
4803 (IOW, not a random signal), and test whether it should cause a
4804 stop, or whether we should resume the inferior (transparently).
4805 E.g., could be a breakpoint whose condition evaluates false; we
 4806 could still be stepping within the line; etc. */
4807
4808 static void
4809 process_event_stop_test (struct execution_control_state *ecs)
4810 {
4811 struct symtab_and_line stop_pc_sal;
4812 struct frame_info *frame;
4813 struct gdbarch *gdbarch;
4814 CORE_ADDR jmp_buf_pc;
4815 struct bpstat_what what;
4816
4817 /* Handle cases caused by hitting a breakpoint. */
4818
4819 frame = get_current_frame ();
4820 gdbarch = get_frame_arch (frame);
4821
4822 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4823
4824 if (what.call_dummy)
4825 {
4826 stop_stack_dummy = what.call_dummy;
4827 }
4828
4829 /* If we hit an internal event that triggers symbol changes, the
4830 current frame will be invalidated within bpstat_what (e.g., if we
4831 hit an internal solib event). Re-fetch it. */
4832 frame = get_current_frame ();
4833 gdbarch = get_frame_arch (frame);
4834
4835 switch (what.main_action)
4836 {
4837 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4838 /* If we hit the breakpoint at longjmp while stepping, we
4839 install a momentary breakpoint at the target of the
4840 jmp_buf. */
4841
4842 if (debug_infrun)
4843 fprintf_unfiltered (gdb_stdlog,
4844 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4845
4846 ecs->event_thread->stepping_over_breakpoint = 1;
4847
4848 if (what.is_longjmp)
4849 {
4850 struct value *arg_value;
4851
4852 /* If we set the longjmp breakpoint via a SystemTap probe,
4853 then use it to extract the arguments. The destination PC
4854 is the third argument to the probe. */
4855 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4856 if (arg_value)
4857 {
4858 jmp_buf_pc = value_as_address (arg_value);
4859 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4860 }
4861 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4862 || !gdbarch_get_longjmp_target (gdbarch,
4863 frame, &jmp_buf_pc))
4864 {
4865 if (debug_infrun)
4866 fprintf_unfiltered (gdb_stdlog,
4867 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4868 "(!gdbarch_get_longjmp_target)\n");
4869 keep_going (ecs);
4870 return;
4871 }
4872
4873 /* Insert a breakpoint at resume address. */
4874 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4875 }
4876 else
4877 check_exception_resume (ecs, frame);
4878 keep_going (ecs);
4879 return;
4880
4881 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4882 {
4883 struct frame_info *init_frame;
4884
4885 /* There are several cases to consider.
4886
4887 1. The initiating frame no longer exists. In this case we
4888 must stop, because the exception or longjmp has gone too
4889 far.
4890
4891 2. The initiating frame exists, and is the same as the
4892 current frame. We stop, because the exception or longjmp
4893 has been caught.
4894
4895 3. The initiating frame exists and is different from the
4896 current frame. This means the exception or longjmp has
4897 been caught beneath the initiating frame, so keep going.
4898
4899 4. longjmp breakpoint has been placed just to protect
4900 against stale dummy frames and user is not interested in
4901 stopping around longjmps. */
4902
4903 if (debug_infrun)
4904 fprintf_unfiltered (gdb_stdlog,
4905 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4906
4907 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4908 != NULL);
4909 delete_exception_resume_breakpoint (ecs->event_thread);
4910
4911 if (what.is_longjmp)
4912 {
4913 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4914
4915 if (!frame_id_p (ecs->event_thread->initiating_frame))
4916 {
4917 /* Case 4. */
4918 keep_going (ecs);
4919 return;
4920 }
4921 }
4922
4923 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4924
4925 if (init_frame)
4926 {
4927 struct frame_id current_id
4928 = get_frame_id (get_current_frame ());
4929 if (frame_id_eq (current_id,
4930 ecs->event_thread->initiating_frame))
4931 {
4932 /* Case 2. Fall through. */
4933 }
4934 else
4935 {
4936 /* Case 3. */
4937 keep_going (ecs);
4938 return;
4939 }
4940 }
4941
4942 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4943 exists. */
4944 delete_step_resume_breakpoint (ecs->event_thread);
4945
4946 end_stepping_range (ecs);
4947 }
4948 return;
4949
4950 case BPSTAT_WHAT_SINGLE:
4951 if (debug_infrun)
4952 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4953 ecs->event_thread->stepping_over_breakpoint = 1;
4954 /* Still need to check other stuff, at least the case where we
4955 are stepping and step out of the right range. */
4956 break;
4957
4958 case BPSTAT_WHAT_STEP_RESUME:
4959 if (debug_infrun)
4960 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4961
4962 delete_step_resume_breakpoint (ecs->event_thread);
4963 if (ecs->event_thread->control.proceed_to_finish
4964 && execution_direction == EXEC_REVERSE)
4965 {
4966 struct thread_info *tp = ecs->event_thread;
4967
4968 /* We are finishing a function in reverse, and just hit the
4969 step-resume breakpoint at the start address of the
4970 function, and we're almost there -- just need to back up
4971 by one more single-step, which should take us back to the
4972 function call. */
4973 tp->control.step_range_start = tp->control.step_range_end = 1;
4974 keep_going (ecs);
4975 return;
4976 }
4977 fill_in_stop_func (gdbarch, ecs);
4978 if (stop_pc == ecs->stop_func_start
4979 && execution_direction == EXEC_REVERSE)
4980 {
4981 /* We are stepping over a function call in reverse, and just
4982 hit the step-resume breakpoint at the start address of
4983 the function. Go back to single-stepping, which should
4984 take us back to the function call. */
4985 ecs->event_thread->stepping_over_breakpoint = 1;
4986 keep_going (ecs);
4987 return;
4988 }
4989 break;
4990
4991 case BPSTAT_WHAT_STOP_NOISY:
4992 if (debug_infrun)
4993 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4994 stop_print_frame = 1;
4995
4996 /* Assume the thread stopped for a breapoint. We'll still check
4997 whether a/the breakpoint is there when the thread is next
4998 resumed. */
4999 ecs->event_thread->stepping_over_breakpoint = 1;
5000
5001 stop_waiting (ecs);
5002 return;
5003
5004 case BPSTAT_WHAT_STOP_SILENT:
5005 if (debug_infrun)
5006 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
5007 stop_print_frame = 0;
5008
5009 /* Assume the thread stopped for a breapoint. We'll still check
5010 whether a/the breakpoint is there when the thread is next
5011 resumed. */
5012 ecs->event_thread->stepping_over_breakpoint = 1;
5013 stop_waiting (ecs);
5014 return;
5015
5016 case BPSTAT_WHAT_HP_STEP_RESUME:
5017 if (debug_infrun)
5018 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
5019
5020 delete_step_resume_breakpoint (ecs->event_thread);
5021 if (ecs->event_thread->step_after_step_resume_breakpoint)
5022 {
5023 /* Back when the step-resume breakpoint was inserted, we
5024 were trying to single-step off a breakpoint. Go back to
5025 doing that. */
5026 ecs->event_thread->step_after_step_resume_breakpoint = 0;
5027 ecs->event_thread->stepping_over_breakpoint = 1;
5028 keep_going (ecs);
5029 return;
5030 }
5031 break;
5032
5033 case BPSTAT_WHAT_KEEP_CHECKING:
5034 break;
5035 }
5036
5037 /* If we stepped a permanent breakpoint and we had a high priority
5038 step-resume breakpoint for the address we stepped, but we didn't
5039 hit it, then we must have stepped into the signal handler. The
5040 step-resume was only necessary to catch the case of _not_
5041 stepping into the handler, so delete it, and fall through to
5042 checking whether the step finished. */
5043 if (ecs->event_thread->stepped_breakpoint)
5044 {
5045 struct breakpoint *sr_bp
5046 = ecs->event_thread->control.step_resume_breakpoint;
5047
5048 if (sr_bp != NULL
5049 && sr_bp->loc->permanent
5050 && sr_bp->type == bp_hp_step_resume
5051 && sr_bp->loc->address == ecs->event_thread->prev_pc)
5052 {
5053 if (debug_infrun)
5054 fprintf_unfiltered (gdb_stdlog,
5055 "infrun: stepped permanent breakpoint, stopped in "
5056 "handler\n");
5057 delete_step_resume_breakpoint (ecs->event_thread);
5058 ecs->event_thread->step_after_step_resume_breakpoint = 0;
5059 }
5060 }
5061
5062 /* We come here if we hit a breakpoint but should not stop for it.
5063 Possibly we also were stepping and should stop for that. So fall
5064 through and test for stepping. But, if not stepping, do not
5065 stop. */
5066
5067 /* In all-stop mode, if we're currently stepping but have stopped in
5068 some other thread, we need to switch back to the stepped thread. */
5069 if (switch_back_to_stepped_thread (ecs))
5070 return;
5071
5072 if (ecs->event_thread->control.step_resume_breakpoint)
5073 {
5074 if (debug_infrun)
5075 fprintf_unfiltered (gdb_stdlog,
5076 "infrun: step-resume breakpoint is inserted\n");
5077
5078 /* Having a step-resume breakpoint overrides anything
5079 else having to do with stepping commands until
5080 that breakpoint is reached. */
5081 keep_going (ecs);
5082 return;
5083 }
5084
5085 if (ecs->event_thread->control.step_range_end == 0)
5086 {
5087 if (debug_infrun)
5088 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
5089 /* Likewise if we aren't even stepping. */
5090 keep_going (ecs);
5091 return;
5092 }
5093
5094 /* Re-fetch current thread's frame in case the code above caused
5095 the frame cache to be re-initialized, making our FRAME variable
5096 a dangling pointer. */
5097 frame = get_current_frame ();
5098 gdbarch = get_frame_arch (frame);
5099 fill_in_stop_func (gdbarch, ecs);
5100
5101 /* If stepping through a line, keep going if still within it.
5102
5103 Note that step_range_end is the address of the first instruction
5104 beyond the step range, and NOT the address of the last instruction
5105 within it!
5106
5107 Note also that during reverse execution, we may be stepping
5108 through a function epilogue and therefore must detect when
5109 the current-frame changes in the middle of a line. */
5110
5111 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
5112 && (execution_direction != EXEC_REVERSE
5113 || frame_id_eq (get_frame_id (frame),
5114 ecs->event_thread->control.step_frame_id)))
5115 {
5116 if (debug_infrun)
5117 fprintf_unfiltered
5118 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
5119 paddress (gdbarch, ecs->event_thread->control.step_range_start),
5120 paddress (gdbarch, ecs->event_thread->control.step_range_end));
5121
5122 /* Tentatively re-enable range stepping; `resume' disables it if
5123 necessary (e.g., if we're stepping over a breakpoint or we
5124 have software watchpoints). */
5125 ecs->event_thread->control.may_range_step = 1;
5126
5127 /* When stepping backward, stop at beginning of line range
5128 (unless it's the function entry point, in which case
5129 keep going back to the call point). */
5130 if (stop_pc == ecs->event_thread->control.step_range_start
5131 && stop_pc != ecs->stop_func_start
5132 && execution_direction == EXEC_REVERSE)
5133 end_stepping_range (ecs);
5134 else
5135 keep_going (ecs);
5136
5137 return;
5138 }
5139
5140 /* We stepped out of the stepping range. */
5141
5142 /* If we are stepping at the source level and entered the runtime
5143 loader dynamic symbol resolution code...
5144
5145 EXEC_FORWARD: we keep on single stepping until we exit the run
5146 time loader code and reach the callee's address.
5147
5148 EXEC_REVERSE: we've already executed the callee (backward), and
5149 the runtime loader code is handled just like any other
5150 undebuggable function call. Now we need only keep stepping
5151 backward through the trampoline code, and that's handled further
5152 down, so there is nothing for us to do here. */
5153
5154 if (execution_direction != EXEC_REVERSE
5155 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5156 && in_solib_dynsym_resolve_code (stop_pc))
5157 {
5158 CORE_ADDR pc_after_resolver =
5159 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
5160
5161 if (debug_infrun)
5162 fprintf_unfiltered (gdb_stdlog,
5163 "infrun: stepped into dynsym resolve code\n");
5164
5165 if (pc_after_resolver)
5166 {
5167 /* Set up a step-resume breakpoint at the address
5168 indicated by SKIP_SOLIB_RESOLVER. */
5169 struct symtab_and_line sr_sal;
5170
5171 init_sal (&sr_sal);
5172 sr_sal.pc = pc_after_resolver;
5173 sr_sal.pspace = get_frame_program_space (frame);
5174
5175 insert_step_resume_breakpoint_at_sal (gdbarch,
5176 sr_sal, null_frame_id);
5177 }
5178
5179 keep_going (ecs);
5180 return;
5181 }
5182
5183 if (ecs->event_thread->control.step_range_end != 1
5184 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5185 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5186 && get_frame_type (frame) == SIGTRAMP_FRAME)
5187 {
5188 if (debug_infrun)
5189 fprintf_unfiltered (gdb_stdlog,
5190 "infrun: stepped into signal trampoline\n");
5191 /* The inferior, while doing a "step" or "next", has ended up in
5192 a signal trampoline (either by a signal being delivered or by
5193 the signal handler returning). Just single-step until the
5194 inferior leaves the trampoline (either by calling the handler
5195 or returning). */
5196 keep_going (ecs);
5197 return;
5198 }
5199
5200 /* If we're in the return path from a shared library trampoline,
5201 we want to proceed through the trampoline when stepping. */
5202 /* macro/2012-04-25: This needs to come before the subroutine
5203 call check below as on some targets return trampolines look
5204 like subroutine calls (MIPS16 return thunks). */
5205 if (gdbarch_in_solib_return_trampoline (gdbarch,
5206 stop_pc, ecs->stop_func_name)
5207 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5208 {
5209 /* Determine where this trampoline returns. */
5210 CORE_ADDR real_stop_pc;
5211
5212 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5213
5214 if (debug_infrun)
5215 fprintf_unfiltered (gdb_stdlog,
5216 "infrun: stepped into solib return tramp\n");
5217
5218 /* Only proceed through if we know where it's going. */
5219 if (real_stop_pc)
5220 {
5221 /* And put the step-breakpoint there and go until there. */
5222 struct symtab_and_line sr_sal;
5223
5224 init_sal (&sr_sal); /* initialize to zeroes */
5225 sr_sal.pc = real_stop_pc;
5226 sr_sal.section = find_pc_overlay (sr_sal.pc);
5227 sr_sal.pspace = get_frame_program_space (frame);
5228
5229 /* Do not specify what the fp should be when we stop since
5230 on some machines the prologue is where the new fp value
5231 is established. */
5232 insert_step_resume_breakpoint_at_sal (gdbarch,
5233 sr_sal, null_frame_id);
5234
5235 /* Restart without fiddling with the step ranges or
5236 other state. */
5237 keep_going (ecs);
5238 return;
5239 }
5240 }
5241
5242 /* Check for subroutine calls. The check for the current frame
5243 equalling the step ID is not necessary - the check of the
5244 previous frame's ID is sufficient - but it is a common case and
5245 cheaper than checking the previous frame's ID.
5246
5247 NOTE: frame_id_eq will never report two invalid frame IDs as
5248 being equal, so to get into this block, both the current and
5249 previous frame must have valid frame IDs. */
5250 /* The outer_frame_id check is a heuristic to detect stepping
5251 through startup code. If we step over an instruction which
5252 sets the stack pointer from an invalid value to a valid value,
5253 we may detect that as a subroutine call from the mythical
5254 "outermost" function. This could be fixed by marking
5255 outermost frames as !stack_p,code_p,special_p. Then the
5256 initial outermost frame, before sp was valid, would
5257 have code_addr == &_start. See the comment in frame_id_eq
5258 for more. */
5259 if (!frame_id_eq (get_stack_frame_id (frame),
5260 ecs->event_thread->control.step_stack_frame_id)
5261 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
5262 ecs->event_thread->control.step_stack_frame_id)
5263 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
5264 outer_frame_id)
5265 || (ecs->event_thread->control.step_start_function
5266 != find_pc_function (stop_pc)))))
5267 {
5268 CORE_ADDR real_stop_pc;
5269
5270 if (debug_infrun)
5271 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
5272
5273 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
5274 {
5275 /* I presume that step_over_calls is only 0 when we're
5276 supposed to be stepping at the assembly language level
5277 ("stepi"). Just stop. */
5278 /* And this works the same backward as frontward. MVS */
5279 end_stepping_range (ecs);
5280 return;
5281 }
5282
5283 /* Reverse stepping through solib trampolines. */
5284
5285 if (execution_direction == EXEC_REVERSE
5286 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5287 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5288 || (ecs->stop_func_start == 0
5289 && in_solib_dynsym_resolve_code (stop_pc))))
5290 {
5291 /* Any solib trampoline code can be handled in reverse
5292 by simply continuing to single-step. We have already
5293 executed the solib function (backwards), and a few
5294 steps will take us back through the trampoline to the
5295 caller. */
5296 keep_going (ecs);
5297 return;
5298 }
5299
5300 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5301 {
5302 /* We're doing a "next".
5303
5304 Normal (forward) execution: set a breakpoint at the
5305 callee's return address (the address at which the caller
5306 will resume).
5307
5308 Reverse (backward) execution. set the step-resume
5309 breakpoint at the start of the function that we just
5310 stepped into (backwards), and continue to there. When we
5311 get there, we'll need to single-step back to the caller. */
5312
5313 if (execution_direction == EXEC_REVERSE)
5314 {
5315 /* If we're already at the start of the function, we've either
5316 just stepped backward into a single instruction function,
5317 or stepped back out of a signal handler to the first instruction
5318 of the function. Just keep going, which will single-step back
5319 to the caller. */
5320 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5321 {
5322 struct symtab_and_line sr_sal;
5323
5324 /* Normal function call return (static or dynamic). */
5325 init_sal (&sr_sal);
5326 sr_sal.pc = ecs->stop_func_start;
5327 sr_sal.pspace = get_frame_program_space (frame);
5328 insert_step_resume_breakpoint_at_sal (gdbarch,
5329 sr_sal, null_frame_id);
5330 }
5331 }
5332 else
5333 insert_step_resume_breakpoint_at_caller (frame);
5334
5335 keep_going (ecs);
5336 return;
5337 }
5338
5339 /* If we are in a function call trampoline (a stub between the
5340 calling routine and the real function), locate the real
5341 function. That's what tells us (a) whether we want to step
5342 into it at all, and (b) what prologue we want to run to the
5343 end of, if we do step into it. */
5344 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5345 if (real_stop_pc == 0)
5346 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5347 if (real_stop_pc != 0)
5348 ecs->stop_func_start = real_stop_pc;
5349
5350 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5351 {
5352 struct symtab_and_line sr_sal;
5353
5354 init_sal (&sr_sal);
5355 sr_sal.pc = ecs->stop_func_start;
5356 sr_sal.pspace = get_frame_program_space (frame);
5357
5358 insert_step_resume_breakpoint_at_sal (gdbarch,
5359 sr_sal, null_frame_id);
5360 keep_going (ecs);
5361 return;
5362 }
5363
5364 /* If we have line number information for the function we are
5365 thinking of stepping into and the function isn't on the skip
5366 list, step into it.
5367
5368 If there are several symtabs at that PC (e.g. with include
5369 files), just want to know whether *any* of them have line
5370 numbers. find_pc_line handles this. */
5371 {
5372 struct symtab_and_line tmp_sal;
5373
5374 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5375 if (tmp_sal.line != 0
5376 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5377 &tmp_sal))
5378 {
5379 if (execution_direction == EXEC_REVERSE)
5380 handle_step_into_function_backward (gdbarch, ecs);
5381 else
5382 handle_step_into_function (gdbarch, ecs);
5383 return;
5384 }
5385 }
5386
5387 /* If we have no line number and the step-stop-if-no-debug is
5388 set, we stop the step so that the user has a chance to switch
5389 in assembly mode. */
5390 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5391 && step_stop_if_no_debug)
5392 {
5393 end_stepping_range (ecs);
5394 return;
5395 }
5396
5397 if (execution_direction == EXEC_REVERSE)
5398 {
5399 /* If we're already at the start of the function, we've either just
5400 stepped backward into a single instruction function without line
5401 number info, or stepped back out of a signal handler to the first
5402 instruction of the function without line number info. Just keep
5403 going, which will single-step back to the caller. */
5404 if (ecs->stop_func_start != stop_pc)
5405 {
5406 /* Set a breakpoint at callee's start address.
5407 From there we can step once and be back in the caller. */
5408 struct symtab_and_line sr_sal;
5409
5410 init_sal (&sr_sal);
5411 sr_sal.pc = ecs->stop_func_start;
5412 sr_sal.pspace = get_frame_program_space (frame);
5413 insert_step_resume_breakpoint_at_sal (gdbarch,
5414 sr_sal, null_frame_id);
5415 }
5416 }
5417 else
5418 /* Set a breakpoint at callee's return address (the address
5419 at which the caller will resume). */
5420 insert_step_resume_breakpoint_at_caller (frame);
5421
5422 keep_going (ecs);
5423 return;
5424 }
5425
5426 /* Reverse stepping through solib trampolines. */
5427
5428 if (execution_direction == EXEC_REVERSE
5429 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5430 {
5431 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5432 || (ecs->stop_func_start == 0
5433 && in_solib_dynsym_resolve_code (stop_pc)))
5434 {
5435 /* Any solib trampoline code can be handled in reverse
5436 by simply continuing to single-step. We have already
5437 executed the solib function (backwards), and a few
5438 steps will take us back through the trampoline to the
5439 caller. */
5440 keep_going (ecs);
5441 return;
5442 }
5443 else if (in_solib_dynsym_resolve_code (stop_pc))
5444 {
5445 /* Stepped backward into the solib dynsym resolver.
5446 Set a breakpoint at its start and continue, then
5447 one more step will take us out. */
5448 struct symtab_and_line sr_sal;
5449
5450 init_sal (&sr_sal);
5451 sr_sal.pc = ecs->stop_func_start;
5452 sr_sal.pspace = get_frame_program_space (frame);
5453 insert_step_resume_breakpoint_at_sal (gdbarch,
5454 sr_sal, null_frame_id);
5455 keep_going (ecs);
5456 return;
5457 }
5458 }
5459
5460 stop_pc_sal = find_pc_line (stop_pc, 0);
5461
5462 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5463 the trampoline processing logic, however, there are some trampolines
5464 that have no names, so we should do trampoline handling first. */
5465 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5466 && ecs->stop_func_name == NULL
5467 && stop_pc_sal.line == 0)
5468 {
5469 if (debug_infrun)
5470 fprintf_unfiltered (gdb_stdlog,
5471 "infrun: stepped into undebuggable function\n");
5472
5473 /* The inferior just stepped into, or returned to, an
5474 undebuggable function (where there is no debugging information
5475 and no line number corresponding to the address where the
5476 inferior stopped). Since we want to skip this kind of code,
5477 we keep going until the inferior returns from this
5478 function - unless the user has asked us not to (via
5479 set step-mode) or we no longer know how to get back
5480 to the call site. */
5481 if (step_stop_if_no_debug
5482 || !frame_id_p (frame_unwind_caller_id (frame)))
5483 {
5484 /* If we have no line number and the step-stop-if-no-debug
5485 is set, we stop the step so that the user has a chance to
5486 switch in assembly mode. */
5487 end_stepping_range (ecs);
5488 return;
5489 }
5490 else
5491 {
5492 /* Set a breakpoint at callee's return address (the address
5493 at which the caller will resume). */
5494 insert_step_resume_breakpoint_at_caller (frame);
5495 keep_going (ecs);
5496 return;
5497 }
5498 }
5499
5500 if (ecs->event_thread->control.step_range_end == 1)
5501 {
5502 /* It is stepi or nexti. We always want to stop stepping after
5503 one instruction. */
5504 if (debug_infrun)
5505 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5506 end_stepping_range (ecs);
5507 return;
5508 }
5509
5510 if (stop_pc_sal.line == 0)
5511 {
5512 /* We have no line number information. That means to stop
5513 stepping (does this always happen right after one instruction,
5514 when we do "s" in a function with no line numbers,
5515 or can this happen as a result of a return or longjmp?). */
5516 if (debug_infrun)
5517 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5518 end_stepping_range (ecs);
5519 return;
5520 }
5521
5522 /* Look for "calls" to inlined functions, part one. If the inline
5523 frame machinery detected some skipped call sites, we have entered
5524 a new inline function. */
5525
5526 if (frame_id_eq (get_frame_id (get_current_frame ()),
5527 ecs->event_thread->control.step_frame_id)
5528 && inline_skipped_frames (ecs->ptid))
5529 {
5530 struct symtab_and_line call_sal;
5531
5532 if (debug_infrun)
5533 fprintf_unfiltered (gdb_stdlog,
5534 "infrun: stepped into inlined function\n");
5535
5536 find_frame_sal (get_current_frame (), &call_sal);
5537
5538 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5539 {
5540 /* For "step", we're going to stop. But if the call site
5541 for this inlined function is on the same source line as
5542 we were previously stepping, go down into the function
5543 first. Otherwise stop at the call site. */
5544
5545 if (call_sal.line == ecs->event_thread->current_line
5546 && call_sal.symtab == ecs->event_thread->current_symtab)
5547 step_into_inline_frame (ecs->ptid);
5548
5549 end_stepping_range (ecs);
5550 return;
5551 }
5552 else
5553 {
5554 /* For "next", we should stop at the call site if it is on a
5555 different source line. Otherwise continue through the
5556 inlined function. */
5557 if (call_sal.line == ecs->event_thread->current_line
5558 && call_sal.symtab == ecs->event_thread->current_symtab)
5559 keep_going (ecs);
5560 else
5561 end_stepping_range (ecs);
5562 return;
5563 }
5564 }
5565
5566 /* Look for "calls" to inlined functions, part two. If we are still
5567 in the same real function we were stepping through, but we have
5568 to go further up to find the exact frame ID, we are stepping
5569 through a more inlined call beyond its call site. */
5570
5571 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5572 && !frame_id_eq (get_frame_id (get_current_frame ()),
5573 ecs->event_thread->control.step_frame_id)
5574 && stepped_in_from (get_current_frame (),
5575 ecs->event_thread->control.step_frame_id))
5576 {
5577 if (debug_infrun)
5578 fprintf_unfiltered (gdb_stdlog,
5579 "infrun: stepping through inlined function\n");
5580
5581 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5582 keep_going (ecs);
5583 else
5584 end_stepping_range (ecs);
5585 return;
5586 }
5587
5588 if ((stop_pc == stop_pc_sal.pc)
5589 && (ecs->event_thread->current_line != stop_pc_sal.line
5590 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5591 {
5592 /* We are at the start of a different line. So stop. Note that
5593 we don't stop if we step into the middle of a different line.
5594 That is said to make things like for (;;) statements work
5595 better. */
5596 if (debug_infrun)
5597 fprintf_unfiltered (gdb_stdlog,
5598 "infrun: stepped to a different line\n");
5599 end_stepping_range (ecs);
5600 return;
5601 }
5602
5603 /* We aren't done stepping.
5604
5605 Optimize by setting the stepping range to the line.
5606 (We might not be in the original line, but if we entered a
5607 new line in mid-statement, we continue stepping. This makes
5608 things like for(;;) statements work better.) */
5609
5610 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5611 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5612 ecs->event_thread->control.may_range_step = 1;
5613 set_step_info (frame, stop_pc_sal);
5614
5615 if (debug_infrun)
5616 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5617 keep_going (ecs);
5618 }
5619
/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).  */
5624
static int
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  /* In non-stop mode, threads are always handled individually, so
     there is never a previously-stepped thread to switch back to.  */
  if (!non_stop)
    {
      struct thread_info *tp;
      struct thread_info *stepping_thread;

      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return 0;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to finish step-over of [%s]\n",
				  target_pid_to_str (ecs->event_thread->ptid));
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
       {
	 if (debug_infrun)
	   {
	     fprintf_unfiltered (gdb_stdlog,
				 "infrun: need to step [%s] over single-step "
				 "breakpoint\n",
				 target_pid_to_str (ecs->ptid));
	   }
	 keep_going (ecs);
	 return 1;
       }

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: thread [%s] still needs step-over\n",
				  target_pid_to_str (ecs->event_thread->ptid));
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return 0;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      /* Do all pending step-overs before actually proceeding with
	 step/next/etc.  */
      if (start_step_over ())
	{
	  prepare_to_wait (ecs);
	  return 1;
	}

      /* Look for the stepping/nexting thread.  */
      stepping_thread = NULL;

      ALL_NON_EXITED_THREADS (tp)
        {
	  /* Ignore threads of processes we're not resuming.  */
	  if (!sched_multi
	      && ptid_get_pid (tp->ptid) != ptid_get_pid (ecs->ptid))
	    continue;

	  /* When stepping over a breakpoint, we lock all threads
	     except the one that needs to move past the breakpoint.
	     If a non-event thread has this set, the "incomplete
	     step-over" check above should have caught it earlier.  */
	  gdb_assert (!tp->control.trap_expected);

	  /* Did we find the stepping thread?  */
	  if (tp->control.step_range_end)
	    {
	      /* Yep.  There should only be one though.  */
	      gdb_assert (stepping_thread == NULL);

	      /* The event thread is handled at the top, before we
		 enter this loop.  */
	      gdb_assert (tp != ecs->event_thread);

	      /* If some thread other than the event thread is
		 stepping, then scheduler locking can't be in effect,
		 otherwise we wouldn't have resumed the current event
		 thread in the first place.  */
	      gdb_assert (!schedlock_applies (tp));

	      stepping_thread = tp;
	    }
	}

      if (stepping_thread != NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: switching back to stepped thread\n");

	  /* If the stepped thread vanished, fall through and report
	     the current event instead of resuming.  */
	  if (keep_going_stepped_thread (stepping_thread))
	    {
	      prepare_to_wait (ecs);
	      return 1;
	    }
	}
    }

  return 0;
}
5766
5767 /* Set a previously stepped thread back to stepping. Returns true on
5768 success, false if the resume is not possible (e.g., the thread
5769 vanished). */
5770
static int
keep_going_stepped_thread (struct thread_info *tp)
{
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (is_exited (tp->ptid)
      || !target_thread_alive (tp->ptid))
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: not resuming previously  "
			    "stepped thread, it has vanished\n");

      delete_thread (tp->ptid);
      return 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resuming previously stepped thread\n");

  reset_ecs (ecs, tp);
  /* Make TP the current thread before reading its registers.  */
  switch_to_thread (tp->ptid);

  stop_pc = regcache_read_pc (get_thread_regcache (tp->ptid));
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
     expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (stop_pc != tp->prev_pc)
    {
      ptid_t resume_ptid;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: expected thread advanced also (%s -> %s)\n",
			    paddress (target_gdbarch (), tp->prev_pc),
			    paddress (target_gdbarch (), stop_pc));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     stop_pc);

      resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
    }
  else
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: expected thread still hasn't advanced\n");

      /* The thread hasn't moved; resume it as an ordinary
	 continuation of the interrupted step.  */
      keep_going_pass_signal (ecs);
    }
  return 1;
}
5870
5871 /* Is thread TP in the middle of (software or hardware)
5872 single-stepping? (Note the result of this function must never be
5873 passed directly as target_resume's STEP parameter.) */
5874
5875 static int
5876 currently_stepping (struct thread_info *tp)
5877 {
5878 return ((tp->control.step_range_end
5879 && tp->control.step_resume_breakpoint == NULL)
5880 || tp->control.trap_expected
5881 || tp->stepped_breakpoint
5882 || bpstat_should_step ());
5883 }
5884
5885 /* Inferior has stepped into a subroutine call with source code that
5886 we should not step over. Do step to the first line of code in
5887 it. */
5888
static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  struct compunit_symtab *cust;
  struct symtab_and_line stop_func_sal, sr_sal;

  fill_in_stop_func (gdbarch, ecs);

  /* Skip the function prologue, except for hand-written assembly
     where the notion of a prologue does not apply.  */
  cust = find_pc_compunit_symtab (stop_pc);
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
						  ecs->stop_func_start);

  stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == stop_pc)
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      init_sal (&sr_sal);	/* initialize to zeroes */
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
         some machines the prologue is where the new fp value is
         established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
        = ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
5963
5964 /* Inferior has stepped backward into a subroutine call with source
5965 code that we should not step over. Do step to the beginning of the
5966 last line of code in it. */
5967
static void
handle_step_into_function_backward (struct gdbarch *gdbarch,
				    struct execution_control_state *ecs)
{
  struct compunit_symtab *cust;
  struct symtab_and_line stop_func_sal;

  fill_in_stop_func (gdbarch, ecs);

  /* Skip the function prologue, except for hand-written assembly
     where the notion of a prologue does not apply.  */
  cust = find_pc_compunit_symtab (stop_pc);
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
						  ecs->stop_func_start);

  stop_func_sal = find_pc_line (stop_pc, 0);

  /* OK, we're just going to keep stepping here.  */
  if (stop_func_sal.pc == stop_pc)
    {
      /* We're there already.  Just stop stepping now.  */
      end_stepping_range (ecs);
    }
  else
    {
      /* Else just reset the step range and keep going.
	 No step-resume breakpoint, they don't work for
	 epilogues, which can have multiple entry paths.  */
      ecs->event_thread->control.step_range_start = stop_func_sal.pc;
      ecs->event_thread->control.step_range_end = stop_func_sal.end;
      keep_going (ecs);
    }
  return;
}
6001
/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both to step into functions and to skip over code.  */
6004
/* Worker for the insert_step_resume_breakpoint_at_* family: set a
   momentary breakpoint of type SR_TYPE at SR_SAL, associated with
   frame SR_ID, and record it as the current thread's step-resume
   breakpoint.  */

static void
insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
					struct symtab_and_line sr_sal,
					struct frame_id sr_id,
					enum bptype sr_type)
{
  /* There should never be more than one step-resume or longjmp-resume
     breakpoint per thread, so we should never be setting a new
     step_resume_breakpoint when one is already active.  */
  gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
  gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: inserting step-resume breakpoint at %s\n",
			paddress (gdbarch, sr_sal.pc));

  inferior_thread ()->control.step_resume_breakpoint
    = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
}
6025
6026 void
6027 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
6028 struct symtab_and_line sr_sal,
6029 struct frame_id sr_id)
6030 {
6031 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
6032 sr_sal, sr_id,
6033 bp_step_resume);
6034 }
6035
6036 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
6037 This is used to skip a potential signal handler.
6038
6039 This is called with the interrupted function's frame. The signal
6040 handler, when it returns, will resume the interrupted function at
6041 RETURN_FRAME.pc. */
6042
6043 static void
6044 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
6045 {
6046 struct symtab_and_line sr_sal;
6047 struct gdbarch *gdbarch;
6048
6049 gdb_assert (return_frame != NULL);
6050 init_sal (&sr_sal); /* initialize to zeros */
6051
6052 gdbarch = get_frame_arch (return_frame);
6053 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
6054 sr_sal.section = find_pc_overlay (sr_sal.pc);
6055 sr_sal.pspace = get_frame_program_space (return_frame);
6056
6057 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
6058 get_stack_frame_id (return_frame),
6059 bp_hp_step_resume);
6060 }
6061
6062 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
6063 is used to skip a function after stepping into it (for "next" or if
6064 the called function has no debugging information).
6065
6066 The current function has almost always been reached by single
6067 stepping a call or return instruction. NEXT_FRAME belongs to the
6068 current function, and the breakpoint will be set at the caller's
6069 resume address.
6070
6071 This is a separate function rather than reusing
6072 insert_hp_step_resume_breakpoint_at_frame in order to avoid
6073 get_prev_frame, which may stop prematurely (see the implementation
6074 of frame_unwind_caller_id for an example). */
6075
6076 static void
6077 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
6078 {
6079 struct symtab_and_line sr_sal;
6080 struct gdbarch *gdbarch;
6081
6082 /* We shouldn't have gotten here if we don't know where the call site
6083 is. */
6084 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
6085
6086 init_sal (&sr_sal); /* initialize to zeros */
6087
6088 gdbarch = frame_unwind_caller_arch (next_frame);
6089 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
6090 frame_unwind_caller_pc (next_frame));
6091 sr_sal.section = find_pc_overlay (sr_sal.pc);
6092 sr_sal.pspace = frame_unwind_program_space (next_frame);
6093
6094 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
6095 frame_unwind_caller_id (next_frame));
6096 }
6097
6098 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
6099 new breakpoint at the target of a jmp_buf. The handling of
6100 longjmp-resume uses the same mechanisms used for handling
6101 "step-resume" breakpoints. */
6102
static void
insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  /* There should never be more than one longjmp-resume breakpoint per
     thread, so we should never be setting a new
     longjmp_resume_breakpoint when one is already active.  */
  gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: inserting longjmp-resume breakpoint at %s\n",
			paddress (gdbarch, pc));

  /* Stored in the exception_resume slot: longjmp-resume shares the
     step-resume/exception-resume bookkeeping.  */
  inferior_thread ()->control.exception_resume_breakpoint =
    set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
}
6119
6120 /* Insert an exception resume breakpoint. TP is the thread throwing
6121 the exception. The block B is the block of the unwinder debug hook
6122 function. FRAME is the frame corresponding to the call to this
6123 function. SYM is the symbol of the function argument holding the
6124 target PC of the exception. */
6125
6126 static void
6127 insert_exception_resume_breakpoint (struct thread_info *tp,
6128 const struct block *b,
6129 struct frame_info *frame,
6130 struct symbol *sym)
6131 {
6132 TRY
6133 {
6134 struct symbol *vsym;
6135 struct value *value;
6136 CORE_ADDR handler;
6137 struct breakpoint *bp;
6138
6139 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN,
6140 NULL).symbol;
6141 value = read_var_value (vsym, frame);
6142 /* If the value was optimized out, revert to the old behavior. */
6143 if (! value_optimized_out (value))
6144 {
6145 handler = value_as_address (value);
6146
6147 if (debug_infrun)
6148 fprintf_unfiltered (gdb_stdlog,
6149 "infrun: exception resume at %lx\n",
6150 (unsigned long) handler);
6151
6152 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6153 handler, bp_exception_resume);
6154
6155 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
6156 frame = NULL;
6157
6158 bp->thread = tp->num;
6159 inferior_thread ()->control.exception_resume_breakpoint = bp;
6160 }
6161 }
6162 CATCH (e, RETURN_MASK_ERROR)
6163 {
6164 /* We want to ignore errors here. */
6165 }
6166 END_CATCH
6167 }
6168
6169 /* A helper for check_exception_resume that sets an
6170 exception-breakpoint based on a SystemTap probe. */
6171
static void
insert_exception_resume_from_probe (struct thread_info *tp,
				    const struct bound_probe *probe,
				    struct frame_info *frame)
{
  struct value *arg_value;
  CORE_ADDR handler;
  struct breakpoint *bp;

  /* Argument 1 of the probe is the handler address; bail out quietly
     if it cannot be evaluated.  */
  arg_value = probe_safe_evaluate_at_pc (frame, 1);
  if (!arg_value)
    return;

  handler = value_as_address (arg_value);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: exception resume at %s\n",
			paddress (get_objfile_arch (probe->objfile),
				  handler));

  bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
				       handler, bp_exception_resume);
  /* Make the breakpoint thread-specific so only TP stops at it.  */
  bp->thread = tp->num;
  inferior_thread ()->control.exception_resume_breakpoint = bp;
}
6198
6199 /* This is called when an exception has been intercepted. Check to
6200 see whether the exception's destination is of interest, and if so,
6201 set an exception resume breakpoint there. */
6202
static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.probe)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe; fall back to inspecting the debug hook's arguments via
     its debug info.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  TRY
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:
	 
	 void _Unwind_DebugHook (void *cfa, void *handler);
	 
	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.
	 
	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  /* Skip the first argument (the CFA); the second argument is
	     the handler we want.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  CATCH (e, RETURN_MASK_ERROR)
    {
      /* Errors here are non-fatal; simply don't set the breakpoint.  */
    }
  END_CATCH
}
6267
/* Stop waiting for events: record that the inferior is presenting a
   stop to the user.  Any in-progress step-over bookkeeping is no
   longer meaningful once we stop.  */

static void
stop_waiting (struct execution_control_state *ecs)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");

  clear_step_over_info ();

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;
}
6279
6280 /* Like keep_going, but passes the signal to the inferior, even if the
6281 signal is set to nopass. */
6282
static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  /* Make sure normal_stop is called if we get a QUIT handled before
     reaching resume.  */
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);

  gdb_assert (ptid_equal (ecs->event_thread->ptid, inferior_ptid));

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc (get_thread_regcache (ecs->ptid));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: %s has trap_expected set, "
			    "resuming to collect trap\n",
			    target_pid_to_str (tp->ptid));

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      discard_cleanups (old_cleanups);
      resume (ecs->event_thread->suspend.stop_signal);
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      enum step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps
	      || !use_displaced_stepping (get_regcache_arch (regcache))))
	{
	  set_step_over_info (get_regcache_aspace (regcache),
			      regcache_read_pc (regcache), remove_wps);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps);
      else
	clear_step_over_info ();

      /* Stop stepping if inserting breakpoints fails.  */
      TRY
	{
	  insert_breakpoints ();
	}
      CATCH (e, RETURN_MASK_ERROR)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  discard_cleanups (old_cleanups);
	  return;
	}
      END_CATCH

      /* Expect a trap iff we're stepping over something in place.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      discard_cleanups (old_cleanups);
      resume (ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
6382
6383 /* Called when we should continue running the inferior, because the
6384 current event doesn't cause a user visible stop. This does the
6385 resuming part; waiting for the next event is done elsewhere. */
6386
6387 static void
6388 keep_going (struct execution_control_state *ecs)
6389 {
6390 if (ecs->event_thread->control.trap_expected
6391 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
6392 ecs->event_thread->control.trap_expected = 0;
6393
6394 if (!signal_program[ecs->event_thread->suspend.stop_signal])
6395 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6396 keep_going_pass_signal (ecs);
6397 }
6398
6399 /* This function normally comes after a resume, before
6400 handle_inferior_event exits. It takes care of any last bits of
6401 housekeeping, and sets the all-important wait_some_more flag. */
6402
6403 static void
6404 prepare_to_wait (struct execution_control_state *ecs)
6405 {
6406 if (debug_infrun)
6407 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6408
6409 /* This is the old end of the while loop. Let everybody know we
6410 want to wait for the inferior some more and get called again
6411 soon. */
6412 ecs->wait_some_more = 1;
6413 }
6414
6415 /* We are done with the step range of a step/next/si/ni command.
6416 Called once for each n of a "step n" operation. */
6417
6418 static void
6419 end_stepping_range (struct execution_control_state *ecs)
6420 {
6421 ecs->event_thread->control.stop_step = 1;
6422 stop_waiting (ecs);
6423 }
6424
6425 /* Several print_*_reason functions to print why the inferior has stopped.
6426 We always print something when the inferior exits, or receives a signal.
6427 The rest of the cases are dealt with later on in normal_stop and
6428 print_it_typical. Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
6430 stop_waiting is called.
6431
6432 Note that we don't call these directly, instead we delegate that to
6433 the interpreters, through observers. Interpreters then call these
6434 with whatever uiout is right. */
6435
6436 void
6437 print_end_stepping_range_reason (struct ui_out *uiout)
6438 {
6439 /* For CLI-like interpreters, print nothing. */
6440
6441 if (ui_out_is_mi_like_p (uiout))
6442 {
6443 ui_out_field_string (uiout, "reason",
6444 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6445 }
6446 }
6447
/* Report that the inferior was terminated by signal SIGGNAL.  The
   annotate_* calls bracket the pieces for annotation-level clients;
   MI consumers additionally get an "exited-signalled" reason field.
   The ui_out calls must stay in this exact order, since they define
   the user-visible output.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (ui_out_is_mi_like_p (uiout))
    ui_out_field_string
      (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  ui_out_text (uiout, "\nProgram terminated with signal ");
  annotate_signal_name ();
  ui_out_field_string (uiout, "signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  ui_out_text (uiout, ", ");
  annotate_signal_string ();
  ui_out_field_string (uiout, "signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  ui_out_text (uiout, ".\n");
  ui_out_text (uiout, "The program no longer exists.\n");
}
6468
/* Report that the current inferior exited with EXITSTATUS.  A
   nonzero status is printed as an octal exit code (note the "0%o"
   format); zero is reported as a normal exit.  MI consumers also get
   a machine-readable reason field.  */

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "reason",
			     async_reason_lookup (EXEC_ASYNC_EXITED));
      ui_out_text (uiout, "[Inferior ");
      ui_out_text (uiout, plongest (inf->num));
      ui_out_text (uiout, " (");
      ui_out_text (uiout, pidstr);
      ui_out_text (uiout, ") exited with code ");
      /* Exit code deliberately printed in octal.  */
      ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
      ui_out_text (uiout, "]\n");
    }
  else
    {
      if (ui_out_is_mi_like_p (uiout))
	ui_out_field_string
	  (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      ui_out_text (uiout, "[Inferior ");
      ui_out_text (uiout, plongest (inf->num));
      ui_out_text (uiout, " (");
      ui_out_text (uiout, pidstr);
      ui_out_text (uiout, ") exited normally]\n");
    }
}
6501
/* Report that the inferior received signal SIGGNAL.  For CLI-like
   interpreters, SIGGNAL == GDB_SIGNAL_0 means we're only reporting
   which thread stopped, so print "[<thread>] #N stopped" instead of a
   signal message; MI always gets the full signal-received form.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signal ();

  if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
    {
      struct thread_info *t = inferior_thread ();

      ui_out_text (uiout, "\n[");
      ui_out_field_string (uiout, "thread-name",
			   target_pid_to_str (t->ptid));
      ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
      ui_out_text (uiout, " stopped");
    }
  else
    {
      ui_out_text (uiout, "\nProgram received signal ");
      annotate_signal_name ();
      if (ui_out_is_mi_like_p (uiout))
	ui_out_field_string
	  (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      ui_out_field_string (uiout, "signal-name",
			   gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      ui_out_text (uiout, ", ");
      annotate_signal_string ();
      ui_out_field_string (uiout, "signal-meaning",
			   gdb_signal_to_string (siggnal));
      annotate_signal_string_end ();
    }
  ui_out_text (uiout, ".\n");
}
6535
void
print_no_history_reason (struct ui_out *uiout)
{
  /* Reverse execution ran out of recorded history; tell the user.  */
  ui_out_text (uiout, "\nNo more reverse-execution history.\n");
}
6541
6542 /* Print current location without a level number, if we have changed
6543 functions or hit a breakpoint. Print source line if we have one.
6544 bpstat_print contains the logic deciding in detail what to print,
6545 based on the event(s) that just occurred. */
6546
/* Print why the inferior stopped, as described by WS.  bpstat_print
   gets first crack (it may itself print breakpoint-specific output)
   and its return value decides whether we still print the frame, the
   source line, or nothing.  Finishes by refreshing the auto-display
   expressions.  */

void
print_stop_event (struct target_waitstatus *ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      /* The breakpoint machinery had nothing to say; decide between a
	 bare source line (just finished a step in the same frame and
	 function) and a full location.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && tp->control.step_start_function == find_pc_function (stop_pc))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);

  /* Display the auto-display expressions.  */
  do_displays ();
}
6603
6604 /* Here to return control to GDB when the inferior stops for real.
6605 Print appropriate messages, remove breakpoints, give terminal our modes.
6606
6607 STOP_PRINT_FRAME nonzero means print the executing frame
6608 (pc, function, args, file, line number and line text).
6609 BREAKPOINTS_FAILED nonzero means stop was due to error
6610 attempting to insert breakpoints. */
6611
void
normal_stop (void)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
  ptid_t pid_ptid;

  get_last_target_status (&last_ptid, &last);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */
  if (!non_stop)
    make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else if (last.kind == TARGET_WAITKIND_SIGNALLED
	   || last.kind == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (!ptid_equal (inferior_ptid, null_ptid))
	{
	  pid_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
	  make_cleanup (finish_thread_state_cleanup, &pid_ptid);
	}
    }
  else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
    make_cleanup (finish_thread_state_cleanup, &inferior_ptid);

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && !ptid_equal (previous_inferior_ptid, inferior_ptid)
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      target_terminal_ours_for_output ();
      printf_filtered (_("[Switching to %s]\n"),
		       target_pid_to_str (inferior_ptid));
      annotate_thread_changed ();
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      gdb_assert (sync_execution || !target_can_async_p ());

      target_terminal_ours_for_output ();
      printf_filtered (_("No unwaited-for children left.\n"));
    }

  /* Note: this depends on the update_thread_list call above.  */
  if (!breakpoints_should_be_inserted_now () && target_has_execution)
    {
      if (remove_breakpoints ())
	{
	  target_terminal_ours_for_output ();
	  printf_filtered (_("Cannot remove breakpoints because "
			     "program is no longer writable.\nFurther "
			     "execution is probably impossible.\n"));
	}
    }

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  /* Notify observers if we finished a "step"-like command, etc.  */
  if (target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && inferior_thread ()->control.stop_step)
    {
      /* But not if in the middle of doing a "step n" operation for
	 n > 1 */
      if (inferior_thread ()->step_multi)
	goto done;

      observer_notify_end_stepping_range ();
    }

  target_terminal_ours ();
  async_enable_stdin ();

  /* Set the current source location.  This will also happen if we
     display the frame below, but the current SAL will be incorrect
     during a user hook-stop function.  */
  if (has_stack_frames () && !stop_stack_dummy)
    set_current_sal_from_frame (get_current_frame ());

  /* Let the user/frontend see the threads as stopped, but defer to
     call_function_by_hand if the thread finished an infcall
     successfully.  We may be e.g., evaluating a breakpoint condition.
     In that case, the thread had state THREAD_RUNNING before the
     infcall, and shall remain marked running, all without informing
     the user/frontend about state transition changes.  */
  if (target_has_execution
      && inferior_thread ()->control.in_infcall
      && stop_stack_dummy == STOP_STACK_DUMMY)
    discard_cleanups (old_chain);
  else
    do_cleanups (old_chain);

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command)
    catch_errors (hook_stop_stub, stop_command,
		  "Error while running hook_stop:\n", RETURN_MASK_ALL);

  if (!has_stack_frames ())
    goto done;

  if (last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED)
    goto done;

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.
     Don't do this on return from a stack dummy routine,
     or if the program has exited.  */

  if (!stop_stack_dummy)
    {
      select_frame (get_current_frame ());

      /* If --batch-silent is enabled then there's no need to print the current
	 source location, and to try risks causing an error message about
	 missing source files.  */
      if (stop_print_frame && !batch_silent)
	print_stop_event (&last);
    }

  if (stop_stack_dummy == STOP_STACK_DUMMY)
    {
      /* Pop the empty frame that contains the stack dummy.
	 This also restores inferior state prior to the call
	 (struct infcall_suspend_state).  */
      struct frame_info *frame = get_current_frame ();

      gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
      frame_pop (frame);
      /* frame_pop() calls reinit_frame_cache as the last thing it
	 does which means there's currently no selected frame.  We
	 don't need to re-establish a selected frame if the dummy call
	 returns normally, that will be done by
	 restore_infcall_control_state.  However, we do have to handle
	 the case where the dummy call is returning after being
	 stopped (e.g. the dummy call previously hit a breakpoint).
	 We can't know which case we have so just always re-establish
	 a selected frame here.  */
      select_frame (get_current_frame ());
    }

done:
  annotate_stopped ();

  /* Suppress the stop observer if we're in the middle of:

     - a step n (n > 1), as there are still more steps to be done.

     - a "finish" command, as the observer will be called in
     finish_command_continuation, so it can include the inferior
     function's return value.

     - calling an inferior function, as we pretend the inferior didn't
     run at all.  The return value of the call is handled by the
     expression evaluator, through call_function_by_hand.  */

  if (!target_has_execution
      || last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED
      || last.kind == TARGET_WAITKIND_NO_RESUMED
      || (!(inferior_thread ()->step_multi
	    && inferior_thread ()->control.stop_step)
	  && !(inferior_thread ()->control.stop_bpstat
	       && inferior_thread ()->control.proceed_to_finish)
	  && !inferior_thread ()->control.in_infcall))
    {
      if (!ptid_equal (inferior_ptid, null_ptid))
	observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
				     stop_print_frame);
      else
	observer_notify_normal_stop (NULL, stop_print_frame);
    }

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();
}
6846
/* catch_errors trampoline: CMD is really the stop_command list
   element whose pre-hook we want to run.  */

static int
hook_stop_stub (void *cmd)
{
  execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
  return 0;
}
6853 \f
6854 int
6855 signal_stop_state (int signo)
6856 {
6857 return signal_stop[signo];
6858 }
6859
6860 int
6861 signal_print_state (int signo)
6862 {
6863 return signal_print[signo];
6864 }
6865
6866 int
6867 signal_pass_state (int signo)
6868 {
6869 return signal_program[signo];
6870 }
6871
6872 static void
6873 signal_cache_update (int signo)
6874 {
6875 if (signo == -1)
6876 {
6877 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6878 signal_cache_update (signo);
6879
6880 return;
6881 }
6882
6883 signal_pass[signo] = (signal_stop[signo] == 0
6884 && signal_print[signo] == 0
6885 && signal_program[signo] == 1
6886 && signal_catch[signo] == 0);
6887 }
6888
6889 int
6890 signal_stop_update (int signo, int state)
6891 {
6892 int ret = signal_stop[signo];
6893
6894 signal_stop[signo] = state;
6895 signal_cache_update (signo);
6896 return ret;
6897 }
6898
6899 int
6900 signal_print_update (int signo, int state)
6901 {
6902 int ret = signal_print[signo];
6903
6904 signal_print[signo] = state;
6905 signal_cache_update (signo);
6906 return ret;
6907 }
6908
6909 int
6910 signal_pass_update (int signo, int state)
6911 {
6912 int ret = signal_program[signo];
6913
6914 signal_program[signo] = state;
6915 signal_cache_update (signo);
6916 return ret;
6917 }
6918
6919 /* Update the global 'signal_catch' from INFO and notify the
6920 target. */
6921
6922 void
6923 signal_catch_update (const unsigned int *info)
6924 {
6925 int i;
6926
6927 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6928 signal_catch[i] = info[i] > 0;
6929 signal_cache_update (-1);
6930 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6931 }
6932
/* Print the column headings for the signal-disposition table shown by
   "info signals" and "handle".  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal Stop\tPrint\tPass "
		     "to program\tDescription\n"));
}
6939
6940 static void
6941 sig_print_info (enum gdb_signal oursig)
6942 {
6943 const char *name = gdb_signal_to_name (oursig);
6944 int name_padding = 13 - strlen (name);
6945
6946 if (name_padding <= 0)
6947 name_padding = 0;
6948
6949 printf_filtered ("%s", name);
6950 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6951 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6952 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6953 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6954 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6955 }
6956
6957 /* Specify how various signals in the inferior should be handled. */
6958
6959 static void
6960 handle_command (char *args, int from_tty)
6961 {
6962 char **argv;
6963 int digits, wordlen;
6964 int sigfirst, signum, siglast;
6965 enum gdb_signal oursig;
6966 int allsigs;
6967 int nsigs;
6968 unsigned char *sigs;
6969 struct cleanup *old_chain;
6970
6971 if (args == NULL)
6972 {
6973 error_no_arg (_("signal to handle"));
6974 }
6975
6976 /* Allocate and zero an array of flags for which signals to handle. */
6977
6978 nsigs = (int) GDB_SIGNAL_LAST;
6979 sigs = (unsigned char *) alloca (nsigs);
6980 memset (sigs, 0, nsigs);
6981
6982 /* Break the command line up into args. */
6983
6984 argv = gdb_buildargv (args);
6985 old_chain = make_cleanup_freeargv (argv);
6986
6987 /* Walk through the args, looking for signal oursigs, signal names, and
6988 actions. Signal numbers and signal names may be interspersed with
6989 actions, with the actions being performed for all signals cumulatively
6990 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6991
6992 while (*argv != NULL)
6993 {
6994 wordlen = strlen (*argv);
6995 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6996 {;
6997 }
6998 allsigs = 0;
6999 sigfirst = siglast = -1;
7000
7001 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
7002 {
7003 /* Apply action to all signals except those used by the
7004 debugger. Silently skip those. */
7005 allsigs = 1;
7006 sigfirst = 0;
7007 siglast = nsigs - 1;
7008 }
7009 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
7010 {
7011 SET_SIGS (nsigs, sigs, signal_stop);
7012 SET_SIGS (nsigs, sigs, signal_print);
7013 }
7014 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
7015 {
7016 UNSET_SIGS (nsigs, sigs, signal_program);
7017 }
7018 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
7019 {
7020 SET_SIGS (nsigs, sigs, signal_print);
7021 }
7022 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
7023 {
7024 SET_SIGS (nsigs, sigs, signal_program);
7025 }
7026 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
7027 {
7028 UNSET_SIGS (nsigs, sigs, signal_stop);
7029 }
7030 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
7031 {
7032 SET_SIGS (nsigs, sigs, signal_program);
7033 }
7034 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
7035 {
7036 UNSET_SIGS (nsigs, sigs, signal_print);
7037 UNSET_SIGS (nsigs, sigs, signal_stop);
7038 }
7039 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
7040 {
7041 UNSET_SIGS (nsigs, sigs, signal_program);
7042 }
7043 else if (digits > 0)
7044 {
7045 /* It is numeric. The numeric signal refers to our own
7046 internal signal numbering from target.h, not to host/target
7047 signal number. This is a feature; users really should be
7048 using symbolic names anyway, and the common ones like
7049 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
7050
7051 sigfirst = siglast = (int)
7052 gdb_signal_from_command (atoi (*argv));
7053 if ((*argv)[digits] == '-')
7054 {
7055 siglast = (int)
7056 gdb_signal_from_command (atoi ((*argv) + digits + 1));
7057 }
7058 if (sigfirst > siglast)
7059 {
7060 /* Bet he didn't figure we'd think of this case... */
7061 signum = sigfirst;
7062 sigfirst = siglast;
7063 siglast = signum;
7064 }
7065 }
7066 else
7067 {
7068 oursig = gdb_signal_from_name (*argv);
7069 if (oursig != GDB_SIGNAL_UNKNOWN)
7070 {
7071 sigfirst = siglast = (int) oursig;
7072 }
7073 else
7074 {
7075 /* Not a number and not a recognized flag word => complain. */
7076 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
7077 }
7078 }
7079
7080 /* If any signal numbers or symbol names were found, set flags for
7081 which signals to apply actions to. */
7082
7083 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
7084 {
7085 switch ((enum gdb_signal) signum)
7086 {
7087 case GDB_SIGNAL_TRAP:
7088 case GDB_SIGNAL_INT:
7089 if (!allsigs && !sigs[signum])
7090 {
7091 if (query (_("%s is used by the debugger.\n\
7092 Are you sure you want to change it? "),
7093 gdb_signal_to_name ((enum gdb_signal) signum)))
7094 {
7095 sigs[signum] = 1;
7096 }
7097 else
7098 {
7099 printf_unfiltered (_("Not confirmed, unchanged.\n"));
7100 gdb_flush (gdb_stdout);
7101 }
7102 }
7103 break;
7104 case GDB_SIGNAL_0:
7105 case GDB_SIGNAL_DEFAULT:
7106 case GDB_SIGNAL_UNKNOWN:
7107 /* Make sure that "all" doesn't print these. */
7108 break;
7109 default:
7110 sigs[signum] = 1;
7111 break;
7112 }
7113 }
7114
7115 argv++;
7116 }
7117
7118 for (signum = 0; signum < nsigs; signum++)
7119 if (sigs[signum])
7120 {
7121 signal_cache_update (-1);
7122 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
7123 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
7124
7125 if (from_tty)
7126 {
7127 /* Show the results. */
7128 sig_print_header ();
7129 for (; signum < nsigs; signum++)
7130 if (sigs[signum])
7131 sig_print_info ((enum gdb_signal) signum);
7132 }
7133
7134 break;
7135 }
7136
7137 do_cleanups (old_chain);
7138 }
7139
7140 /* Complete the "handle" command. */
7141
7142 static VEC (char_ptr) *
7143 handle_completer (struct cmd_list_element *ignore,
7144 const char *text, const char *word)
7145 {
7146 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
7147 static const char * const keywords[] =
7148 {
7149 "all",
7150 "stop",
7151 "ignore",
7152 "print",
7153 "pass",
7154 "nostop",
7155 "noignore",
7156 "noprint",
7157 "nopass",
7158 NULL,
7159 };
7160
7161 vec_signals = signal_completer (ignore, text, word);
7162 vec_keywords = complete_on_enum (keywords, word, word);
7163
7164 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
7165 VEC_free (char_ptr, vec_signals);
7166 VEC_free (char_ptr, vec_keywords);
7167 return return_val;
7168 }
7169
7170 enum gdb_signal
7171 gdb_signal_from_command (int num)
7172 {
7173 if (num >= 1 && num <= 15)
7174 return (enum gdb_signal) num;
7175 error (_("Only signals 1-15 are valid as numeric signals.\n\
7176 Use \"info signals\" for a list of symbolic signals."));
7177 }
7178
7179 /* Print current contents of the tables set by the handle command.
7180 It is possible we should just be printing signals actually used
7181 by the current target (but for things to work right when switching
7182 targets, all signals should be in the signal tables). */
7183
/* Implement "info signals" / "info handle".  With SIGNUM_EXP, print
   the table row for that single signal (accepting a symbolic name or
   a numeric expression); otherwise print the whole table.  */

static void
signals_info (char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  if (signum_exp)
    {
      /* First see if this is a symbol name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
	{
	  /* No, try numeric.  */
	  oursig =
	    gdb_signal_from_command (parse_and_eval_long (signum_exp));
	}
      sig_print_info (oursig);
      return;
    }

  printf_filtered ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      QUIT;

      /* Skip the pseudo-signals that are not real dispositions.  */
      if (oursig != GDB_SIGNAL_UNKNOWN
	  && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
	sig_print_info (oursig);
    }

  printf_filtered (_("\nUse the \"handle\" command "
		     "to change these tables.\n"));
}
7221
7222 /* Check if it makes sense to read $_siginfo from the current thread
7223 at this point. If not, throw an error. */
7224
/* Check if it makes sense to read $_siginfo from the current thread
   at this point.  If not, throw an error.  The checks are ordered so
   that the most fundamental problem is reported first.  */

static void
validate_siginfo_access (void)
{
  /* No current inferior, no siginfo.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    error (_("No thread selected."));

  /* Don't try to read from a dead thread.  */
  if (is_exited (inferior_ptid))
    error (_("The current thread has terminated"));

  /* ... or from a spinning thread.  */
  if (is_running (inferior_ptid))
    error (_("Selected thread is running."));
}
7240
7241 /* The $_siginfo convenience variable is a bit special. We don't know
7242 for sure the type of the value until we actually have a chance to
7243 fetch the data. The type can change depending on gdbarch, so it is
7244 also dependent on which thread you have selected.
7245
7246 1. making $_siginfo be an internalvar that creates a new value on
7247 access.
7248
7249 2. making the value of $_siginfo be an lval_computed value. */
7250
7251 /* This function implements the lval_computed support for reading a
7252 $_siginfo value. */
7253
7254 static void
7255 siginfo_value_read (struct value *v)
7256 {
7257 LONGEST transferred;
7258
7259 validate_siginfo_access ();
7260
7261 transferred =
7262 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
7263 NULL,
7264 value_contents_all_raw (v),
7265 value_offset (v),
7266 TYPE_LENGTH (value_type (v)));
7267
7268 if (transferred != TYPE_LENGTH (value_type (v)))
7269 error (_("Unable to read siginfo"));
7270 }
7271
7272 /* This function implements the lval_computed support for writing a
7273 $_siginfo value. */
7274
7275 static void
7276 siginfo_value_write (struct value *v, struct value *fromval)
7277 {
7278 LONGEST transferred;
7279
7280 validate_siginfo_access ();
7281
7282 transferred = target_write (&current_target,
7283 TARGET_OBJECT_SIGNAL_INFO,
7284 NULL,
7285 value_contents_all_raw (fromval),
7286 value_offset (v),
7287 TYPE_LENGTH (value_type (fromval)));
7288
7289 if (transferred != TYPE_LENGTH (value_type (fromval)))
7290 error (_("Unable to write siginfo"));
7291 }
7292
/* Read/write hooks making $_siginfo an lval_computed value.  */

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
7298
7299 /* Return a new value with the correct type for the siginfo object of
7300 the current thread using architecture GDBARCH. Return a void value
7301 if there's no object available. */
7302
7303 static struct value *
7304 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7305 void *ignore)
7306 {
7307 if (target_has_stack
7308 && !ptid_equal (inferior_ptid, null_ptid)
7309 && gdbarch_get_siginfo_type_p (gdbarch))
7310 {
7311 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7312
7313 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7314 }
7315
7316 return allocate_value (builtin_type (gdbarch)->builtin_void);
7317 }
7318
7319 \f
7320 /* infcall_suspend_state contains state about the program itself like its
7321 registers and any signal it received when it last stopped.
7322 This state must be restored regardless of how the inferior function call
7323 ends (either successfully, or after it hits a breakpoint or signal)
7324 if the program is to properly continue where it left off. */
7325
struct infcall_suspend_state
{
  /* The thread's stop signal, stop PC, etc., as of the save.  */
  struct thread_suspend_state thread_suspend;

  /* Other fields:  */
  /* Saved global stop_pc.  */
  CORE_ADDR stop_pc;
  /* Owned copy of the thread's register state; freed by
     discard_infcall_suspend_state.  */
  struct regcache *registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *siginfo_gdbarch;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb_byte *siginfo_data;
};
7342
/* Snapshot the current thread's suspend state, registers, stop_pc,
   and (if the architecture describes it) siginfo, so an inferior
   function call can be undone.  Returns a heap-allocated state that
   the caller must eventually pass to restore_infcall_suspend_state or
   discard_infcall_suspend_state.  */

struct infcall_suspend_state *
save_infcall_suspend_state (void)
{
  struct infcall_suspend_state *inf_state;
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  gdb_byte *siginfo_data = NULL;

  if (gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);
      size_t len = TYPE_LENGTH (type);
      struct cleanup *back_to;

      siginfo_data = xmalloc (len);
      back_to = make_cleanup (xfree, siginfo_data);

      if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		       siginfo_data, 0, len) == len)
	discard_cleanups (back_to);
      else
	{
	  /* Errors ignored.  */
	  do_cleanups (back_to);
	  siginfo_data = NULL;
	}
    }

  inf_state = XCNEW (struct infcall_suspend_state);

  if (siginfo_data)
    {
      inf_state->siginfo_gdbarch = gdbarch;
      inf_state->siginfo_data = siginfo_data;
    }

  inf_state->thread_suspend = tp->suspend;

  /* run_inferior_call will not use the signal due to its `proceed' call with
     GDB_SIGNAL_0 anyway.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  inf_state->stop_pc = stop_pc;

  inf_state->registers = regcache_dup (regcache);

  return inf_state;
}
7392
/* Restore inferior session state to INF_STATE.  INF_STATE is consumed
   (freed) by this call.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  /* Put back the thread suspend state and the global stop PC saved by
     save_infcall_suspend_state.  */
  tp->suspend = inf_state->thread_suspend;

  stop_pc = inf_state->stop_pc;

  /* Only write siginfo back when it was captured against this same
     gdbarch; for any other gdbarch the saved bytes would be in the
     wrong format (see struct infcall_suspend_state).  */
  if (inf_state->siginfo_gdbarch == gdbarch)
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Errors ignored.  */
      target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		    inf_state->siginfo_data, 0, TYPE_LENGTH (type));
    }

  /* The inferior can be gone if the user types "print exit(0)"
     (and perhaps other times).  */
  if (target_has_execution)
    /* NB: The register write goes through to the target.  */
    regcache_cpy (regcache, inf_state->registers);

  /* Restoring consumes INF_STATE; release its storage.  */
  discard_infcall_suspend_state (inf_state);
}
7423
/* Cleanup adapter: forward STATE to restore_infcall_suspend_state.  */

static void
do_restore_infcall_suspend_state_cleanup (void *state)
{
  struct infcall_suspend_state *inf_state = state;

  restore_infcall_suspend_state (inf_state);
}
7429
7430 struct cleanup *
7431 make_cleanup_restore_infcall_suspend_state
7432 (struct infcall_suspend_state *inf_state)
7433 {
7434 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7435 }
7436
7437 void
7438 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7439 {
7440 regcache_xfree (inf_state->registers);
7441 xfree (inf_state->siginfo_data);
7442 xfree (inf_state);
7443 }
7444
/* Return the register snapshot held inside INF_STATE.  The caller must
   not free it; it is released together with INF_STATE by
   discard_infcall_suspend_state.  */

struct regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers;
}
7450
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread execution-control state (resume breakpoints, stop
     bpstat chain, ...).  */
  struct thread_control_state thread_control;
  /* Per-inferior execution-control state.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy;
  int stopped_by_random_signal;
  int stop_after_trap;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id;
};
7468
7469 /* Save all of the information associated with the inferior<==>gdb
7470 connection. */
7471
7472 struct infcall_control_state *
7473 save_infcall_control_state (void)
7474 {
7475 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7476 struct thread_info *tp = inferior_thread ();
7477 struct inferior *inf = current_inferior ();
7478
7479 inf_status->thread_control = tp->control;
7480 inf_status->inferior_control = inf->control;
7481
7482 tp->control.step_resume_breakpoint = NULL;
7483 tp->control.exception_resume_breakpoint = NULL;
7484
7485 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7486 chain. If caller's caller is walking the chain, they'll be happier if we
7487 hand them back the original chain when restore_infcall_control_state is
7488 called. */
7489 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7490
7491 /* Other fields: */
7492 inf_status->stop_stack_dummy = stop_stack_dummy;
7493 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7494 inf_status->stop_after_trap = stop_after_trap;
7495
7496 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7497
7498 return inf_status;
7499 }
7500
7501 static int
7502 restore_selected_frame (void *args)
7503 {
7504 struct frame_id *fid = (struct frame_id *) args;
7505 struct frame_info *frame;
7506
7507 frame = frame_find_by_id (*fid);
7508
7509 /* If inf_status->selected_frame_id is NULL, there was no previously
7510 selected frame. */
7511 if (frame == NULL)
7512 {
7513 warning (_("Unable to restore previously selected frame."));
7514 return 0;
7515 }
7516
7517 select_frame (frame);
7518
7519 return (1);
7520 }
7521
/* Restore inferior session state to INF_STATUS.  INF_STATUS is
   consumed (freed) by this call.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Resume breakpoints set since the state was saved are stale; mark
     them for deletion at the next stop instead of deleting now.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;
  stop_after_trap = inf_status->stop_after_trap;

  if (target_has_stack)
    {
      /* The point of catch_errors is that if the stack is clobbered,
	 walking the stack might encounter a garbage pointer and
	 error() trying to dereference it.  */
      if (catch_errors
	  (restore_selected_frame, &inf_status->selected_frame_id,
	   "Unable to restore previously selected frame:\n",
	   RETURN_MASK_ERROR) == 0)
	/* Error in restoring the selected frame.  Select the innermost
	   frame.  */
	select_frame (get_current_frame ());
    }

  /* The saved state has been consumed; free it.  */
  xfree (inf_status);
}
7564
/* Cleanup adapter: forward STS to restore_infcall_control_state.  */

static void
do_restore_infcall_control_state_cleanup (void *sts)
{
  struct infcall_control_state *inf_status = sts;

  restore_infcall_control_state (inf_status);
}
7570
7571 struct cleanup *
7572 make_cleanup_restore_infcall_control_state
7573 (struct infcall_control_state *inf_status)
7574 {
7575 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7576 }
7577
7578 void
7579 discard_infcall_control_state (struct infcall_control_state *inf_status)
7580 {
7581 if (inf_status->thread_control.step_resume_breakpoint)
7582 inf_status->thread_control.step_resume_breakpoint->disposition
7583 = disp_del_at_next_stop;
7584
7585 if (inf_status->thread_control.exception_resume_breakpoint)
7586 inf_status->thread_control.exception_resume_breakpoint->disposition
7587 = disp_del_at_next_stop;
7588
7589 /* See save_infcall_control_state for info on stop_bpstat. */
7590 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7591
7592 xfree (inf_status);
7593 }
7594 \f
7595 /* restore_inferior_ptid() will be used by the cleanup machinery
7596 to restore the inferior_ptid value saved in a call to
7597 save_inferior_ptid(). */
7598
7599 static void
7600 restore_inferior_ptid (void *arg)
7601 {
7602 ptid_t *saved_ptid_ptr = arg;
7603
7604 inferior_ptid = *saved_ptid_ptr;
7605 xfree (arg);
7606 }
7607
7608 /* Save the value of inferior_ptid so that it may be restored by a
7609 later call to do_cleanups(). Returns the struct cleanup pointer
7610 needed for later doing the cleanup. */
7611
7612 struct cleanup *
7613 save_inferior_ptid (void)
7614 {
7615 ptid_t *saved_ptid_ptr;
7616
7617 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7618 *saved_ptid_ptr = inferior_ptid;
7619 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7620 }
7621
/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  /* Reset both exit-status convenience variables; the two clears are
     independent of each other.  */
  clear_internalvar (lookup_internalvar ("_exitcode"));
  clear_internalvar (lookup_internalvar ("_exitsignal"));
}
7630 \f
7631
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current direction of execution, consulted elsewhere in infrun.  */
int execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* Backing string for the "set exec-direction" enum command; kept in
   sync with EXECUTION_DIRECTION by set_exec_direction_func.  */
static const char *exec_direction = exec_forward;
/* NULL-terminated list of valid "set exec-direction" values.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
7645
7646 static void
7647 set_exec_direction_func (char *args, int from_tty,
7648 struct cmd_list_element *cmd)
7649 {
7650 if (target_can_execute_reverse)
7651 {
7652 if (!strcmp (exec_direction, exec_forward))
7653 execution_direction = EXEC_FORWARD;
7654 else if (!strcmp (exec_direction, exec_reverse))
7655 execution_direction = EXEC_REVERSE;
7656 }
7657 else
7658 {
7659 exec_direction = exec_forward;
7660 error (_("Target does not support this operation."));
7661 }
7662 }
7663
7664 static void
7665 show_exec_direction_func (struct ui_file *out, int from_tty,
7666 struct cmd_list_element *cmd, const char *value)
7667 {
7668 switch (execution_direction) {
7669 case EXEC_FORWARD:
7670 fprintf_filtered (out, _("Forward.\n"));
7671 break;
7672 case EXEC_REVERSE:
7673 fprintf_filtered (out, _("Reverse.\n"));
7674 break;
7675 default:
7676 internal_error (__FILE__, __LINE__,
7677 _("bogus execution_direction value: %d"),
7678 (int) execution_direction);
7679 }
7680 }
7681
/* "show schedule-multiple" callback: print the current setting
   VALUE.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
			    "of all processes is %s.\n"), value);
}
7689
/* Implementation of `siginfo' variable.  Only the make-value hook is
   provided; the remaining internalvar hooks are unused here -- see
   struct internalvar_funcs for the slot meanings.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
7698
/* Module initializer: register infrun's commands and settings, set up
   the default per-signal handling tables, and attach thread/inferior
   lifetime observers.  */

void
_initialize_infrun (void)
{
  int i;
  int numsigs;
  struct cmd_list_element *c;

  /* Signal-handling info and configuration commands.  */
  add_info ("signals", signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
			    not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* Debug-output knobs for infrun itself.  */
  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
			     NULL,
			     show_debug_infrun,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("displaced", class_maintenance,
			   &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
			   NULL,
			   show_debug_displaced,
			   &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Allocate the per-signal handling tables, indexed by GDB signal
     number.  */
  numsigs = (int) GDB_SIGNAL_LAST;
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
  signal_print = (unsigned char *)
    xmalloc (sizeof (signal_print[0]) * numsigs);
  signal_program = (unsigned char *)
    xmalloc (sizeof (signal_program[0]) * numsigs);
  signal_catch = (unsigned char *)
    xmalloc (sizeof (signal_catch[0]) * numsigs);
  signal_pass = (unsigned char *)
    xmalloc (sizeof (signal_pass[0]) * numsigs);
  /* Default: every signal stops the inferior, is printed, and is
     delivered to the program.  signal_pass is not filled in here; it
     is derived by signal_cache_update below.  */
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
	In this mode, other threads may run during other commands."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  /* Reverse-execution direction.  */
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Keep infrun's per-thread bookkeeping in sync with thread and
     inferior lifetime events.  */
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);
}
/* (Removed: stray git-web-interface footer text; not part of infrun.c.)  */