1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include <string.h>
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "record-full.h"
53 #include "inline-frame.h"
54 #include "jit.h"
55 #include "tracepoint.h"
56 #include "continuations.h"
57 #include "interps.h"
58 #include "skip.h"
59 #include "probe.h"
60 #include "objfiles.h"
61 #include "completer.h"
62 #include "target-descriptions.h"
63 #include "target-dcache.h"
64
65 /* Prototypes for local functions */
66
67 static void signals_info (char *, int);
68
69 static void handle_command (char *, int);
70
71 static void sig_print_info (enum gdb_signal);
72
73 static void sig_print_header (void);
74
75 static void resume_cleanups (void *);
76
77 static int hook_stop_stub (void *);
78
79 static int restore_selected_frame (void *);
80
81 static int follow_fork (void);
82
83 static void set_schedlock_func (char *args, int from_tty,
84 struct cmd_list_element *c);
85
86 static int currently_stepping (struct thread_info *tp);
87
88 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
89 void *data);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 static int prepare_to_proceed (int);
94
95 static void print_exited_reason (int exitstatus);
96
97 static void print_signal_exited_reason (enum gdb_signal siggnal);
98
99 static void print_no_history_reason (void);
100
101 static void print_signal_received_reason (enum gdb_signal siggnal);
102
103 static void print_end_stepping_range_reason (void);
104
105 void _initialize_infrun (void);
106
107 void nullify_last_target_wait_ptid (void);
108
109 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
110
111 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
112
113 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
114
115 /* When set, stop the 'step' command if we enter a function which has
116 no line number information. The normal behavior is that we step
117 over such functions. */
118 int step_stop_if_no_debug = 0;
119 static void
120 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
121 struct cmd_list_element *c, const char *value)
122 {
123 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
124 }
125
126 /* In asynchronous mode, but simulating synchronous execution. */
127
128 int sync_execution = 0;
129
130 /* proceed and normal_stop use this to notify the user when the
131 inferior stopped in a different thread than it had been running
132 in. */
133
134 static ptid_t previous_inferior_ptid;
135
136 /* If set (default for legacy reasons), when following a fork, GDB
137 will detach from one of the fork branches, child or parent.
138 Exactly which branch is detached depends on 'set follow-fork-mode'
139 setting. */
140
141 static int detach_fork = 1;
142
143 int debug_displaced = 0;
144 static void
145 show_debug_displaced (struct ui_file *file, int from_tty,
146 struct cmd_list_element *c, const char *value)
147 {
148 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
149 }
150
151 unsigned int debug_infrun = 0;
152 static void
153 show_debug_infrun (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
157 }
158
159
160 /* Support for disabling address space randomization. */
161
162 int disable_randomization = 1;
163
164 static void
165 show_disable_randomization (struct ui_file *file, int from_tty,
166 struct cmd_list_element *c, const char *value)
167 {
168 if (target_supports_disable_randomization ())
169 fprintf_filtered (file,
170 _("Disabling randomization of debuggee's "
171 "virtual address space is %s.\n"),
172 value);
173 else
174 fputs_filtered (_("Disabling randomization of debuggee's "
175 "virtual address space is unsupported on\n"
176 "this platform.\n"), file);
177 }
178
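/* The "set disable-randomization" command hook: reject the setting
   when the target does not support disabling address space
   randomization.  */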
179 static void
180 set_disable_randomization (char *args, int from_tty,
181 struct cmd_list_element *c)
182 {
183 if (!target_supports_disable_randomization ())
184 error (_("Disabling randomization of debuggee's "
185 "virtual address space is unsupported on\n"
186 "this platform."));
187 }
188
189 /* User interface for non-stop mode. */
190
191 int non_stop = 0;
192 static int non_stop_1 = 0;
193
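/* The "set non-stop" command hook: refuse to change the setting
   while the inferior is running.  */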
194 static void
195 set_non_stop (char *args, int from_tty,
196 struct cmd_list_element *c)
197 {
198 if (target_has_execution)
199 {
200 non_stop_1 = non_stop;
201 error (_("Cannot change this setting while the inferior is running."));
202 }
203
204 non_stop = non_stop_1;
205 }
206
207 static void
208 show_non_stop (struct ui_file *file, int from_tty,
209 struct cmd_list_element *c, const char *value)
210 {
211 fprintf_filtered (file,
212 _("Controlling the inferior in non-stop mode is %s.\n"),
213 value);
214 }
215
216 /* "Observer mode" is somewhat like a more extreme version of
217 non-stop, in which all GDB operations that might affect the
218 target's execution have been disabled. */
219
220 int observer_mode = 0;
221 static int observer_mode_1 = 0;
222
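/* The "set observer" command hook: refuse the change while the
   inferior is running, and update the target permission flags that
   observer mode implies.  */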
223 static void
224 set_observer_mode (char *args, int from_tty,
225 struct cmd_list_element *c)
226 {
227 if (target_has_execution)
228 {
229 observer_mode_1 = observer_mode;
230 error (_("Cannot change this setting while the inferior is running."));
231 }
232
233 observer_mode = observer_mode_1;
234
235 may_write_registers = !observer_mode;
236 may_write_memory = !observer_mode;
237 may_insert_breakpoints = !observer_mode;
238 may_insert_tracepoints = !observer_mode;
239 /* We can insert fast tracepoints in or out of observer mode,
240 but enable them if we're going into this mode. */
241 if (observer_mode)
242 may_insert_fast_tracepoints = 1;
243 may_stop = !observer_mode;
244 update_target_permissions ();
245
246 /* Going *into* observer mode we must force non-stop, then
247 going out we leave it that way. */
248 if (observer_mode)
249 {
250 target_async_permitted = 1;
251 pagination_enabled = 0;
252 non_stop = non_stop_1 = 1;
253 }
254
255 if (from_tty)
256 printf_filtered (_("Observer mode is now %s.\n"),
257 (observer_mode ? "on" : "off"));
258 }
259
260 static void
261 show_observer_mode (struct ui_file *file, int from_tty,
262 struct cmd_list_element *c, const char *value)
263 {
264 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
265 }
266
267 /* This updates the value of observer mode based on changes in
268 permissions. Note that we are deliberately ignoring the values of
269 may-write-registers and may-write-memory, since the user may have
270 reason to enable these during a session, for instance to turn on a
271 debugging-related global. */
272
273 void
274 update_observer_mode (void)
275 {
276 int newval;
277
278 newval = (!may_insert_breakpoints
279 && !may_insert_tracepoints
280 && may_insert_fast_tracepoints
281 && !may_stop
282 && non_stop);
283
284 /* Let the user know if things change. */
285 if (newval != observer_mode)
286 printf_filtered (_("Observer mode is now %s.\n"),
287 (newval ? "on" : "off"));
288
289 observer_mode = observer_mode_1 = newval;
290 }
291
292 /* Tables of how to react to signals; the user sets them. */
293
294 static unsigned char *signal_stop;
295 static unsigned char *signal_print;
296 static unsigned char *signal_program;
297
298 /* Table of signals that are registered with "catch signal". A
299 non-zero entry indicates that the signal is caught by some "catch
300 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
301 signals. */
302 static unsigned char *signal_catch;
303
304 /* Table of signals that the target may silently handle.
305 This is automatically determined from the flags above,
306 and simply cached here. */
307 static unsigned char *signal_pass;
308
309 #define SET_SIGS(nsigs,sigs,flags) \
310 do { \
311 int signum = (nsigs); \
312 while (signum-- > 0) \
313 if ((sigs)[signum]) \
314 (flags)[signum] = 1; \
315 } while (0)
316
317 #define UNSET_SIGS(nsigs,sigs,flags) \
318 do { \
319 int signum = (nsigs); \
320 while (signum-- > 0) \
321 if ((sigs)[signum]) \
322 (flags)[signum] = 0; \
323 } while (0)
324
325 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
326 this function is to avoid exporting `signal_program'. */
327
328 void
329 update_signals_program_target (void)
330 {
331 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
332 }
333
334 /* Value to pass to target_resume() to cause all threads to resume. */
335
336 #define RESUME_ALL minus_one_ptid
337
338 /* Command list pointer for the "stop" placeholder. */
339
340 static struct cmd_list_element *stop_command;
341
342 /* Function inferior was in as of last step command. */
343
344 static struct symbol *step_start_function;
345
346 /* Nonzero if we want to give control to the user when we're notified
347 of shared library events by the dynamic linker. */
348 int stop_on_solib_events;
349
350 /* Enable or disable optional shared library event breakpoints
351 as appropriate when the above flag is changed. */
352
353 static void
354 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
355 {
356 update_solib_breakpoints ();
357 }
358
359 static void
360 show_stop_on_solib_events (struct ui_file *file, int from_tty,
361 struct cmd_list_element *c, const char *value)
362 {
363 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
364 value);
365 }
366
367 /* Nonzero means we are expecting a trace trap
368 and should stop the inferior and return silently when it happens. */
369
370 int stop_after_trap;
371
372 /* Save register contents here when executing a "finish" command or when
373 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
374 Thus this contains the return value from the called function (assuming
375 values are returned in a register). */
376
377 struct regcache *stop_registers;
378
379 /* Nonzero after stop if current stack frame should be printed. */
380
381 static int stop_print_frame;
382
383 /* This is a cached copy of the pid/waitstatus of the last event
384 returned by target_wait()/deprecated_target_wait_hook(). This
385 information is returned by get_last_target_status(). */
386 static ptid_t target_last_wait_ptid;
387 static struct target_waitstatus target_last_waitstatus;
388
389 static void context_switch (ptid_t ptid);
390
391 void init_thread_stepping_state (struct thread_info *tss);
392
393 static void init_infwait_state (void);
394
395 static const char follow_fork_mode_child[] = "child";
396 static const char follow_fork_mode_parent[] = "parent";
397
398 static const char *const follow_fork_mode_kind_names[] = {
399 follow_fork_mode_child,
400 follow_fork_mode_parent,
401 NULL
402 };
403
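/* The current "set follow-fork-mode" setting.  */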
404 static const char *follow_fork_mode_string = follow_fork_mode_parent;
405 static void
406 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
407 struct cmd_list_element *c, const char *value)
408 {
409 fprintf_filtered (file,
410 _("Debugger response to a program "
411 "call of fork or vfork is \"%s\".\n"),
412 value);
413 }
414 \f
415
416 /* Tell the target to follow the fork we're stopped at. Returns true
417 if the inferior should be resumed; false, if the target for some
418 reason decided it's best not to resume. */
419
420 static int
421 follow_fork (void)
422 {
423 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
424 int should_resume = 1;
425 struct thread_info *tp;
426
427 /* Copy user stepping state to the new inferior thread. FIXME: the
428 followed fork child thread should have a copy of most of the
429 parent thread structure's run control related fields, not just these.
430 Initialized to avoid "may be used uninitialized" warnings from gcc. */
431 struct breakpoint *step_resume_breakpoint = NULL;
432 struct breakpoint *exception_resume_breakpoint = NULL;
433 CORE_ADDR step_range_start = 0;
434 CORE_ADDR step_range_end = 0;
435 struct frame_id step_frame_id = { 0 };
436
437 if (!non_stop)
438 {
439 ptid_t wait_ptid;
440 struct target_waitstatus wait_status;
441
442 /* Get the last target status returned by target_wait(). */
443 get_last_target_status (&wait_ptid, &wait_status);
444
445 /* If not stopped at a fork event, then there's nothing else to
446 do. */
447 if (wait_status.kind != TARGET_WAITKIND_FORKED
448 && wait_status.kind != TARGET_WAITKIND_VFORKED)
449 return 1;
450
451 /* Check if we switched over from WAIT_PTID, since the event was
452 reported. */
453 if (!ptid_equal (wait_ptid, minus_one_ptid)
454 && !ptid_equal (inferior_ptid, wait_ptid))
455 {
456 /* We did. Switch back to WAIT_PTID thread, to tell the
457 target to follow it (in either direction). We'll
458 afterwards refuse to resume, and inform the user what
459 happened. */
460 switch_to_thread (wait_ptid);
461 should_resume = 0;
462 }
463 }
464
465 tp = inferior_thread ();
466
467 /* If there were any forks/vforks that were caught and are now to be
468 followed, then do so now. */
469 switch (tp->pending_follow.kind)
470 {
471 case TARGET_WAITKIND_FORKED:
472 case TARGET_WAITKIND_VFORKED:
473 {
474 ptid_t parent, child;
475
476 /* If the user did a next/step, etc, over a fork call,
477 preserve the stepping state in the fork child. */
478 if (follow_child && should_resume)
479 {
480 step_resume_breakpoint = clone_momentary_breakpoint
481 (tp->control.step_resume_breakpoint);
482 step_range_start = tp->control.step_range_start;
483 step_range_end = tp->control.step_range_end;
484 step_frame_id = tp->control.step_frame_id;
485 exception_resume_breakpoint
486 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
487
488 /* For now, delete the parent's sr breakpoint, otherwise,
489 parent/child sr breakpoints are considered duplicates,
490 and the child version will not be installed. Remove
491 this when the breakpoints module becomes aware of
492 inferiors and address spaces. */
493 delete_step_resume_breakpoint (tp);
494 tp->control.step_range_start = 0;
495 tp->control.step_range_end = 0;
496 tp->control.step_frame_id = null_frame_id;
497 delete_exception_resume_breakpoint (tp);
498 }
499
500 parent = inferior_ptid;
501 child = tp->pending_follow.value.related_pid;
502
503 /* Tell the target to do whatever is necessary to follow
504 either parent or child. */
505 if (target_follow_fork (follow_child, detach_fork))
506 {
507 /* Target refused to follow, or there's some other reason
508 we shouldn't resume. */
509 should_resume = 0;
510 }
511 else
512 {
513 /* This pending follow fork event is now handled, one way
514 or another. The previous selected thread may be gone
515 from the lists by now, but if it is still around, need
516 to clear the pending follow request. */
517 tp = find_thread_ptid (parent);
518 if (tp)
519 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
520
521 /* This makes sure we don't try to apply the "Switched
522 over from WAIT_PID" logic above. */
523 nullify_last_target_wait_ptid ();
524
525 /* If we followed the child, switch to it... */
526 if (follow_child)
527 {
528 switch_to_thread (child);
529
530 /* ... and preserve the stepping state, in case the
531 user was stepping over the fork call. */
532 if (should_resume)
533 {
534 tp = inferior_thread ();
535 tp->control.step_resume_breakpoint
536 = step_resume_breakpoint;
537 tp->control.step_range_start = step_range_start;
538 tp->control.step_range_end = step_range_end;
539 tp->control.step_frame_id = step_frame_id;
540 tp->control.exception_resume_breakpoint
541 = exception_resume_breakpoint;
542 }
543 else
544 {
545 /* If we get here, it was because we're trying to
546 resume from a fork catchpoint, but, the user
547 has switched threads away from the thread that
548 forked. In that case, the resume command
549 issued is most likely not applicable to the
550 child, so just warn, and refuse to resume. */
551 warning (_("Not resuming: switched threads "
552 "before following fork child.\n"));
553 }
554
555 /* Reset breakpoints in the child as appropriate. */
556 follow_inferior_reset_breakpoints ();
557 }
558 else
559 switch_to_thread (parent);
560 }
561 }
562 break;
563 case TARGET_WAITKIND_SPURIOUS:
564 /* Nothing to follow. */
565 break;
566 default:
567 internal_error (__FILE__, __LINE__,
568 "Unexpected pending_follow.kind %d\n",
569 tp->pending_follow.kind);
570 break;
571 }
572
573 return should_resume;
574 }
575
576 void
577 follow_inferior_reset_breakpoints (void)
578 {
579 struct thread_info *tp = inferior_thread ();
580
581 /* Was there a step_resume breakpoint? (There was if the user
582 did a "next" at the fork() call.) If so, explicitly reset its
583 thread number.
584
585 step_resumes are a form of bp that are made to be per-thread.
586 Since we created the step_resume bp when the parent process
587 was being debugged, and now are switching to the child process,
588 from the breakpoint package's viewpoint, that's a switch of
589 "threads". We must update the bp's notion of which thread
590 it is for, or it'll be ignored when it triggers. */
591
592 if (tp->control.step_resume_breakpoint)
593 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
594
595 if (tp->control.exception_resume_breakpoint)
596 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
597
598 /* Reinsert all breakpoints in the child. The user may have set
599 breakpoints after catching the fork, in which case those
600 were never set in the child, but only in the parent. This makes
601 sure the inserted breakpoints match the breakpoint list. */
602
603 breakpoint_re_set ();
604 insert_breakpoints ();
605 }
606
607 /* The child has exited or execed: resume threads of the parent the
608 user wanted to be executing. */
609
610 static int
611 proceed_after_vfork_done (struct thread_info *thread,
612 void *arg)
613 {
614 int pid = * (int *) arg;
615
616 if (ptid_get_pid (thread->ptid) == pid
617 && is_running (thread->ptid)
618 && !is_executing (thread->ptid)
619 && !thread->stop_requested
620 && thread->suspend.stop_signal == GDB_SIGNAL_0)
621 {
622 if (debug_infrun)
623 fprintf_unfiltered (gdb_stdlog,
624 "infrun: resuming vfork parent thread %s\n",
625 target_pid_to_str (thread->ptid));
626
627 switch_to_thread (thread->ptid);
628 clear_proceed_status ();
629 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
630 }
631
632 return 0;
633 }
634
635 /* Called whenever we notice an exec or exit event, to handle
636 detaching or resuming a vfork parent. */
637
638 static void
639 handle_vfork_child_exec_or_exit (int exec)
640 {
641 struct inferior *inf = current_inferior ();
642
643 if (inf->vfork_parent)
644 {
645 int resume_parent = -1;
646
647 /* This exec or exit marks the end of the shared memory region
648 between the parent and the child. If the user wanted to
649 detach from the parent, now is the time. */
650
651 if (inf->vfork_parent->pending_detach)
652 {
653 struct thread_info *tp;
654 struct cleanup *old_chain;
655 struct program_space *pspace;
656 struct address_space *aspace;
657
658 /* follow-fork child, detach-on-fork on. */
659
660 inf->vfork_parent->pending_detach = 0;
661
662 if (!exec)
663 {
664 /* If we're handling a child exit, then inferior_ptid
665 points at the inferior's pid, not to a thread. */
666 old_chain = save_inferior_ptid ();
667 save_current_program_space ();
668 save_current_inferior ();
669 }
670 else
671 old_chain = save_current_space_and_thread ();
672
673 /* We're letting loose of the parent. */
674 tp = any_live_thread_of_process (inf->vfork_parent->pid);
675 switch_to_thread (tp->ptid);
676
677 /* We're about to detach from the parent, which implicitly
678 removes breakpoints from its address space. There's a
679 catch here: we want to reuse the spaces for the child,
680 but, parent/child are still sharing the pspace at this
681 point, although the exec in reality makes the kernel give
682 the child a fresh set of new pages. The problem here is
683 that the breakpoints module, being unaware of this, would
684 likely choose the child process to write to the parent
685 address space. Swapping the child temporarily away from
686 the spaces has the desired effect. Yes, this is "sort
687 of" a hack. */
688
689 pspace = inf->pspace;
690 aspace = inf->aspace;
691 inf->aspace = NULL;
692 inf->pspace = NULL;
693
694 if (debug_infrun || info_verbose)
695 {
696 target_terminal_ours ();
697
698 if (exec)
699 fprintf_filtered (gdb_stdlog,
700 "Detaching vfork parent process "
701 "%d after child exec.\n",
702 inf->vfork_parent->pid);
703 else
704 fprintf_filtered (gdb_stdlog,
705 "Detaching vfork parent process "
706 "%d after child exit.\n",
707 inf->vfork_parent->pid);
708 }
709
710 target_detach (NULL, 0);
711
712 /* Put it back. */
713 inf->pspace = pspace;
714 inf->aspace = aspace;
715
716 do_cleanups (old_chain);
717 }
718 else if (exec)
719 {
720 /* We're staying attached to the parent, so, really give the
721 child a new address space. */
722 inf->pspace = add_program_space (maybe_new_address_space ());
723 inf->aspace = inf->pspace->aspace;
724 inf->removable = 1;
725 set_current_program_space (inf->pspace);
726
727 resume_parent = inf->vfork_parent->pid;
728
729 /* Break the bonds. */
730 inf->vfork_parent->vfork_child = NULL;
731 }
732 else
733 {
734 struct cleanup *old_chain;
735 struct program_space *pspace;
736
737 /* If this is a vfork child exiting, then the pspace and
738 aspaces were shared with the parent. Since we're
739 reporting the process exit, we'll be mourning all that is
740 found in the address space, and switching to null_ptid,
741 preparing to start a new inferior. But, since we don't
742 want to clobber the parent's address/program spaces, we
743 go ahead and create a new one for this exiting
744 inferior. */
745
746 /* Switch to null_ptid, so that clone_program_space doesn't want
747 to read the selected frame of a dead process. */
748 old_chain = save_inferior_ptid ();
749 inferior_ptid = null_ptid;
750
751 /* This inferior is dead, so avoid giving the breakpoints
752 module the option to write through to it (cloning a
753 program space resets breakpoints). */
754 inf->aspace = NULL;
755 inf->pspace = NULL;
756 pspace = add_program_space (maybe_new_address_space ());
757 set_current_program_space (pspace);
758 inf->removable = 1;
759 inf->symfile_flags = SYMFILE_NO_READ;
760 clone_program_space (pspace, inf->vfork_parent->pspace);
761 inf->pspace = pspace;
762 inf->aspace = pspace->aspace;
763
764 /* Put back inferior_ptid. We'll continue mourning this
765 inferior. */
766 do_cleanups (old_chain);
767
768 resume_parent = inf->vfork_parent->pid;
769 /* Break the bonds. */
770 inf->vfork_parent->vfork_child = NULL;
771 }
772
773 inf->vfork_parent = NULL;
774
775 gdb_assert (current_program_space == inf->pspace);
776
777 if (non_stop && resume_parent != -1)
778 {
779 /* If the user wanted the parent to be running, let it go
780 free now. */
781 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
782
783 if (debug_infrun)
784 fprintf_unfiltered (gdb_stdlog,
785 "infrun: resuming vfork parent process %d\n",
786 resume_parent);
787
788 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
789
790 do_cleanups (old_chain);
791 }
792 }
793 }
794
795 /* Enum strings for "set|show follow-exec-mode". */
796
797 static const char follow_exec_mode_new[] = "new";
798 static const char follow_exec_mode_same[] = "same";
799 static const char *const follow_exec_mode_names[] =
800 {
801 follow_exec_mode_new,
802 follow_exec_mode_same,
803 NULL,
804 };
805
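/* The current "set follow-exec-mode" setting.  */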
806 static const char *follow_exec_mode_string = follow_exec_mode_same;
807 static void
808 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
809 struct cmd_list_element *c, const char *value)
810 {
811 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
812 }
813
814 /* EXECD_PATHNAME is assumed to be non-NULL. */
815
816 static void
817 follow_exec (ptid_t pid, char *execd_pathname)
818 {
819 struct thread_info *th = inferior_thread ();
820 struct inferior *inf = current_inferior ();
821
822 /* This is an exec event that we actually wish to pay attention to.
823 Refresh our symbol table to the newly exec'd program, remove any
824 momentary bp's, etc.
825
826 If there are breakpoints, they aren't really inserted now,
827 since the exec() transformed our inferior into a fresh set
828 of instructions.
829
830 We want to preserve symbolic breakpoints on the list, since
831 we have hopes that they can be reset after the new a.out's
832 symbol table is read.
833
834 However, any "raw" breakpoints must be removed from the list
835 (e.g., the solib bp's), since their address is probably invalid
836 now.
837
838 And, we DON'T want to call delete_breakpoints() here, since
839 that may write the bp's "shadow contents" (the instruction
840 value that was overwritten with a TRAP instruction). Since
841 we now have a new a.out, those shadow contents aren't valid. */
842
843 mark_breakpoints_out ();
844
845 update_breakpoints_after_exec ();
846
847 /* If there was one, it's gone now. We cannot truly step-to-next
848 statement through an exec(). */
849 th->control.step_resume_breakpoint = NULL;
850 th->control.exception_resume_breakpoint = NULL;
851 th->control.step_range_start = 0;
852 th->control.step_range_end = 0;
853
854 /* The target reports the exec event to the main thread, even if
855 some other thread does the exec, and even if the main thread was
856 already stopped --- if debugging in non-stop mode, it's possible
857 the user had the main thread held stopped in the previous image
858 --- release it now. This is the same behavior as step-over-exec
859 with scheduler-locking on in all-stop mode. */
860 th->stop_requested = 0;
861
862 /* What is this a.out's name? */
863 printf_unfiltered (_("%s is executing new program: %s\n"),
864 target_pid_to_str (inferior_ptid),
865 execd_pathname);
866
867 /* We've followed the inferior through an exec. Therefore, the
868 inferior has essentially been killed & reborn. */
869
870 gdb_flush (gdb_stdout);
871
872 breakpoint_init_inferior (inf_execd);
873
874 if (gdb_sysroot && *gdb_sysroot)
875 {
876 char *name = alloca (strlen (gdb_sysroot)
877 + strlen (execd_pathname)
878 + 1);
879
880 strcpy (name, gdb_sysroot);
881 strcat (name, execd_pathname);
882 execd_pathname = name;
883 }
884
885 /* Reset the shared library package. This ensures that we get a
886 shlib event when the child reaches "_start", at which point the
887 dld will have had a chance to initialize the child. */
888 /* Also, loading a symbol file below may trigger symbol lookups, and
889 we don't want those to be satisfied by the libraries of the
890 previous incarnation of this process. */
891 no_shared_libraries (NULL, 0);
892
893 if (follow_exec_mode_string == follow_exec_mode_new)
894 {
895 struct program_space *pspace;
896
897 /* The user wants to keep the old inferior and program spaces
898 around. Create a new fresh one, and switch to it. */
899
900 inf = add_inferior (current_inferior ()->pid);
901 pspace = add_program_space (maybe_new_address_space ());
902 inf->pspace = pspace;
903 inf->aspace = pspace->aspace;
904
905 exit_inferior_num_silent (current_inferior ()->num);
906
907 set_current_inferior (inf);
908 set_current_program_space (pspace);
909 }
910 else
911 {
912 /* The old description may no longer be fit for the new image.
913 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
914 old description; we'll read a new one below. No need to do
915 this on "follow-exec-mode new", as the old inferior stays
916 around (its description is later cleared/refetched on
917 restart). */
918 target_clear_description ();
919 }
920
921 gdb_assert (current_program_space == inf->pspace);
922
923 /* That a.out is now the one to use. */
924 exec_file_attach (execd_pathname, 0);
925
926 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
927 (Position Independent Executable) main symbol file will get applied by
928 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
929 the breakpoints with the zero displacement. */
930
931 symbol_file_add (execd_pathname,
932 (inf->symfile_flags
933 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
934 NULL, 0);
935
936 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
937 set_initial_language ();
938
939 /* If the target can specify a description, read it. Must do this
940 after flipping to the new executable (because the target supplied
941 description must be compatible with the executable's
942 architecture, and the old executable may e.g., be 32-bit, while
943 the new one 64-bit), and before anything involving memory or
944 registers. */
945 target_find_description ();
946
947 solib_create_inferior_hook (0);
948
949 jit_inferior_created_hook ();
950
951 breakpoint_re_set ();
952
953 /* Reinsert all breakpoints. (Those which were symbolic have
954 been reset to the proper address in the new a.out, thanks
955 to symbol_file_command...). */
956 insert_breakpoints ();
957
958 /* The next resume of this inferior should bring it to the shlib
959 startup breakpoints. (If the user had also set bp's on
960 "main" from the old (parent) process, then they'll auto-
961 matically get reset there in the new process.). */
962 }
963
964 /* Non-zero if we are just simulating a single-step. This is needed
965 because we cannot remove the breakpoints in the inferior process
966 until after the `wait' in `wait_for_inferior'. */
967 static int singlestep_breakpoints_inserted_p = 0;
968
969 /* The thread we inserted single-step breakpoints for. */
970 static ptid_t singlestep_ptid;
971
972 /* PC when we started this single-step. */
973 static CORE_ADDR singlestep_pc;
974
975 /* Info about an instruction that is being stepped over. Invalid if
976 ASPACE is NULL. */
977
978 struct step_over_info
979 {
980 /* The instruction's address space. */
981 struct address_space *aspace;
982
983 /* The instruction's address. */
984 CORE_ADDR address;
985 };
986
987 /* The step-over info of the location that is being stepped over.
988
989 Note that with async/breakpoint always-inserted mode, a user might
990 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
991 being stepped over. As setting a new breakpoint inserts all
992 breakpoints, we need to make sure the breakpoint being stepped over
993 isn't inserted then. We do that by only clearing the step-over
994 info when the step-over is actually finished (or aborted).
995
996 Presently GDB can only step over one breakpoint at any given time.
997 Given that threads which can't run code in the same address space as the
998 breakpoint's can't really miss the breakpoint, GDB could be taught
999 to step-over at most one breakpoint per address space (so this info
1000 could move to the address space object if/when GDB is extended).
1001 The set of breakpoints being stepped over will normally be much
1002 smaller than the set of all breakpoints, so a flag in the
1003 breakpoint location structure would be wasteful. A separate list
1004 also saves complexity and run-time, as otherwise we'd have to go
1005 through all breakpoint locations clearing their flag whenever we
1006 start a new sequence. Similar considerations weigh against storing
1007 this info in the thread object. Plus, not all step overs actually
1008 have breakpoint locations -- e.g., stepping past a single-step
1009 breakpoint, or stepping to complete a non-continuable
1010 watchpoint. */
1011 static struct step_over_info step_over_info;
1012
1013 /* Record the address of the breakpoint/instruction we're currently
1014 stepping over. */
1015
1016 static void
1017 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1018 {
1019 step_over_info.aspace = aspace;
1020 step_over_info.address = address;
1021 }
1022
1023 /* Called when we're no longer stepping over a breakpoint / an
1024 instruction, so all breakpoints are free to be (re)inserted. */
1025
1026 static void
1027 clear_step_over_info (void)
1028 {
1029 step_over_info.aspace = NULL;
1030 step_over_info.address = 0;
1031 }
1032
1033 /* See inferior.h. */
1034
1035 int
1036 stepping_past_instruction_at (struct address_space *aspace,
1037 CORE_ADDR address)
1038 {
1039 return (step_over_info.aspace != NULL
1040 && breakpoint_address_match (aspace, address,
1041 step_over_info.aspace,
1042 step_over_info.address));
1043 }
1044
1045 \f
1046 /* Displaced stepping. */
1047
1048 /* In non-stop debugging mode, we must take special care to manage
1049 breakpoints properly; in particular, the traditional strategy for
1050 stepping a thread past a breakpoint it has hit is unsuitable.
1051 'Displaced stepping' is a tactic for stepping one thread past a
1052 breakpoint it has hit while ensuring that other threads running
1053 concurrently will hit the breakpoint as they should.
1054
1055 The traditional way to step a thread T off a breakpoint in a
1056 multi-threaded program in all-stop mode is as follows:
1057
1058 a0) Initially, all threads are stopped, and breakpoints are not
1059 inserted.
1060 a1) We single-step T, leaving breakpoints uninserted.
1061 a2) We insert breakpoints, and resume all threads.
1062
1063 In non-stop debugging, however, this strategy is unsuitable: we
1064 don't want to have to stop all threads in the system in order to
1065 continue or step T past a breakpoint. Instead, we use displaced
1066 stepping:
1067
1068 n0) Initially, T is stopped, other threads are running, and
1069 breakpoints are inserted.
1070 n1) We copy the instruction "under" the breakpoint to a separate
1071 location, outside the main code stream, making any adjustments
1072 to the instruction, register, and memory state as directed by
1073 T's architecture.
1074 n2) We single-step T over the instruction at its new location.
1075 n3) We adjust the resulting register and memory state as directed
1076 by T's architecture. This includes resetting T's PC to point
1077 back into the main instruction stream.
1078 n4) We resume T.
1079
1080 This approach depends on the following gdbarch methods:
1081
1082 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1083 indicate where to copy the instruction, and how much space must
1084 be reserved there. We use these in step n1.
1085
1086 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1087 address, and makes any necessary adjustments to the instruction,
1088 register contents, and memory. We use this in step n1.
1089
1090 - gdbarch_displaced_step_fixup adjusts registers and memory after
1091 we have successfully single-stepped the instruction, to yield the
1092 same effect the instruction would have had if we had executed it
1093 at its original address. We use this in step n3.
1094
1095 - gdbarch_displaced_step_free_closure provides cleanup.
1096
1097 The gdbarch_displaced_step_copy_insn and
1098 gdbarch_displaced_step_fixup functions must be written so that
1099 copying an instruction with gdbarch_displaced_step_copy_insn,
1100 single-stepping across the copied instruction, and then applying
1101 gdbarch_displaced_step_fixup should have the same effects on the
1102 thread's memory and registers as stepping the instruction in place
1103 would have. Exactly which responsibilities fall to the copy and
1104 which fall to the fixup is up to the author of those functions.
1105
1106 See the comments in gdbarch.sh for details.
1107
1108 Note that displaced stepping and software single-step cannot
1109 currently be used in combination, although with some care I think
1110 they could be made to. Software single-step works by placing
1111 breakpoints on all possible subsequent instructions; if the
1112 displaced instruction is a PC-relative jump, those breakpoints
1113 could fall in very strange places --- on pages that aren't
1114 executable, or at addresses that are not proper instruction
1115 boundaries. (We do generally let other threads run while we wait
1116 to hit the software single-step breakpoint, and they might
1117 encounter such a corrupted instruction.) One way to work around
1118 this would be to have gdbarch_displaced_step_copy_insn fully
1119 simulate the effect of PC-relative instructions (and return NULL)
1120 on architectures that use software single-stepping.
1121
1122 In non-stop mode, we can have independent and simultaneous step
1123 requests, so more than one thread may need to simultaneously step
1124 over a breakpoint. The current implementation assumes there is
1125 only one scratch space per process. In this case, we have to
1126 serialize access to the scratch space. If thread A wants to step
1127 over a breakpoint, but we are currently waiting for some other
1128 thread to complete a displaced step, we leave thread A stopped and
1129 place it in the displaced_step_request_queue. Whenever a displaced
1130 step finishes, we pick the next thread in the queue and start a new
1131 displaced step operation on it. See displaced_step_prepare and
1132 displaced_step_fixup for details. */
1133
1134 struct displaced_step_request
1135 {
1136 ptid_t ptid;
1137 struct displaced_step_request *next;
1138 };
1139
1140 /* Per-inferior displaced stepping state. */
1141 struct displaced_step_inferior_state
1142 {
1143 /* Pointer to next in linked list. */
1144 struct displaced_step_inferior_state *next;
1145
1146 /* The process this displaced step state refers to. */
1147 int pid;
1148
1149 /* A queue of pending displaced stepping requests. One entry per
1150 thread that needs to do a displaced step. */
1151 struct displaced_step_request *step_request_queue;
1152
1153 /* If this is not null_ptid, this is the thread carrying out a
1154 displaced single-step in process PID. This thread's state will
1155 require fixing up once it has completed its step. */
1156 ptid_t step_ptid;
1157
1158 /* The architecture the thread had when we stepped it. */
1159 struct gdbarch *step_gdbarch;
1160
1161 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1162 for post-step cleanup. */
1163 struct displaced_step_closure *step_closure;
1164
1165 /* The address of the original instruction, and the copy we
1166 made. */
1167 CORE_ADDR step_original, step_copy;
1168
1169 /* Saved contents of copy area. */
1170 gdb_byte *step_saved_copy;
1171 };
1172
1173 /* The list of states of processes involved in displaced stepping
1174 presently. */
1175 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1176
1177 /* Get the displaced stepping state of process PID. */
1178
1179 static struct displaced_step_inferior_state *
1180 get_displaced_stepping_state (int pid)
1181 {
1182 struct displaced_step_inferior_state *state;
1183
1184 for (state = displaced_step_inferior_states;
1185 state != NULL;
1186 state = state->next)
1187 if (state->pid == pid)
1188 return state;
1189
1190 return NULL;
1191 }
1192
1193 /* Add a new displaced stepping state for process PID to the displaced
1194 stepping state list, or return a pointer to an already existing
1195 entry, if it already exists. Never returns NULL. */
1196
1197 static struct displaced_step_inferior_state *
1198 add_displaced_stepping_state (int pid)
1199 {
1200 struct displaced_step_inferior_state *state;
1201
1202 for (state = displaced_step_inferior_states;
1203 state != NULL;
1204 state = state->next)
1205 if (state->pid == pid)
1206 return state;
1207
1208 state = xcalloc (1, sizeof (*state));
1209 state->pid = pid;
1210 state->next = displaced_step_inferior_states;
1211 displaced_step_inferior_states = state;
1212
1213 return state;
1214 }
1215
1216 /* If the inferior is in displaced stepping, and ADDR equals the starting address
1217 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1218 return NULL. */
1219
1220 struct displaced_step_closure*
1221 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1222 {
1223 struct displaced_step_inferior_state *displaced
1224 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1225
1226 /* Used when checking the mode of the displaced instruction in the copy area. */
1227 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1228 && (displaced->step_copy == addr))
1229 return displaced->step_closure;
1230
1231 return NULL;
1232 }
1233
1234 /* Remove the displaced stepping state of process PID. */
1235
1236 static void
1237 remove_displaced_stepping_state (int pid)
1238 {
1239 struct displaced_step_inferior_state *it, **prev_next_p;
1240
1241 gdb_assert (pid != 0);
1242
1243 it = displaced_step_inferior_states;
1244 prev_next_p = &displaced_step_inferior_states;
1245 while (it)
1246 {
1247 if (it->pid == pid)
1248 {
1249 *prev_next_p = it->next;
1250 xfree (it);
1251 return;
1252 }
1253
1254 prev_next_p = &it->next;
1255 it = *prev_next_p;
1256 }
1257 }
1258
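/* Called for an exiting inferior INF: discard any displaced stepping
   state for that process.  */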
1259 static void
1260 infrun_inferior_exit (struct inferior *inf)
1261 {
1262 remove_displaced_stepping_state (inf->pid);
1263 }
1264
1265 /* If ON, and the architecture supports it, GDB will use displaced
1266 stepping to step over breakpoints. If OFF, or if the architecture
1267 doesn't support it, GDB will instead use the traditional
1268 hold-and-step approach. If AUTO (which is the default), GDB will
1269 decide which technique to use to step over breakpoints depending on
1270 which of all-stop or non-stop mode is active --- displaced stepping
1271 in non-stop mode; hold-and-step in all-stop mode. */
1272
1273 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1274
1275 static void
1276 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1277 struct cmd_list_element *c,
1278 const char *value)
1279 {
1280 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1281 fprintf_filtered (file,
1282 _("Debugger's willingness to use displaced stepping "
1283 "to step over breakpoints is %s (currently %s).\n"),
1284 value, non_stop ? "on" : "off");
1285 else
1286 fprintf_filtered (file,
1287 _("Debugger's willingness to use displaced stepping "
1288 "to step over breakpoints is %s.\n"), value);
1289 }
1290
1291 /* Return non-zero if displaced stepping can/should be used to step
1292 over breakpoints. */
1293
1294 static int
1295 use_displaced_stepping (struct gdbarch *gdbarch)
1296 {
1297 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1298 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1299 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1300 && find_record_target () == NULL);
1301 }
1302
1303 /* Clean out any stray displaced stepping state. */
1304 static void
1305 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1306 {
1307 /* Indicate that there is no cleanup pending. */
1308 displaced->step_ptid = null_ptid;
1309
1310 if (displaced->step_closure)
1311 {
1312 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1313 displaced->step_closure);
1314 displaced->step_closure = NULL;
1315 }
1316 }
1317
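/* A cleanup callback that clears the displaced stepping state passed
   in ARG.  */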
1318 static void
1319 displaced_step_clear_cleanup (void *arg)
1320 {
1321 struct displaced_step_inferior_state *state = arg;
1322
1323 displaced_step_clear (state);
1324 }
1325
1326 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1327 void
1328 displaced_step_dump_bytes (struct ui_file *file,
1329 const gdb_byte *buf,
1330 size_t len)
1331 {
1332 int i;
1333
1334 for (i = 0; i < len; i++)
1335 fprintf_unfiltered (file, "%02x ", buf[i]);
1336 fputs_unfiltered ("\n", file);
1337 }
1338
1339 /* Prepare to single-step, using displaced stepping.
1340
1341 Note that we cannot use displaced stepping when we have a signal to
1342 deliver. If we have a signal to deliver and an instruction to step
1343 over, then after the step, there will be no indication from the
1344 target whether the thread entered a signal handler or ignored the
1345 signal and stepped over the instruction successfully --- both cases
1346 result in a simple SIGTRAP. In the first case we mustn't do a
1347 fixup, and in the second case we must --- but we can't tell which.
1348 Comments in the code for 'random signals' in handle_inferior_event
1349 explain how we handle this case instead.
1350
1351 Returns 1 if preparing was successful -- this thread is going to be
1352 stepped now; or 0 if displaced stepping this thread got queued. */
1353 static int
1354 displaced_step_prepare (ptid_t ptid)
1355 {
1356 struct cleanup *old_cleanups, *ignore_cleanups;
1357 struct thread_info *tp = find_thread_ptid (ptid);
1358 struct regcache *regcache = get_thread_regcache (ptid);
1359 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1360 CORE_ADDR original, copy;
1361 ULONGEST len;
1362 struct displaced_step_closure *closure;
1363 struct displaced_step_inferior_state *displaced;
1364 int status;
1365
1366 /* We should never reach this function if the architecture does not
1367 support displaced stepping. */
1368 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1369
1370 /* Disable range stepping while executing in the scratch pad. We
1371 want a single-step even if executing the displaced instruction in
1372 the scratch buffer lands within the stepping range (e.g., a
1373 jump/branch). */
1374 tp->control.may_range_step = 0;
1375
1376 /* We have to displaced step one thread at a time, as we only have
1377 access to a single scratch space per inferior. */
1378
1379 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1380
1381 if (!ptid_equal (displaced->step_ptid, null_ptid))
1382 {
1383 /* Already waiting for a displaced step to finish. Defer this
1384 request and place in queue. */
1385 struct displaced_step_request *req, *new_req;
1386
1387 if (debug_displaced)
1388 fprintf_unfiltered (gdb_stdlog,
1389 "displaced: defering step of %s\n",
1390 target_pid_to_str (ptid));
1391
1392 new_req = xmalloc (sizeof (*new_req));
1393 new_req->ptid = ptid;
1394 new_req->next = NULL;
1395
1396 if (displaced->step_request_queue)
1397 {
1398 for (req = displaced->step_request_queue;
1399 req && req->next;
1400 req = req->next)
1401 ;
1402 req->next = new_req;
1403 }
1404 else
1405 displaced->step_request_queue = new_req;
1406
1407 return 0;
1408 }
1409 else
1410 {
1411 if (debug_displaced)
1412 fprintf_unfiltered (gdb_stdlog,
1413 "displaced: stepping %s now\n",
1414 target_pid_to_str (ptid));
1415 }
1416
1417 displaced_step_clear (displaced);
1418
1419 old_cleanups = save_inferior_ptid ();
1420 inferior_ptid = ptid;
1421
1422 original = regcache_read_pc (regcache);
1423
1424 copy = gdbarch_displaced_step_location (gdbarch);
1425 len = gdbarch_max_insn_length (gdbarch);
1426
1427 /* Save the original contents of the copy area. */
1428 displaced->step_saved_copy = xmalloc (len);
1429 ignore_cleanups = make_cleanup (free_current_contents,
1430 &displaced->step_saved_copy);
1431 status = target_read_memory (copy, displaced->step_saved_copy, len);
1432 if (status != 0)
1433 throw_error (MEMORY_ERROR,
1434 _("Error accessing memory address %s (%s) for "
1435 "displaced-stepping scratch space."),
1436 paddress (gdbarch, copy), safe_strerror (status));
1437 if (debug_displaced)
1438 {
1439 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1440 paddress (gdbarch, copy));
1441 displaced_step_dump_bytes (gdb_stdlog,
1442 displaced->step_saved_copy,
1443 len);
1444 };
1445
1446 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1447 original, copy, regcache);
1448
1449 /* We don't support the fully-simulated case at present. */
1450 gdb_assert (closure);
1451
1452 /* Save the information we need to fix things up if the step
1453 succeeds. */
1454 displaced->step_ptid = ptid;
1455 displaced->step_gdbarch = gdbarch;
1456 displaced->step_closure = closure;
1457 displaced->step_original = original;
1458 displaced->step_copy = copy;
1459
1460 make_cleanup (displaced_step_clear_cleanup, displaced);
1461
1462 /* Resume execution at the copy. */
1463 regcache_write_pc (regcache, copy);
1464
1465 discard_cleanups (ignore_cleanups);
1466
1467 do_cleanups (old_cleanups);
1468
1469 if (debug_displaced)
1470 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1471 paddress (gdbarch, copy));
1472
1473 return 1;
1474 }
1475
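/* Write LEN bytes from MYADDR to inferior memory at MEMADDR, on
   behalf of thread PTID.  */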
1476 static void
1477 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1478 const gdb_byte *myaddr, int len)
1479 {
1480 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1481
1482 inferior_ptid = ptid;
1483 write_memory (memaddr, myaddr, len);
1484 do_cleanups (ptid_cleanup);
1485 }
1486
1487 /* Restore the contents of the copy area for thread PTID. */
1488
1489 static void
1490 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1491 ptid_t ptid)
1492 {
1493 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1494
1495 write_memory_ptid (ptid, displaced->step_copy,
1496 displaced->step_saved_copy, len);
1497 if (debug_displaced)
1498 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1499 target_pid_to_str (ptid),
1500 paddress (displaced->step_gdbarch,
1501 displaced->step_copy));
1502 }
1503
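/* Handle the completion of a displaced step: if thread EVENT_PTID
   was doing a displaced step when it stopped with SIGNAL, restore the
   scratch area and fix up the thread's state, then start the next
   queued displaced-step request, if any.  */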
1504 static void
1505 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1506 {
1507 struct cleanup *old_cleanups;
1508 struct displaced_step_inferior_state *displaced
1509 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1510
1511 /* Was any thread of this process doing a displaced step? */
1512 if (displaced == NULL)
1513 return;
1514
1515 /* Was this event for the pid we displaced? */
1516 if (ptid_equal (displaced->step_ptid, null_ptid)
1517 || ! ptid_equal (displaced->step_ptid, event_ptid))
1518 return;
1519
1520 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1521
1522 displaced_step_restore (displaced, displaced->step_ptid);
1523
1524 /* Did the instruction complete successfully? */
1525 if (signal == GDB_SIGNAL_TRAP)
1526 {
1527 /* Fix up the resulting state. */
1528 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1529 displaced->step_closure,
1530 displaced->step_original,
1531 displaced->step_copy,
1532 get_thread_regcache (displaced->step_ptid));
1533 }
1534 else
1535 {
1536 /* Since the instruction didn't complete, all we can do is
1537 relocate the PC. */
1538 struct regcache *regcache = get_thread_regcache (event_ptid);
1539 CORE_ADDR pc = regcache_read_pc (regcache);
1540
1541 pc = displaced->step_original + (pc - displaced->step_copy);
1542 regcache_write_pc (regcache, pc);
1543 }
1544
1545 do_cleanups (old_cleanups);
1546
1547 displaced->step_ptid = null_ptid;
1548
1549 /* Are there any pending displaced stepping requests? If so, run
1550 one now. Leave the state object around, since we're likely to
1551 need it again soon. */
1552 while (displaced->step_request_queue)
1553 {
1554 struct displaced_step_request *head;
1555 ptid_t ptid;
1556 struct regcache *regcache;
1557 struct gdbarch *gdbarch;
1558 CORE_ADDR actual_pc;
1559 struct address_space *aspace;
1560
1561 head = displaced->step_request_queue;
1562 ptid = head->ptid;
1563 displaced->step_request_queue = head->next;
1564 xfree (head);
1565
1566 context_switch (ptid);
1567
1568 regcache = get_thread_regcache (ptid);
1569 actual_pc = regcache_read_pc (regcache);
1570 aspace = get_regcache_aspace (regcache);
1571
1572 if (breakpoint_here_p (aspace, actual_pc))
1573 {
1574 if (debug_displaced)
1575 fprintf_unfiltered (gdb_stdlog,
1576 "displaced: stepping queued %s now\n",
1577 target_pid_to_str (ptid));
1578
1579 displaced_step_prepare (ptid);
1580
1581 gdbarch = get_regcache_arch (regcache);
1582
1583 if (debug_displaced)
1584 {
1585 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1586 gdb_byte buf[4];
1587
1588 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1589 paddress (gdbarch, actual_pc));
1590 read_memory (actual_pc, buf, sizeof (buf));
1591 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1592 }
1593
1594 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1595 displaced->step_closure))
1596 target_resume (ptid, 1, GDB_SIGNAL_0);
1597 else
1598 target_resume (ptid, 0, GDB_SIGNAL_0);
1599
1600 /* Done, we're stepping a thread. */
1601 break;
1602 }
1603 else
1604 {
1605 int step;
1606 struct thread_info *tp = inferior_thread ();
1607
1608 /* The breakpoint we were sitting under has since been
1609 removed. */
1610 tp->control.trap_expected = 0;
1611
1612 /* Go back to what we were trying to do. */
1613 step = currently_stepping (tp);
1614
1615 if (debug_displaced)
1616 fprintf_unfiltered (gdb_stdlog,
1617 "displaced: breakpoint is gone: %s, step(%d)\n",
1618 target_pid_to_str (tp->ptid), step);
1619
1620 target_resume (ptid, step, GDB_SIGNAL_0);
1621 tp->suspend.stop_signal = GDB_SIGNAL_0;
1622
1623 /* This request was discarded. See if there's any other
1624 thread waiting for its turn. */
1625 }
1626 }
1627 }
1628
1629 /* Update global variables holding ptids to hold NEW_PTID if they were
1630 holding OLD_PTID. */
1631 static void
1632 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1633 {
1634 struct displaced_step_request *it;
1635 struct displaced_step_inferior_state *displaced;
1636
1637 if (ptid_equal (inferior_ptid, old_ptid))
1638 inferior_ptid = new_ptid;
1639
1640 if (ptid_equal (singlestep_ptid, old_ptid))
1641 singlestep_ptid = new_ptid;
1642
1643 for (displaced = displaced_step_inferior_states;
1644 displaced;
1645 displaced = displaced->next)
1646 {
1647 if (ptid_equal (displaced->step_ptid, old_ptid))
1648 displaced->step_ptid = new_ptid;
1649
1650 for (it = displaced->step_request_queue; it; it = it->next)
1651 if (ptid_equal (it->ptid, old_ptid))
1652 it->ptid = new_ptid;
1653 }
1654 }
1655
1656 \f
1657 /* Resuming. */
1658
1659 /* Things to clean up if we QUIT out of resume (). */
1660 static void
1661 resume_cleanups (void *ignore)
1662 {
1663 normal_stop ();
1664 }
1665
1666 static const char schedlock_off[] = "off";
1667 static const char schedlock_on[] = "on";
1668 static const char schedlock_step[] = "step";
1669 static const char *const scheduler_enums[] = {
1670 schedlock_off,
1671 schedlock_on,
1672 schedlock_step,
1673 NULL
1674 };
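/* The current scheduler locking mode.  */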
1675 static const char *scheduler_mode = schedlock_off;
1676 static void
1677 show_scheduler_mode (struct ui_file *file, int from_tty,
1678 struct cmd_list_element *c, const char *value)
1679 {
1680 fprintf_filtered (file,
1681 _("Mode for locking scheduler "
1682 "during execution is \"%s\".\n"),
1683 value);
1684 }
1685
1686 static void
1687 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1688 {
1689 if (!target_can_lock_scheduler)
1690 {
1691 scheduler_mode = schedlock_off;
1692 error (_("Target '%s' cannot support this command."), target_shortname);
1693 }
1694 }
1695
1696 /* True if execution commands resume all threads of all processes by
1697 default; otherwise, resume only threads of the current inferior
1698 process. */
1699 int sched_multi = 0;
1700
1701 /* Try to setup for software single stepping over the specified location.
1702 Return 1 if target_resume() should use hardware single step.
1703
1704 GDBARCH the current gdbarch.
1705 PC the location to step over. */
1706
1707 static int
1708 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1709 {
1710 int hw_step = 1;
1711
1712 if (execution_direction == EXEC_FORWARD
1713 && gdbarch_software_single_step_p (gdbarch)
1714 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1715 {
1716 hw_step = 0;
1717 /* Do not pull these breakpoints until after a `wait' in
1718 `wait_for_inferior'. */
1719 singlestep_breakpoints_inserted_p = 1;
1720 singlestep_ptid = inferior_ptid;
1721 singlestep_pc = pc;
1722 }
1723 return hw_step;
1724 }
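
/* Note: maybe_software_singlestep is used from resume below; when displaced
   stepping is not in use, a requested hardware single step is turned into
   software single-step breakpoints whenever the architecture provides them,
   and the caller then resumes without the hardware-step flag.  */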
1725
1726 /* Return a ptid representing the set of threads that we will proceed,
1727 from the perspective of the user/frontend. We may actually resume
1728 fewer threads at first, e.g., if a thread is stopped at a
1729 breakpoint that needs stepping-off, but that should not be visible
1730 to the user/frontend, and neither should the frontend/user be
1731 allowed to proceed any of the threads that happen to be stopped for
1732 internal run control handling, if a previous command wanted them
1733 resumed. */
1734
1735 ptid_t
1736 user_visible_resume_ptid (int step)
1737 {
1738 /* By default, resume all threads of all processes. */
1739 ptid_t resume_ptid = RESUME_ALL;
1740
1741 /* Maybe resume only all threads of the current process. */
1742 if (!sched_multi && target_supports_multi_process ())
1743 {
1744 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1745 }
1746
1747 /* Maybe resume a single thread after all. */
1748 if (non_stop)
1749 {
1750 /* With non-stop mode on, threads are always handled
1751 individually. */
1752 resume_ptid = inferior_ptid;
1753 }
1754 else if ((scheduler_mode == schedlock_on)
1755 || (scheduler_mode == schedlock_step
1756 && (step || singlestep_breakpoints_inserted_p)))
1757 {
1758 /* User-settable 'scheduler' mode requires solo thread resume. */
1759 resume_ptid = inferior_ptid;
1760 }
1761
1762 return resume_ptid;
1763 }
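
/* For example: in all-stop mode with "set scheduler-locking on", an
   execution command issued while thread 2 is selected yields just that
   thread's ptid, so only thread 2 is shown as resumed; with the default
   "off" on a target that supports multiple processes, the result covers
   every thread of the current inferior, or of all inferiors if "set
   schedule-multiple on" is in effect.  */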
1764
1765 /* Resume the inferior, but allow a QUIT. This is useful if the user
1766 wants to interrupt some lengthy single-stepping operation
1767 (for child processes, the SIGINT goes to the inferior, and so
1768 we get a SIGINT random_signal, but for remote debugging and perhaps
1769 other targets, that's not true).
1770
1771 STEP nonzero if we should step (zero to continue instead).
1772 SIG is the signal to give the inferior (zero for none). */
1773 void
1774 resume (int step, enum gdb_signal sig)
1775 {
1776 int should_resume = 1;
1777 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1778 struct regcache *regcache = get_current_regcache ();
1779 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1780 struct thread_info *tp = inferior_thread ();
1781 CORE_ADDR pc = regcache_read_pc (regcache);
1782 struct address_space *aspace = get_regcache_aspace (regcache);
1783
1784 QUIT;
1785
1786 if (current_inferior ()->waiting_for_vfork_done)
1787 {
1788 /* Don't try to single-step a vfork parent that is waiting for
1789 the child to get out of the shared memory region (by exec'ing
1790 or exiting). This is particularly important on software
1791 single-step archs, as the child process would trip on the
1792 software single step breakpoint inserted for the parent
1793 process. Since the parent will not actually execute any
1794 instruction until the child is out of the shared region (such
1795 are vfork's semantics), it is safe to simply continue it.
1796 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1797 the parent, and tell it to `keep_going', which automatically
1798 re-sets it to stepping. */
1799 if (debug_infrun)
1800 fprintf_unfiltered (gdb_stdlog,
1801 "infrun: resume : clear step\n");
1802 step = 0;
1803 }
1804
1805 if (debug_infrun)
1806 fprintf_unfiltered (gdb_stdlog,
1807 "infrun: resume (step=%d, signal=%s), "
1808 "trap_expected=%d, current thread [%s] at %s\n",
1809 step, gdb_signal_to_symbol_string (sig),
1810 tp->control.trap_expected,
1811 target_pid_to_str (inferior_ptid),
1812 paddress (gdbarch, pc));
1813
1814 /* Normally, by the time we reach `resume', the breakpoints are either
1815 removed or inserted, as appropriate. The exception is if we're sitting
1816 at a permanent breakpoint; we need to step over it, but permanent
1817 breakpoints can't be removed. So we have to test for it here. */
1818 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1819 {
1820 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1821 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1822 else
1823 error (_("\
1824 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1825 how to step past a permanent breakpoint on this architecture. Try using\n\
1826 a command like `return' or `jump' to continue execution."));
1827 }
1828
1829 /* If we have a breakpoint to step over, make sure to do a single
1830 step only. Same if we have software watchpoints. */
1831 if (tp->control.trap_expected || bpstat_should_step ())
1832 tp->control.may_range_step = 0;
1833
1834 /* If enabled, step over breakpoints by executing a copy of the
1835 instruction at a different address.
1836
1837 We can't use displaced stepping when we have a signal to deliver;
1838 the comments for displaced_step_prepare explain why. The
1839 comments in the handle_inferior event for dealing with 'random
1840 signals' explain what we do instead.
1841
1842 We can't use displaced stepping when we are waiting for a vfork_done
1843 event; displaced stepping breaks the vfork child in the same way that
1844 a single-step software breakpoint would. */
1845 if (use_displaced_stepping (gdbarch)
1846 && (tp->control.trap_expected
1847 || (step && gdbarch_software_single_step_p (gdbarch)))
1848 && sig == GDB_SIGNAL_0
1849 && !current_inferior ()->waiting_for_vfork_done)
1850 {
1851 struct displaced_step_inferior_state *displaced;
1852
1853 if (!displaced_step_prepare (inferior_ptid))
1854 {
1855 /* Got placed in displaced stepping queue. Will be resumed
1856 later when all the currently queued displaced stepping
1857 requests finish. The thread is not executing at this point,
1858 and the call to set_executing will be made later. But we
1859 need to call set_running here, since from the frontend's point of view,
1860 the thread is running. */
1861 set_running (inferior_ptid, 1);
1862 discard_cleanups (old_cleanups);
1863 return;
1864 }
1865
1866 /* Update pc to reflect the new address from which we will execute
1867 instructions due to displaced stepping. */
1868 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1869
1870 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1871 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1872 displaced->step_closure);
1873 }
1874
1875 /* Do we need to do it the hard way, w/temp breakpoints? */
1876 else if (step)
1877 step = maybe_software_singlestep (gdbarch, pc);
1878
1879 /* Currently, our software single-step implementation leads to different
1880 results than hardware single-stepping in one situation: when stepping
1881 into delivering a signal which has an associated signal handler,
1882 hardware single-step will stop at the first instruction of the handler,
1883 while software single-step will simply skip execution of the handler.
1884
1885 For now, this difference in behavior is accepted since there is no
1886 easy way to actually implement single-stepping into a signal handler
1887 without kernel support.
1888
1889 However, there is one scenario where this difference leads to follow-on
1890 problems: if we're stepping off a breakpoint by removing all breakpoints
1891 and then single-stepping. In this case, the software single-step
1892 behavior means that even if there is a *breakpoint* in the signal
1893 handler, GDB still would not stop.
1894
1895 Fortunately, we can at least fix this particular issue. We detect
1896 here the case where we are about to deliver a signal while software
1897 single-stepping with breakpoints removed. In this situation, we
1898 revert the decisions to remove all breakpoints and insert single-
1899 step breakpoints, and instead we install a step-resume breakpoint
1900 at the current address, deliver the signal without stepping, and
1901 once we arrive back at the step-resume breakpoint, actually step
1902 over the breakpoint we originally wanted to step over. */
1903 if (singlestep_breakpoints_inserted_p
1904 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1905 {
1906 /* If we have nested signals or a pending signal is delivered
1907 immediately after a handler returns, we might already have
1908 a step-resume breakpoint set on the earlier handler. We cannot
1909 set another step-resume breakpoint; just continue on until the
1910 original breakpoint is hit. */
1911 if (tp->control.step_resume_breakpoint == NULL)
1912 {
1913 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1914 tp->step_after_step_resume_breakpoint = 1;
1915 }
1916
1917 remove_single_step_breakpoints ();
1918 singlestep_breakpoints_inserted_p = 0;
1919
1920 clear_step_over_info ();
1921 tp->control.trap_expected = 0;
1922
1923 insert_breakpoints ();
1924 }
1925
1926 if (should_resume)
1927 {
1928 ptid_t resume_ptid;
1929
1930 /* If STEP is set, it's a request to use hardware stepping
1931 facilities. But in that case, we should never
1932 use singlestep breakpoint. */
1933 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1934
1935 /* Decide the set of threads to ask the target to resume. Start
1936 by assuming everything will be resumed, then narrow the set
1937 by applying increasingly restrictive conditions. */
1938 resume_ptid = user_visible_resume_ptid (step);
1939
1940 /* Maybe resume a single thread after all. */
1941 if ((step || singlestep_breakpoints_inserted_p)
1942 && tp->control.trap_expected)
1943 {
1944 /* We're allowing a thread to run past a breakpoint it has
1945 hit, by single-stepping the thread with the breakpoint
1946 removed. In which case, we need to single-step only this
1947 thread, and keep others stopped, as they can miss this
1948 breakpoint if allowed to run. */
1949 resume_ptid = inferior_ptid;
1950 }
1951
1952 if (gdbarch_cannot_step_breakpoint (gdbarch))
1953 {
1954 /* Most targets can step a breakpoint instruction, thus
1955 executing it normally. But if this one cannot, just
1956 continue and we will hit it anyway. */
1957 if (step && breakpoint_inserted_here_p (aspace, pc))
1958 step = 0;
1959 }
1960
1961 if (debug_displaced
1962 && use_displaced_stepping (gdbarch)
1963 && tp->control.trap_expected)
1964 {
1965 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1966 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1967 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1968 gdb_byte buf[4];
1969
1970 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1971 paddress (resume_gdbarch, actual_pc));
1972 read_memory (actual_pc, buf, sizeof (buf));
1973 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1974 }
1975
1976 if (tp->control.may_range_step)
1977 {
1978 /* If we're resuming a thread with the PC out of the step
1979 range, then we're doing some nested/finer run control
1980 operation, like stepping the thread out of the dynamic
1981 linker or the displaced stepping scratch pad. We
1982 shouldn't have allowed a range step then. */
1983 gdb_assert (pc_in_thread_step_range (pc, tp));
1984 }
1985
1986 /* Install inferior's terminal modes. */
1987 target_terminal_inferior ();
1988
1989 /* Avoid confusing the next resume, if the next stop/resume
1990 happens to apply to another thread. */
1991 tp->suspend.stop_signal = GDB_SIGNAL_0;
1992
1993 /* Advise target which signals may be handled silently. If we have
1994 removed breakpoints because we are stepping over one (which can
1995 happen only if we are not using displaced stepping), we need to
1996 receive all signals to avoid accidentally skipping a breakpoint
1997 during execution of a signal handler. */
1998 if ((step || singlestep_breakpoints_inserted_p)
1999 && tp->control.trap_expected
2000 && !use_displaced_stepping (gdbarch))
2001 target_pass_signals (0, NULL);
2002 else
2003 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2004
2005 target_resume (resume_ptid, step, sig);
2006 }
2007
2008 discard_cleanups (old_cleanups);
2009 }
2010 \f
2011 /* Proceeding. */
2012
2013 /* Clear out all variables saying what to do when inferior is continued.
2014 First do this, then set the ones you want, then call `proceed'. */
2015
2016 static void
2017 clear_proceed_status_thread (struct thread_info *tp)
2018 {
2019 if (debug_infrun)
2020 fprintf_unfiltered (gdb_stdlog,
2021 "infrun: clear_proceed_status_thread (%s)\n",
2022 target_pid_to_str (tp->ptid));
2023
2024 tp->control.trap_expected = 0;
2025 tp->control.step_range_start = 0;
2026 tp->control.step_range_end = 0;
2027 tp->control.may_range_step = 0;
2028 tp->control.step_frame_id = null_frame_id;
2029 tp->control.step_stack_frame_id = null_frame_id;
2030 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2031 tp->stop_requested = 0;
2032
2033 tp->control.stop_step = 0;
2034
2035 tp->control.proceed_to_finish = 0;
2036
2037 /* Discard any remaining commands or status from previous stop. */
2038 bpstat_clear (&tp->control.stop_bpstat);
2039 }
2040
2041 static int
2042 clear_proceed_status_callback (struct thread_info *tp, void *data)
2043 {
2044 if (is_exited (tp->ptid))
2045 return 0;
2046
2047 clear_proceed_status_thread (tp);
2048 return 0;
2049 }
2050
2051 void
2052 clear_proceed_status (void)
2053 {
2054 if (!non_stop)
2055 {
2056 /* In all-stop mode, delete the per-thread status of all
2057 threads; even if inferior_ptid is null_ptid, there may be
2058 threads on the list. E.g., we may be launching a new
2059 process, while selecting the executable. */
2060 iterate_over_threads (clear_proceed_status_callback, NULL);
2061 }
2062
2063 if (!ptid_equal (inferior_ptid, null_ptid))
2064 {
2065 struct inferior *inferior;
2066
2067 if (non_stop)
2068 {
2069 /* If in non-stop mode, only delete the per-thread status of
2070 the current thread. */
2071 clear_proceed_status_thread (inferior_thread ());
2072 }
2073
2074 inferior = current_inferior ();
2075 inferior->control.stop_soon = NO_STOP_QUIETLY;
2076 }
2077
2078 stop_after_trap = 0;
2079
2080 clear_step_over_info ();
2081
2082 observer_notify_about_to_proceed ();
2083
2084 if (stop_registers)
2085 {
2086 regcache_xfree (stop_registers);
2087 stop_registers = NULL;
2088 }
2089 }
2090
2091 /* Check the current thread against the thread that reported the most recent
2092 event. If a step-over is required return TRUE and set the current thread
2093 to the old thread. Otherwise return FALSE.
2094
2095 This should be suitable for any targets that support threads. */
2096
2097 static int
2098 prepare_to_proceed (int step)
2099 {
2100 ptid_t wait_ptid;
2101 struct target_waitstatus wait_status;
2102 int schedlock_enabled;
2103
2104 /* With non-stop mode on, threads are always handled individually. */
2105 gdb_assert (! non_stop);
2106
2107 /* Get the last target status returned by target_wait(). */
2108 get_last_target_status (&wait_ptid, &wait_status);
2109
2110 /* Make sure we were stopped at a breakpoint. */
2111 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2112 || (wait_status.value.sig != GDB_SIGNAL_TRAP
2113 && wait_status.value.sig != GDB_SIGNAL_ILL
2114 && wait_status.value.sig != GDB_SIGNAL_SEGV
2115 && wait_status.value.sig != GDB_SIGNAL_EMT))
2116 {
2117 return 0;
2118 }
2119
2120 schedlock_enabled = (scheduler_mode == schedlock_on
2121 || (scheduler_mode == schedlock_step
2122 && step));
2123
2124 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2125 if (schedlock_enabled)
2126 return 0;
2127
2128 /* Don't switch over if we're about to resume some other process
2129 other than WAIT_PTID's, and schedule-multiple is off. */
2130 if (!sched_multi
2131 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2132 return 0;
2133
2134 /* We have switched away from WAIT_PTID since the last event. */
2135 if (!ptid_equal (wait_ptid, minus_one_ptid)
2136 && !ptid_equal (inferior_ptid, wait_ptid))
2137 {
2138 struct regcache *regcache = get_thread_regcache (wait_ptid);
2139
2140 if (breakpoint_here_p (get_regcache_aspace (regcache),
2141 regcache_read_pc (regcache)))
2142 {
2143 /* Switch back to the WAIT_PTID thread. */
2144 switch_to_thread (wait_ptid);
2145
2146 if (debug_infrun)
2147 fprintf_unfiltered (gdb_stdlog,
2148 "infrun: prepare_to_proceed (step=%d), "
2149 "switched to [%s]\n",
2150 step, target_pid_to_str (inferior_ptid));
2151
2152 /* We return 1 to indicate that there is a breakpoint here,
2153 so we need to step over it before continuing to avoid
2154 hitting it straight away. */
2155 return 1;
2156 }
2157 }
2158
2159 return 0;
2160 }
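
/* For example: thread 1 reports a breakpoint stop, the user selects
   thread 2 and types "continue".  prepare_to_proceed switches back to
   thread 1 and returns 1, so proceed first steps thread 1 over its
   breakpoint rather than immediately reporting a spurious re-hit.  */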
2161
2162 /* Basic routine for continuing the program in various fashions.
2163
2164 ADDR is the address to resume at, or -1 for resume where stopped.
2165 SIGGNAL is the signal to give it, or 0 for none,
2166 or -1 for act according to how it stopped.
2167 STEP is nonzero if we should trap after one instruction.
2168 -1 means return after that and print nothing.
2169 You should probably set various step_... variables
2170 before calling here, if you are stepping.
2171
2172 You should call clear_proceed_status before calling proceed. */
2173
2174 void
2175 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2176 {
2177 struct regcache *regcache;
2178 struct gdbarch *gdbarch;
2179 struct thread_info *tp;
2180 CORE_ADDR pc;
2181 struct address_space *aspace;
2182 /* GDB may force the inferior to step for various reasons. */
2183 int force_step = 0;
2184
2185 /* If we're stopped at a fork/vfork, follow the branch set by the
2186 "set follow-fork-mode" command; otherwise, we'll just proceed
2187 resuming the current thread. */
2188 if (!follow_fork ())
2189 {
2190 /* The target for some reason decided not to resume. */
2191 normal_stop ();
2192 if (target_can_async_p ())
2193 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2194 return;
2195 }
2196
2197 /* We'll update this if & when we switch to a new thread. */
2198 previous_inferior_ptid = inferior_ptid;
2199
2200 regcache = get_current_regcache ();
2201 gdbarch = get_regcache_arch (regcache);
2202 aspace = get_regcache_aspace (regcache);
2203 pc = regcache_read_pc (regcache);
2204 tp = inferior_thread ();
2205
2206 if (step > 0)
2207 step_start_function = find_pc_function (pc);
2208 if (step < 0)
2209 stop_after_trap = 1;
2210
2211 if (addr == (CORE_ADDR) -1)
2212 {
2213 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2214 && execution_direction != EXEC_REVERSE)
2215 /* There is a breakpoint at the address we will resume at,
2216 step one instruction before inserting breakpoints so that
2217 we do not stop right away (and report a second hit at this
2218 breakpoint).
2219
2220 Note, we don't do this in reverse, because we won't
2221 actually be executing the breakpoint insn anyway.
2222 We'll be (un-)executing the previous instruction. */
2223
2224 force_step = 1;
2225 else if (gdbarch_single_step_through_delay_p (gdbarch)
2226 && gdbarch_single_step_through_delay (gdbarch,
2227 get_current_frame ()))
2228 /* We stepped onto an instruction that needs to be stepped
2229 again before re-inserting the breakpoint, do so. */
2230 force_step = 1;
2231 }
2232 else
2233 {
2234 regcache_write_pc (regcache, addr);
2235 }
2236
2237 if (debug_infrun)
2238 fprintf_unfiltered (gdb_stdlog,
2239 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2240 paddress (gdbarch, addr),
2241 gdb_signal_to_symbol_string (siggnal), step);
2242
2243 if (non_stop)
2244 /* In non-stop, each thread is handled individually. The context
2245 must already be set to the right thread here. */
2246 ;
2247 else
2248 {
2249 /* In a multi-threaded task we may select another thread and
2250 then continue or step.
2251
2252 But if the old thread was stopped at a breakpoint, it will
2253 immediately cause another breakpoint stop without any
2254 execution (i.e. it will report a breakpoint hit incorrectly).
2255 So we must step over it first.
2256
2257 prepare_to_proceed checks the current thread against the
2258 thread that reported the most recent event. If a step-over
2259 is required it returns TRUE and sets the current thread to
2260 the old thread. */
2261
2262 /* Store the prev_pc for the stepping thread too, needed by
2263 switch_back_to_stepped_thread. */
2264 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2265
2266 if (prepare_to_proceed (step))
2267 {
2268 force_step = 1;
2269 /* The current thread changed. */
2270 tp = inferior_thread ();
2271 }
2272 }
2273
2274 if (force_step)
2275 tp->control.trap_expected = 1;
2276
2277 /* If we need to step over a breakpoint, and we're not using
2278 displaced stepping to do so, insert all breakpoints (watchpoints,
2279 etc.) but the one we're stepping over, step one instruction, and
2280 then re-insert the breakpoint when that step is finished. */
2281 if (tp->control.trap_expected && !use_displaced_stepping (gdbarch))
2282 {
2283 struct regcache *regcache = get_current_regcache ();
2284
2285 set_step_over_info (get_regcache_aspace (regcache),
2286 regcache_read_pc (regcache));
2287 }
2288 else
2289 clear_step_over_info ();
2290
2291 insert_breakpoints ();
2292
2293 if (!non_stop)
2294 {
2295 /* Pass the last stop signal to the thread we're resuming,
2296 irrespective of whether the current thread is the thread that
2297 got the last event or not. This was historically GDB's
2298 behaviour before keeping a stop_signal per thread. */
2299
2300 struct thread_info *last_thread;
2301 ptid_t last_ptid;
2302 struct target_waitstatus last_status;
2303
2304 get_last_target_status (&last_ptid, &last_status);
2305 if (!ptid_equal (inferior_ptid, last_ptid)
2306 && !ptid_equal (last_ptid, null_ptid)
2307 && !ptid_equal (last_ptid, minus_one_ptid))
2308 {
2309 last_thread = find_thread_ptid (last_ptid);
2310 if (last_thread)
2311 {
2312 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2313 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2314 }
2315 }
2316 }
2317
2318 if (siggnal != GDB_SIGNAL_DEFAULT)
2319 tp->suspend.stop_signal = siggnal;
2320 /* If this signal should not be seen by program,
2321 give it zero. Used for debugging signals. */
2322 else if (!signal_program[tp->suspend.stop_signal])
2323 tp->suspend.stop_signal = GDB_SIGNAL_0;
2324
2325 annotate_starting ();
2326
2327 /* Make sure that output from GDB appears before output from the
2328 inferior. */
2329 gdb_flush (gdb_stdout);
2330
2331 /* Refresh prev_pc value just prior to resuming. This used to be
2332 done in stop_stepping, however, setting prev_pc there did not handle
2333 scenarios such as inferior function calls or returning from
2334 a function via the return command. In those cases, the prev_pc
2335 value was not set properly for subsequent commands. The prev_pc value
2336 is used to initialize the starting line number in the ecs. With an
2337 invalid value, the gdb next command ends up stopping at the position
2338 represented by the next line table entry past our start position.
2339 On platforms that generate one line table entry per line, this
2340 is not a problem. However, on the ia64, the compiler generates
2341 extraneous line table entries that do not increase the line number.
2342 When we issue the gdb next command on the ia64 after an inferior call
2343 or a return command, we often end up a few instructions forward, still
2344 within the original line we started.
2345
2346 An attempt was made to refresh the prev_pc at the same time the
2347 execution_control_state is initialized (for instance, just before
2348 waiting for an inferior event). But this approach did not work
2349 because of platforms that use ptrace, where the pc register cannot
2350 be read unless the inferior is stopped. At that point, we are not
2351 guaranteed the inferior is stopped and so the regcache_read_pc() call
2352 can fail. Setting the prev_pc value here ensures the value is updated
2353 correctly when the inferior is stopped. */
2354 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2355
2356 /* Fill in with reasonable starting values. */
2357 init_thread_stepping_state (tp);
2358
2359 /* Reset to normal state. */
2360 init_infwait_state ();
2361
2362 /* Resume inferior. */
2363 resume (force_step || step || bpstat_should_step (),
2364 tp->suspend.stop_signal);
2365
2366 /* Wait for it to stop (if not standalone)
2367 and in any case decode why it stopped, and act accordingly. */
2368 /* Do this only if we are not using the event loop, or if the target
2369 does not support asynchronous execution. */
2370 if (!target_can_async_p ())
2371 {
2372 wait_for_inferior ();
2373 normal_stop ();
2374 }
2375 }
2376 \f
2377
2378 /* Start remote-debugging of a machine over a serial link. */
2379
2380 void
2381 start_remote (int from_tty)
2382 {
2383 struct inferior *inferior;
2384
2385 inferior = current_inferior ();
2386 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2387
2388 /* Always go on waiting for the target, regardless of the mode. */
2389 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2390 indicate to wait_for_inferior that a target should timeout if
2391 nothing is returned (instead of just blocking). Because of this,
2392 targets expecting an immediate response need to, internally, set
2393 things up so that the target_wait() is forced to eventually
2394 timeout. */
2395 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2396 differentiate to its caller what the state of the target is after
2397 the initial open has been performed. Here we're assuming that
2398 the target has stopped. It should be possible to eventually have
2399 target_open() return to the caller an indication that the target
2400 is currently running and GDB state should be set to the same as
2401 for an async run. */
2402 wait_for_inferior ();
2403
2404 /* Now that the inferior has stopped, do any bookkeeping like
2405 loading shared libraries. We want to do this before normal_stop,
2406 so that the displayed frame is up to date. */
2407 post_create_inferior (&current_target, from_tty);
2408
2409 normal_stop ();
2410 }
2411
2412 /* Initialize static vars when a new inferior begins. */
2413
2414 void
2415 init_wait_for_inferior (void)
2416 {
2417 /* These are meaningless until the first time through wait_for_inferior. */
2418
2419 breakpoint_init_inferior (inf_starting);
2420
2421 clear_proceed_status ();
2422
2423 target_last_wait_ptid = minus_one_ptid;
2424
2425 previous_inferior_ptid = inferior_ptid;
2426 init_infwait_state ();
2427
2428 /* Discard any skipped inlined frames. */
2429 clear_inline_frame_state (minus_one_ptid);
2430
2431 singlestep_ptid = null_ptid;
2432 singlestep_pc = 0;
2433 }
2434
2435 \f
2436 /* This enum encodes possible reasons for doing a target_wait, so that
2437 wfi can call target_wait in one place. (Ultimately the call will be
2438 moved out of the infinite loop entirely.) */
2439
2440 enum infwait_states
2441 {
2442 infwait_normal_state,
2443 infwait_step_watch_state,
2444 infwait_nonstep_watch_state
2445 };
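
/* When an event arrives while we are in either of the watch states,
   handle_inferior_event below records that the thread was stepped after
   stopping for a watchpoint (ecs->stepped_after_stopped_by_watchpoint);
   see the infwait_state switch there.  */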
2446
2447 /* The PTID we'll do a target_wait on. */
2448 ptid_t waiton_ptid;
2449
2450 /* Current inferior wait state. */
2451 static enum infwait_states infwait_state;
2452
2453 /* Data to be passed around while handling an event. This data is
2454 discarded between events. */
2455 struct execution_control_state
2456 {
2457 ptid_t ptid;
2458 /* The thread that got the event, if this was a thread event; NULL
2459 otherwise. */
2460 struct thread_info *event_thread;
2461
2462 struct target_waitstatus ws;
2463 int stop_func_filled_in;
2464 CORE_ADDR stop_func_start;
2465 CORE_ADDR stop_func_end;
2466 const char *stop_func_name;
2467 int wait_some_more;
2468
2469 /* We were in infwait_step_watch_state or
2470 infwait_nonstep_watch_state state, and the thread reported an
2471 event. */
2472 int stepped_after_stopped_by_watchpoint;
2473
2474 /* True if the event thread hit the single-step breakpoint of
2475 another thread. Thus the event doesn't cause a stop, the thread
2476 needs to be single-stepped past the single-step breakpoint before
2477 we can switch back to the original stepping thread. */
2478 int hit_singlestep_breakpoint;
2479 };
2480
2481 static void handle_inferior_event (struct execution_control_state *ecs);
2482
2483 static void handle_step_into_function (struct gdbarch *gdbarch,
2484 struct execution_control_state *ecs);
2485 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2486 struct execution_control_state *ecs);
2487 static void handle_signal_stop (struct execution_control_state *ecs);
2488 static void check_exception_resume (struct execution_control_state *,
2489 struct frame_info *);
2490
2491 static void stop_stepping (struct execution_control_state *ecs);
2492 static void prepare_to_wait (struct execution_control_state *ecs);
2493 static void keep_going (struct execution_control_state *ecs);
2494 static void process_event_stop_test (struct execution_control_state *ecs);
2495 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2496
2497 /* Callback for iterate over threads. If the thread is stopped, but
2498 the user/frontend doesn't know about that yet, go through
2499 normal_stop, as if the thread had just stopped now. ARG points at
2500 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2501 ptid_is_pid(PTID) is true, applies to all threads of the process
2502 pointed at by PTID. Otherwise, applies only to the thread pointed
2503 at by PTID. */
2504
2505 static int
2506 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2507 {
2508 ptid_t ptid = * (ptid_t *) arg;
2509
2510 if ((ptid_equal (info->ptid, ptid)
2511 || ptid_equal (minus_one_ptid, ptid)
2512 || (ptid_is_pid (ptid)
2513 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2514 && is_running (info->ptid)
2515 && !is_executing (info->ptid))
2516 {
2517 struct cleanup *old_chain;
2518 struct execution_control_state ecss;
2519 struct execution_control_state *ecs = &ecss;
2520
2521 memset (ecs, 0, sizeof (*ecs));
2522
2523 old_chain = make_cleanup_restore_current_thread ();
2524
2525 overlay_cache_invalid = 1;
2526 /* Flush target cache before starting to handle each event.
2527 Target was running and cache could be stale. This is just a
2528 heuristic. Running threads may modify target memory, but we
2529 don't get any event. */
2530 target_dcache_invalidate ();
2531
2532 /* Go through handle_inferior_event/normal_stop, so we always
2533 have consistent output as if the stop event had been
2534 reported. */
2535 ecs->ptid = info->ptid;
2536 ecs->event_thread = find_thread_ptid (info->ptid);
2537 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2538 ecs->ws.value.sig = GDB_SIGNAL_0;
2539
2540 handle_inferior_event (ecs);
2541
2542 if (!ecs->wait_some_more)
2543 {
2544 struct thread_info *tp;
2545
2546 normal_stop ();
2547
2548 /* Finish off the continuations. */
2549 tp = inferior_thread ();
2550 do_all_intermediate_continuations_thread (tp, 1);
2551 do_all_continuations_thread (tp, 1);
2552 }
2553
2554 do_cleanups (old_chain);
2555 }
2556
2557 return 0;
2558 }
2559
2560 /* This function is attached as a "thread_stop_requested" observer.
2561 Cleanup local state that assumed the PTID was to be resumed, and
2562 report the stop to the frontend. */
2563
2564 static void
2565 infrun_thread_stop_requested (ptid_t ptid)
2566 {
2567 struct displaced_step_inferior_state *displaced;
2568
2569 /* PTID was requested to stop. Remove it from the displaced
2570 stepping queue, so we don't try to resume it automatically. */
2571
2572 for (displaced = displaced_step_inferior_states;
2573 displaced;
2574 displaced = displaced->next)
2575 {
2576 struct displaced_step_request *it, **prev_next_p;
2577
2578 it = displaced->step_request_queue;
2579 prev_next_p = &displaced->step_request_queue;
2580 while (it)
2581 {
2582 if (ptid_match (it->ptid, ptid))
2583 {
2584 *prev_next_p = it->next;
2585 it->next = NULL;
2586 xfree (it);
2587 }
2588 else
2589 {
2590 prev_next_p = &it->next;
2591 }
2592
2593 it = *prev_next_p;
2594 }
2595 }
2596
2597 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2598 }
2599
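/* This function is attached as a "thread_exit" observer.  If the exiting
   thread is the one we last reported a target_wait event for, forget the
   cached wait ptid so it is not used after the thread is gone.  */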
2600 static void
2601 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2602 {
2603 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2604 nullify_last_target_wait_ptid ();
2605 }
2606
2607 /* Callback for iterate_over_threads. */
2608
2609 static int
2610 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2611 {
2612 if (is_exited (info->ptid))
2613 return 0;
2614
2615 delete_step_resume_breakpoint (info);
2616 delete_exception_resume_breakpoint (info);
2617 return 0;
2618 }
2619
2620 /* In all-stop, delete the step resume breakpoint of any thread that
2621 had one. In non-stop, delete the step resume breakpoint of the
2622 thread that just stopped. */
2623
2624 static void
2625 delete_step_thread_step_resume_breakpoint (void)
2626 {
2627 if (!target_has_execution
2628 || ptid_equal (inferior_ptid, null_ptid))
2629 /* If the inferior has exited, we have already deleted the step
2630 resume breakpoints out of GDB's lists. */
2631 return;
2632
2633 if (non_stop)
2634 {
2635 /* If in non-stop mode, only delete the step-resume or
2636 longjmp-resume breakpoint of the thread that just stopped
2637 stepping. */
2638 struct thread_info *tp = inferior_thread ();
2639
2640 delete_step_resume_breakpoint (tp);
2641 delete_exception_resume_breakpoint (tp);
2642 }
2643 else
2644 /* In all-stop mode, delete all step-resume and longjmp-resume
2645 breakpoints of any thread that had them. */
2646 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2647 }
2648
2649 /* A cleanup wrapper. */
2650
2651 static void
2652 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2653 {
2654 delete_step_thread_step_resume_breakpoint ();
2655 }
2656
2657 /* Pretty print the results of target_wait, for debugging purposes. */
2658
2659 static void
2660 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2661 const struct target_waitstatus *ws)
2662 {
2663 char *status_string = target_waitstatus_to_string (ws);
2664 struct ui_file *tmp_stream = mem_fileopen ();
2665 char *text;
2666
2667 /* The text is split over several lines because it was getting too long.
2668 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2669 output as a unit; we want only one timestamp printed if debug_timestamp
2670 is set. */
2671
2672 fprintf_unfiltered (tmp_stream,
2673 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2674 if (ptid_get_pid (waiton_ptid) != -1)
2675 fprintf_unfiltered (tmp_stream,
2676 " [%s]", target_pid_to_str (waiton_ptid));
2677 fprintf_unfiltered (tmp_stream, ", status) =\n");
2678 fprintf_unfiltered (tmp_stream,
2679 "infrun: %d [%s],\n",
2680 ptid_get_pid (result_ptid),
2681 target_pid_to_str (result_ptid));
2682 fprintf_unfiltered (tmp_stream,
2683 "infrun: %s\n",
2684 status_string);
2685
2686 text = ui_file_xstrdup (tmp_stream, NULL);
2687
2688 /* This uses %s in part to handle %'s in the text, but also to avoid
2689 a gcc error: the format attribute requires a string literal. */
2690 fprintf_unfiltered (gdb_stdlog, "%s", text);
2691
2692 xfree (status_string);
2693 xfree (text);
2694 ui_file_delete (tmp_stream);
2695 }
2696
2697 /* Prepare and stabilize the inferior for detaching it. E.g.,
2698 detaching while a thread is displaced stepping is a recipe for
2699 crashing it, as nothing would readjust the PC out of the scratch
2700 pad. */
2701
2702 void
2703 prepare_for_detach (void)
2704 {
2705 struct inferior *inf = current_inferior ();
2706 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2707 struct cleanup *old_chain_1;
2708 struct displaced_step_inferior_state *displaced;
2709
2710 displaced = get_displaced_stepping_state (inf->pid);
2711
2712 /* Is any thread of this process displaced stepping? If not,
2713 there's nothing else to do. */
2714 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2715 return;
2716
2717 if (debug_infrun)
2718 fprintf_unfiltered (gdb_stdlog,
2719 "displaced-stepping in-process while detaching");
2720
2721 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2722 inf->detaching = 1;
2723
2724 while (!ptid_equal (displaced->step_ptid, null_ptid))
2725 {
2726 struct cleanup *old_chain_2;
2727 struct execution_control_state ecss;
2728 struct execution_control_state *ecs;
2729
2730 ecs = &ecss;
2731 memset (ecs, 0, sizeof (*ecs));
2732
2733 overlay_cache_invalid = 1;
2734 /* Flush target cache before starting to handle each event.
2735 Target was running and cache could be stale. This is just a
2736 heuristic. Running threads may modify target memory, but we
2737 don't get any event. */
2738 target_dcache_invalidate ();
2739
2740 if (deprecated_target_wait_hook)
2741 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2742 else
2743 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2744
2745 if (debug_infrun)
2746 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2747
2748 /* If an error happens while handling the event, propagate GDB's
2749 knowledge of the executing state to the frontend/user running
2750 state. */
2751 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2752 &minus_one_ptid);
2753
2754 /* Now figure out what to do with the result of the wait. */
2755 handle_inferior_event (ecs);
2756
2757 /* No error, don't finish the state yet. */
2758 discard_cleanups (old_chain_2);
2759
2760 /* Breakpoints and watchpoints are not installed on the target
2761 at this point, and signals are passed directly to the
2762 inferior, so this must mean the process is gone. */
2763 if (!ecs->wait_some_more)
2764 {
2765 discard_cleanups (old_chain_1);
2766 error (_("Program exited while detaching"));
2767 }
2768 }
2769
2770 discard_cleanups (old_chain_1);
2771 }
2772
2773 /* Wait for control to return from inferior to debugger.
2774
2775 If inferior gets a signal, we may decide to start it up again
2776 instead of returning. That is why there is a loop in this function.
2777 When this function actually returns it means the inferior
2778 should be left stopped and GDB should read more commands. */
2779
2780 void
2781 wait_for_inferior (void)
2782 {
2783 struct cleanup *old_cleanups;
2784
2785 if (debug_infrun)
2786 fprintf_unfiltered
2787 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2788
2789 old_cleanups =
2790 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2791
2792 while (1)
2793 {
2794 struct execution_control_state ecss;
2795 struct execution_control_state *ecs = &ecss;
2796 struct cleanup *old_chain;
2797
2798 memset (ecs, 0, sizeof (*ecs));
2799
2800 overlay_cache_invalid = 1;
2801
2802 /* Flush target cache before starting to handle each event.
2803 Target was running and cache could be stale. This is just a
2804 heuristic. Running threads may modify target memory, but we
2805 don't get any event. */
2806 target_dcache_invalidate ();
2807
2808 if (deprecated_target_wait_hook)
2809 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2810 else
2811 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2812
2813 if (debug_infrun)
2814 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2815
2816 /* If an error happens while handling the event, propagate GDB's
2817 knowledge of the executing state to the frontend/user running
2818 state. */
2819 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2820
2821 /* Now figure out what to do with the result of the wait. */
2822 handle_inferior_event (ecs);
2823
2824 /* No error, don't finish the state yet. */
2825 discard_cleanups (old_chain);
2826
2827 if (!ecs->wait_some_more)
2828 break;
2829 }
2830
2831 do_cleanups (old_cleanups);
2832 }
2833
2834 /* Asynchronous version of wait_for_inferior. It is called by the
2835 event loop whenever a change of state is detected on the file
2836 descriptor corresponding to the target. It can be called more than
2837 once to complete a single execution command. In such cases we need
2838 to keep the state in a global variable ECSS. If it is the last time
2839 that this function is called for a single execution command, then
2840 report to the user that the inferior has stopped, and do the
2841 necessary cleanups. */
2842
2843 void
2844 fetch_inferior_event (void *client_data)
2845 {
2846 struct execution_control_state ecss;
2847 struct execution_control_state *ecs = &ecss;
2848 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2849 struct cleanup *ts_old_chain;
2850 int was_sync = sync_execution;
2851 int cmd_done = 0;
2852
2853 memset (ecs, 0, sizeof (*ecs));
2854
2855 /* We're handling a live event, so make sure we're doing live
2856 debugging. If we're looking at traceframes while the target is
2857 running, we're going to need to get back to that mode after
2858 handling the event. */
2859 if (non_stop)
2860 {
2861 make_cleanup_restore_current_traceframe ();
2862 set_current_traceframe (-1);
2863 }
2864
2865 if (non_stop)
2866 /* In non-stop mode, the user/frontend should not notice a thread
2867 switch due to internal events. Make sure we revert to the
2868 user selected thread and frame after handling the event and
2869 running any breakpoint commands. */
2870 make_cleanup_restore_current_thread ();
2871
2872 overlay_cache_invalid = 1;
2873 /* Flush target cache before starting to handle each event. Target
2874 was running and cache could be stale. This is just a heuristic.
2875 Running threads may modify target memory, but we don't get any
2876 event. */
2877 target_dcache_invalidate ();
2878
2879 make_cleanup_restore_integer (&execution_direction);
2880 execution_direction = target_execution_direction ();
2881
2882 if (deprecated_target_wait_hook)
2883 ecs->ptid =
2884 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2885 else
2886 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2887
2888 if (debug_infrun)
2889 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2890
2891 /* If an error happens while handling the event, propagate GDB's
2892 knowledge of the executing state to the frontend/user running
2893 state. */
2894 if (!non_stop)
2895 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2896 else
2897 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2898
2899 /* Registered after make_cleanup_restore_current_thread above, this cleanup
2900 runs before it, so it still applies to the thread which has thrown the exception. */
2901 make_bpstat_clear_actions_cleanup ();
2902
2903 /* Now figure out what to do with the result of the wait. */
2904 handle_inferior_event (ecs);
2905
2906 if (!ecs->wait_some_more)
2907 {
2908 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2909
2910 delete_step_thread_step_resume_breakpoint ();
2911
2912 /* We may not find an inferior if this was a process exit. */
2913 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2914 normal_stop ();
2915
2916 if (target_has_execution
2917 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2918 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2919 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2920 && ecs->event_thread->step_multi
2921 && ecs->event_thread->control.stop_step)
2922 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2923 else
2924 {
2925 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2926 cmd_done = 1;
2927 }
2928 }
2929
2930 /* No error, don't finish the thread states yet. */
2931 discard_cleanups (ts_old_chain);
2932
2933 /* Revert thread and frame. */
2934 do_cleanups (old_chain);
2935
2936 /* If the inferior was in sync execution mode, and now isn't,
2937 restore the prompt (a synchronous execution command has finished,
2938 and we're ready for input). */
2939 if (interpreter_async && was_sync && !sync_execution)
2940 display_gdb_prompt (0);
2941
2942 if (cmd_done
2943 && !was_sync
2944 && exec_done_display_p
2945 && (ptid_equal (inferior_ptid, null_ptid)
2946 || !is_running (inferior_ptid)))
2947 printf_unfiltered (_("completed.\n"));
2948 }
2949
2950 /* Record the frame and location we're currently stepping through. */
2951 void
2952 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2953 {
2954 struct thread_info *tp = inferior_thread ();
2955
2956 tp->control.step_frame_id = get_frame_id (frame);
2957 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2958
2959 tp->current_symtab = sal.symtab;
2960 tp->current_line = sal.line;
2961 }
2962
2963 /* Clear context switchable stepping state. */
2964
2965 void
2966 init_thread_stepping_state (struct thread_info *tss)
2967 {
2968 tss->stepping_over_breakpoint = 0;
2969 tss->step_after_step_resume_breakpoint = 0;
2970 }
2971
2972 /* Return the cached copy of the last pid/waitstatus returned by
2973 target_wait()/deprecated_target_wait_hook(). The data is actually
2974 cached by handle_inferior_event(), which gets called immediately
2975 after target_wait()/deprecated_target_wait_hook(). */
2976
2977 void
2978 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2979 {
2980 *ptidp = target_last_wait_ptid;
2981 *status = target_last_waitstatus;
2982 }
2983
2984 void
2985 nullify_last_target_wait_ptid (void)
2986 {
2987 target_last_wait_ptid = minus_one_ptid;
2988 }
2989
2990 /* Switch thread contexts. */
2991
2992 static void
2993 context_switch (ptid_t ptid)
2994 {
2995 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2996 {
2997 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2998 target_pid_to_str (inferior_ptid));
2999 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3000 target_pid_to_str (ptid));
3001 }
3002
3003 switch_to_thread (ptid);
3004 }
3005
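/* If the event in ECS is a SIGTRAP caused by hitting a software
   breakpoint, rewind the PC to the breakpoint address on architectures
   where the trap is reported with the PC already past the breakpoint
   instruction (i.e. where target_decr_pc_after_break is nonzero).  */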
3006 static void
3007 adjust_pc_after_break (struct execution_control_state *ecs)
3008 {
3009 struct regcache *regcache;
3010 struct gdbarch *gdbarch;
3011 struct address_space *aspace;
3012 CORE_ADDR breakpoint_pc, decr_pc;
3013
3014 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3015 we aren't, just return.
3016
3017 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3018 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3019 implemented by software breakpoints should be handled through the normal
3020 breakpoint layer.
3021
3022 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3023 different signals (SIGILL or SIGEMT for instance), but it is less
3024 clear where the PC is pointing afterwards. It may not match
3025 gdbarch_decr_pc_after_break. I don't know any specific target that
3026 generates these signals at breakpoints (the code has been in GDB since at
3027 least 1992) so I can not guess how to handle them here.
3028
3029 In earlier versions of GDB, a target with
3030 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3031 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3032 target with both of these set in GDB history, and it seems unlikely to be
3033 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3034
3035 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3036 return;
3037
3038 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3039 return;
3040
3041 /* In reverse execution, when a breakpoint is hit, the instruction
3042 under it has already been de-executed. The reported PC always
3043 points at the breakpoint address, so adjusting it further would
3044 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3045 architecture:
3046
3047 B1 0x08000000 : INSN1
3048 B2 0x08000001 : INSN2
3049 0x08000002 : INSN3
3050 PC -> 0x08000003 : INSN4
3051
3052 Say you're stopped at 0x08000003 as above. Reverse continuing
3053 from that point should hit B2 as below. Reading the PC when the
3054 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3055 been de-executed already.
3056
3057 B1 0x08000000 : INSN1
3058 B2 PC -> 0x08000001 : INSN2
3059 0x08000002 : INSN3
3060 0x08000003 : INSN4
3061
3062 We can't apply the same logic as for forward execution, because
3063 we would wrongly adjust the PC to 0x08000000, since there's a
3064 breakpoint at PC - 1. We'd then report a hit on B1, although
3065 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3066 behaviour. */
3067 if (execution_direction == EXEC_REVERSE)
3068 return;
3069
3070 /* If this target does not decrement the PC after breakpoints, then
3071 we have nothing to do. */
3072 regcache = get_thread_regcache (ecs->ptid);
3073 gdbarch = get_regcache_arch (regcache);
3074
3075 decr_pc = target_decr_pc_after_break (gdbarch);
3076 if (decr_pc == 0)
3077 return;
3078
3079 aspace = get_regcache_aspace (regcache);
3080
3081 /* Find the location where (if we've hit a breakpoint) the
3082 breakpoint would be. */
3083 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3084
3085 /* Check whether there actually is a software breakpoint inserted at
3086 that location.
3087
3088 If in non-stop mode, a race condition is possible where we've
3089 removed a breakpoint, but stop events for that breakpoint were
3090 already queued and arrive later. To suppress those spurious
3091 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3092 and retire them after a number of stop events are reported. */
3093 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3094 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3095 {
3096 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3097
3098 if (record_full_is_used ())
3099 record_full_gdb_operation_disable_set ();
3100
3101 /* When using hardware single-step, a SIGTRAP is reported for both
3102 a completed single-step and a software breakpoint. Need to
3103 differentiate between the two, as the latter needs adjusting
3104 but the former does not.
3105
3106 The SIGTRAP can be due to a completed hardware single-step only if
3107 - we didn't insert software single-step breakpoints
3108 - the thread to be examined is still the current thread
3109 - this thread is currently being stepped
3110
3111 If any of these events did not occur, we must have stopped due
3112 to hitting a software breakpoint, and have to back up to the
3113 breakpoint address.
3114
3115 As a special case, we could have hardware single-stepped a
3116 software breakpoint. In this case (prev_pc == breakpoint_pc),
3117 we also need to back up to the breakpoint address. */
3118
3119 if (singlestep_breakpoints_inserted_p
3120 || !ptid_equal (ecs->ptid, inferior_ptid)
3121 || !currently_stepping (ecs->event_thread)
3122 || ecs->event_thread->prev_pc == breakpoint_pc)
3123 regcache_write_pc (regcache, breakpoint_pc);
3124
3125 do_cleanups (old_cleanups);
3126 }
3127 }
3128
3129 static void
3130 init_infwait_state (void)
3131 {
3132 waiton_ptid = pid_to_ptid (-1);
3133 infwait_state = infwait_normal_state;
3134 }
3135
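/* Return non-zero if STEP_FRAME_ID identifies one of FRAME's caller
   frames, searching outward through inlined frames and stopping at
   (but still checking) the first non-inlined caller.  */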
3136 static int
3137 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3138 {
3139 for (frame = get_prev_frame (frame);
3140 frame != NULL;
3141 frame = get_prev_frame (frame))
3142 {
3143 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3144 return 1;
3145 if (get_frame_type (frame) != INLINE_FRAME)
3146 break;
3147 }
3148
3149 return 0;
3150 }
3151
3152 /* Auxiliary function that handles syscall entry/return events.
3153 It returns 1 if the inferior should keep going (and GDB
3154 should ignore the event), or 0 if the event deserves to be
3155 processed. */
3156
3157 static int
3158 handle_syscall_event (struct execution_control_state *ecs)
3159 {
3160 struct regcache *regcache;
3161 int syscall_number;
3162
3163 if (!ptid_equal (ecs->ptid, inferior_ptid))
3164 context_switch (ecs->ptid);
3165
3166 regcache = get_thread_regcache (ecs->ptid);
3167 syscall_number = ecs->ws.value.syscall_number;
3168 stop_pc = regcache_read_pc (regcache);
3169
3170 if (catch_syscall_enabled () > 0
3171 && catching_syscall_number (syscall_number) > 0)
3172 {
3173 if (debug_infrun)
3174 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3175 syscall_number);
3176
3177 ecs->event_thread->control.stop_bpstat
3178 = bpstat_stop_status (get_regcache_aspace (regcache),
3179 stop_pc, ecs->ptid, &ecs->ws);
3180
3181 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3182 {
3183 /* Catchpoint hit. */
3184 return 0;
3185 }
3186 }
3187
3188 /* If no catchpoint triggered for this, then keep going. */
3189 keep_going (ecs);
3190 return 1;
3191 }
3192
3193 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3194
3195 static void
3196 fill_in_stop_func (struct gdbarch *gdbarch,
3197 struct execution_control_state *ecs)
3198 {
3199 if (!ecs->stop_func_filled_in)
3200 {
3201 /* Don't care about return value; stop_func_start and stop_func_name
3202 will both be 0 if it doesn't work. */
3203 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3204 &ecs->stop_func_start, &ecs->stop_func_end);
3205 ecs->stop_func_start
3206 += gdbarch_deprecated_function_start_offset (gdbarch);
3207
3208 if (gdbarch_skip_entrypoint_p (gdbarch))
3209 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3210 ecs->stop_func_start);
3211
3212 ecs->stop_func_filled_in = 1;
3213 }
3214 }
3215
3216
3217 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3218
3219 static enum stop_kind
3220 get_inferior_stop_soon (ptid_t ptid)
3221 {
3222 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3223
3224 gdb_assert (inf != NULL);
3225 return inf->control.stop_soon;
3226 }
3227
3228 /* Given an execution control state that has been freshly filled in by
3229 an event from the inferior, figure out what it means and take
3230 appropriate action.
3231
3232 The alternatives are:
3233
3234 1) stop_stepping and return; to really stop and return to the
3235 debugger.
3236
3237 2) keep_going and return; to wait for the next event (set
3238 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3239 once). */
3240
3241 static void
3242 handle_inferior_event (struct execution_control_state *ecs)
3243 {
3244 enum stop_kind stop_soon;
3245
3246 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3247 {
3248 /* We had an event in the inferior, but we are not interested in
3249 handling it at this level. The lower layers have already
3250 done what needs to be done, if anything.
3251
3252 One of the possible circumstances for this is when the
3253 inferior produces output for the console. The inferior has
3254 not stopped, and we are ignoring the event. Another possible
3255 circumstance is any event which the lower level knows will be
3256 reported multiple times without an intervening resume. */
3257 if (debug_infrun)
3258 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3259 prepare_to_wait (ecs);
3260 return;
3261 }
3262
3263 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3264 && target_can_async_p () && !sync_execution)
3265 {
3266 /* There were no unwaited-for children left in the target, but,
3267 we're not synchronously waiting for events either. Just
3268 ignore. Otherwise, if we were running a synchronous
3269 execution command, we need to cancel it and give the user
3270 back the terminal. */
3271 if (debug_infrun)
3272 fprintf_unfiltered (gdb_stdlog,
3273 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3274 prepare_to_wait (ecs);
3275 return;
3276 }
3277
3278 /* Cache the last pid/waitstatus. */
3279 target_last_wait_ptid = ecs->ptid;
3280 target_last_waitstatus = ecs->ws;
3281
3282 /* Always clear state belonging to the previous time we stopped. */
3283 stop_stack_dummy = STOP_NONE;
3284
3285 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3286 {
3287 /* No unwaited-for children left. IOW, all resumed children
3288 have exited. */
3289 if (debug_infrun)
3290 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3291
3292 stop_print_frame = 0;
3293 stop_stepping (ecs);
3294 return;
3295 }
3296
3297 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3298 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3299 {
3300 ecs->event_thread = find_thread_ptid (ecs->ptid);
3301 /* If it's a new thread, add it to the thread database. */
3302 if (ecs->event_thread == NULL)
3303 ecs->event_thread = add_thread (ecs->ptid);
3304
3305 /* Disable range stepping. If the next step request could use a
3306 range, it will end up re-enabled then. */
3307 ecs->event_thread->control.may_range_step = 0;
3308 }
3309
3310 /* Dependent on valid ECS->EVENT_THREAD. */
3311 adjust_pc_after_break (ecs);
3312
3313 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3314 reinit_frame_cache ();
3315
3316 breakpoint_retire_moribund ();
3317
3318 /* First, distinguish signals caused by the debugger from signals
3319 that have to do with the program's own actions. Note that
3320 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3321 on the operating system version. Here we detect when a SIGILL or
3322 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3323 something similar for SIGSEGV, since a SIGSEGV will be generated
3324 when we're trying to execute a breakpoint instruction on a
3325 non-executable stack. This happens for call dummy breakpoints
3326 for architectures like SPARC that place call dummies on the
3327 stack. */
3328 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3329 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3330 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3331 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3332 {
3333 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3334
3335 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3336 regcache_read_pc (regcache)))
3337 {
3338 if (debug_infrun)
3339 fprintf_unfiltered (gdb_stdlog,
3340 "infrun: Treating signal as SIGTRAP\n");
3341 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3342 }
3343 }
3344
3345 /* Mark the non-executing threads accordingly. In all-stop, all
3346 threads of all processes are stopped when we get any event
3347 reported. In non-stop mode, only the event thread stops. If
3348 we're handling a process exit in non-stop mode, there's nothing
3349 to do, as threads of the dead process are gone, and threads of
3350 any other process were left running. */
3351 if (!non_stop)
3352 set_executing (minus_one_ptid, 0);
3353 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3354 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3355 set_executing (ecs->ptid, 0);
3356
3357 switch (infwait_state)
3358 {
3359 case infwait_normal_state:
3360 if (debug_infrun)
3361 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3362 break;
3363
3364 case infwait_step_watch_state:
3365 if (debug_infrun)
3366 fprintf_unfiltered (gdb_stdlog,
3367 "infrun: infwait_step_watch_state\n");
3368
3369 ecs->stepped_after_stopped_by_watchpoint = 1;
3370 break;
3371
3372 case infwait_nonstep_watch_state:
3373 if (debug_infrun)
3374 fprintf_unfiltered (gdb_stdlog,
3375 "infrun: infwait_nonstep_watch_state\n");
3376 insert_breakpoints ();
3377
3378 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3379 handle things like signals arriving and other things happening
3380 in combination correctly? */
3381 ecs->stepped_after_stopped_by_watchpoint = 1;
3382 break;
3383
3384 default:
3385 internal_error (__FILE__, __LINE__, _("bad switch"));
3386 }
3387
3388 infwait_state = infwait_normal_state;
3389 waiton_ptid = pid_to_ptid (-1);
3390
3391 switch (ecs->ws.kind)
3392 {
3393 case TARGET_WAITKIND_LOADED:
3394 if (debug_infrun)
3395 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3396 if (!ptid_equal (ecs->ptid, inferior_ptid))
3397 context_switch (ecs->ptid);
3398 /* Ignore gracefully during startup of the inferior, as it might
3399 be the shell which has just loaded some objects; otherwise
3400 add the symbols for the newly loaded objects. Also ignore at
3401 the beginning of an attach or remote session; we will query
3402 the full list of libraries once the connection is
3403 established. */
3404
3405 stop_soon = get_inferior_stop_soon (ecs->ptid);
3406 if (stop_soon == NO_STOP_QUIETLY)
3407 {
3408 struct regcache *regcache;
3409
3410 regcache = get_thread_regcache (ecs->ptid);
3411
3412 handle_solib_event ();
3413
3414 ecs->event_thread->control.stop_bpstat
3415 = bpstat_stop_status (get_regcache_aspace (regcache),
3416 stop_pc, ecs->ptid, &ecs->ws);
3417
3418 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3419 {
3420 /* A catchpoint triggered. */
3421 process_event_stop_test (ecs);
3422 return;
3423 }
3424
3425 /* If requested, stop when the dynamic linker notifies
3426 gdb of events. This allows the user to get control
3427 and place breakpoints in initializer routines for
3428 dynamically loaded objects (among other things). */
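/* Illustrative note (not part of the original sources): a user would
   typically request this with "set stop-on-solib-events 1", after which
   each load/unload notification from the dynamic linker stops the
   program with a solib-event message, as arranged just below.  */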
3429 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3430 if (stop_on_solib_events)
3431 {
3432 /* Make sure we print "Stopped due to solib-event" in
3433 normal_stop. */
3434 stop_print_frame = 1;
3435
3436 stop_stepping (ecs);
3437 return;
3438 }
3439 }
3440
3441 /* If we are skipping through a shell, or through shared library
3442 loading that we aren't interested in, resume the program. If
3443 we're running the program normally, also resume. */
3444 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3445 {
3446 /* Loading of shared libraries might have changed breakpoint
3447 addresses. Make sure new breakpoints are inserted. */
3448 if (stop_soon == NO_STOP_QUIETLY
3449 && !breakpoints_always_inserted_mode ())
3450 insert_breakpoints ();
3451 resume (0, GDB_SIGNAL_0);
3452 prepare_to_wait (ecs);
3453 return;
3454 }
3455
3456 /* But stop if we're attaching or setting up a remote
3457 connection. */
3458 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3459 || stop_soon == STOP_QUIETLY_REMOTE)
3460 {
3461 if (debug_infrun)
3462 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3463 stop_stepping (ecs);
3464 return;
3465 }
3466
3467 internal_error (__FILE__, __LINE__,
3468 _("unhandled stop_soon: %d"), (int) stop_soon);
3469
3470 case TARGET_WAITKIND_SPURIOUS:
3471 if (debug_infrun)
3472 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3473 if (!ptid_equal (ecs->ptid, inferior_ptid))
3474 context_switch (ecs->ptid);
3475 resume (0, GDB_SIGNAL_0);
3476 prepare_to_wait (ecs);
3477 return;
3478
3479 case TARGET_WAITKIND_EXITED:
3480 case TARGET_WAITKIND_SIGNALLED:
3481 if (debug_infrun)
3482 {
3483 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3484 fprintf_unfiltered (gdb_stdlog,
3485 "infrun: TARGET_WAITKIND_EXITED\n");
3486 else
3487 fprintf_unfiltered (gdb_stdlog,
3488 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3489 }
3490
3491 inferior_ptid = ecs->ptid;
3492 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3493 set_current_program_space (current_inferior ()->pspace);
3494 handle_vfork_child_exec_or_exit (0);
3495 target_terminal_ours (); /* Must do this before mourn anyway. */
3496
3497 /* Clearing any previous state of convenience variables. */
3498 clear_exit_convenience_vars ();
3499
3500 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3501 {
3502 /* Record the exit code in the convenience variable $_exitcode, so
3503 that the user can inspect this again later. */
3504 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3505 (LONGEST) ecs->ws.value.integer);
3506
3507 /* Also record this in the inferior itself. */
3508 current_inferior ()->has_exit_code = 1;
3509 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3510
3511 print_exited_reason (ecs->ws.value.integer);
3512 }
3513 else
3514 {
3515 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3516 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3517
3518 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3519 {
3520 /* Set the value of the internal variable $_exitsignal,
3521 which holds the signal uncaught by the inferior. */
3522 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3523 gdbarch_gdb_signal_to_target (gdbarch,
3524 ecs->ws.value.sig));
3525 }
3526 else
3527 {
3528 /* We don't have access to the target's method used for
3529 converting between signal numbers (GDB's internal
3530 representation <-> target's representation).
3531 Therefore, we cannot do a good job at displaying this
3532 information to the user. It's better to just warn
3533 her about it (if infrun debugging is enabled), and
3534 give up. */
3535 if (debug_infrun)
3536 fprintf_unfiltered (gdb_stdlog, _("\
3537 Cannot fill $_exitsignal with the correct signal number.\n"));
3538 }
3539
3540 print_signal_exited_reason (ecs->ws.value.sig);
3541 }
3542
3543 gdb_flush (gdb_stdout);
3544 target_mourn_inferior ();
3545 singlestep_breakpoints_inserted_p = 0;
3546 cancel_single_step_breakpoints ();
3547 stop_print_frame = 0;
3548 stop_stepping (ecs);
3549 return;
3550
3551 /* The following are the only cases in which we keep going;
3552 the above cases end in a continue or goto. */
3553 case TARGET_WAITKIND_FORKED:
3554 case TARGET_WAITKIND_VFORKED:
3555 if (debug_infrun)
3556 {
3557 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3558 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3559 else
3560 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3561 }
3562
3563 /* Check whether the inferior is displaced stepping. */
3564 {
3565 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3566 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3567 struct displaced_step_inferior_state *displaced
3568 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3569
3570 /* If displaced stepping is in use, and thread ecs->ptid is the one
3571 displaced stepping, clean up after the now-finished step. */
3572 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3573 {
3574 struct inferior *parent_inf
3575 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3576 struct regcache *child_regcache;
3577 CORE_ADDR parent_pc;
3578
3579 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3580 indicating that the displaced stepping of syscall instruction
3581 has been done. Perform cleanup for parent process here. Note
3582 that this operation also cleans up the child process for vfork,
3583 because their pages are shared. */
3584 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3585
3586 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3587 {
3588 /* Restore scratch pad for child process. */
3589 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3590 }
3591
3592 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3593 the child's PC is also within the scratchpad. Set the child's PC
3594 to the parent's PC value, which has already been fixed up.
3595 FIXME: we use the parent's aspace here, although we're touching
3596 the child, because the child hasn't been added to the inferior
3597 list yet at this point. */
3598
3599 child_regcache
3600 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3601 gdbarch,
3602 parent_inf->aspace);
3603 /* Read PC value of parent process. */
3604 parent_pc = regcache_read_pc (regcache);
3605
3606 if (debug_displaced)
3607 fprintf_unfiltered (gdb_stdlog,
3608 "displaced: write child pc from %s to %s\n",
3609 paddress (gdbarch,
3610 regcache_read_pc (child_regcache)),
3611 paddress (gdbarch, parent_pc));
3612
3613 regcache_write_pc (child_regcache, parent_pc);
3614 }
3615 }
3616
3617 if (!ptid_equal (ecs->ptid, inferior_ptid))
3618 context_switch (ecs->ptid);
3619
3620 /* Immediately detach breakpoints from the child before there's
3621 any chance of letting the user delete breakpoints from the
3622 breakpoint lists. If we don't do this early, it's easy to
3623 leave leftover traps in the child, viz: "break foo; catch
3624 fork; c; <fork>; del; c; <child calls foo>". We only follow
3625 the fork on the last `continue', and by that time the
3626 breakpoint at "foo" is long gone from the breakpoint table.
3627 If we vforked, then we don't need to unpatch here, since both
3628 parent and child are sharing the same memory pages; we'll
3629 need to unpatch at follow/detach time instead to be certain
3630 that new breakpoints added between catchpoint hit time and
3631 vfork follow are detached. */
3632 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3633 {
3634 /* This won't actually modify the breakpoint list, but will
3635 physically remove the breakpoints from the child. */
3636 detach_breakpoints (ecs->ws.value.related_pid);
3637 }
3638
3639 if (singlestep_breakpoints_inserted_p)
3640 {
3641 /* Pull the single step breakpoints out of the target. */
3642 remove_single_step_breakpoints ();
3643 singlestep_breakpoints_inserted_p = 0;
3644 }
3645
3646 /* In case the event is caught by a catchpoint, remember that
3647 the event is to be followed at the next resume of the thread,
3648 and not immediately. */
3649 ecs->event_thread->pending_follow = ecs->ws;
3650
3651 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3652
3653 ecs->event_thread->control.stop_bpstat
3654 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3655 stop_pc, ecs->ptid, &ecs->ws);
3656
3657 /* If no catchpoint triggered for this, then keep going. Note
3658 that we're interested in knowing the bpstat actually causes a
3659 stop, not just if it may explain the signal. Software
3660 watchpoints, for example, always appear in the bpstat. */
3661 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3662 {
3663 ptid_t parent;
3664 ptid_t child;
3665 int should_resume;
3666 int follow_child
3667 = (follow_fork_mode_string == follow_fork_mode_child);
3668
3669 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3670
3671 should_resume = follow_fork ();
3672
3673 parent = ecs->ptid;
3674 child = ecs->ws.value.related_pid;
3675
3676 /* In non-stop mode, also resume the other branch. */
3677 if (non_stop && !detach_fork)
3678 {
3679 if (follow_child)
3680 switch_to_thread (parent);
3681 else
3682 switch_to_thread (child);
3683
3684 ecs->event_thread = inferior_thread ();
3685 ecs->ptid = inferior_ptid;
3686 keep_going (ecs);
3687 }
3688
3689 if (follow_child)
3690 switch_to_thread (child);
3691 else
3692 switch_to_thread (parent);
3693
3694 ecs->event_thread = inferior_thread ();
3695 ecs->ptid = inferior_ptid;
3696
3697 if (should_resume)
3698 keep_going (ecs);
3699 else
3700 stop_stepping (ecs);
3701 return;
3702 }
3703 process_event_stop_test (ecs);
3704 return;
3705
3706 case TARGET_WAITKIND_VFORK_DONE:
3707 /* Done with the shared memory region. Re-insert breakpoints in
3708 the parent, and keep going. */
3709
3710 if (debug_infrun)
3711 fprintf_unfiltered (gdb_stdlog,
3712 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3713
3714 if (!ptid_equal (ecs->ptid, inferior_ptid))
3715 context_switch (ecs->ptid);
3716
3717 current_inferior ()->waiting_for_vfork_done = 0;
3718 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3719 /* This also takes care of reinserting breakpoints in the
3720 previously locked inferior. */
3721 keep_going (ecs);
3722 return;
3723
3724 case TARGET_WAITKIND_EXECD:
3725 if (debug_infrun)
3726 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3727
3728 if (!ptid_equal (ecs->ptid, inferior_ptid))
3729 context_switch (ecs->ptid);
3730
3731 singlestep_breakpoints_inserted_p = 0;
3732 cancel_single_step_breakpoints ();
3733
3734 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3735
3736 /* Do whatever is necessary to the parent branch of the vfork. */
3737 handle_vfork_child_exec_or_exit (1);
3738
3739 /* This causes the eventpoints and symbol table to be reset.
3740 Must do this now, before trying to determine whether to
3741 stop. */
3742 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3743
3744 ecs->event_thread->control.stop_bpstat
3745 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3746 stop_pc, ecs->ptid, &ecs->ws);
3747
3748 /* Note that this may be referenced from inside
3749 bpstat_stop_status above, through inferior_has_execd. */
3750 xfree (ecs->ws.value.execd_pathname);
3751 ecs->ws.value.execd_pathname = NULL;
3752
3753 /* If no catchpoint triggered for this, then keep going. */
3754 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3755 {
3756 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3757 keep_going (ecs);
3758 return;
3759 }
3760 process_event_stop_test (ecs);
3761 return;
3762
3763 /* Be careful not to try to gather much state about a thread
3764 that's in a syscall. It's frequently a losing proposition. */
3765 case TARGET_WAITKIND_SYSCALL_ENTRY:
3766 if (debug_infrun)
3767 fprintf_unfiltered (gdb_stdlog,
3768 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3769 /* Getting the current syscall number. */
3770 if (handle_syscall_event (ecs) == 0)
3771 process_event_stop_test (ecs);
3772 return;
3773
3774 /* Before examining the threads further, step this thread to
3775 get it entirely out of the syscall. (We get notice of the
3776 event when the thread is just on the verge of exiting a
3777 syscall. Stepping one instruction seems to get it back
3778 into user code.) */
3779 case TARGET_WAITKIND_SYSCALL_RETURN:
3780 if (debug_infrun)
3781 fprintf_unfiltered (gdb_stdlog,
3782 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3783 if (handle_syscall_event (ecs) == 0)
3784 process_event_stop_test (ecs);
3785 return;
3786
3787 case TARGET_WAITKIND_STOPPED:
3788 if (debug_infrun)
3789 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3790 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3791 handle_signal_stop (ecs);
3792 return;
3793
3794 case TARGET_WAITKIND_NO_HISTORY:
3795 if (debug_infrun)
3796 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3797 /* Reverse execution: target ran out of history info. */
3798
3799 /* Pull the single step breakpoints out of the target. */
3800 if (singlestep_breakpoints_inserted_p)
3801 {
3802 if (!ptid_equal (ecs->ptid, inferior_ptid))
3803 context_switch (ecs->ptid);
3804 remove_single_step_breakpoints ();
3805 singlestep_breakpoints_inserted_p = 0;
3806 }
3807 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3808 print_no_history_reason ();
3809 stop_stepping (ecs);
3810 return;
3811 }
3812 }
3813
3814 /* Come here when the program has stopped with a signal. */
3815
3816 static void
3817 handle_signal_stop (struct execution_control_state *ecs)
3818 {
3819 struct frame_info *frame;
3820 struct gdbarch *gdbarch;
3821 int stopped_by_watchpoint;
3822 enum stop_kind stop_soon;
3823 int random_signal;
3824
3825 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3826
3827 /* Do we need to clean up the state of a thread that has
3828 completed a displaced single-step? (Doing so usually affects
3829 the PC, so do it here, before we set stop_pc.) */
3830 displaced_step_fixup (ecs->ptid,
3831 ecs->event_thread->suspend.stop_signal);
3832
3833 /* If we either finished a single-step or hit a breakpoint, but
3834 the user wanted this thread to be stopped, pretend we got a
3835 SIG0 (generic unsignaled stop). */
3836 if (ecs->event_thread->stop_requested
3837 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3838 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3839
3840 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3841
3842 if (debug_infrun)
3843 {
3844 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3845 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3846 struct cleanup *old_chain = save_inferior_ptid ();
3847
3848 inferior_ptid = ecs->ptid;
3849
3850 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3851 paddress (gdbarch, stop_pc));
3852 if (target_stopped_by_watchpoint ())
3853 {
3854 CORE_ADDR addr;
3855
3856 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3857
3858 if (target_stopped_data_address (&current_target, &addr))
3859 fprintf_unfiltered (gdb_stdlog,
3860 "infrun: stopped data address = %s\n",
3861 paddress (gdbarch, addr));
3862 else
3863 fprintf_unfiltered (gdb_stdlog,
3864 "infrun: (no data address available)\n");
3865 }
3866
3867 do_cleanups (old_chain);
3868 }
3869
3870 /* This originates from start_remote(), start_inferior() and
3871 shared library hook functions. */
3872 stop_soon = get_inferior_stop_soon (ecs->ptid);
3873 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3874 {
3875 if (!ptid_equal (ecs->ptid, inferior_ptid))
3876 context_switch (ecs->ptid);
3877 if (debug_infrun)
3878 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3879 stop_print_frame = 1;
3880 stop_stepping (ecs);
3881 return;
3882 }
3883
3884 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3885 && stop_after_trap)
3886 {
3887 if (!ptid_equal (ecs->ptid, inferior_ptid))
3888 context_switch (ecs->ptid);
3889 if (debug_infrun)
3890 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3891 stop_print_frame = 0;
3892 stop_stepping (ecs);
3893 return;
3894 }
3895
3896 /* This originates from attach_command(). We need to overwrite
3897 the stop_signal here, because some kernels don't ignore a
3898 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3899 See more comments in inferior.h. On the other hand, if we
3900 get a non-SIGSTOP, report it to the user - assume the backend
3901 will handle the SIGSTOP if it should show up later.
3902
3903 Also consider that the attach is complete when we see a
3904 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3905 target extended-remote report it instead of a SIGSTOP
3906 (e.g. gdbserver). We already rely on SIGTRAP being our
3907 signal, so this is no exception.
3908
3909 Also consider that the attach is complete when we see a
3910 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3911 the target to stop all threads of the inferior, in case the
3912 low level attach operation doesn't stop them implicitly. If
3913 they weren't stopped implicitly, then the stub will report a
3914 GDB_SIGNAL_0, meaning: stopped for no particular reason
3915 other than GDB's request. */
3916 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3917 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3918 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3919 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3920 {
3921 stop_print_frame = 1;
3922 stop_stepping (ecs);
3923 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3924 return;
3925 }
3926
3927 /* See if something interesting happened to the non-current thread. If
3928 so, then switch to that thread. */
3929 if (!ptid_equal (ecs->ptid, inferior_ptid))
3930 {
3931 if (debug_infrun)
3932 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3933
3934 context_switch (ecs->ptid);
3935
3936 if (deprecated_context_hook)
3937 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3938 }
3939
3940 /* At this point, get hold of the now-current thread's frame. */
3941 frame = get_current_frame ();
3942 gdbarch = get_frame_arch (frame);
3943
3944 /* Pull the single step breakpoints out of the target. */
3945 if (singlestep_breakpoints_inserted_p)
3946 {
3947 /* However, before doing so, if this single-step breakpoint was
3948 actually for another thread, set this thread up for moving
3949 past it. */
3950 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3951 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3952 {
3953 struct regcache *regcache;
3954 struct address_space *aspace;
3955 CORE_ADDR pc;
3956
3957 regcache = get_thread_regcache (ecs->ptid);
3958 aspace = get_regcache_aspace (regcache);
3959 pc = regcache_read_pc (regcache);
3960 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3961 {
3962 if (debug_infrun)
3963 {
3964 fprintf_unfiltered (gdb_stdlog,
3965 "infrun: [%s] hit step over single-step"
3966 " breakpoint of [%s]\n",
3967 target_pid_to_str (ecs->ptid),
3968 target_pid_to_str (singlestep_ptid));
3969 }
3970 ecs->hit_singlestep_breakpoint = 1;
3971 }
3972 }
3973
3974 remove_single_step_breakpoints ();
3975 singlestep_breakpoints_inserted_p = 0;
3976 }
3977
3978 if (ecs->stepped_after_stopped_by_watchpoint)
3979 stopped_by_watchpoint = 0;
3980 else
3981 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3982
3983 /* If necessary, step over this watchpoint. We'll be back to display
3984 it in a moment. */
3985 if (stopped_by_watchpoint
3986 && (target_have_steppable_watchpoint
3987 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3988 {
3989 /* At this point, we are stopped at an instruction which has
3990 attempted to write to a piece of memory under control of
3991 a watchpoint. The instruction hasn't actually executed
3992 yet. If we were to evaluate the watchpoint expression
3993 now, we would get the old value, and therefore no change
3994 would seem to have occurred.
3995
3996 In order to make watchpoints work `right', we really need
3997 to complete the memory write, and then evaluate the
3998 watchpoint expression. We do this by single-stepping the
3999 target.
4000
4001 It may not be necessary to disable the watchpoint to step over
4002 it. For example, the PA can (with some kernel cooperation)
4003 single step over a watchpoint without disabling the watchpoint.
4004
4005 It is far more common to need to disable a watchpoint to step
4006 the inferior over it. If we have non-steppable watchpoints,
4007 we must disable the current watchpoint; it's simplest to
4008 disable all watchpoints and breakpoints. */
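/* Concrete example (illustrative only): with "watch global" in effect,
   a store such as "global = 1" traps before the write has actually
   landed, so evaluating the watchpoint expression now would still see
   the old value; we therefore step that one instruction first and
   re-examine the watchpoint afterwards.  */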
4009 int hw_step = 1;
4010
4011 if (!target_have_steppable_watchpoint)
4012 {
4013 remove_breakpoints ();
4014 /* See comment in resume why we need to stop bypassing signals
4015 while breakpoints have been removed. */
4016 target_pass_signals (0, NULL);
4017 }
4018 /* Single step */
4019 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4020 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4021 waiton_ptid = ecs->ptid;
4022 if (target_have_steppable_watchpoint)
4023 infwait_state = infwait_step_watch_state;
4024 else
4025 infwait_state = infwait_nonstep_watch_state;
4026 prepare_to_wait (ecs);
4027 return;
4028 }
4029
4030 ecs->event_thread->stepping_over_breakpoint = 0;
4031 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4032 ecs->event_thread->control.stop_step = 0;
4033 stop_print_frame = 1;
4034 stopped_by_random_signal = 0;
4035
4036 /* Hide inlined functions starting here, unless we just performed stepi or
4037 nexti. After stepi and nexti, always show the innermost frame (not any
4038 inline function call sites). */
4039 if (ecs->event_thread->control.step_range_end != 1)
4040 {
4041 struct address_space *aspace =
4042 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4043
4044 /* skip_inline_frames is expensive, so we avoid it if we can
4045 determine that the address is one where functions cannot have
4046 been inlined. This improves performance with inferiors that
4047 load a lot of shared libraries, because the solib event
4048 breakpoint is defined as the address of a function (i.e. not
4049 inline). Note that we have to check the previous PC as well
4050 as the current one to catch cases when we have just
4051 single-stepped off a breakpoint prior to reinstating it.
4052 Note that we're assuming that the code we single-step to is
4053 not inline, but that's not definitive: there's nothing
4054 preventing the event breakpoint function from containing
4055 inlined code, and the single-step ending up there. If the
4056 user had set a breakpoint on that inlined code, the missing
4057 skip_inline_frames call would break things. Fortunately
4058 that's an extremely unlikely scenario. */
4059 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4060 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4061 && ecs->event_thread->control.trap_expected
4062 && pc_at_non_inline_function (aspace,
4063 ecs->event_thread->prev_pc,
4064 &ecs->ws)))
4065 {
4066 skip_inline_frames (ecs->ptid);
4067
4068 /* Re-fetch current thread's frame in case that invalidated
4069 the frame cache. */
4070 frame = get_current_frame ();
4071 gdbarch = get_frame_arch (frame);
4072 }
4073 }
4074
4075 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4076 && ecs->event_thread->control.trap_expected
4077 && gdbarch_single_step_through_delay_p (gdbarch)
4078 && currently_stepping (ecs->event_thread))
4079 {
4080 /* We're trying to step off a breakpoint. Turns out that we're
4081 also on an instruction that needs to be stepped multiple
4082 times before it has fully executed. E.g., architectures
4083 with a delay slot. It needs to be stepped twice, once for
4084 the instruction and once for the delay slot. */
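/* For instance (an illustrative aside, not in the original comment):
   on MIPS a taken branch and the instruction in its delay slot must
   both be stepped, so a single hardware step can leave the thread
   only partway through the pair.  */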
4085 int step_through_delay
4086 = gdbarch_single_step_through_delay (gdbarch, frame);
4087
4088 if (debug_infrun && step_through_delay)
4089 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4090 if (ecs->event_thread->control.step_range_end == 0
4091 && step_through_delay)
4092 {
4093 /* The user issued a continue when stopped at a breakpoint.
4094 Set up for another trap and get out of here. */
4095 ecs->event_thread->stepping_over_breakpoint = 1;
4096 keep_going (ecs);
4097 return;
4098 }
4099 else if (step_through_delay)
4100 {
4101 /* The user issued a step when stopped at a breakpoint.
4102 Maybe we should stop, maybe we should not - the delay
4103 slot *might* correspond to a line of source. In any
4104 case, don't decide that here, just set
4105 ecs->stepping_over_breakpoint, making sure we
4106 single-step again before breakpoints are re-inserted. */
4107 ecs->event_thread->stepping_over_breakpoint = 1;
4108 }
4109 }
4110
4111 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4112 handles this event. */
4113 ecs->event_thread->control.stop_bpstat
4114 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4115 stop_pc, ecs->ptid, &ecs->ws);
4116
4117 /* Set this again, in case a breakpoint condition called a
4118 function. */
4119 stop_print_frame = 1;
4120
4121 /* This is where we handle "moribund" watchpoints. Unlike
4122 software breakpoints traps, hardware watchpoint traps are
4123 always distinguishable from random traps. If no high-level
4124 watchpoint is associated with the reported stop data address
4125 anymore, then the bpstat does not explain the signal ---
4126 simply make sure to ignore it if `stopped_by_watchpoint' is
4127 set. */
4128
4129 if (debug_infrun
4130 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4131 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4132 GDB_SIGNAL_TRAP)
4133 && stopped_by_watchpoint)
4134 fprintf_unfiltered (gdb_stdlog,
4135 "infrun: no user watchpoint explains "
4136 "watchpoint SIGTRAP, ignoring\n");
4137
4138 /* NOTE: cagney/2003-03-29: These checks for a random signal
4139 at one stage in the past included checks for an inferior
4140 function call's call dummy's return breakpoint. The original
4141 comment, that went with the test, read:
4142
4143 ``End of a stack dummy. Some systems (e.g. Sony news) give
4144 another signal besides SIGTRAP, so check here as well as
4145 above.''
4146
4147 If someone ever tries to get call dummies on a
4148 non-executable stack to work (where the target would stop
4149 with something like a SIGSEGV), then those tests might need
4150 to be re-instated. Given, however, that the tests were only
4151 enabled when momentary breakpoints were not being used, I
4152 suspect that it won't be the case.
4153
4154 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4155 be necessary for call dummies on a non-executable stack on
4156 SPARC. */
4157
4158 /* See if the breakpoints module can explain the signal. */
4159 random_signal
4160 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4161 ecs->event_thread->suspend.stop_signal);
4162
4163 /* If not, perhaps stepping/nexting can. */
4164 if (random_signal)
4165 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4166 && currently_stepping (ecs->event_thread));
4167
4168 /* Perhaps the thread hit a single-step breakpoint of _another_
4169 thread. Single-step breakpoints are transparent to the
4170 breakpoints module. */
4171 if (random_signal)
4172 random_signal = !ecs->hit_singlestep_breakpoint;
4173
4174 /* No? Perhaps we got a moribund watchpoint. */
4175 if (random_signal)
4176 random_signal = !stopped_by_watchpoint;
4177
4178 /* For the program's own signals, act according to
4179 the signal handling tables. */
4180
4181 if (random_signal)
4182 {
4183 /* Signal not for debugging purposes. */
4184 int printed = 0;
4185 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4186 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4187
4188 if (debug_infrun)
4189 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4190 gdb_signal_to_symbol_string (stop_signal));
4191
4192 stopped_by_random_signal = 1;
4193
4194 if (signal_print[ecs->event_thread->suspend.stop_signal])
4195 {
4196 printed = 1;
4197 target_terminal_ours_for_output ();
4198 print_signal_received_reason
4199 (ecs->event_thread->suspend.stop_signal);
4200 }
4201 /* Always stop on signals if we're either just gaining control
4202 of the program, or the user explicitly requested this thread
4203 to remain stopped. */
4204 if (stop_soon != NO_STOP_QUIETLY
4205 || ecs->event_thread->stop_requested
4206 || (!inf->detaching
4207 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4208 {
4209 stop_stepping (ecs);
4210 return;
4211 }
4212 /* If not going to stop, give terminal back
4213 if we took it away. */
4214 else if (printed)
4215 target_terminal_inferior ();
4216
4217 /* Clear the signal if it should not be passed. */
4218 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4219 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4220
4221 if (ecs->event_thread->prev_pc == stop_pc
4222 && ecs->event_thread->control.trap_expected
4223 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4224 {
4225 /* We were just starting a new sequence, attempting to
4226 single-step off of a breakpoint and expecting a SIGTRAP.
4227 Instead this signal arrives. This signal will take us out
4228 of the stepping range, so GDB needs to remember, when the
4229 signal handler returns, to resume stepping off that
4230 breakpoint. */
4231 /* To simplify things, "continue" is forced to use the same
4232 code paths as single-step - set a breakpoint at the
4233 signal return address and then, once hit, step off that
4234 breakpoint. */
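/* Sketch of the sequence this sets up (illustrative): breakpoint B was
   removed and we single-stepped, but a signal such as SIGPROF arrived
   instead of the expected SIGTRAP; we now plant a step-resume
   breakpoint at the current PC (where the handler will return), clear
   trap_expected so B gets re-inserted, and let the handler run; once
   the step-resume breakpoint is hit we go back to stepping off B.  */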
4235 if (debug_infrun)
4236 fprintf_unfiltered (gdb_stdlog,
4237 "infrun: signal arrived while stepping over "
4238 "breakpoint\n");
4239
4240 insert_hp_step_resume_breakpoint_at_frame (frame);
4241 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4242 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4243 ecs->event_thread->control.trap_expected = 0;
4244
4245 /* If we were nexting/stepping some other thread, switch to
4246 it, so that we don't continue it, losing control. */
4247 if (!switch_back_to_stepped_thread (ecs))
4248 keep_going (ecs);
4249 return;
4250 }
4251
4252 if (ecs->event_thread->control.step_range_end != 0
4253 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4254 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4255 && frame_id_eq (get_stack_frame_id (frame),
4256 ecs->event_thread->control.step_stack_frame_id)
4257 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4258 {
4259 /* The inferior is about to take a signal that will take it
4260 out of the single step range. Set a breakpoint at the
4261 current PC (which is presumably where the signal handler
4262 will eventually return) and then allow the inferior to
4263 run free.
4264
4265 Note that this is only needed for a signal delivered
4266 while in the single-step range. Nested signals aren't a
4267 problem as they eventually all return. */
4268 if (debug_infrun)
4269 fprintf_unfiltered (gdb_stdlog,
4270 "infrun: signal may take us out of "
4271 "single-step range\n");
4272
4273 insert_hp_step_resume_breakpoint_at_frame (frame);
4274 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4275 ecs->event_thread->control.trap_expected = 0;
4276 keep_going (ecs);
4277 return;
4278 }
4279
4280 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4281 when either there's a nested signal, or when there's a
4282 pending signal enabled just as the signal handler returns
4283 (leaving the inferior at the step-resume-breakpoint without
4284 actually executing it). Either way continue until the
4285 breakpoint is really hit. */
4286
4287 if (!switch_back_to_stepped_thread (ecs))
4288 {
4289 if (debug_infrun)
4290 fprintf_unfiltered (gdb_stdlog,
4291 "infrun: random signal, keep going\n");
4292
4293 keep_going (ecs);
4294 }
4295 return;
4296 }
4297
4298 process_event_stop_test (ecs);
4299 }
4300
4301 /* Come here when we've got some debug event / signal we can explain
4302 (IOW, not a random signal), and test whether it should cause a
4303 stop, or whether we should resume the inferior (transparently).
4304 E.g., could be a breakpoint whose condition evaluates false; we
4305 could be still stepping within the line; etc. */
4306
4307 static void
4308 process_event_stop_test (struct execution_control_state *ecs)
4309 {
4310 struct symtab_and_line stop_pc_sal;
4311 struct frame_info *frame;
4312 struct gdbarch *gdbarch;
4313 CORE_ADDR jmp_buf_pc;
4314 struct bpstat_what what;
4315
4316 /* Handle cases caused by hitting a breakpoint. */
4317
4318 frame = get_current_frame ();
4319 gdbarch = get_frame_arch (frame);
4320
4321 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4322
4323 if (what.call_dummy)
4324 {
4325 stop_stack_dummy = what.call_dummy;
4326 }
4327
4328 /* If we hit an internal event that triggers symbol changes, the
4329 current frame will be invalidated within bpstat_what (e.g., if we
4330 hit an internal solib event). Re-fetch it. */
4331 frame = get_current_frame ();
4332 gdbarch = get_frame_arch (frame);
4333
4334 switch (what.main_action)
4335 {
4336 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4337 /* If we hit the breakpoint at longjmp while stepping, we
4338 install a momentary breakpoint at the target of the
4339 jmp_buf. */
4340
4341 if (debug_infrun)
4342 fprintf_unfiltered (gdb_stdlog,
4343 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4344
4345 ecs->event_thread->stepping_over_breakpoint = 1;
4346
4347 if (what.is_longjmp)
4348 {
4349 struct value *arg_value;
4350
4351 /* If we set the longjmp breakpoint via a SystemTap probe,
4352 then use it to extract the arguments. The destination PC
4353 is the third argument to the probe. */
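/* Illustrative assumption about the usual provider: glibc's "longjmp"
   SystemTap probe passes (jmp_buf, return value, target PC), hence
   argument index 2 below for the resume address.  */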
4354 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4355 if (arg_value)
4356 jmp_buf_pc = value_as_address (arg_value);
4357 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4358 || !gdbarch_get_longjmp_target (gdbarch,
4359 frame, &jmp_buf_pc))
4360 {
4361 if (debug_infrun)
4362 fprintf_unfiltered (gdb_stdlog,
4363 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4364 "(!gdbarch_get_longjmp_target)\n");
4365 keep_going (ecs);
4366 return;
4367 }
4368
4369 /* Insert a breakpoint at resume address. */
4370 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4371 }
4372 else
4373 check_exception_resume (ecs, frame);
4374 keep_going (ecs);
4375 return;
4376
4377 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4378 {
4379 struct frame_info *init_frame;
4380
4381 /* There are several cases to consider.
4382
4383 1. The initiating frame no longer exists. In this case we
4384 must stop, because the exception or longjmp has gone too
4385 far.
4386
4387 2. The initiating frame exists, and is the same as the
4388 current frame. We stop, because the exception or longjmp
4389 has been caught.
4390
4391 3. The initiating frame exists and is different from the
4392 current frame. This means the exception or longjmp has
4393 been caught beneath the initiating frame, so keep going.
4394
4395 4. longjmp breakpoint has been placed just to protect
4396 against stale dummy frames and user is not interested in
4397 stopping around longjmps. */
4398
4399 if (debug_infrun)
4400 fprintf_unfiltered (gdb_stdlog,
4401 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4402
4403 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4404 != NULL);
4405 delete_exception_resume_breakpoint (ecs->event_thread);
4406
4407 if (what.is_longjmp)
4408 {
4409 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4410
4411 if (!frame_id_p (ecs->event_thread->initiating_frame))
4412 {
4413 /* Case 4. */
4414 keep_going (ecs);
4415 return;
4416 }
4417 }
4418
4419 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4420
4421 if (init_frame)
4422 {
4423 struct frame_id current_id
4424 = get_frame_id (get_current_frame ());
4425 if (frame_id_eq (current_id,
4426 ecs->event_thread->initiating_frame))
4427 {
4428 /* Case 2. Fall through. */
4429 }
4430 else
4431 {
4432 /* Case 3. */
4433 keep_going (ecs);
4434 return;
4435 }
4436 }
4437
4438 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4439 exists. */
4440 delete_step_resume_breakpoint (ecs->event_thread);
4441
4442 ecs->event_thread->control.stop_step = 1;
4443 print_end_stepping_range_reason ();
4444 stop_stepping (ecs);
4445 }
4446 return;
4447
4448 case BPSTAT_WHAT_SINGLE:
4449 if (debug_infrun)
4450 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4451 ecs->event_thread->stepping_over_breakpoint = 1;
4452 /* Still need to check other stuff, at least the case where we
4453 are stepping and step out of the right range. */
4454 break;
4455
4456 case BPSTAT_WHAT_STEP_RESUME:
4457 if (debug_infrun)
4458 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4459
4460 delete_step_resume_breakpoint (ecs->event_thread);
4461 if (ecs->event_thread->control.proceed_to_finish
4462 && execution_direction == EXEC_REVERSE)
4463 {
4464 struct thread_info *tp = ecs->event_thread;
4465
4466 /* We are finishing a function in reverse, and just hit the
4467 step-resume breakpoint at the start address of the
4468 function, and we're almost there -- just need to back up
4469 by one more single-step, which should take us back to the
4470 function call. */
4471 tp->control.step_range_start = tp->control.step_range_end = 1;
4472 keep_going (ecs);
4473 return;
4474 }
4475 fill_in_stop_func (gdbarch, ecs);
4476 if (stop_pc == ecs->stop_func_start
4477 && execution_direction == EXEC_REVERSE)
4478 {
4479 /* We are stepping over a function call in reverse, and just
4480 hit the step-resume breakpoint at the start address of
4481 the function. Go back to single-stepping, which should
4482 take us back to the function call. */
4483 ecs->event_thread->stepping_over_breakpoint = 1;
4484 keep_going (ecs);
4485 return;
4486 }
4487 break;
4488
4489 case BPSTAT_WHAT_STOP_NOISY:
4490 if (debug_infrun)
4491 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4492 stop_print_frame = 1;
4493
4494 /* We are about to nuke the step_resume_breakpoint via the
4495 cleanup chain, so no need to worry about it here. */
4496
4497 stop_stepping (ecs);
4498 return;
4499
4500 case BPSTAT_WHAT_STOP_SILENT:
4501 if (debug_infrun)
4502 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4503 stop_print_frame = 0;
4504
4505 /* We are about to nuke the step_resume_breakpoint via the
4506 cleanup chain, so no need to worry about it here. */
4507
4508 stop_stepping (ecs);
4509 return;
4510
4511 case BPSTAT_WHAT_HP_STEP_RESUME:
4512 if (debug_infrun)
4513 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4514
4515 delete_step_resume_breakpoint (ecs->event_thread);
4516 if (ecs->event_thread->step_after_step_resume_breakpoint)
4517 {
4518 /* Back when the step-resume breakpoint was inserted, we
4519 were trying to single-step off a breakpoint. Go back to
4520 doing that. */
4521 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4522 ecs->event_thread->stepping_over_breakpoint = 1;
4523 keep_going (ecs);
4524 return;
4525 }
4526 break;
4527
4528 case BPSTAT_WHAT_KEEP_CHECKING:
4529 break;
4530 }
4531
4532 /* We come here if we hit a breakpoint but should not stop for it.
4533 Possibly we also were stepping and should stop for that. So fall
4534 through and test for stepping. But, if not stepping, do not
4535 stop. */
4536
4537 /* In all-stop mode, if we're currently stepping but have stopped in
4538 some other thread, we need to switch back to the stepped thread. */
4539 if (switch_back_to_stepped_thread (ecs))
4540 return;
4541
4542 if (ecs->event_thread->control.step_resume_breakpoint)
4543 {
4544 if (debug_infrun)
4545 fprintf_unfiltered (gdb_stdlog,
4546 "infrun: step-resume breakpoint is inserted\n");
4547
4548 /* Having a step-resume breakpoint overrides anything
4549 else having to do with stepping commands until
4550 that breakpoint is reached. */
4551 keep_going (ecs);
4552 return;
4553 }
4554
4555 if (ecs->event_thread->control.step_range_end == 0)
4556 {
4557 if (debug_infrun)
4558 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4559 /* Likewise if we aren't even stepping. */
4560 keep_going (ecs);
4561 return;
4562 }
4563
4564 /* Re-fetch current thread's frame in case the code above caused
4565 the frame cache to be re-initialized, making our FRAME variable
4566 a dangling pointer. */
4567 frame = get_current_frame ();
4568 gdbarch = get_frame_arch (frame);
4569 fill_in_stop_func (gdbarch, ecs);
4570
4571 /* If stepping through a line, keep going if still within it.
4572
4573 Note that step_range_end is the address of the first instruction
4574 beyond the step range, and NOT the address of the last instruction
4575 within it!
4576
4577 Note also that during reverse execution, we may be stepping
4578 through a function epilogue and therefore must detect when
4579 the current-frame changes in the middle of a line. */
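/* In other words (illustrative): if a source line spans
   [0x1000, 0x1020), then step_range_start is 0x1000 and step_range_end
   is 0x1020, and stopping at 0x1020 already counts as leaving the
   range.  */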
4580
4581 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4582 && (execution_direction != EXEC_REVERSE
4583 || frame_id_eq (get_frame_id (frame),
4584 ecs->event_thread->control.step_frame_id)))
4585 {
4586 if (debug_infrun)
4587 fprintf_unfiltered
4588 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4589 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4590 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4591
4592 /* Tentatively re-enable range stepping; `resume' disables it if
4593 necessary (e.g., if we're stepping over a breakpoint or we
4594 have software watchpoints). */
4595 ecs->event_thread->control.may_range_step = 1;
4596
4597 /* When stepping backward, stop at beginning of line range
4598 (unless it's the function entry point, in which case
4599 keep going back to the call point). */
4600 if (stop_pc == ecs->event_thread->control.step_range_start
4601 && stop_pc != ecs->stop_func_start
4602 && execution_direction == EXEC_REVERSE)
4603 {
4604 ecs->event_thread->control.stop_step = 1;
4605 print_end_stepping_range_reason ();
4606 stop_stepping (ecs);
4607 }
4608 else
4609 keep_going (ecs);
4610
4611 return;
4612 }
4613
4614 /* We stepped out of the stepping range. */
4615
4616 /* If we are stepping at the source level and entered the runtime
4617 loader dynamic symbol resolution code...
4618
4619 EXEC_FORWARD: we keep on single stepping until we exit the run
4620 time loader code and reach the callee's address.
4621
4622 EXEC_REVERSE: we've already executed the callee (backward), and
4623 the runtime loader code is handled just like any other
4624 undebuggable function call. Now we need only keep stepping
4625 backward through the trampoline code, and that's handled further
4626 down, so there is nothing for us to do here. */
4627
4628 if (execution_direction != EXEC_REVERSE
4629 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4630 && in_solib_dynsym_resolve_code (stop_pc))
4631 {
4632 CORE_ADDR pc_after_resolver =
4633 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4634
4635 if (debug_infrun)
4636 fprintf_unfiltered (gdb_stdlog,
4637 "infrun: stepped into dynsym resolve code\n");
4638
4639 if (pc_after_resolver)
4640 {
4641 /* Set up a step-resume breakpoint at the address
4642 indicated by SKIP_SOLIB_RESOLVER. */
4643 struct symtab_and_line sr_sal;
4644
4645 init_sal (&sr_sal);
4646 sr_sal.pc = pc_after_resolver;
4647 sr_sal.pspace = get_frame_program_space (frame);
4648
4649 insert_step_resume_breakpoint_at_sal (gdbarch,
4650 sr_sal, null_frame_id);
4651 }
4652
4653 keep_going (ecs);
4654 return;
4655 }
4656
4657 if (ecs->event_thread->control.step_range_end != 1
4658 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4659 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4660 && get_frame_type (frame) == SIGTRAMP_FRAME)
4661 {
4662 if (debug_infrun)
4663 fprintf_unfiltered (gdb_stdlog,
4664 "infrun: stepped into signal trampoline\n");
4665 /* The inferior, while doing a "step" or "next", has ended up in
4666 a signal trampoline (either by a signal being delivered or by
4667 the signal handler returning). Just single-step until the
4668 inferior leaves the trampoline (either by calling the handler
4669 or returning). */
4670 keep_going (ecs);
4671 return;
4672 }
4673
4674 /* If we're in the return path from a shared library trampoline,
4675 we want to proceed through the trampoline when stepping. */
4676 /* macro/2012-04-25: This needs to come before the subroutine
4677 call check below as on some targets return trampolines look
4678 like subroutine calls (MIPS16 return thunks). */
4679 if (gdbarch_in_solib_return_trampoline (gdbarch,
4680 stop_pc, ecs->stop_func_name)
4681 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4682 {
4683 /* Determine where this trampoline returns. */
4684 CORE_ADDR real_stop_pc;
4685
4686 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4687
4688 if (debug_infrun)
4689 fprintf_unfiltered (gdb_stdlog,
4690 "infrun: stepped into solib return tramp\n");
4691
4692 /* Only proceed through if we know where it's going. */
4693 if (real_stop_pc)
4694 {
4695 /* And put the step-breakpoint there and go until there. */
4696 struct symtab_and_line sr_sal;
4697
4698 init_sal (&sr_sal); /* initialize to zeroes */
4699 sr_sal.pc = real_stop_pc;
4700 sr_sal.section = find_pc_overlay (sr_sal.pc);
4701 sr_sal.pspace = get_frame_program_space (frame);
4702
4703 /* Do not specify what the fp should be when we stop since
4704 on some machines the prologue is where the new fp value
4705 is established. */
4706 insert_step_resume_breakpoint_at_sal (gdbarch,
4707 sr_sal, null_frame_id);
4708
4709 /* Restart without fiddling with the step ranges or
4710 other state. */
4711 keep_going (ecs);
4712 return;
4713 }
4714 }
4715
4716 /* Check for subroutine calls. The check for the current frame
4717 equalling the step ID is not necessary - the check of the
4718 previous frame's ID is sufficient - but it is a common case and
4719 cheaper than checking the previous frame's ID.
4720
4721 NOTE: frame_id_eq will never report two invalid frame IDs as
4722 being equal, so to get into this block, both the current and
4723 previous frame must have valid frame IDs. */
4724 /* The outer_frame_id check is a heuristic to detect stepping
4725 through startup code. If we step over an instruction which
4726 sets the stack pointer from an invalid value to a valid value,
4727 we may detect that as a subroutine call from the mythical
4728 "outermost" function. This could be fixed by marking
4729 outermost frames as !stack_p,code_p,special_p. Then the
4730 initial outermost frame, before sp was valid, would
4731 have code_addr == &_start. See the comment in frame_id_eq
4732 for more. */
4733 if (!frame_id_eq (get_stack_frame_id (frame),
4734 ecs->event_thread->control.step_stack_frame_id)
4735 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4736 ecs->event_thread->control.step_stack_frame_id)
4737 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4738 outer_frame_id)
4739 || step_start_function != find_pc_function (stop_pc))))
4740 {
4741 CORE_ADDR real_stop_pc;
4742
4743 if (debug_infrun)
4744 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4745
4746 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4747 || ((ecs->event_thread->control.step_range_end == 1)
4748 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4749 ecs->stop_func_start)))
4750 {
4751 /* I presume that step_over_calls is only 0 when we're
4752 supposed to be stepping at the assembly language level
4753 ("stepi"). Just stop. */
4754 /* Also, maybe we just did a "nexti" inside a prologue, so we
4755 thought it was a subroutine call but it was not. Stop as
4756 well. FENN */
4757 /* And this works the same backward as frontward. MVS */
4758 ecs->event_thread->control.stop_step = 1;
4759 print_end_stepping_range_reason ();
4760 stop_stepping (ecs);
4761 return;
4762 }
4763
4764 /* Reverse stepping through solib trampolines. */
4765
4766 if (execution_direction == EXEC_REVERSE
4767 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4768 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4769 || (ecs->stop_func_start == 0
4770 && in_solib_dynsym_resolve_code (stop_pc))))
4771 {
4772 /* Any solib trampoline code can be handled in reverse
4773 by simply continuing to single-step. We have already
4774 executed the solib function (backwards), and a few
4775 steps will take us back through the trampoline to the
4776 caller. */
4777 keep_going (ecs);
4778 return;
4779 }
4780
4781 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4782 {
4783 /* We're doing a "next".
4784
4785 Normal (forward) execution: set a breakpoint at the
4786 callee's return address (the address at which the caller
4787 will resume).
4788
4789 Reverse (backward) execution. set the step-resume
4790 breakpoint at the start of the function that we just
4791 stepped into (backwards), and continue to there. When we
4792 get there, we'll need to single-step back to the caller. */
4793
4794 if (execution_direction == EXEC_REVERSE)
4795 {
4796 /* If we're already at the start of the function, we've either
4797 just stepped backward into a single instruction function,
4798 or stepped back out of a signal handler to the first instruction
4799 of the function. Just keep going, which will single-step back
4800 to the caller. */
4801 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4802 {
4803 struct symtab_and_line sr_sal;
4804
4805 /* Normal function call return (static or dynamic). */
4806 init_sal (&sr_sal);
4807 sr_sal.pc = ecs->stop_func_start;
4808 sr_sal.pspace = get_frame_program_space (frame);
4809 insert_step_resume_breakpoint_at_sal (gdbarch,
4810 sr_sal, null_frame_id);
4811 }
4812 }
4813 else
4814 insert_step_resume_breakpoint_at_caller (frame);
4815
4816 keep_going (ecs);
4817 return;
4818 }
4819
4820 /* If we are in a function call trampoline (a stub between the
4821 calling routine and the real function), locate the real
4822 function. That's what tells us (a) whether we want to step
4823 into it at all, and (b) what prologue we want to run to the
4824 end of, if we do step into it. */
4825 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4826 if (real_stop_pc == 0)
4827 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4828 if (real_stop_pc != 0)
4829 ecs->stop_func_start = real_stop_pc;
4830
4831 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4832 {
4833 struct symtab_and_line sr_sal;
4834
4835 init_sal (&sr_sal);
4836 sr_sal.pc = ecs->stop_func_start;
4837 sr_sal.pspace = get_frame_program_space (frame);
4838
4839 insert_step_resume_breakpoint_at_sal (gdbarch,
4840 sr_sal, null_frame_id);
4841 keep_going (ecs);
4842 return;
4843 }
4844
4845 /* If we have line number information for the function we are
4846 thinking of stepping into and the function isn't on the skip
4847 list, step into it.
4848
4849 If there are several symtabs at that PC (e.g. with include
4850 files), just want to know whether *any* of them have line
4851 numbers. find_pc_line handles this. */
4852 {
4853 struct symtab_and_line tmp_sal;
4854
4855 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4856 if (tmp_sal.line != 0
4857 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4858 &tmp_sal))
4859 {
4860 if (execution_direction == EXEC_REVERSE)
4861 handle_step_into_function_backward (gdbarch, ecs);
4862 else
4863 handle_step_into_function (gdbarch, ecs);
4864 return;
4865 }
4866 }
4867
4868 /* If we have no line number and the step-stop-if-no-debug is
4869 set, we stop the step so that the user has a chance to switch
4870 in assembly mode. */
4871 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4872 && step_stop_if_no_debug)
4873 {
4874 ecs->event_thread->control.stop_step = 1;
4875 print_end_stepping_range_reason ();
4876 stop_stepping (ecs);
4877 return;
4878 }
4879
4880 if (execution_direction == EXEC_REVERSE)
4881 {
4882 /* If we're already at the start of the function, we've either just
4883 stepped backward into a single instruction function without line
4884 number info, or stepped back out of a signal handler to the first
4885 instruction of the function without line number info. Just keep
4886 going, which will single-step back to the caller. */
4887 if (ecs->stop_func_start != stop_pc)
4888 {
4889 /* Set a breakpoint at callee's start address.
4890 From there we can step once and be back in the caller. */
4891 struct symtab_and_line sr_sal;
4892
4893 init_sal (&sr_sal);
4894 sr_sal.pc = ecs->stop_func_start;
4895 sr_sal.pspace = get_frame_program_space (frame);
4896 insert_step_resume_breakpoint_at_sal (gdbarch,
4897 sr_sal, null_frame_id);
4898 }
4899 }
4900 else
4901 /* Set a breakpoint at callee's return address (the address
4902 at which the caller will resume). */
4903 insert_step_resume_breakpoint_at_caller (frame);
4904
4905 keep_going (ecs);
4906 return;
4907 }
4908
4909 /* Reverse stepping through solib trampolines. */
4910
4911 if (execution_direction == EXEC_REVERSE
4912 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4913 {
4914 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4915 || (ecs->stop_func_start == 0
4916 && in_solib_dynsym_resolve_code (stop_pc)))
4917 {
4918 /* Any solib trampoline code can be handled in reverse
4919 by simply continuing to single-step. We have already
4920 executed the solib function (backwards), and a few
4921 steps will take us back through the trampoline to the
4922 caller. */
4923 keep_going (ecs);
4924 return;
4925 }
4926 else if (in_solib_dynsym_resolve_code (stop_pc))
4927 {
4928 /* Stepped backward into the solib dynsym resolver.
4929 Set a breakpoint at its start and continue, then
4930 one more step will take us out. */
4931 struct symtab_and_line sr_sal;
4932
4933 init_sal (&sr_sal);
4934 sr_sal.pc = ecs->stop_func_start;
4935 sr_sal.pspace = get_frame_program_space (frame);
4936 insert_step_resume_breakpoint_at_sal (gdbarch,
4937 sr_sal, null_frame_id);
4938 keep_going (ecs);
4939 return;
4940 }
4941 }
4942
4943 stop_pc_sal = find_pc_line (stop_pc, 0);
4944
4945 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4946 the trampoline processing logic; however, there are some trampolines
4947 that have no names, so we should do trampoline handling first. */
4948 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4949 && ecs->stop_func_name == NULL
4950 && stop_pc_sal.line == 0)
4951 {
4952 if (debug_infrun)
4953 fprintf_unfiltered (gdb_stdlog,
4954 "infrun: stepped into undebuggable function\n");
4955
4956 /* The inferior just stepped into, or returned to, an
4957 undebuggable function (where there is no debugging information
4958 and no line number corresponding to the address where the
4959 inferior stopped). Since we want to skip this kind of code,
4960 we keep going until the inferior returns from this
4961 function - unless the user has asked us not to (via
4962 set step-mode) or we no longer know how to get back
4963 to the call site. */
4964 if (step_stop_if_no_debug
4965 || !frame_id_p (frame_unwind_caller_id (frame)))
4966 {
4967 /* If we have no line number and the step-stop-if-no-debug
4968 is set, we stop the step so that the user has a chance to
4969 switch to assembly mode. */
4970 ecs->event_thread->control.stop_step = 1;
4971 print_end_stepping_range_reason ();
4972 stop_stepping (ecs);
4973 return;
4974 }
4975 else
4976 {
4977 /* Set a breakpoint at callee's return address (the address
4978 at which the caller will resume). */
4979 insert_step_resume_breakpoint_at_caller (frame);
4980 keep_going (ecs);
4981 return;
4982 }
4983 }
4984
4985 if (ecs->event_thread->control.step_range_end == 1)
4986 {
4987 /* It is stepi or nexti. We always want to stop stepping after
4988 one instruction. */
4989 if (debug_infrun)
4990 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4991 ecs->event_thread->control.stop_step = 1;
4992 print_end_stepping_range_reason ();
4993 stop_stepping (ecs);
4994 return;
4995 }
4996
4997 if (stop_pc_sal.line == 0)
4998 {
4999 /* We have no line number information. That means we should stop
5000 stepping (does this always happen right after one instruction,
5001 when we do "s" in a function with no line numbers,
5002 or can this happen as a result of a return or longjmp?). */
5003 if (debug_infrun)
5004 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5005 ecs->event_thread->control.stop_step = 1;
5006 print_end_stepping_range_reason ();
5007 stop_stepping (ecs);
5008 return;
5009 }
5010
5011 /* Look for "calls" to inlined functions, part one. If the inline
5012 frame machinery detected some skipped call sites, we have entered
5013 a new inline function. */
5014
5015 if (frame_id_eq (get_frame_id (get_current_frame ()),
5016 ecs->event_thread->control.step_frame_id)
5017 && inline_skipped_frames (ecs->ptid))
5018 {
5019 struct symtab_and_line call_sal;
5020
5021 if (debug_infrun)
5022 fprintf_unfiltered (gdb_stdlog,
5023 "infrun: stepped into inlined function\n");
5024
5025 find_frame_sal (get_current_frame (), &call_sal);
5026
5027 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5028 {
5029 /* For "step", we're going to stop. But if the call site
5030 for this inlined function is on the same source line as
5031 we were previously stepping, go down into the function
5032 first. Otherwise stop at the call site. */
5033
5034 if (call_sal.line == ecs->event_thread->current_line
5035 && call_sal.symtab == ecs->event_thread->current_symtab)
5036 step_into_inline_frame (ecs->ptid);
5037
5038 ecs->event_thread->control.stop_step = 1;
5039 print_end_stepping_range_reason ();
5040 stop_stepping (ecs);
5041 return;
5042 }
5043 else
5044 {
5045 /* For "next", we should stop at the call site if it is on a
5046 different source line. Otherwise continue through the
5047 inlined function. */
5048 if (call_sal.line == ecs->event_thread->current_line
5049 && call_sal.symtab == ecs->event_thread->current_symtab)
5050 keep_going (ecs);
5051 else
5052 {
5053 ecs->event_thread->control.stop_step = 1;
5054 print_end_stepping_range_reason ();
5055 stop_stepping (ecs);
5056 }
5057 return;
5058 }
5059 }
5060
5061 /* Look for "calls" to inlined functions, part two. If we are still
5062 in the same real function we were stepping through, but we have
5063 to go further up to find the exact frame ID, we are stepping
5064 through a more inlined call beyond its call site. */
5065
5066 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5067 && !frame_id_eq (get_frame_id (get_current_frame ()),
5068 ecs->event_thread->control.step_frame_id)
5069 && stepped_in_from (get_current_frame (),
5070 ecs->event_thread->control.step_frame_id))
5071 {
5072 if (debug_infrun)
5073 fprintf_unfiltered (gdb_stdlog,
5074 "infrun: stepping through inlined function\n");
5075
5076 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5077 keep_going (ecs);
5078 else
5079 {
5080 ecs->event_thread->control.stop_step = 1;
5081 print_end_stepping_range_reason ();
5082 stop_stepping (ecs);
5083 }
5084 return;
5085 }
5086
5087 if ((stop_pc == stop_pc_sal.pc)
5088 && (ecs->event_thread->current_line != stop_pc_sal.line
5089 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5090 {
5091 /* We are at the start of a different line. So stop. Note that
5092 we don't stop if we step into the middle of a different line.
5093 That is said to make things like for (;;) statements work
5094 better. */
5095 if (debug_infrun)
5096 fprintf_unfiltered (gdb_stdlog,
5097 "infrun: stepped to a different line\n");
5098 ecs->event_thread->control.stop_step = 1;
5099 print_end_stepping_range_reason ();
5100 stop_stepping (ecs);
5101 return;
5102 }
5103
5104 /* We aren't done stepping.
5105
5106 Optimize by setting the stepping range to the line.
5107 (We might not be in the original line, but if we entered a
5108 new line in mid-statement, we continue stepping. This makes
5109 things like for(;;) statements work better.) */
5110
5111 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5112 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5113 ecs->event_thread->control.may_range_step = 1;
5114 set_step_info (frame, stop_pc_sal);
5115
5116 if (debug_infrun)
5117 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5118 keep_going (ecs);
5119 }
5120
5121 /* In all-stop mode, if we're currently stepping but have stopped in
5122 some other thread, we may need to switch back to the stepped
5123 thread. Returns true if we set the inferior running, false if we left
5124 it stopped (and the event needs further processing). */
5125
5126 static int
5127 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5128 {
5129 if (!non_stop)
5130 {
5131 struct thread_info *tp;
5132
5133 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
5134 ecs->event_thread);
5135 if (tp)
5136 {
5137 struct frame_info *frame;
5138 struct gdbarch *gdbarch;
5139
5140 /* However, if the current thread is blocked on some internal
5141 breakpoint, and we simply need to step over that breakpoint
5142 to get it going again, do that first. */
5143 if ((ecs->event_thread->control.trap_expected
5144 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5145 || ecs->event_thread->stepping_over_breakpoint
5146 || ecs->hit_singlestep_breakpoint)
5147 {
5148 keep_going (ecs);
5149 return 1;
5150 }
5151
5152 /* If the stepping thread exited, then don't try to switch
5153 back and resume it, which could fail in several different
5154 ways depending on the target. Instead, just keep going.
5155
5156 We can find a stepping dead thread in the thread list in
5157 two cases:
5158
5159 - The target supports thread exit events, and when the
5160 target tries to delete the thread from the thread list,
5161 inferior_ptid pointed at the exiting thread. In such a
5162 case, calling delete_thread does not really remove the
5163 thread from the list; instead, the thread is left listed,
5164 with 'exited' state.
5165
5166 - The target's debug interface does not support thread
5167 exit events, and so we have no idea whatsoever if the
5168 previously stepping thread is still alive. For that
5169 reason, we need to synchronously query the target
5170 now. */
5171 if (is_exited (tp->ptid)
5172 || !target_thread_alive (tp->ptid))
5173 {
5174 if (debug_infrun)
5175 fprintf_unfiltered (gdb_stdlog,
5176 "infrun: not switching back to "
5177 "stepped thread, it has vanished\n");
5178
5179 delete_thread (tp->ptid);
5180 keep_going (ecs);
5181 return 1;
5182 }
5183
5184 /* Otherwise, we no longer expect a trap in the current thread.
5185 Clear the trap_expected flag before switching back -- this is
5186 what keep_going would do as well, if we called it. */
5187 ecs->event_thread->control.trap_expected = 0;
5188
5189 if (debug_infrun)
5190 fprintf_unfiltered (gdb_stdlog,
5191 "infrun: switching back to stepped thread\n");
5192
5193 ecs->event_thread = tp;
5194 ecs->ptid = tp->ptid;
5195 context_switch (ecs->ptid);
5196
5197 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5198 frame = get_current_frame ();
5199 gdbarch = get_frame_arch (frame);
5200
5201 /* If the PC of the thread we were trying to single-step has
5202 changed, then the thread we were trying to single-step
5203 has trapped or been signalled, but the event has not been
5204 reported to GDB yet. Re-poll the remote looking for this
5205 particular thread (i.e. temporarily enable schedlock) by:
5206
5207 - setting a break at the current PC
5208 - resuming that particular thread, only (by setting
5209 trap expected)
5210
5211 This prevents us from continuously moving the single-step
5212 breakpoint forward, one instruction at a time,
5213 overstepping. */
5214
5215 if (gdbarch_software_single_step_p (gdbarch)
5216 && stop_pc != tp->prev_pc)
5217 {
5218 if (debug_infrun)
5219 fprintf_unfiltered (gdb_stdlog,
5220 "infrun: expected thread advanced also\n");
5221
5222 insert_single_step_breakpoint (get_frame_arch (frame),
5223 get_frame_address_space (frame),
5224 stop_pc);
5225 singlestep_breakpoints_inserted_p = 1;
5226 ecs->event_thread->control.trap_expected = 1;
5227 singlestep_ptid = inferior_ptid;
5228 singlestep_pc = stop_pc;
5229
5230 resume (0, GDB_SIGNAL_0);
5231 prepare_to_wait (ecs);
5232 }
5233 else
5234 {
5235 if (debug_infrun)
5236 fprintf_unfiltered (gdb_stdlog,
5237 "infrun: expected thread still "
5238 "hasn't advanced\n");
5239 keep_going (ecs);
5240 }
5241
5242 return 1;
5243 }
5244 }
5245 return 0;
5246 }
5247
5248 /* Is thread TP in the middle of single-stepping? */
5249
5250 static int
5251 currently_stepping (struct thread_info *tp)
5252 {
5253 return ((tp->control.step_range_end
5254 && tp->control.step_resume_breakpoint == NULL)
5255 || tp->control.trap_expected
5256 || bpstat_should_step ());
5257 }
5258
5259 /* Returns true if any thread *but* the one passed in "data" is in the
5260 middle of stepping or of handling a "next". */
5261
5262 static int
5263 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5264 {
5265 if (tp == data)
5266 return 0;
5267
5268 return (tp->control.step_range_end
5269 || tp->control.trap_expected);
5270 }
5271
5272 /* Inferior has stepped into a subroutine call with source code that
5273 we should not step over. Step to the first line of code in
5274 it. */
5275
5276 static void
5277 handle_step_into_function (struct gdbarch *gdbarch,
5278 struct execution_control_state *ecs)
5279 {
5280 struct symtab *s;
5281 struct symtab_and_line stop_func_sal, sr_sal;
5282
5283 fill_in_stop_func (gdbarch, ecs);
5284
5285 s = find_pc_symtab (stop_pc);
5286 if (s && s->language != language_asm)
5287 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5288 ecs->stop_func_start);
5289
5290 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5291 /* Use the step_resume_break to step until the end of the prologue,
5292 even if that involves jumps (as it seems to on the vax under
5293 4.2). */
5294 /* If the prologue ends in the middle of a source line, continue to
5295 the end of that source line (if it is still within the function).
5296 Otherwise, just go to end of prologue. */
5297 if (stop_func_sal.end
5298 && stop_func_sal.pc != ecs->stop_func_start
5299 && stop_func_sal.end < ecs->stop_func_end)
5300 ecs->stop_func_start = stop_func_sal.end;
5301
5302 /* Architectures which require breakpoint adjustment might not be able
5303 to place a breakpoint at the computed address. If so, the test
5304 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5305 ecs->stop_func_start to an address at which a breakpoint may be
5306 legitimately placed.
5307
5308 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5309 made, GDB will enter an infinite loop when stepping through
5310 optimized code consisting of VLIW instructions which contain
5311 subinstructions corresponding to different source lines. On
5312 FR-V, it's not permitted to place a breakpoint on any but the
5313 first subinstruction of a VLIW instruction. When a breakpoint is
5314 set, GDB will adjust the breakpoint address to the beginning of
5315 the VLIW instruction. Thus, we need to make the corresponding
5316 adjustment here when computing the stop address. */
5317
5318 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5319 {
5320 ecs->stop_func_start
5321 = gdbarch_adjust_breakpoint_address (gdbarch,
5322 ecs->stop_func_start);
5323 }
5324
5325 if (ecs->stop_func_start == stop_pc)
5326 {
5327 /* We are already there: stop now. */
5328 ecs->event_thread->control.stop_step = 1;
5329 print_end_stepping_range_reason ();
5330 stop_stepping (ecs);
5331 return;
5332 }
5333 else
5334 {
5335 /* Put the step-breakpoint there and go until there. */
5336 init_sal (&sr_sal); /* initialize to zeroes */
5337 sr_sal.pc = ecs->stop_func_start;
5338 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5339 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5340
5341 /* Do not specify what the fp should be when we stop since on
5342 some machines the prologue is where the new fp value is
5343 established. */
5344 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5345
5346 /* And make sure stepping stops right away then. */
5347 ecs->event_thread->control.step_range_end
5348 = ecs->event_thread->control.step_range_start;
5349 }
5350 keep_going (ecs);
5351 }
5352
5353 /* Inferior has stepped backward into a subroutine call with source
5354 code that we should not step over. Step to the beginning of the
5355 last line of code in it. */
5356
5357 static void
5358 handle_step_into_function_backward (struct gdbarch *gdbarch,
5359 struct execution_control_state *ecs)
5360 {
5361 struct symtab *s;
5362 struct symtab_and_line stop_func_sal;
5363
5364 fill_in_stop_func (gdbarch, ecs);
5365
5366 s = find_pc_symtab (stop_pc);
5367 if (s && s->language != language_asm)
5368 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5369 ecs->stop_func_start);
5370
5371 stop_func_sal = find_pc_line (stop_pc, 0);
5372
5373 /* OK, we're just going to keep stepping here. */
5374 if (stop_func_sal.pc == stop_pc)
5375 {
5376 /* We're there already. Just stop stepping now. */
5377 ecs->event_thread->control.stop_step = 1;
5378 print_end_stepping_range_reason ();
5379 stop_stepping (ecs);
5380 }
5381 else
5382 {
5383 /* Else just reset the step range and keep going.
5384 No step-resume breakpoint; they don't work for
5385 epilogues, which can have multiple entry paths. */
5386 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5387 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5388 keep_going (ecs);
5389 }
5390 return;
5391 }
5392
5393 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5394 This is used both to skip functions and to skip over code. */
5395
5396 static void
5397 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5398 struct symtab_and_line sr_sal,
5399 struct frame_id sr_id,
5400 enum bptype sr_type)
5401 {
5402 /* There should never be more than one step-resume or longjmp-resume
5403 breakpoint per thread, so we should never be setting a new
5404 step_resume_breakpoint when one is already active. */
5405 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5406 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5407
5408 if (debug_infrun)
5409 fprintf_unfiltered (gdb_stdlog,
5410 "infrun: inserting step-resume breakpoint at %s\n",
5411 paddress (gdbarch, sr_sal.pc));
5412
5413 inferior_thread ()->control.step_resume_breakpoint
5414 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5415 }
5416
5417 void
5418 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5419 struct symtab_and_line sr_sal,
5420 struct frame_id sr_id)
5421 {
5422 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5423 sr_sal, sr_id,
5424 bp_step_resume);
5425 }
5426
5427 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5428 This is used to skip a potential signal handler.
5429
5430 This is called with the interrupted function's frame. The signal
5431 handler, when it returns, will resume the interrupted function at
5432 RETURN_FRAME.pc. */
5433
5434 static void
5435 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5436 {
5437 struct symtab_and_line sr_sal;
5438 struct gdbarch *gdbarch;
5439
5440 gdb_assert (return_frame != NULL);
5441 init_sal (&sr_sal); /* initialize to zeros */
5442
5443 gdbarch = get_frame_arch (return_frame);
5444 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5445 sr_sal.section = find_pc_overlay (sr_sal.pc);
5446 sr_sal.pspace = get_frame_program_space (return_frame);
5447
5448 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5449 get_stack_frame_id (return_frame),
5450 bp_hp_step_resume);
5451 }
5452
5453 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5454 is used to skip a function after stepping into it (for "next" or if
5455 the called function has no debugging information).
5456
5457 The current function has almost always been reached by single
5458 stepping a call or return instruction. NEXT_FRAME belongs to the
5459 current function, and the breakpoint will be set at the caller's
5460 resume address.
5461
5462 This is a separate function rather than reusing
5463 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5464 get_prev_frame, which may stop prematurely (see the implementation
5465 of frame_unwind_caller_id for an example). */
5466
5467 static void
5468 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5469 {
5470 struct symtab_and_line sr_sal;
5471 struct gdbarch *gdbarch;
5472
5473 /* We shouldn't have gotten here if we don't know where the call site
5474 is. */
5475 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5476
5477 init_sal (&sr_sal); /* initialize to zeros */
5478
5479 gdbarch = frame_unwind_caller_arch (next_frame);
5480 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5481 frame_unwind_caller_pc (next_frame));
5482 sr_sal.section = find_pc_overlay (sr_sal.pc);
5483 sr_sal.pspace = frame_unwind_program_space (next_frame);
5484
5485 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5486 frame_unwind_caller_id (next_frame));
5487 }
5488
5489 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5490 new breakpoint at the target of a jmp_buf. The handling of
5491 longjmp-resume uses the same mechanisms used for handling
5492 "step-resume" breakpoints. */
5493
5494 static void
5495 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5496 {
5497 /* There should never be more than one longjmp-resume breakpoint per
5498 thread, so we should never be setting a new
5499 longjmp_resume_breakpoint when one is already active. */
5500 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5501
5502 if (debug_infrun)
5503 fprintf_unfiltered (gdb_stdlog,
5504 "infrun: inserting longjmp-resume breakpoint at %s\n",
5505 paddress (gdbarch, pc));
5506
5507 inferior_thread ()->control.exception_resume_breakpoint =
5508 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5509 }
5510
5511 /* Insert an exception resume breakpoint. TP is the thread throwing
5512 the exception. The block B is the block of the unwinder debug hook
5513 function. FRAME is the frame corresponding to the call to this
5514 function. SYM is the symbol of the function argument holding the
5515 target PC of the exception. */
5516
5517 static void
5518 insert_exception_resume_breakpoint (struct thread_info *tp,
5519 struct block *b,
5520 struct frame_info *frame,
5521 struct symbol *sym)
5522 {
5523 volatile struct gdb_exception e;
5524
5525 /* We want to ignore errors here. */
5526 TRY_CATCH (e, RETURN_MASK_ERROR)
5527 {
5528 struct symbol *vsym;
5529 struct value *value;
5530 CORE_ADDR handler;
5531 struct breakpoint *bp;
5532
5533 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5534 value = read_var_value (vsym, frame);
5535 /* If the value was optimized out, revert to the old behavior. */
5536 if (! value_optimized_out (value))
5537 {
5538 handler = value_as_address (value);
5539
5540 if (debug_infrun)
5541 fprintf_unfiltered (gdb_stdlog,
5542 "infrun: exception resume at %lx\n",
5543 (unsigned long) handler);
5544
5545 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5546 handler, bp_exception_resume);
5547
5548 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5549 frame = NULL;
5550
5551 bp->thread = tp->num;
5552 inferior_thread ()->control.exception_resume_breakpoint = bp;
5553 }
5554 }
5555 }
5556
5557 /* A helper for check_exception_resume that sets an
5558 exception-resume breakpoint based on a SystemTap probe. */
5559
5560 static void
5561 insert_exception_resume_from_probe (struct thread_info *tp,
5562 const struct bound_probe *probe,
5563 struct frame_info *frame)
5564 {
5565 struct value *arg_value;
5566 CORE_ADDR handler;
5567 struct breakpoint *bp;
5568
5569 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5570 if (!arg_value)
5571 return;
5572
5573 handler = value_as_address (arg_value);
5574
5575 if (debug_infrun)
5576 fprintf_unfiltered (gdb_stdlog,
5577 "infrun: exception resume at %s\n",
5578 paddress (get_objfile_arch (probe->objfile),
5579 handler));
5580
5581 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5582 handler, bp_exception_resume);
5583 bp->thread = tp->num;
5584 inferior_thread ()->control.exception_resume_breakpoint = bp;
5585 }
5586
5587 /* This is called when an exception has been intercepted. Check to
5588 see whether the exception's destination is of interest, and if so,
5589 set an exception resume breakpoint there. */
5590
5591 static void
5592 check_exception_resume (struct execution_control_state *ecs,
5593 struct frame_info *frame)
5594 {
5595 volatile struct gdb_exception e;
5596 struct bound_probe probe;
5597 struct symbol *func;
5598
5599 /* First see if this exception unwinding breakpoint was set via a
5600 SystemTap probe point. If so, the probe has two arguments: the
5601 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5602 set a breakpoint there. */
5603 probe = find_probe_by_pc (get_frame_pc (frame));
5604 if (probe.probe)
5605 {
5606 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5607 return;
5608 }
5609
5610 func = get_frame_function (frame);
5611 if (!func)
5612 return;
5613
5614 TRY_CATCH (e, RETURN_MASK_ERROR)
5615 {
5616 struct block *b;
5617 struct block_iterator iter;
5618 struct symbol *sym;
5619 int argno = 0;
5620
5621 /* The exception breakpoint is a thread-specific breakpoint on
5622 the unwinder's debug hook, declared as:
5623
5624 void _Unwind_DebugHook (void *cfa, void *handler);
5625
5626 The CFA argument indicates the frame to which control is
5627 about to be transferred. HANDLER is the destination PC.
5628
5629 We ignore the CFA and set a temporary breakpoint at HANDLER.
5630 This is not extremely efficient but it avoids issues in gdb
5631 with computing the DWARF CFA, and it also works even in weird
5632 cases such as throwing an exception from inside a signal
5633 handler. */
5634
5635 b = SYMBOL_BLOCK_VALUE (func);
5636 ALL_BLOCK_SYMBOLS (b, iter, sym)
5637 {
5638 if (!SYMBOL_IS_ARGUMENT (sym))
5639 continue;
5640
5641 if (argno == 0)
5642 ++argno;
5643 else
5644 {
5645 insert_exception_resume_breakpoint (ecs->event_thread,
5646 b, frame, sym);
5647 break;
5648 }
5649 }
5650 }
5651 }
5652
5653 static void
5654 stop_stepping (struct execution_control_state *ecs)
5655 {
5656 if (debug_infrun)
5657 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5658
5659 clear_step_over_info ();
5660
5661 /* Let callers know we don't want to wait for the inferior anymore. */
5662 ecs->wait_some_more = 0;
5663 }
5664
5665 /* Called when we should continue running the inferior, because the
5666 current event doesn't cause a user visible stop. This does the
5667 resuming part; waiting for the next event is done elsewhere. */
5668
5669 static void
5670 keep_going (struct execution_control_state *ecs)
5671 {
5672 /* Make sure normal_stop is called if we get a QUIT handled before
5673 reaching resume. */
5674 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5675
5676 /* Save the pc before execution, to compare with pc after stop. */
5677 ecs->event_thread->prev_pc
5678 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5679
5680 if (ecs->event_thread->control.trap_expected
5681 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5682 {
5683 /* We haven't yet gotten our trap, and either: intercepted a
5684 non-signal event (e.g., a fork); or took a signal which we
5685 are supposed to pass through to the inferior. Simply
5686 continue. */
5687 discard_cleanups (old_cleanups);
5688 resume (currently_stepping (ecs->event_thread),
5689 ecs->event_thread->suspend.stop_signal);
5690 }
5691 else
5692 {
5693 volatile struct gdb_exception e;
5694 struct regcache *regcache = get_current_regcache ();
5695
5696 /* Either the trap was not expected, but we are continuing
5697 anyway (if we got a signal, the user asked it be passed to
5698 the child)
5699 -- or --
5700 We got our expected trap, but decided we should resume from
5701 it.
5702
5703 We're going to run this baby now!
5704
5705 Note that insert_breakpoints won't try to re-insert
5706 already inserted breakpoints. Therefore, we don't
5707 care if breakpoints were already inserted, or not. */
5708
5709 /* If we need to step over a breakpoint, and we're not using
5710 displaced stepping to do so, insert all breakpoints
5711 (watchpoints, etc.) but the one we're stepping over, step one
5712 instruction, and then re-insert the breakpoint when that step
5713 is finished. */
5714 if ((ecs->hit_singlestep_breakpoint
5715 || ecs->event_thread->stepping_over_breakpoint)
5716 && !use_displaced_stepping (get_regcache_arch (regcache)))
5717 {
5718 set_step_over_info (get_regcache_aspace (regcache),
5719 regcache_read_pc (regcache));
5720 }
5721 else
5722 clear_step_over_info ();
5723
5724 /* Stop stepping if inserting breakpoints fails. */
5725 TRY_CATCH (e, RETURN_MASK_ERROR)
5726 {
5727 insert_breakpoints ();
5728 }
5729 if (e.reason < 0)
5730 {
5731 exception_print (gdb_stderr, e);
5732 stop_stepping (ecs);
5733 return;
5734 }
5735
5736 ecs->event_thread->control.trap_expected
5737 = (ecs->event_thread->stepping_over_breakpoint
5738 || ecs->hit_singlestep_breakpoint);
5739
5740 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5741 explicitly specifies that such a signal should be delivered
5742 to the target program). Typically, that would occur when a
5743 user is debugging a target monitor on a simulator: the target
5744 monitor sets a breakpoint; the simulator encounters this
5745 breakpoint and halts the simulation handing control to GDB;
5746 GDB, noting that the stop address doesn't map to any known
5747 breakpoint, returns control back to the simulator; the
5748 simulator then delivers the hardware equivalent of a
5749 GDB_SIGNAL_TRAP to the program being debugged. */
5750 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5751 && !signal_program[ecs->event_thread->suspend.stop_signal])
5752 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5753
5754 discard_cleanups (old_cleanups);
5755 resume (currently_stepping (ecs->event_thread),
5756 ecs->event_thread->suspend.stop_signal);
5757 }
5758
5759 prepare_to_wait (ecs);
5760 }
5761
5762 /* This function normally comes after a resume, before
5763 handle_inferior_event exits. It takes care of any last bits of
5764 housekeeping, and sets the all-important wait_some_more flag. */
5765
5766 static void
5767 prepare_to_wait (struct execution_control_state *ecs)
5768 {
5769 if (debug_infrun)
5770 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5771
5772 /* This is the old end of the while loop. Let everybody know we
5773 want to wait for the inferior some more and get called again
5774 soon. */
5775 ecs->wait_some_more = 1;
5776 }
5777
5778 /* Several print_*_reason functions to print why the inferior has stopped.
5779 We always print something when the inferior exits, or receives a signal.
5780 The rest of the cases are dealt with later on in normal_stop and
5781 print_it_typical. Ideally there should be a call to one of these
5782 print_*_reason functions from handle_inferior_event each time
5783 stop_stepping is called. */
5784
5785 /* Print why the inferior has stopped.
5786 We are done with a step/next/si/ni command; print why the inferior has
5787 stopped. For now the CLI prints nothing; an MI "reason" field is emitted
5788 only if we are not in the middle of doing a "step n" operation for n > 1. */
5789
5790 static void
5791 print_end_stepping_range_reason (void)
5792 {
5793 if ((!inferior_thread ()->step_multi
5794 || !inferior_thread ()->control.stop_step)
5795 && ui_out_is_mi_like_p (current_uiout))
5796 ui_out_field_string (current_uiout, "reason",
5797 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5798 }
5799
5800 /* The inferior was terminated by a signal, print why it stopped. */
5801
5802 static void
5803 print_signal_exited_reason (enum gdb_signal siggnal)
5804 {
5805 struct ui_out *uiout = current_uiout;
5806
5807 annotate_signalled ();
5808 if (ui_out_is_mi_like_p (uiout))
5809 ui_out_field_string
5810 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5811 ui_out_text (uiout, "\nProgram terminated with signal ");
5812 annotate_signal_name ();
5813 ui_out_field_string (uiout, "signal-name",
5814 gdb_signal_to_name (siggnal));
5815 annotate_signal_name_end ();
5816 ui_out_text (uiout, ", ");
5817 annotate_signal_string ();
5818 ui_out_field_string (uiout, "signal-meaning",
5819 gdb_signal_to_string (siggnal));
5820 annotate_signal_string_end ();
5821 ui_out_text (uiout, ".\n");
5822 ui_out_text (uiout, "The program no longer exists.\n");
5823 }
5824
5825 /* The inferior program is finished, print why it stopped. */
5826
5827 static void
5828 print_exited_reason (int exitstatus)
5829 {
5830 struct inferior *inf = current_inferior ();
5831 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5832 struct ui_out *uiout = current_uiout;
5833
5834 annotate_exited (exitstatus);
5835 if (exitstatus)
5836 {
5837 if (ui_out_is_mi_like_p (uiout))
5838 ui_out_field_string (uiout, "reason",
5839 async_reason_lookup (EXEC_ASYNC_EXITED));
5840 ui_out_text (uiout, "[Inferior ");
5841 ui_out_text (uiout, plongest (inf->num));
5842 ui_out_text (uiout, " (");
5843 ui_out_text (uiout, pidstr);
5844 ui_out_text (uiout, ") exited with code ");
5845 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5846 ui_out_text (uiout, "]\n");
5847 }
5848 else
5849 {
5850 if (ui_out_is_mi_like_p (uiout))
5851 ui_out_field_string
5852 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5853 ui_out_text (uiout, "[Inferior ");
5854 ui_out_text (uiout, plongest (inf->num));
5855 ui_out_text (uiout, " (");
5856 ui_out_text (uiout, pidstr);
5857 ui_out_text (uiout, ") exited normally]\n");
5858 }
5859 /* Support the --return-child-result option. */
5860 return_child_result_value = exitstatus;
5861 }
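/* For example, the branches above produce output such as
   "[Inferior 1 (process 4242) exited with code 01]" or
   "[Inferior 1 (process 4242) exited normally]"; the exact pid string
   comes from target_pid_to_str, so its form depends on the target.  */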
5862
5863 /* Signal received, print why the inferior has stopped. The signal table
5864 tells us to print about it. */
5865
5866 static void
5867 print_signal_received_reason (enum gdb_signal siggnal)
5868 {
5869 struct ui_out *uiout = current_uiout;
5870
5871 annotate_signal ();
5872
5873 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5874 {
5875 struct thread_info *t = inferior_thread ();
5876
5877 ui_out_text (uiout, "\n[");
5878 ui_out_field_string (uiout, "thread-name",
5879 target_pid_to_str (t->ptid));
5880 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5881 ui_out_text (uiout, " stopped");
5882 }
5883 else
5884 {
5885 ui_out_text (uiout, "\nProgram received signal ");
5886 annotate_signal_name ();
5887 if (ui_out_is_mi_like_p (uiout))
5888 ui_out_field_string
5889 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5890 ui_out_field_string (uiout, "signal-name",
5891 gdb_signal_to_name (siggnal));
5892 annotate_signal_name_end ();
5893 ui_out_text (uiout, ", ");
5894 annotate_signal_string ();
5895 ui_out_field_string (uiout, "signal-meaning",
5896 gdb_signal_to_string (siggnal));
5897 annotate_signal_string_end ();
5898 }
5899 ui_out_text (uiout, ".\n");
5900 }
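/* For example, on a CLI console a stop for SIGINT prints
   "Program received signal SIGINT, Interrupt."
   while the GDB_SIGNAL_0 branch above prints the
   "[<thread name>] #<num> stopped" form instead.  */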
5901
5902 /* Reverse execution: target ran out of history info, print why the inferior
5903 has stopped. */
5904
5905 static void
5906 print_no_history_reason (void)
5907 {
5908 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5909 }
5910
5911 /* Print current location without a level number, if we have changed
5912 functions or hit a breakpoint. Print source line if we have one.
5913 bpstat_print contains the logic deciding in detail what to print,
5914 based on the event(s) that just occurred. */
5915
5916 void
5917 print_stop_event (struct target_waitstatus *ws)
5918 {
5919 int bpstat_ret;
5920 int source_flag;
5921 int do_frame_printing = 1;
5922 struct thread_info *tp = inferior_thread ();
5923
5924 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
5925 switch (bpstat_ret)
5926 {
5927 case PRINT_UNKNOWN:
5928 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
5929 should) carry around the function and does (or should) use
5930 that when doing a frame comparison. */
5931 if (tp->control.stop_step
5932 && frame_id_eq (tp->control.step_frame_id,
5933 get_frame_id (get_current_frame ()))
5934 && step_start_function == find_pc_function (stop_pc))
5935 {
5936 /* Finished step, just print source line. */
5937 source_flag = SRC_LINE;
5938 }
5939 else
5940 {
5941 /* Print location and source line. */
5942 source_flag = SRC_AND_LOC;
5943 }
5944 break;
5945 case PRINT_SRC_AND_LOC:
5946 /* Print location and source line. */
5947 source_flag = SRC_AND_LOC;
5948 break;
5949 case PRINT_SRC_ONLY:
5950 source_flag = SRC_LINE;
5951 break;
5952 case PRINT_NOTHING:
5953 /* Something bogus. */
5954 source_flag = SRC_LINE;
5955 do_frame_printing = 0;
5956 break;
5957 default:
5958 internal_error (__FILE__, __LINE__, _("Unknown value."));
5959 }
5960
5961 /* The behavior of this routine with respect to the source
5962 flag is:
5963 SRC_LINE: Print only source line
5964 LOCATION: Print only location
5965 SRC_AND_LOC: Print location and source line. */
5966 if (do_frame_printing)
5967 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
5968
5969 /* Display the auto-display expressions. */
5970 do_displays ();
5971 }
5972
5973 /* Here to return control to GDB when the inferior stops for real.
5974 Print appropriate messages, remove breakpoints, give terminal our modes.
5975
5976 STOP_PRINT_FRAME nonzero means print the executing frame
5977 (pc, function, args, file, line number and line text).
5978 BREAKPOINTS_FAILED nonzero means stop was due to error
5979 attempting to insert breakpoints. */
5980
5981 void
5982 normal_stop (void)
5983 {
5984 struct target_waitstatus last;
5985 ptid_t last_ptid;
5986 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5987
5988 get_last_target_status (&last_ptid, &last);
5989
5990 /* If an exception is thrown from this point on, make sure to
5991 propagate GDB's knowledge of the executing state to the
5992 frontend/user running state. A QUIT is an easy exception to see
5993 here, so do this before any filtered output. */
5994 if (!non_stop)
5995 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5996 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5997 && last.kind != TARGET_WAITKIND_EXITED
5998 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5999 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6000
6001 /* In non-stop mode, we don't want GDB to switch threads behind the
6002 user's back, to avoid races where the user is typing a command to
6003 apply to thread x, but GDB switches to thread y before the user
6004 finishes entering the command. */
6005
6006 /* As with the notification of thread events, we want to delay
6007 notifying the user that we've switched thread context until
6008 the inferior actually stops.
6009
6010 There's no point in saying anything if the inferior has exited.
6011 Note that SIGNALLED here means "exited with a signal", not
6012 "received a signal". */
6013 if (!non_stop
6014 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6015 && target_has_execution
6016 && last.kind != TARGET_WAITKIND_SIGNALLED
6017 && last.kind != TARGET_WAITKIND_EXITED
6018 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6019 {
6020 target_terminal_ours_for_output ();
6021 printf_filtered (_("[Switching to %s]\n"),
6022 target_pid_to_str (inferior_ptid));
6023 annotate_thread_changed ();
6024 previous_inferior_ptid = inferior_ptid;
6025 }
6026
6027 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6028 {
6029 gdb_assert (sync_execution || !target_can_async_p ());
6030
6031 target_terminal_ours_for_output ();
6032 printf_filtered (_("No unwaited-for children left.\n"));
6033 }
6034
6035 if (!breakpoints_always_inserted_mode () && target_has_execution)
6036 {
6037 if (remove_breakpoints ())
6038 {
6039 target_terminal_ours_for_output ();
6040 printf_filtered (_("Cannot remove breakpoints because "
6041 "program is no longer writable.\nFurther "
6042 "execution is probably impossible.\n"));
6043 }
6044 }
6045
6046 /* If an auto-display called a function and that got a signal,
6047 delete that auto-display to avoid an infinite recursion. */
6048
6049 if (stopped_by_random_signal)
6050 disable_current_display ();
6051
6052 /* Don't print a message if in the middle of doing a "step n"
6053 operation for n > 1 */
6054 if (target_has_execution
6055 && last.kind != TARGET_WAITKIND_SIGNALLED
6056 && last.kind != TARGET_WAITKIND_EXITED
6057 && inferior_thread ()->step_multi
6058 && inferior_thread ()->control.stop_step)
6059 goto done;
6060
6061 target_terminal_ours ();
6062 async_enable_stdin ();
6063
6064 /* Set the current source location. This will also happen if we
6065 display the frame below, but the current SAL will be incorrect
6066 during a user hook-stop function. */
6067 if (has_stack_frames () && !stop_stack_dummy)
6068 set_current_sal_from_frame (get_current_frame (), 1);
6069
6070 /* Let the user/frontend see the threads as stopped. */
6071 do_cleanups (old_chain);
6072
6073 /* Look up the hook_stop and run it (CLI internally handles problem
6074 of stop_command's pre-hook not existing). */
6075 if (stop_command)
6076 catch_errors (hook_stop_stub, stop_command,
6077 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6078
6079 if (!has_stack_frames ())
6080 goto done;
6081
6082 if (last.kind == TARGET_WAITKIND_SIGNALLED
6083 || last.kind == TARGET_WAITKIND_EXITED)
6084 goto done;
6085
6086 /* Select innermost stack frame - i.e., current frame is frame 0,
6087 and current location is based on that.
6088 Don't do this on return from a stack dummy routine,
6089 or if the program has exited. */
6090
6091 if (!stop_stack_dummy)
6092 {
6093 select_frame (get_current_frame ());
6094
6095 /* If --batch-silent is enabled then there's no need to print the current
6096 source location, and trying to do so risks causing an error message about
6097 missing source files. */
6098 if (stop_print_frame && !batch_silent)
6099 print_stop_event (&last);
6100 }
6101
6102 /* Save the function value return registers, if we care.
6103 We might be about to restore their previous contents. */
6104 if (inferior_thread ()->control.proceed_to_finish
6105 && execution_direction != EXEC_REVERSE)
6106 {
6107 /* This should not be necessary. */
6108 if (stop_registers)
6109 regcache_xfree (stop_registers);
6110
6111 /* NB: The copy goes through to the target picking up the value of
6112 all the registers. */
6113 stop_registers = regcache_dup (get_current_regcache ());
6114 }
6115
6116 if (stop_stack_dummy == STOP_STACK_DUMMY)
6117 {
6118 /* Pop the empty frame that contains the stack dummy.
6119 This also restores inferior state prior to the call
6120 (struct infcall_suspend_state). */
6121 struct frame_info *frame = get_current_frame ();
6122
6123 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6124 frame_pop (frame);
6125 /* frame_pop() calls reinit_frame_cache as the last thing it
6126 does which means there's currently no selected frame. We
6127 don't need to re-establish a selected frame if the dummy call
6128 returns normally, that will be done by
6129 restore_infcall_control_state. However, we do have to handle
6130 the case where the dummy call is returning after being
6131 stopped (e.g. the dummy call previously hit a breakpoint).
6132 We can't know which case we have so just always re-establish
6133 a selected frame here. */
6134 select_frame (get_current_frame ());
6135 }
6136
6137 done:
6138 annotate_stopped ();
6139
6140 /* Suppress the stop observer if we're in the middle of:
6141
6142 - a step n (n > 1), as there are still more steps to be done.
6143
6144 - a "finish" command, as the observer will be called in
6145 finish_command_continuation, so it can include the inferior
6146 function's return value.
6147
6148 - calling an inferior function, as we pretend the inferior didn't
6149 run at all. The return value of the call is handled by the
6150 expression evaluator, through call_function_by_hand. */
6151
6152 if (!target_has_execution
6153 || last.kind == TARGET_WAITKIND_SIGNALLED
6154 || last.kind == TARGET_WAITKIND_EXITED
6155 || last.kind == TARGET_WAITKIND_NO_RESUMED
6156 || (!(inferior_thread ()->step_multi
6157 && inferior_thread ()->control.stop_step)
6158 && !(inferior_thread ()->control.stop_bpstat
6159 && inferior_thread ()->control.proceed_to_finish)
6160 && !inferior_thread ()->control.in_infcall))
6161 {
6162 if (!ptid_equal (inferior_ptid, null_ptid))
6163 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6164 stop_print_frame);
6165 else
6166 observer_notify_normal_stop (NULL, stop_print_frame);
6167 }
6168
6169 if (target_has_execution)
6170 {
6171 if (last.kind != TARGET_WAITKIND_SIGNALLED
6172 && last.kind != TARGET_WAITKIND_EXITED)
6173 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6174 Delete any breakpoint that is to be deleted at the next stop. */
6175 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6176 }
6177
6178 /* Try to get rid of automatically added inferiors that are no
6179 longer needed. Keeping those around slows down things linearly.
6180 Note that this never removes the current inferior. */
6181 prune_inferiors ();
6182 }
6183
6184 static int
6185 hook_stop_stub (void *cmd)
6186 {
6187 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6188 return (0);
6189 }
6190 \f
6191 int
6192 signal_stop_state (int signo)
6193 {
6194 return signal_stop[signo];
6195 }
6196
6197 int
6198 signal_print_state (int signo)
6199 {
6200 return signal_print[signo];
6201 }
6202
6203 int
6204 signal_pass_state (int signo)
6205 {
6206 return signal_program[signo];
6207 }
6208
6209 static void
6210 signal_cache_update (int signo)
6211 {
6212 if (signo == -1)
6213 {
6214 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6215 signal_cache_update (signo);
6216
6217 return;
6218 }
6219
6220 signal_pass[signo] = (signal_stop[signo] == 0
6221 && signal_print[signo] == 0
6222 && signal_program[signo] == 1
6223 && signal_catch[signo] == 0);
6224 }
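/* For example, after "handle SIGALRM nostop noprint pass" (and assuming no
   signal catchpoint is in effect), the expression above leaves
   signal_pass[GDB_SIGNAL_ALRM] set, so the target may deliver that signal
   straight to the inferior without reporting a stop to GDB.  */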
6225
6226 int
6227 signal_stop_update (int signo, int state)
6228 {
6229 int ret = signal_stop[signo];
6230
6231 signal_stop[signo] = state;
6232 signal_cache_update (signo);
6233 return ret;
6234 }
6235
6236 int
6237 signal_print_update (int signo, int state)
6238 {
6239 int ret = signal_print[signo];
6240
6241 signal_print[signo] = state;
6242 signal_cache_update (signo);
6243 return ret;
6244 }
6245
6246 int
6247 signal_pass_update (int signo, int state)
6248 {
6249 int ret = signal_program[signo];
6250
6251 signal_program[signo] = state;
6252 signal_cache_update (signo);
6253 return ret;
6254 }
6255
6256 /* Update the global 'signal_catch' from INFO and notify the
6257 target. */
6258
6259 void
6260 signal_catch_update (const unsigned int *info)
6261 {
6262 int i;
6263
6264 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6265 signal_catch[i] = info[i] > 0;
6266 signal_cache_update (-1);
6267 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6268 }
6269
6270 static void
6271 sig_print_header (void)
6272 {
6273 printf_filtered (_("Signal Stop\tPrint\tPass "
6274 "to program\tDescription\n"));
6275 }
6276
6277 static void
6278 sig_print_info (enum gdb_signal oursig)
6279 {
6280 const char *name = gdb_signal_to_name (oursig);
6281 int name_padding = 13 - strlen (name);
6282
6283 if (name_padding <= 0)
6284 name_padding = 0;
6285
6286 printf_filtered ("%s", name);
6287 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6288 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6289 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6290 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6291 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6292 }
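/* Together, the two functions above print rows of the form (spacing
   approximate; the name field is padded to 13 columns):

   Signal        Stop      Print   Pass to program Description
   SIGINT        Yes       Yes     No              Interrupt          */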
6293
6294 /* Specify how various signals in the inferior should be handled. */
6295
6296 static void
6297 handle_command (char *args, int from_tty)
6298 {
6299 char **argv;
6300 int digits, wordlen;
6301 int sigfirst, signum, siglast;
6302 enum gdb_signal oursig;
6303 int allsigs;
6304 int nsigs;
6305 unsigned char *sigs;
6306 struct cleanup *old_chain;
6307
6308 if (args == NULL)
6309 {
6310 error_no_arg (_("signal to handle"));
6311 }
6312
6313 /* Allocate and zero an array of flags for which signals to handle. */
6314
6315 nsigs = (int) GDB_SIGNAL_LAST;
6316 sigs = (unsigned char *) alloca (nsigs);
6317 memset (sigs, 0, nsigs);
6318
6319 /* Break the command line up into args. */
6320
6321 argv = gdb_buildargv (args);
6322 old_chain = make_cleanup_freeargv (argv);
6323
6324 /* Walk through the args, looking for signal numbers, signal names, and
6325 actions. Signal numbers and signal names may be interspersed with
6326 actions, with the actions being performed for all signals cumulatively
6327 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
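/* For example, "handle SIGPIPE nostop noprint pass" applies three actions
   cumulatively to one signal, while "handle 14-15 stop print" applies two
   actions to a numeric range interpreted by gdb_signal_from_command.  */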
6328
6329 while (*argv != NULL)
6330 {
6331 wordlen = strlen (*argv);
6332 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6333 {;
6334 }
6335 allsigs = 0;
6336 sigfirst = siglast = -1;
6337
6338 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6339 {
6340 /* Apply action to all signals except those used by the
6341 debugger. Silently skip those. */
6342 allsigs = 1;
6343 sigfirst = 0;
6344 siglast = nsigs - 1;
6345 }
6346 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6347 {
6348 SET_SIGS (nsigs, sigs, signal_stop);
6349 SET_SIGS (nsigs, sigs, signal_print);
6350 }
6351 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6352 {
6353 UNSET_SIGS (nsigs, sigs, signal_program);
6354 }
6355 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6356 {
6357 SET_SIGS (nsigs, sigs, signal_print);
6358 }
6359 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6360 {
6361 SET_SIGS (nsigs, sigs, signal_program);
6362 }
6363 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6364 {
6365 UNSET_SIGS (nsigs, sigs, signal_stop);
6366 }
6367 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6368 {
6369 SET_SIGS (nsigs, sigs, signal_program);
6370 }
6371 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6372 {
6373 UNSET_SIGS (nsigs, sigs, signal_print);
6374 UNSET_SIGS (nsigs, sigs, signal_stop);
6375 }
6376 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6377 {
6378 UNSET_SIGS (nsigs, sigs, signal_program);
6379 }
6380 else if (digits > 0)
6381 {
6382 /* It is numeric. The numeric signal refers to our own
6383 internal signal numbering from target.h, not to the host/target
6384 signal number. This is a feature; users really should be
6385 using symbolic names anyway, and the common ones like
6386 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6387
6388 sigfirst = siglast = (int)
6389 gdb_signal_from_command (atoi (*argv));
6390 if ((*argv)[digits] == '-')
6391 {
6392 siglast = (int)
6393 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6394 }
6395 if (sigfirst > siglast)
6396 {
6397 /* Bet he didn't figure we'd think of this case... */
6398 signum = sigfirst;
6399 sigfirst = siglast;
6400 siglast = signum;
6401 }
6402 }
6403 else
6404 {
6405 oursig = gdb_signal_from_name (*argv);
6406 if (oursig != GDB_SIGNAL_UNKNOWN)
6407 {
6408 sigfirst = siglast = (int) oursig;
6409 }
6410 else
6411 {
6412 /* Not a number and not a recognized flag word => complain. */
6413 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6414 }
6415 }
6416
6417 /* If any signal numbers or symbol names were found, set flags for
6418 which signals to apply actions to. */
6419
6420 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6421 {
6422 switch ((enum gdb_signal) signum)
6423 {
6424 case GDB_SIGNAL_TRAP:
6425 case GDB_SIGNAL_INT:
6426 if (!allsigs && !sigs[signum])
6427 {
6428 if (query (_("%s is used by the debugger.\n\
6429 Are you sure you want to change it? "),
6430 gdb_signal_to_name ((enum gdb_signal) signum)))
6431 {
6432 sigs[signum] = 1;
6433 }
6434 else
6435 {
6436 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6437 gdb_flush (gdb_stdout);
6438 }
6439 }
6440 break;
6441 case GDB_SIGNAL_0:
6442 case GDB_SIGNAL_DEFAULT:
6443 case GDB_SIGNAL_UNKNOWN:
6444 /* Make sure that "all" doesn't print these. */
6445 break;
6446 default:
6447 sigs[signum] = 1;
6448 break;
6449 }
6450 }
6451
6452 argv++;
6453 }
6454
6455 for (signum = 0; signum < nsigs; signum++)
6456 if (sigs[signum])
6457 {
6458 signal_cache_update (-1);
6459 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6460 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6461
6462 if (from_tty)
6463 {
6464 /* Show the results. */
6465 sig_print_header ();
6466 for (; signum < nsigs; signum++)
6467 if (sigs[signum])
6468 sig_print_info (signum);
6469 }
6470
6471 break;
6472 }
6473
6474 do_cleanups (old_chain);
6475 }
6476
6477 /* Complete the "handle" command. */
6478
6479 static VEC (char_ptr) *
6480 handle_completer (struct cmd_list_element *ignore,
6481 const char *text, const char *word)
6482 {
6483 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6484 static const char * const keywords[] =
6485 {
6486 "all",
6487 "stop",
6488 "ignore",
6489 "print",
6490 "pass",
6491 "nostop",
6492 "noignore",
6493 "noprint",
6494 "nopass",
6495 NULL,
6496 };
6497
6498 vec_signals = signal_completer (ignore, text, word);
6499 vec_keywords = complete_on_enum (keywords, word, word);
6500
6501 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6502 VEC_free (char_ptr, vec_signals);
6503 VEC_free (char_ptr, vec_keywords);
6504 return return_val;
6505 }
6506
6507 static void
6508 xdb_handle_command (char *args, int from_tty)
6509 {
6510 char **argv;
6511 struct cleanup *old_chain;
6512
6513 if (args == NULL)
6514 error_no_arg (_("xdb command"));
6515
6516 /* Break the command line up into args. */
6517
6518 argv = gdb_buildargv (args);
6519 old_chain = make_cleanup_freeargv (argv);
6520 if (argv[1] != (char *) NULL)
6521 {
6522 char *argBuf;
6523 int bufLen;
6524
6525 bufLen = strlen (argv[0]) + 20;
6526 argBuf = (char *) xmalloc (bufLen);
6527 if (argBuf)
6528 {
6529 int validFlag = 1;
6530 enum gdb_signal oursig;
6531
6532 oursig = gdb_signal_from_name (argv[0]);
6533 memset (argBuf, 0, bufLen);
6534 if (strcmp (argv[1], "Q") == 0)
6535 sprintf (argBuf, "%s %s", argv[0], "noprint");
6536 else
6537 {
6538 if (strcmp (argv[1], "s") == 0)
6539 {
6540 if (!signal_stop[oursig])
6541 sprintf (argBuf, "%s %s", argv[0], "stop");
6542 else
6543 sprintf (argBuf, "%s %s", argv[0], "nostop");
6544 }
6545 else if (strcmp (argv[1], "i") == 0)
6546 {
6547 if (!signal_program[oursig])
6548 sprintf (argBuf, "%s %s", argv[0], "pass");
6549 else
6550 sprintf (argBuf, "%s %s", argv[0], "nopass");
6551 }
6552 else if (strcmp (argv[1], "r") == 0)
6553 {
6554 if (!signal_print[oursig])
6555 sprintf (argBuf, "%s %s", argv[0], "print");
6556 else
6557 sprintf (argBuf, "%s %s", argv[0], "noprint");
6558 }
6559 else
6560 validFlag = 0;
6561 }
6562 if (validFlag)
6563 handle_command (argBuf, from_tty);
6564 else
6565 printf_filtered (_("Invalid signal handling flag.\n"));
6566 if (argBuf)
6567 xfree (argBuf);
6568 }
6569 }
6570 do_cleanups (old_chain);
6571 }
6572
6573 enum gdb_signal
6574 gdb_signal_from_command (int num)
6575 {
6576 if (num >= 1 && num <= 15)
6577 return (enum gdb_signal) num;
6578 error (_("Only signals 1-15 are valid as numeric signals.\n\
6579 Use \"info signals\" for a list of symbolic signals."));
6580 }
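/* For example, "handle 5 nostop" refers to GDB_SIGNAL_TRAP under this
   internal numbering (assuming the conventional mapping), which is why
   handle_command above asks for confirmation before changing it.  */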
6581
6582 /* Print current contents of the tables set by the handle command.
6583 It is possible we should just be printing signals actually used
6584 by the current target (but for things to work right when switching
6585 targets, all signals should be in the signal tables). */
6586
6587 static void
6588 signals_info (char *signum_exp, int from_tty)
6589 {
6590 enum gdb_signal oursig;
6591
6592 sig_print_header ();
6593
6594 if (signum_exp)
6595 {
6596 /* First see if this is a symbol name. */
6597 oursig = gdb_signal_from_name (signum_exp);
6598 if (oursig == GDB_SIGNAL_UNKNOWN)
6599 {
6600 /* No, try numeric. */
6601 oursig =
6602 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6603 }
6604 sig_print_info (oursig);
6605 return;
6606 }
6607
6608 printf_filtered ("\n");
6609 /* These ugly casts brought to you by the native VAX compiler. */
6610 for (oursig = GDB_SIGNAL_FIRST;
6611 (int) oursig < (int) GDB_SIGNAL_LAST;
6612 oursig = (enum gdb_signal) ((int) oursig + 1))
6613 {
6614 QUIT;
6615
6616 if (oursig != GDB_SIGNAL_UNKNOWN
6617 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6618 sig_print_info (oursig);
6619 }
6620
6621 printf_filtered (_("\nUse the \"handle\" command "
6622 "to change these tables.\n"));
6623 }
6624
6625 /* Check if it makes sense to read $_siginfo from the current thread
6626 at this point. If not, throw an error. */
6627
6628 static void
6629 validate_siginfo_access (void)
6630 {
6631 /* No current inferior, no siginfo. */
6632 if (ptid_equal (inferior_ptid, null_ptid))
6633 error (_("No thread selected."));
6634
6635 /* Don't try to read from a dead thread. */
6636 if (is_exited (inferior_ptid))
6637 error (_("The current thread has terminated."));
6638
6639 /* ... or from a spinning thread. */
6640 if (is_running (inferior_ptid))
6641 error (_("Selected thread is running."));
6642 }
6643
6644 /* The $_siginfo convenience variable is a bit special. We don't know
6645 for sure the type of the value until we actually have a chance to
6646 fetch the data. The type can change depending on gdbarch, so it is
6647 also dependent on which thread you have selected. This is handled by:
6648
6649 1. making $_siginfo be an internalvar that creates a new value on
6650 access.
6651
6652 2. making the value of $_siginfo be an lval_computed value. */
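
/* For example, after the inferior stops for a signal on a target that
   supports TARGET_OBJECT_SIGNAL_INFO, the user can inspect or modify
   the data with "print $_siginfo" or "print $_siginfo.si_signo"; the
   available fields are whatever gdbarch_get_siginfo_type describes for
   the current architecture.  */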
6653
6654 /* This function implements the lval_computed support for reading a
6655 $_siginfo value. */
6656
6657 static void
6658 siginfo_value_read (struct value *v)
6659 {
6660 LONGEST transferred;
6661
6662 validate_siginfo_access ();
6663
6664 transferred =
6665 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6666 NULL,
6667 value_contents_all_raw (v),
6668 value_offset (v),
6669 TYPE_LENGTH (value_type (v)));
6670
6671 if (transferred != TYPE_LENGTH (value_type (v)))
6672 error (_("Unable to read siginfo"));
6673 }
6674
6675 /* This function implements the lval_computed support for writing a
6676 $_siginfo value. */
6677
6678 static void
6679 siginfo_value_write (struct value *v, struct value *fromval)
6680 {
6681 LONGEST transferred;
6682
6683 validate_siginfo_access ();
6684
6685 transferred = target_write (&current_target,
6686 TARGET_OBJECT_SIGNAL_INFO,
6687 NULL,
6688 value_contents_all_raw (fromval),
6689 value_offset (v),
6690 TYPE_LENGTH (value_type (fromval)));
6691
6692 if (transferred != TYPE_LENGTH (value_type (fromval)))
6693 error (_("Unable to write siginfo"));
6694 }
6695
6696 static const struct lval_funcs siginfo_value_funcs =
6697 {
6698 siginfo_value_read,
6699 siginfo_value_write
6700 };
6701
6702 /* Return a new value with the correct type for the siginfo object of
6703 the current thread using architecture GDBARCH. Return a void value
6704 if there's no object available. */
6705
6706 static struct value *
6707 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6708 void *ignore)
6709 {
6710 if (target_has_stack
6711 && !ptid_equal (inferior_ptid, null_ptid)
6712 && gdbarch_get_siginfo_type_p (gdbarch))
6713 {
6714 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6715
6716 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6717 }
6718
6719 return allocate_value (builtin_type (gdbarch)->builtin_void);
6720 }
6721
6722 \f
6723 /* infcall_suspend_state contains state about the program itself like its
6724 registers and any signal it received when it last stopped.
6725 This state must be restored regardless of how the inferior function call
6726 ends (either successfully, or after it hits a breakpoint or signal)
6727 if the program is to properly continue where it left off. */
6728
6729 struct infcall_suspend_state
6730 {
6731 struct thread_suspend_state thread_suspend;
6732 #if 0 /* Currently unused and empty structures are not valid C. */
6733 struct inferior_suspend_state inferior_suspend;
6734 #endif
6735
6736 /* Other fields: */
6737 CORE_ADDR stop_pc;
6738 struct regcache *registers;
6739
6740 /* The gdbarch describing the format of SIGINFO_DATA, or NULL if it is not present. */
6741 struct gdbarch *siginfo_gdbarch;
6742
6743 /* The saved siginfo data; its format depends on SIGINFO_GDBARCH and its
6744 length is TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different
6745 gdbarch the content would be invalid. */
6746 gdb_byte *siginfo_data;
6747 };
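
/* A minimal sketch of how a caller of an inferior function call
   (hypothetical here; the real users live in infcall.c) brackets the
   call with these routines:

     struct infcall_suspend_state *caller_state;
     struct cleanup *chain;

     caller_state = save_infcall_suspend_state ();
     chain = make_cleanup_restore_infcall_suspend_state (caller_state);
     ... set up and run the dummy-frame call ...
     do_cleanups (chain);   (restores registers, stop signal and siginfo)

   When the saved state should not be restored, for instance because the
   user chose to keep the stopped state of the call itself, the caller
   discards the cleanup and frees the state with
   discard_infcall_suspend_state instead.  */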
6748
6749 struct infcall_suspend_state *
6750 save_infcall_suspend_state (void)
6751 {
6752 struct infcall_suspend_state *inf_state;
6753 struct thread_info *tp = inferior_thread ();
6754 #if 0
6755 struct inferior *inf = current_inferior ();
6756 #endif
6757 struct regcache *regcache = get_current_regcache ();
6758 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6759 gdb_byte *siginfo_data = NULL;
6760
6761 if (gdbarch_get_siginfo_type_p (gdbarch))
6762 {
6763 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6764 size_t len = TYPE_LENGTH (type);
6765 struct cleanup *back_to;
6766
6767 siginfo_data = xmalloc (len);
6768 back_to = make_cleanup (xfree, siginfo_data);
6769
6770 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6771 siginfo_data, 0, len) == len)
6772 discard_cleanups (back_to);
6773 else
6774 {
6775 /* Errors ignored. */
6776 do_cleanups (back_to);
6777 siginfo_data = NULL;
6778 }
6779 }
6780
6781 inf_state = XCNEW (struct infcall_suspend_state);
6782
6783 if (siginfo_data)
6784 {
6785 inf_state->siginfo_gdbarch = gdbarch;
6786 inf_state->siginfo_data = siginfo_data;
6787 }
6788
6789 inf_state->thread_suspend = tp->suspend;
6790 #if 0 /* Currently unused and empty structures are not valid C. */
6791 inf_state->inferior_suspend = inf->suspend;
6792 #endif
6793
6794 /* run_inferior_call will not use the signal anyway, since it calls
6795 `proceed' with GDB_SIGNAL_0. */
6796 tp->suspend.stop_signal = GDB_SIGNAL_0;
6797
6798 inf_state->stop_pc = stop_pc;
6799
6800 inf_state->registers = regcache_dup (regcache);
6801
6802 return inf_state;
6803 }
6804
6805 /* Restore the program state (registers, stop signal, siginfo) saved in INF_STATE. */
6806
6807 void
6808 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6809 {
6810 struct thread_info *tp = inferior_thread ();
6811 #if 0
6812 struct inferior *inf = current_inferior ();
6813 #endif
6814 struct regcache *regcache = get_current_regcache ();
6815 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6816
6817 tp->suspend = inf_state->thread_suspend;
6818 #if 0 /* Currently unused and empty structures are not valid C. */
6819 inf->suspend = inf_state->inferior_suspend;
6820 #endif
6821
6822 stop_pc = inf_state->stop_pc;
6823
6824 if (inf_state->siginfo_gdbarch == gdbarch)
6825 {
6826 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6827
6828 /* Errors ignored. */
6829 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6830 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6831 }
6832
6833 /* The inferior can be gone if the user types "print exit(0)"
6834 (and perhaps other times). */
6835 if (target_has_execution)
6836 /* NB: The register write goes through to the target. */
6837 regcache_cpy (regcache, inf_state->registers);
6838
6839 discard_infcall_suspend_state (inf_state);
6840 }
6841
6842 static void
6843 do_restore_infcall_suspend_state_cleanup (void *state)
6844 {
6845 restore_infcall_suspend_state (state);
6846 }
6847
6848 struct cleanup *
6849 make_cleanup_restore_infcall_suspend_state
6850 (struct infcall_suspend_state *inf_state)
6851 {
6852 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6853 }
6854
6855 void
6856 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6857 {
6858 regcache_xfree (inf_state->registers);
6859 xfree (inf_state->siginfo_data);
6860 xfree (inf_state);
6861 }
6862
6863 struct regcache *
6864 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6865 {
6866 return inf_state->registers;
6867 }
6868
6869 /* infcall_control_state contains state regarding gdb's control of the
6870 inferior itself like stepping control. It also contains session state like
6871 the user's currently selected frame. */
6872
6873 struct infcall_control_state
6874 {
6875 struct thread_control_state thread_control;
6876 struct inferior_control_state inferior_control;
6877
6878 /* Other fields: */
6879 enum stop_stack_kind stop_stack_dummy;
6880 int stopped_by_random_signal;
6881 int stop_after_trap;
6882
6883 /* ID of the selected frame when the inferior function call was made. */
6884 struct frame_id selected_frame_id;
6885 };
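
/* The control state is saved and restored with the same cleanup-based
   pattern as infcall_suspend_state above; a caller setting up an
   inferior call typically saves both, so that stepping state and the
   user's selected frame survive the call as well as the raw machine
   state.  */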
6886
6887 /* Save all of the information associated with the inferior<==>gdb
6888 connection. */
6889
6890 struct infcall_control_state *
6891 save_infcall_control_state (void)
6892 {
6893 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6894 struct thread_info *tp = inferior_thread ();
6895 struct inferior *inf = current_inferior ();
6896
6897 inf_status->thread_control = tp->control;
6898 inf_status->inferior_control = inf->control;
6899
6900 tp->control.step_resume_breakpoint = NULL;
6901 tp->control.exception_resume_breakpoint = NULL;
6902
6903 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6904 chain. If caller's caller is walking the chain, they'll be happier if we
6905 hand them back the original chain when restore_infcall_control_state is
6906 called. */
6907 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6908
6909 /* Other fields: */
6910 inf_status->stop_stack_dummy = stop_stack_dummy;
6911 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6912 inf_status->stop_after_trap = stop_after_trap;
6913
6914 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6915
6916 return inf_status;
6917 }
6918
6919 static int
6920 restore_selected_frame (void *args)
6921 {
6922 struct frame_id *fid = (struct frame_id *) args;
6923 struct frame_info *frame;
6924
6925 frame = frame_find_by_id (*fid);
6926
6927 /* If frame_find_by_id () fails, the previously selected frame is no
6928 longer available; warn and let the caller fall back to the current frame. */
6929 if (frame == NULL)
6930 {
6931 warning (_("Unable to restore previously selected frame."));
6932 return 0;
6933 }
6934
6935 select_frame (frame);
6936
6937 return (1);
6938 }
6939
6940 /* Restore inferior session state to INF_STATUS. */
6941
6942 void
6943 restore_infcall_control_state (struct infcall_control_state *inf_status)
6944 {
6945 struct thread_info *tp = inferior_thread ();
6946 struct inferior *inf = current_inferior ();
6947
6948 if (tp->control.step_resume_breakpoint)
6949 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6950
6951 if (tp->control.exception_resume_breakpoint)
6952 tp->control.exception_resume_breakpoint->disposition
6953 = disp_del_at_next_stop;
6954
6955 /* Handle the bpstat_copy of the chain. */
6956 bpstat_clear (&tp->control.stop_bpstat);
6957
6958 tp->control = inf_status->thread_control;
6959 inf->control = inf_status->inferior_control;
6960
6961 /* Other fields: */
6962 stop_stack_dummy = inf_status->stop_stack_dummy;
6963 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6964 stop_after_trap = inf_status->stop_after_trap;
6965
6966 if (target_has_stack)
6967 {
6968 /* The point of catch_errors is that if the stack is clobbered,
6969 walking it might encounter a garbage pointer and call error ()
6970 while trying to dereference it. */
6971 if (catch_errors
6972 (restore_selected_frame, &inf_status->selected_frame_id,
6973 "Unable to restore previously selected frame:\n",
6974 RETURN_MASK_ERROR) == 0)
6975 /* Error in restoring the selected frame. Select the innermost
6976 frame. */
6977 select_frame (get_current_frame ());
6978 }
6979
6980 xfree (inf_status);
6981 }
6982
6983 static void
6984 do_restore_infcall_control_state_cleanup (void *sts)
6985 {
6986 restore_infcall_control_state (sts);
6987 }
6988
6989 struct cleanup *
6990 make_cleanup_restore_infcall_control_state
6991 (struct infcall_control_state *inf_status)
6992 {
6993 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6994 }
6995
6996 void
6997 discard_infcall_control_state (struct infcall_control_state *inf_status)
6998 {
6999 if (inf_status->thread_control.step_resume_breakpoint)
7000 inf_status->thread_control.step_resume_breakpoint->disposition
7001 = disp_del_at_next_stop;
7002
7003 if (inf_status->thread_control.exception_resume_breakpoint)
7004 inf_status->thread_control.exception_resume_breakpoint->disposition
7005 = disp_del_at_next_stop;
7006
7007 /* See save_infcall_control_state for info on stop_bpstat. */
7008 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7009
7010 xfree (inf_status);
7011 }
7012 \f
7013 /* restore_inferior_ptid() will be used by the cleanup machinery
7014 to restore the inferior_ptid value saved in a call to
7015 save_inferior_ptid(). */
7016
7017 static void
7018 restore_inferior_ptid (void *arg)
7019 {
7020 ptid_t *saved_ptid_ptr = arg;
7021
7022 inferior_ptid = *saved_ptid_ptr;
7023 xfree (arg);
7024 }
7025
7026 /* Save the value of inferior_ptid so that it may be restored by a
7027 later call to do_cleanups(). Returns the struct cleanup pointer
7028 needed for later doing the cleanup. */
7029
7030 struct cleanup *
7031 save_inferior_ptid (void)
7032 {
7033 ptid_t *saved_ptid_ptr;
7034
7035 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7036 *saved_ptid_ptr = inferior_ptid;
7037 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7038 }
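
/* Typical usage, as a sketch (OTHER_PTID stands for whatever thread the
   caller needs to operate on temporarily):

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = OTHER_PTID;
     ... do the work ...
     do_cleanups (old_chain);   (puts the original inferior_ptid back)  */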
7039
7040 /* See inferior.h. */
7041
7042 void
7043 clear_exit_convenience_vars (void)
7044 {
7045 clear_internalvar (lookup_internalvar ("_exitsignal"));
7046 clear_internalvar (lookup_internalvar ("_exitcode"));
7047 }
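
/* $_exitcode is set when the inferior exits normally and $_exitsignal
   when it is terminated by a signal; after a run ends, e.g.
   "print $_exitcode" shows the status.  Clearing both avoids leaving a
   stale value from a previous run visible.  */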
7048 \f
7049
7050 /* User interface for reverse debugging:
7051 Set exec-direction / show exec-direction commands
7052 (returns an error unless the target can execute in reverse). */
7053
7054 int execution_direction = EXEC_FORWARD;
7055 static const char exec_forward[] = "forward";
7056 static const char exec_reverse[] = "reverse";
7057 static const char *exec_direction = exec_forward;
7058 static const char *const exec_direction_names[] = {
7059 exec_forward,
7060 exec_reverse,
7061 NULL
7062 };
7063
7064 static void
7065 set_exec_direction_func (char *args, int from_tty,
7066 struct cmd_list_element *cmd)
7067 {
7068 if (target_can_execute_reverse)
7069 {
7070 if (!strcmp (exec_direction, exec_forward))
7071 execution_direction = EXEC_FORWARD;
7072 else if (!strcmp (exec_direction, exec_reverse))
7073 execution_direction = EXEC_REVERSE;
7074 }
7075 else
7076 {
7077 exec_direction = exec_forward;
7078 error (_("Target does not support this operation."));
7079 }
7080 }
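
/* For example, "set exec-direction reverse" only succeeds while a
   target that can execute in reverse is active (such as the process
   record target); otherwise the setting snaps back to "forward" and an
   error is reported.  */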
7081
7082 static void
7083 show_exec_direction_func (struct ui_file *out, int from_tty,
7084 struct cmd_list_element *cmd, const char *value)
7085 {
7086 switch (execution_direction) {
7087 case EXEC_FORWARD:
7088 fprintf_filtered (out, _("Forward.\n"));
7089 break;
7090 case EXEC_REVERSE:
7091 fprintf_filtered (out, _("Reverse.\n"));
7092 break;
7093 default:
7094 internal_error (__FILE__, __LINE__,
7095 _("bogus execution_direction value: %d"),
7096 (int) execution_direction);
7097 }
7098 }
7099
7100 static void
7101 show_schedule_multiple (struct ui_file *file, int from_tty,
7102 struct cmd_list_element *c, const char *value)
7103 {
7104 fprintf_filtered (file, _("Resuming the execution of threads "
7105 "of all processes is %s.\n"), value);
7106 }
7107
7108 /* Implementation of `siginfo' variable. */
7109
7110 static const struct internalvar_funcs siginfo_funcs =
7111 {
7112 siginfo_make_value,
7113 NULL,
7114 NULL
7115 };
7116
7117 void
7118 _initialize_infrun (void)
7119 {
7120 int i;
7121 int numsigs;
7122 struct cmd_list_element *c;
7123
7124 add_info ("signals", signals_info, _("\
7125 What debugger does when program gets various signals.\n\
7126 Specify a signal as argument to print info on that signal only."));
7127 add_info_alias ("handle", "signals", 0);
7128
7129 c = add_com ("handle", class_run, handle_command, _("\
7130 Specify how to handle signals.\n\
7131 Usage: handle SIGNAL [ACTIONS]\n\
7132 Args are signals and actions to apply to those signals.\n\
7133 If no actions are specified, the current settings for the specified signals\n\
7134 will be displayed instead.\n\
7135 \n\
7136 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7137 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7138 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7139 The special arg \"all\" is recognized to mean all signals except those\n\
7140 used by the debugger, typically SIGTRAP and SIGINT.\n\
7141 \n\
7142 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7143 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7144 Stop means reenter debugger if this signal happens (implies print).\n\
7145 Print means print a message if this signal happens.\n\
7146 Pass means let program see this signal; otherwise program doesn't know.\n\
7147 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7148 Pass and Stop may be combined.\n\
7149 \n\
7150 Multiple signals may be specified. Signal numbers and signal names\n\
7151 may be interspersed with actions, with the actions being performed for\n\
7152 all signals cumulatively specified."));
7153 set_cmd_completer (c, handle_completer);
7154
7155 if (xdb_commands)
7156 {
7157 add_com ("lz", class_info, signals_info, _("\
7158 What debugger does when program gets various signals.\n\
7159 Specify a signal as argument to print info on that signal only."));
7160 add_com ("z", class_run, xdb_handle_command, _("\
7161 Specify how to handle a signal.\n\
7162 Args are signals and actions to apply to those signals.\n\
7163 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7164 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7165 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7166 The special arg \"all\" is recognized to mean all signals except those\n\
7167 used by the debugger, typically SIGTRAP and SIGINT.\n\
7168 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7169 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7170 nopass), \"Q\" (noprint).\n\
7171 Stop means reenter debugger if this signal happens (implies print).\n\
7172 Print means print a message if this signal happens.\n\
7173 Pass means let program see this signal; otherwise program doesn't know.\n\
7174 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7175 Pass and Stop may be combined."));
7176 }
7177
7178 if (!dbx_commands)
7179 stop_command = add_cmd ("stop", class_obscure,
7180 not_just_help_class_command, _("\
7181 There is no `stop' command, but you can set a hook on `stop'.\n\
7182 This allows you to set a list of commands to be run each time execution\n\
7183 of the program stops."), &cmdlist);
7184
7185 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7186 Set inferior debugging."), _("\
7187 Show inferior debugging."), _("\
7188 When non-zero, inferior specific debugging is enabled."),
7189 NULL,
7190 show_debug_infrun,
7191 &setdebuglist, &showdebuglist);
7192
7193 add_setshow_boolean_cmd ("displaced", class_maintenance,
7194 &debug_displaced, _("\
7195 Set displaced stepping debugging."), _("\
7196 Show displaced stepping debugging."), _("\
7197 When non-zero, displaced stepping specific debugging is enabled."),
7198 NULL,
7199 show_debug_displaced,
7200 &setdebuglist, &showdebuglist);
7201
7202 add_setshow_boolean_cmd ("non-stop", no_class,
7203 &non_stop_1, _("\
7204 Set whether gdb controls the inferior in non-stop mode."), _("\
7205 Show whether gdb controls the inferior in non-stop mode."), _("\
7206 When debugging a multi-threaded program and this setting is\n\
7207 off (the default, also called all-stop mode), when one thread stops\n\
7208 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7209 all other threads in the program while you interact with the thread of\n\
7210 interest. When you continue or step a thread, you can allow the other\n\
7211 threads to run, or have them remain stopped, but while you inspect any\n\
7212 thread's state, all threads stop.\n\
7213 \n\
7214 In non-stop mode, when one thread stops, other threads can continue\n\
7215 to run freely. You'll be able to step each thread independently,\n\
7216 leave it stopped or free to run as needed."),
7217 set_non_stop,
7218 show_non_stop,
7219 &setlist,
7220 &showlist);
7221
7222 numsigs = (int) GDB_SIGNAL_LAST;
7223 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7224 signal_print = (unsigned char *)
7225 xmalloc (sizeof (signal_print[0]) * numsigs);
7226 signal_program = (unsigned char *)
7227 xmalloc (sizeof (signal_program[0]) * numsigs);
7228 signal_catch = (unsigned char *)
7229 xmalloc (sizeof (signal_catch[0]) * numsigs);
7230 signal_pass = (unsigned char *)
7231 xmalloc (sizeof (signal_program[0]) * numsigs);
7232 for (i = 0; i < numsigs; i++)
7233 {
7234 signal_stop[i] = 1;
7235 signal_print[i] = 1;
7236 signal_program[i] = 1;
7237 signal_catch[i] = 0;
7238 }
7239
7240 /* Signals caused by debugger's own actions
7241 should not be given to the program afterwards. */
7242 signal_program[GDB_SIGNAL_TRAP] = 0;
7243 signal_program[GDB_SIGNAL_INT] = 0;
7244
7245 /* Signals that are not errors should not normally enter the debugger. */
7246 signal_stop[GDB_SIGNAL_ALRM] = 0;
7247 signal_print[GDB_SIGNAL_ALRM] = 0;
7248 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7249 signal_print[GDB_SIGNAL_VTALRM] = 0;
7250 signal_stop[GDB_SIGNAL_PROF] = 0;
7251 signal_print[GDB_SIGNAL_PROF] = 0;
7252 signal_stop[GDB_SIGNAL_CHLD] = 0;
7253 signal_print[GDB_SIGNAL_CHLD] = 0;
7254 signal_stop[GDB_SIGNAL_IO] = 0;
7255 signal_print[GDB_SIGNAL_IO] = 0;
7256 signal_stop[GDB_SIGNAL_POLL] = 0;
7257 signal_print[GDB_SIGNAL_POLL] = 0;
7258 signal_stop[GDB_SIGNAL_URG] = 0;
7259 signal_print[GDB_SIGNAL_URG] = 0;
7260 signal_stop[GDB_SIGNAL_WINCH] = 0;
7261 signal_print[GDB_SIGNAL_WINCH] = 0;
7262 signal_stop[GDB_SIGNAL_PRIO] = 0;
7263 signal_print[GDB_SIGNAL_PRIO] = 0;
7264
7265 /* These signals are used internally by user-level thread
7266 implementations. (See signal(5) on Solaris.) Like the above
7267 signals, a healthy program receives and handles them as part of
7268 its normal operation. */
7269 signal_stop[GDB_SIGNAL_LWP] = 0;
7270 signal_print[GDB_SIGNAL_LWP] = 0;
7271 signal_stop[GDB_SIGNAL_WAITING] = 0;
7272 signal_print[GDB_SIGNAL_WAITING] = 0;
7273 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7274 signal_print[GDB_SIGNAL_CANCEL] = 0;
7275
7276 /* Update cached state. */
7277 signal_cache_update (-1);
7278
7279 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7280 &stop_on_solib_events, _("\
7281 Set stopping for shared library events."), _("\
7282 Show stopping for shared library events."), _("\
7283 If nonzero, gdb will give control to the user when the dynamic linker\n\
7284 notifies gdb of shared library events. The most common event of interest\n\
7285 to the user would be loading/unloading of a new library."),
7286 set_stop_on_solib_events,
7287 show_stop_on_solib_events,
7288 &setlist, &showlist);
7289
7290 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7291 follow_fork_mode_kind_names,
7292 &follow_fork_mode_string, _("\
7293 Set debugger response to a program call of fork or vfork."), _("\
7294 Show debugger response to a program call of fork or vfork."), _("\
7295 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7296 parent - the original process is debugged after a fork\n\
7297 child - the new process is debugged after a fork\n\
7298 The unfollowed process will continue to run.\n\
7299 By default, the debugger will follow the parent process."),
7300 NULL,
7301 show_follow_fork_mode_string,
7302 &setlist, &showlist);
7303
7304 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7305 follow_exec_mode_names,
7306 &follow_exec_mode_string, _("\
7307 Set debugger response to a program call of exec."), _("\
7308 Show debugger response to a program call of exec."), _("\
7309 An exec call replaces the program image of a process.\n\
7310 \n\
7311 follow-exec-mode can be:\n\
7312 \n\
7313 new - the debugger creates a new inferior and rebinds the process\n\
7314 to this new inferior. The program the process was running before\n\
7315 the exec call can be restarted afterwards by restarting the original\n\
7316 inferior.\n\
7317 \n\
7318 same - the debugger keeps the process bound to the same inferior.\n\
7319 The new executable image replaces the previous executable loaded in\n\
7320 the inferior. Restarting the inferior after the exec call restarts\n\
7321 the executable the process was running after the exec call.\n\
7322 \n\
7323 By default, the debugger will use the same inferior."),
7324 NULL,
7325 show_follow_exec_mode_string,
7326 &setlist, &showlist);
7327
7328 add_setshow_enum_cmd ("scheduler-locking", class_run,
7329 scheduler_enums, &scheduler_mode, _("\
7330 Set mode for locking scheduler during execution."), _("\
7331 Show mode for locking scheduler during execution."), _("\
7332 off == no locking (threads may preempt at any time)\n\
7333 on == full locking (no thread except the current thread may run)\n\
7334 step == scheduler locked during every single-step operation.\n\
7335 In this mode, no other thread may run during a step command.\n\
7336 Other threads may run while stepping over a function call ('next')."),
7337 set_schedlock_func, /* traps on target vector */
7338 show_scheduler_mode,
7339 &setlist, &showlist);
7340
7341 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7342 Set mode for resuming threads of all processes."), _("\
7343 Show mode for resuming threads of all processes."), _("\
7344 When on, execution commands (such as 'continue' or 'next') resume all\n\
7345 threads of all processes. When off (which is the default), execution\n\
7346 commands only resume the threads of the current process. The set of\n\
7347 threads that are resumed is further refined by the scheduler-locking\n\
7348 mode (see help set scheduler-locking)."),
7349 NULL,
7350 show_schedule_multiple,
7351 &setlist, &showlist);
7352
7353 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7354 Set mode of the step operation."), _("\
7355 Show mode of the step operation."), _("\
7356 When set, doing a step over a function without debug line information\n\
7357 will stop at the first instruction of that function. Otherwise, the\n\
7358 function is skipped and the step command stops at a different source line."),
7359 NULL,
7360 show_step_stop_if_no_debug,
7361 &setlist, &showlist);
7362
7363 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7364 &can_use_displaced_stepping, _("\
7365 Set debugger's willingness to use displaced stepping."), _("\
7366 Show debugger's willingness to use displaced stepping."), _("\
7367 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7368 supported by the target architecture. If off, gdb will not use displaced\n\
7369 stepping to step over breakpoints, even if such is supported by the target\n\
7370 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7371 if the target architecture supports it and non-stop mode is active, but will not\n\
7372 use it in all-stop mode (see help set non-stop)."),
7373 NULL,
7374 show_can_use_displaced_stepping,
7375 &setlist, &showlist);
7376
7377 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7378 &exec_direction, _("Set direction of execution.\n\
7379 Options are 'forward' or 'reverse'."),
7380 _("Show direction of execution (forward/reverse)."),
7381 _("Tells gdb whether to execute forward or backward."),
7382 set_exec_direction_func, show_exec_direction_func,
7383 &setlist, &showlist);
7384
7385 /* Set/show detach-on-fork: user-settable mode. */
7386
7387 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7388 Set whether gdb will detach the child of a fork."), _("\
7389 Show whether gdb will detach the child of a fork."), _("\
7390 Tells gdb whether to detach the child of a fork."),
7391 NULL, NULL, &setlist, &showlist);
7392
7393 /* Set/show disable address space randomization mode. */
7394
7395 add_setshow_boolean_cmd ("disable-randomization", class_support,
7396 &disable_randomization, _("\
7397 Set disabling of debuggee's virtual address space randomization."), _("\
7398 Show disabling of debuggee's virtual address space randomization."), _("\
7399 When this mode is on (which is the default), randomization of the virtual\n\
7400 address space is disabled. Standalone programs run with the randomization\n\
7401 enabled by default on some platforms."),
7402 &set_disable_randomization,
7403 &show_disable_randomization,
7404 &setlist, &showlist);
7405
7406 /* ptid initializations */
7407 inferior_ptid = null_ptid;
7408 target_last_wait_ptid = minus_one_ptid;
7409
7410 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7411 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7412 observer_attach_thread_exit (infrun_thread_thread_exit);
7413 observer_attach_inferior_exit (infrun_inferior_exit);
7414
7415 /* Explicitly create without lookup, since that tries to create a
7416 value with a void typed value, and when we get here, gdbarch
7417 isn't initialized yet. At this point, we're quite sure there
7418 isn't another convenience variable of the same name. */
7419 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7420
7421 add_setshow_boolean_cmd ("observer", no_class,
7422 &observer_mode_1, _("\
7423 Set whether gdb controls the inferior in observer mode."), _("\
7424 Show whether gdb controls the inferior in observer mode."), _("\
7425 In observer mode, GDB can get data from the inferior, but not\n\
7426 affect its execution. Registers and memory may not be changed,\n\
7427 breakpoints may not be set, and the program cannot be interrupted\n\
7428 or signalled."),
7429 set_observer_mode,
7430 show_observer_mode,
7431 &setlist,
7432 &showlist);
7433 }