gdb/infrun.c (binutils-gdb.git)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include <string.h>
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "record-full.h"
53 #include "inline-frame.h"
54 #include "jit.h"
55 #include "tracepoint.h"
56 #include "continuations.h"
57 #include "interps.h"
58 #include "skip.h"
59 #include "probe.h"
60 #include "objfiles.h"
61 #include "completer.h"
62 #include "target-descriptions.h"
63 #include "target-dcache.h"
64
65 /* Prototypes for local functions */
66
67 static void signals_info (char *, int);
68
69 static void handle_command (char *, int);
70
71 static void sig_print_info (enum gdb_signal);
72
73 static void sig_print_header (void);
74
75 static void resume_cleanups (void *);
76
77 static int hook_stop_stub (void *);
78
79 static int restore_selected_frame (void *);
80
81 static int follow_fork (void);
82
83 static void set_schedlock_func (char *args, int from_tty,
84 struct cmd_list_element *c);
85
86 static int currently_stepping (struct thread_info *tp);
87
88 static void xdb_handle_command (char *args, int from_tty);
89
90 static void print_exited_reason (int exitstatus);
91
92 static void print_signal_exited_reason (enum gdb_signal siggnal);
93
94 static void print_no_history_reason (void);
95
96 static void print_signal_received_reason (enum gdb_signal siggnal);
97
98 static void print_end_stepping_range_reason (void);
99
100 void _initialize_infrun (void);
101
102 void nullify_last_target_wait_ptid (void);
103
104 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
105
106 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
107
108 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
109
110 /* When set, stop the 'step' command if we enter a function which has
111 no line number information. The normal behavior is that we step
112 over such a function. */
113 int step_stop_if_no_debug = 0;
114 static void
115 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
116 struct cmd_list_element *c, const char *value)
117 {
118 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
119 }
120
121 /* In asynchronous mode, but simulating synchronous execution. */
122
123 int sync_execution = 0;
124
125 /* proceed and normal_stop use this to notify the user when the
126 inferior stopped in a different thread than it had been running
127 in. */
128
129 static ptid_t previous_inferior_ptid;
130
131 /* If set (default for legacy reasons), when following a fork, GDB
132 will detach from one of the fork branches, child or parent.
133 Exactly which branch is detached depends on the 'set follow-fork-mode'
134 setting. */
135
136 static int detach_fork = 1;
137
138 int debug_displaced = 0;
139 static void
140 show_debug_displaced (struct ui_file *file, int from_tty,
141 struct cmd_list_element *c, const char *value)
142 {
143 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
144 }
145
146 unsigned int debug_infrun = 0;
147 static void
148 show_debug_infrun (struct ui_file *file, int from_tty,
149 struct cmd_list_element *c, const char *value)
150 {
151 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
152 }
153
154
155 /* Support for disabling address space randomization. */
156
157 int disable_randomization = 1;
158
159 static void
160 show_disable_randomization (struct ui_file *file, int from_tty,
161 struct cmd_list_element *c, const char *value)
162 {
163 if (target_supports_disable_randomization ())
164 fprintf_filtered (file,
165 _("Disabling randomization of debuggee's "
166 "virtual address space is %s.\n"),
167 value);
168 else
169 fputs_filtered (_("Disabling randomization of debuggee's "
170 "virtual address space is unsupported on\n"
171 "this platform.\n"), file);
172 }
173
174 static void
175 set_disable_randomization (char *args, int from_tty,
176 struct cmd_list_element *c)
177 {
178 if (!target_supports_disable_randomization ())
179 error (_("Disabling randomization of debuggee's "
180 "virtual address space is unsupported on\n"
181 "this platform."));
182 }
183
184 /* User interface for non-stop mode. */
185
186 int non_stop = 0;
187 static int non_stop_1 = 0;
188
189 static void
190 set_non_stop (char *args, int from_tty,
191 struct cmd_list_element *c)
192 {
193 if (target_has_execution)
194 {
195 non_stop_1 = non_stop;
196 error (_("Cannot change this setting while the inferior is running."));
197 }
198
199 non_stop = non_stop_1;
200 }
201
202 static void
203 show_non_stop (struct ui_file *file, int from_tty,
204 struct cmd_list_element *c, const char *value)
205 {
206 fprintf_filtered (file,
207 _("Controlling the inferior in non-stop mode is %s.\n"),
208 value);
209 }
210
211 /* "Observer mode" is somewhat like a more extreme version of
212 non-stop, in which all GDB operations that might affect the
213 target's execution have been disabled. */
214
215 int observer_mode = 0;
216 static int observer_mode_1 = 0;
217
218 static void
219 set_observer_mode (char *args, int from_tty,
220 struct cmd_list_element *c)
221 {
222 if (target_has_execution)
223 {
224 observer_mode_1 = observer_mode;
225 error (_("Cannot change this setting while the inferior is running."));
226 }
227
228 observer_mode = observer_mode_1;
229
230 may_write_registers = !observer_mode;
231 may_write_memory = !observer_mode;
232 may_insert_breakpoints = !observer_mode;
233 may_insert_tracepoints = !observer_mode;
234 /* We can insert fast tracepoints in or out of observer mode,
235 but enable them if we're going into this mode. */
236 if (observer_mode)
237 may_insert_fast_tracepoints = 1;
238 may_stop = !observer_mode;
239 update_target_permissions ();
240
241 /* Going *into* observer mode we must force non-stop, then
242 going out we leave it that way. */
243 if (observer_mode)
244 {
245 target_async_permitted = 1;
246 pagination_enabled = 0;
247 non_stop = non_stop_1 = 1;
248 }
249
250 if (from_tty)
251 printf_filtered (_("Observer mode is now %s.\n"),
252 (observer_mode ? "on" : "off"));
253 }
254
255 static void
256 show_observer_mode (struct ui_file *file, int from_tty,
257 struct cmd_list_element *c, const char *value)
258 {
259 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
260 }
261
262 /* This updates the value of observer mode based on changes in
263 permissions. Note that we are deliberately ignoring the values of
264 may-write-registers and may-write-memory, since the user may have
265 reason to enable these during a session, for instance to turn on a
266 debugging-related global. */
267
268 void
269 update_observer_mode (void)
270 {
271 int newval;
272
273 newval = (!may_insert_breakpoints
274 && !may_insert_tracepoints
275 && may_insert_fast_tracepoints
276 && !may_stop
277 && non_stop);
278
279 /* Let the user know if things change. */
280 if (newval != observer_mode)
281 printf_filtered (_("Observer mode is now %s.\n"),
282 (newval ? "on" : "off"));
283
284 observer_mode = observer_mode_1 = newval;
285 }
286
287 /* Tables of how to react to signals; the user sets them. */
288
289 static unsigned char *signal_stop;
290 static unsigned char *signal_print;
291 static unsigned char *signal_program;
292
293 /* Table of signals that are registered with "catch signal". A
294 non-zero entry indicates that the signal is caught by some "catch
295 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
296 signals. */
297 static unsigned char *signal_catch;
298
299 /* Table of signals that the target may silently handle.
300 This is automatically determined from the flags above,
301 and simply cached here. */
302 static unsigned char *signal_pass;
303
304 #define SET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 1; \
310 } while (0)
311
312 #define UNSET_SIGS(nsigs,sigs,flags) \
313 do { \
314 int signum = (nsigs); \
315 while (signum-- > 0) \
316 if ((sigs)[signum]) \
317 (flags)[signum] = 0; \
318 } while (0)
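
/* Editorial sketch, not part of the original infrun.c: one plausible
   use of the SET_SIGS/UNSET_SIGS macros above.  Given a table of
   signals selected on the command line, mark them so that GDB both
   stops for them and announces them.  The function name is
   hypothetical; the real users of these macros are the signal
   handling commands defined later in this file.  */
#if 0
static void
example_mark_stop_signals (int nsigs, const unsigned char *selected)
{
  SET_SIGS (nsigs, selected, signal_stop);	/* Stop when they arrive.  */
  SET_SIGS (nsigs, selected, signal_print);	/* And print them.  */
}
#endif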
319
320 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
321 this function is to avoid exporting `signal_program'. */
322
323 void
324 update_signals_program_target (void)
325 {
326 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
327 }
328
329 /* Value to pass to target_resume() to cause all threads to resume. */
330
331 #define RESUME_ALL minus_one_ptid
332
333 /* Command list pointer for the "stop" placeholder. */
334
335 static struct cmd_list_element *stop_command;
336
337 /* Function inferior was in as of last step command. */
338
339 static struct symbol *step_start_function;
340
341 /* Nonzero if we want to give control to the user when we're notified
342 of shared library events by the dynamic linker. */
343 int stop_on_solib_events;
344
345 /* Enable or disable optional shared library event breakpoints
346 as appropriate when the above flag is changed. */
347
348 static void
349 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
350 {
351 update_solib_breakpoints ();
352 }
353
354 static void
355 show_stop_on_solib_events (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
359 value);
360 }
361
362 /* Nonzero means expecting a trace trap
363 and should stop the inferior and return silently when it happens. */
364
365 int stop_after_trap;
366
367 /* Save register contents here when executing a "finish" command or when
368 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
369 Thus this contains the return value from the called function (assuming
370 values are returned in a register). */
371
372 struct regcache *stop_registers;
373
374 /* Nonzero after stop if current stack frame should be printed. */
375
376 static int stop_print_frame;
377
378 /* This is a cached copy of the pid/waitstatus of the last event
379 returned by target_wait()/deprecated_target_wait_hook(). This
380 information is returned by get_last_target_status(). */
381 static ptid_t target_last_wait_ptid;
382 static struct target_waitstatus target_last_waitstatus;
383
384 static void context_switch (ptid_t ptid);
385
386 void init_thread_stepping_state (struct thread_info *tss);
387
388 static void init_infwait_state (void);
389
390 static const char follow_fork_mode_child[] = "child";
391 static const char follow_fork_mode_parent[] = "parent";
392
393 static const char *const follow_fork_mode_kind_names[] = {
394 follow_fork_mode_child,
395 follow_fork_mode_parent,
396 NULL
397 };
398
399 static const char *follow_fork_mode_string = follow_fork_mode_parent;
400 static void
401 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
402 struct cmd_list_element *c, const char *value)
403 {
404 fprintf_filtered (file,
405 _("Debugger response to a program "
406 "call of fork or vfork is \"%s\".\n"),
407 value);
408 }
409 \f
410
411 /* Tell the target to follow the fork we're stopped at. Returns true
412 if the inferior should be resumed; false, if the target for some
413 reason decided it's best not to resume. */
414
415 static int
416 follow_fork (void)
417 {
418 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
419 int should_resume = 1;
420 struct thread_info *tp;
421
422 /* Copy user stepping state to the new inferior thread. FIXME: the
423 followed fork child thread should have a copy of most of the
424 parent thread structure's run control related fields, not just these.
425 Initialized to avoid "may be used uninitialized" warnings from gcc. */
426 struct breakpoint *step_resume_breakpoint = NULL;
427 struct breakpoint *exception_resume_breakpoint = NULL;
428 CORE_ADDR step_range_start = 0;
429 CORE_ADDR step_range_end = 0;
430 struct frame_id step_frame_id = { 0 };
431
432 if (!non_stop)
433 {
434 ptid_t wait_ptid;
435 struct target_waitstatus wait_status;
436
437 /* Get the last target status returned by target_wait(). */
438 get_last_target_status (&wait_ptid, &wait_status);
439
440 /* If not stopped at a fork event, then there's nothing else to
441 do. */
442 if (wait_status.kind != TARGET_WAITKIND_FORKED
443 && wait_status.kind != TARGET_WAITKIND_VFORKED)
444 return 1;
445
446 /* Check if we switched over from WAIT_PTID, since the event was
447 reported. */
448 if (!ptid_equal (wait_ptid, minus_one_ptid)
449 && !ptid_equal (inferior_ptid, wait_ptid))
450 {
451 /* We did. Switch back to WAIT_PTID thread, to tell the
452 target to follow it (in either direction). We'll
453 afterwards refuse to resume, and inform the user what
454 happened. */
455 switch_to_thread (wait_ptid);
456 should_resume = 0;
457 }
458 }
459
460 tp = inferior_thread ();
461
462 /* If there were any forks/vforks that were caught and are now to be
463 followed, then do so now. */
464 switch (tp->pending_follow.kind)
465 {
466 case TARGET_WAITKIND_FORKED:
467 case TARGET_WAITKIND_VFORKED:
468 {
469 ptid_t parent, child;
470
471 /* If the user did a next/step, etc, over a fork call,
472 preserve the stepping state in the fork child. */
473 if (follow_child && should_resume)
474 {
475 step_resume_breakpoint = clone_momentary_breakpoint
476 (tp->control.step_resume_breakpoint);
477 step_range_start = tp->control.step_range_start;
478 step_range_end = tp->control.step_range_end;
479 step_frame_id = tp->control.step_frame_id;
480 exception_resume_breakpoint
481 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
482
483 /* For now, delete the parent's sr breakpoint, otherwise,
484 parent/child sr breakpoints are considered duplicates,
485 and the child version will not be installed. Remove
486 this when the breakpoints module becomes aware of
487 inferiors and address spaces. */
488 delete_step_resume_breakpoint (tp);
489 tp->control.step_range_start = 0;
490 tp->control.step_range_end = 0;
491 tp->control.step_frame_id = null_frame_id;
492 delete_exception_resume_breakpoint (tp);
493 }
494
495 parent = inferior_ptid;
496 child = tp->pending_follow.value.related_pid;
497
498 /* Tell the target to do whatever is necessary to follow
499 either parent or child. */
500 if (target_follow_fork (follow_child, detach_fork))
501 {
502 /* Target refused to follow, or there's some other reason
503 we shouldn't resume. */
504 should_resume = 0;
505 }
506 else
507 {
508 /* This pending follow fork event is now handled, one way
509 or another. The previously selected thread may be gone
510 from the lists by now, but if it is still around, we need
511 to clear the pending follow request. */
512 tp = find_thread_ptid (parent);
513 if (tp)
514 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
515
516 /* This makes sure we don't try to apply the "Switched
517 over from WAIT_PTID" logic above. */
518 nullify_last_target_wait_ptid ();
519
520 /* If we followed the child, switch to it... */
521 if (follow_child)
522 {
523 switch_to_thread (child);
524
525 /* ... and preserve the stepping state, in case the
526 user was stepping over the fork call. */
527 if (should_resume)
528 {
529 tp = inferior_thread ();
530 tp->control.step_resume_breakpoint
531 = step_resume_breakpoint;
532 tp->control.step_range_start = step_range_start;
533 tp->control.step_range_end = step_range_end;
534 tp->control.step_frame_id = step_frame_id;
535 tp->control.exception_resume_breakpoint
536 = exception_resume_breakpoint;
537 }
538 else
539 {
540 /* If we get here, it was because we're trying to
541 resume from a fork catchpoint, but, the user
542 has switched threads away from the thread that
543 forked. In that case, the resume command
544 issued is most likely not applicable to the
545 child, so just warn, and refuse to resume. */
546 warning (_("Not resuming: switched threads "
547 "before following fork child.\n"));
548 }
549
550 /* Reset breakpoints in the child as appropriate. */
551 follow_inferior_reset_breakpoints ();
552 }
553 else
554 switch_to_thread (parent);
555 }
556 }
557 break;
558 case TARGET_WAITKIND_SPURIOUS:
559 /* Nothing to follow. */
560 break;
561 default:
562 internal_error (__FILE__, __LINE__,
563 "Unexpected pending_follow.kind %d\n",
564 tp->pending_follow.kind);
565 break;
566 }
567
568 return should_resume;
569 }
570
571 void
572 follow_inferior_reset_breakpoints (void)
573 {
574 struct thread_info *tp = inferior_thread ();
575
576 /* Was there a step_resume breakpoint? (There was if the user
577 did a "next" at the fork() call.) If so, explicitly reset its
578 thread number.
579
580 step_resumes are a form of bp that are made to be per-thread.
581 Since we created the step_resume bp when the parent process
582 was being debugged, and now are switching to the child process,
583 from the breakpoint package's viewpoint, that's a switch of
584 "threads". We must update the bp's notion of which thread
585 it is for, or it'll be ignored when it triggers. */
586
587 if (tp->control.step_resume_breakpoint)
588 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
589
590 if (tp->control.exception_resume_breakpoint)
591 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
592
593 /* Reinsert all breakpoints in the child. The user may have set
594 breakpoints after catching the fork, in which case those
595 were never set in the child, but only in the parent. This makes
596 sure the inserted breakpoints match the breakpoint list. */
597
598 breakpoint_re_set ();
599 insert_breakpoints ();
600 }
601
602 /* The child has exited or execed: resume the threads of the parent that the
603 user wanted to be executing. */
604
605 static int
606 proceed_after_vfork_done (struct thread_info *thread,
607 void *arg)
608 {
609 int pid = * (int *) arg;
610
611 if (ptid_get_pid (thread->ptid) == pid
612 && is_running (thread->ptid)
613 && !is_executing (thread->ptid)
614 && !thread->stop_requested
615 && thread->suspend.stop_signal == GDB_SIGNAL_0)
616 {
617 if (debug_infrun)
618 fprintf_unfiltered (gdb_stdlog,
619 "infrun: resuming vfork parent thread %s\n",
620 target_pid_to_str (thread->ptid));
621
622 switch_to_thread (thread->ptid);
623 clear_proceed_status ();
624 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
625 }
626
627 return 0;
628 }
629
630 /* Called whenever we notice an exec or exit event, to handle
631 detaching or resuming a vfork parent. */
632
633 static void
634 handle_vfork_child_exec_or_exit (int exec)
635 {
636 struct inferior *inf = current_inferior ();
637
638 if (inf->vfork_parent)
639 {
640 int resume_parent = -1;
641
642 /* This exec or exit marks the end of the shared memory region
643 between the parent and the child. If the user wanted to
644 detach from the parent, now is the time. */
645
646 if (inf->vfork_parent->pending_detach)
647 {
648 struct thread_info *tp;
649 struct cleanup *old_chain;
650 struct program_space *pspace;
651 struct address_space *aspace;
652
653 /* follow-fork child, detach-on-fork on. */
654
655 inf->vfork_parent->pending_detach = 0;
656
657 if (!exec)
658 {
659 /* If we're handling a child exit, then inferior_ptid
660 points at the inferior's pid, not to a thread. */
661 old_chain = save_inferior_ptid ();
662 save_current_program_space ();
663 save_current_inferior ();
664 }
665 else
666 old_chain = save_current_space_and_thread ();
667
668 /* We're letting go of the parent. */
669 tp = any_live_thread_of_process (inf->vfork_parent->pid);
670 switch_to_thread (tp->ptid);
671
672 /* We're about to detach from the parent, which implicitly
673 removes breakpoints from its address space. There's a
674 catch here: we want to reuse the spaces for the child,
675 but, parent/child are still sharing the pspace at this
676 point, although the exec in reality makes the kernel give
677 the child a fresh set of new pages. The problem here is
678 that the breakpoints module, being unaware of this, would
679 likely choose the child process to write to the parent
680 address space. Swapping the child temporarily away from
681 the spaces has the desired effect. Yes, this is "sort
682 of" a hack. */
683
684 pspace = inf->pspace;
685 aspace = inf->aspace;
686 inf->aspace = NULL;
687 inf->pspace = NULL;
688
689 if (debug_infrun || info_verbose)
690 {
691 target_terminal_ours ();
692
693 if (exec)
694 fprintf_filtered (gdb_stdlog,
695 "Detaching vfork parent process "
696 "%d after child exec.\n",
697 inf->vfork_parent->pid);
698 else
699 fprintf_filtered (gdb_stdlog,
700 "Detaching vfork parent process "
701 "%d after child exit.\n",
702 inf->vfork_parent->pid);
703 }
704
705 target_detach (NULL, 0);
706
707 /* Put it back. */
708 inf->pspace = pspace;
709 inf->aspace = aspace;
710
711 do_cleanups (old_chain);
712 }
713 else if (exec)
714 {
715 /* We're staying attached to the parent, so, really give the
716 child a new address space. */
717 inf->pspace = add_program_space (maybe_new_address_space ());
718 inf->aspace = inf->pspace->aspace;
719 inf->removable = 1;
720 set_current_program_space (inf->pspace);
721
722 resume_parent = inf->vfork_parent->pid;
723
724 /* Break the bonds. */
725 inf->vfork_parent->vfork_child = NULL;
726 }
727 else
728 {
729 struct cleanup *old_chain;
730 struct program_space *pspace;
731
732 /* If this is a vfork child exiting, then the pspace and
733 aspaces were shared with the parent. Since we're
734 reporting the process exit, we'll be mourning all that is
735 found in the address space, and switching to null_ptid,
736 preparing to start a new inferior. But, since we don't
737 want to clobber the parent's address/program spaces, we
738 go ahead and create a new one for this exiting
739 inferior. */
740
741 /* Switch to null_ptid, so that clone_program_space doesn't want
742 to read the selected frame of a dead process. */
743 old_chain = save_inferior_ptid ();
744 inferior_ptid = null_ptid;
745
746 /* This inferior is dead, so avoid giving the breakpoints
747 module the option to write through to it (cloning a
748 program space resets breakpoints). */
749 inf->aspace = NULL;
750 inf->pspace = NULL;
751 pspace = add_program_space (maybe_new_address_space ());
752 set_current_program_space (pspace);
753 inf->removable = 1;
754 inf->symfile_flags = SYMFILE_NO_READ;
755 clone_program_space (pspace, inf->vfork_parent->pspace);
756 inf->pspace = pspace;
757 inf->aspace = pspace->aspace;
758
759 /* Put back inferior_ptid. We'll continue mourning this
760 inferior. */
761 do_cleanups (old_chain);
762
763 resume_parent = inf->vfork_parent->pid;
764 /* Break the bonds. */
765 inf->vfork_parent->vfork_child = NULL;
766 }
767
768 inf->vfork_parent = NULL;
769
770 gdb_assert (current_program_space == inf->pspace);
771
772 if (non_stop && resume_parent != -1)
773 {
774 /* If the user wanted the parent to be running, let it go
775 free now. */
776 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
777
778 if (debug_infrun)
779 fprintf_unfiltered (gdb_stdlog,
780 "infrun: resuming vfork parent process %d\n",
781 resume_parent);
782
783 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
784
785 do_cleanups (old_chain);
786 }
787 }
788 }
789
790 /* Enum strings for "set|show follow-exec-mode". */
791
792 static const char follow_exec_mode_new[] = "new";
793 static const char follow_exec_mode_same[] = "same";
794 static const char *const follow_exec_mode_names[] =
795 {
796 follow_exec_mode_new,
797 follow_exec_mode_same,
798 NULL,
799 };
800
801 static const char *follow_exec_mode_string = follow_exec_mode_same;
802 static void
803 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
804 struct cmd_list_element *c, const char *value)
805 {
806 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
807 }
808
809 /* EXECD_PATHNAME is assumed to be non-NULL. */
810
811 static void
812 follow_exec (ptid_t pid, char *execd_pathname)
813 {
814 struct thread_info *th = inferior_thread ();
815 struct inferior *inf = current_inferior ();
816
817 /* This is an exec event that we actually wish to pay attention to.
818 Refresh our symbol table to the newly exec'd program, remove any
819 momentary bp's, etc.
820
821 If there are breakpoints, they aren't really inserted now,
822 since the exec() transformed our inferior into a fresh set
823 of instructions.
824
825 We want to preserve symbolic breakpoints on the list, since
826 we have hopes that they can be reset after the new a.out's
827 symbol table is read.
828
829 However, any "raw" breakpoints must be removed from the list
830 (e.g., the solib bp's), since their address is probably invalid
831 now.
832
833 And, we DON'T want to call delete_breakpoints() here, since
834 that may write the bp's "shadow contents" (the instruction
835 value that was overwritten with a TRAP instruction). Since
836 we now have a new a.out, those shadow contents aren't valid. */
837
838 mark_breakpoints_out ();
839
840 update_breakpoints_after_exec ();
841
842 /* If there was one, it's gone now. We cannot truly step-to-next
843 statement through an exec(). */
844 th->control.step_resume_breakpoint = NULL;
845 th->control.exception_resume_breakpoint = NULL;
846 th->control.step_range_start = 0;
847 th->control.step_range_end = 0;
848
849 /* The target reports the exec event to the main thread, even if
850 some other thread does the exec, and even if the main thread was
851 already stopped --- if debugging in non-stop mode, it's possible
852 the user had the main thread held stopped in the previous image
853 --- release it now. This is the same behavior as step-over-exec
854 with scheduler-locking on in all-stop mode. */
855 th->stop_requested = 0;
856
857 /* What is this a.out's name? */
858 printf_unfiltered (_("%s is executing new program: %s\n"),
859 target_pid_to_str (inferior_ptid),
860 execd_pathname);
861
862 /* We've followed the inferior through an exec. Therefore, the
863 inferior has essentially been killed & reborn. */
864
865 gdb_flush (gdb_stdout);
866
867 breakpoint_init_inferior (inf_execd);
868
869 if (gdb_sysroot && *gdb_sysroot)
870 {
871 char *name = alloca (strlen (gdb_sysroot)
872 + strlen (execd_pathname)
873 + 1);
874
875 strcpy (name, gdb_sysroot);
876 strcat (name, execd_pathname);
877 execd_pathname = name;
878 }
879
880 /* Reset the shared library package. This ensures that we get a
881 shlib event when the child reaches "_start", at which point the
882 dld will have had a chance to initialize the child. */
883 /* Also, loading a symbol file below may trigger symbol lookups, and
884 we don't want those to be satisfied by the libraries of the
885 previous incarnation of this process. */
886 no_shared_libraries (NULL, 0);
887
888 if (follow_exec_mode_string == follow_exec_mode_new)
889 {
890 struct program_space *pspace;
891
892 /* The user wants to keep the old inferior and program spaces
893 around. Create a new fresh one, and switch to it. */
894
895 inf = add_inferior (current_inferior ()->pid);
896 pspace = add_program_space (maybe_new_address_space ());
897 inf->pspace = pspace;
898 inf->aspace = pspace->aspace;
899
900 exit_inferior_num_silent (current_inferior ()->num);
901
902 set_current_inferior (inf);
903 set_current_program_space (pspace);
904 }
905 else
906 {
907 /* The old description may no longer be fit for the new image.
908 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
909 old description; we'll read a new one below. No need to do
910 this on "follow-exec-mode new", as the old inferior stays
911 around (its description is later cleared/refetched on
912 restart). */
913 target_clear_description ();
914 }
915
916 gdb_assert (current_program_space == inf->pspace);
917
918 /* That a.out is now the one to use. */
919 exec_file_attach (execd_pathname, 0);
920
921 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
922 (Position Independent Executable) main symbol file will get applied by
923 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
924 the breakpoints with the zero displacement. */
925
926 symbol_file_add (execd_pathname,
927 (inf->symfile_flags
928 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
929 NULL, 0);
930
931 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
932 set_initial_language ();
933
934 /* If the target can specify a description, read it. Must do this
935 after flipping to the new executable (because the target supplied
936 description must be compatible with the executable's
937 architecture, and the old executable may e.g., be 32-bit, while
938 the new one 64-bit), and before anything involving memory or
939 registers. */
940 target_find_description ();
941
942 solib_create_inferior_hook (0);
943
944 jit_inferior_created_hook ();
945
946 breakpoint_re_set ();
947
948 /* Reinsert all breakpoints. (Those which were symbolic have
949 been reset to the proper address in the new a.out, thanks
950 to symbol_file_command...). */
951 insert_breakpoints ();
952
953 /* The next resume of this inferior should bring it to the shlib
954 startup breakpoints. (If the user had also set bp's on
955 "main" from the old (parent) process, then they'll auto-
956 matically get reset there in the new process.). */
957 }
958
959 /* Non-zero if we are just simulating a single-step. This is needed
960 because we cannot remove the breakpoints in the inferior process
961 until after the `wait' in `wait_for_inferior'. */
962 static int singlestep_breakpoints_inserted_p = 0;
963
964 /* The thread we inserted single-step breakpoints for. */
965 static ptid_t singlestep_ptid;
966
967 /* PC when we started this single-step. */
968 static CORE_ADDR singlestep_pc;
969
970 /* Info about an instruction that is being stepped over. Invalid if
971 ASPACE is NULL. */
972
973 struct step_over_info
974 {
975 /* The instruction's address space. */
976 struct address_space *aspace;
977
978 /* The instruction's address. */
979 CORE_ADDR address;
980 };
981
982 /* The step-over info of the location that is being stepped over.
983
984 Note that with async/breakpoint always-inserted mode, a user might
985 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
986 being stepped over. As setting a new breakpoint inserts all
987 breakpoints, we need to make sure the breakpoint being stepped over
988 isn't inserted then. We do that by only clearing the step-over
989 info when the step-over is actually finished (or aborted).
990
991 Presently GDB can only step over one breakpoint at any given time.
992 Given that threads which can't run code in the same address space as the
993 breakpoint's can't really miss the breakpoint, GDB could be taught
994 to step over at most one breakpoint per address space (so this info
995 could move to the address space object if/when GDB is extended).
996 The set of breakpoints being stepped over will normally be much
997 smaller than the set of all breakpoints, so a flag in the
998 breakpoint location structure would be wasteful. A separate list
999 also saves complexity and run-time, as otherwise we'd have to go
1000 through all breakpoint locations clearing their flag whenever we
1001 start a new sequence. Similar considerations weigh against storing
1002 this info in the thread object. Plus, not all step overs actually
1003 have breakpoint locations -- e.g., stepping past a single-step
1004 breakpoint, or stepping to complete a non-continuable
1005 watchpoint. */
1006 static struct step_over_info step_over_info;
1007
1008 /* Record the address of the breakpoint/instruction we're currently
1009 stepping over. */
1010
1011 static void
1012 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1013 {
1014 step_over_info.aspace = aspace;
1015 step_over_info.address = address;
1016 }
1017
1018 /* Called when we're no longer stepping over a breakpoint / an
1019 instruction, so all breakpoints are free to be (re)inserted. */
1020
1021 static void
1022 clear_step_over_info (void)
1023 {
1024 step_over_info.aspace = NULL;
1025 step_over_info.address = 0;
1026 }
1027
1028 /* See inferior.h. */
1029
1030 int
1031 stepping_past_instruction_at (struct address_space *aspace,
1032 CORE_ADDR address)
1033 {
1034 return (step_over_info.aspace != NULL
1035 && breakpoint_address_match (aspace, address,
1036 step_over_info.aspace,
1037 step_over_info.address));
1038 }
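
/* Editorial sketch, not part of the original infrun.c: how a
   breakpoint-insertion path might consult stepping_past_instruction_at
   so that the location currently being stepped over is not re-inserted
   mid-step (see the comment above step_over_info).  The function and
   its caller are hypothetical.  */
#if 0
static int
example_should_insert_location (struct address_space *aspace, CORE_ADDR addr)
{
  if (stepping_past_instruction_at (aspace, addr))
    return 0;	/* Defer insertion until the step-over finishes.  */

  return 1;
}
#endif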
1039
1040 \f
1041 /* Displaced stepping. */
1042
1043 /* In non-stop debugging mode, we must take special care to manage
1044 breakpoints properly; in particular, the traditional strategy for
1045 stepping a thread past a breakpoint it has hit is unsuitable.
1046 'Displaced stepping' is a tactic for stepping one thread past a
1047 breakpoint it has hit while ensuring that other threads running
1048 concurrently will hit the breakpoint as they should.
1049
1050 The traditional way to step a thread T off a breakpoint in a
1051 multi-threaded program in all-stop mode is as follows:
1052
1053 a0) Initially, all threads are stopped, and breakpoints are not
1054 inserted.
1055 a1) We single-step T, leaving breakpoints uninserted.
1056 a2) We insert breakpoints, and resume all threads.
1057
1058 In non-stop debugging, however, this strategy is unsuitable: we
1059 don't want to have to stop all threads in the system in order to
1060 continue or step T past a breakpoint. Instead, we use displaced
1061 stepping:
1062
1063 n0) Initially, T is stopped, other threads are running, and
1064 breakpoints are inserted.
1065 n1) We copy the instruction "under" the breakpoint to a separate
1066 location, outside the main code stream, making any adjustments
1067 to the instruction, register, and memory state as directed by
1068 T's architecture.
1069 n2) We single-step T over the instruction at its new location.
1070 n3) We adjust the resulting register and memory state as directed
1071 by T's architecture. This includes resetting T's PC to point
1072 back into the main instruction stream.
1073 n4) We resume T.
1074
1075 This approach depends on the following gdbarch methods:
1076
1077 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1078 indicate where to copy the instruction, and how much space must
1079 be reserved there. We use these in step n1.
1080
1081 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1082 address, and makes any necessary adjustments to the instruction,
1083 register contents, and memory. We use this in step n1.
1084
1085 - gdbarch_displaced_step_fixup adjusts registers and memory after
1086 we have successfully single-stepped the instruction, to yield the
1087 same effect the instruction would have had if we had executed it
1088 at its original address. We use this in step n3.
1089
1090 - gdbarch_displaced_step_free_closure provides cleanup.
1091
1092 The gdbarch_displaced_step_copy_insn and
1093 gdbarch_displaced_step_fixup functions must be written so that
1094 copying an instruction with gdbarch_displaced_step_copy_insn,
1095 single-stepping across the copied instruction, and then applying
1096 gdbarch_displaced_step_fixup should have the same effects on the
1097 thread's memory and registers as stepping the instruction in place
1098 would have. Exactly which responsibilities fall to the copy and
1099 which fall to the fixup is up to the author of those functions.
1100
1101 See the comments in gdbarch.sh for details.
1102
1103 Note that displaced stepping and software single-step cannot
1104 currently be used in combination, although with some care I think
1105 they could be made to. Software single-step works by placing
1106 breakpoints on all possible subsequent instructions; if the
1107 displaced instruction is a PC-relative jump, those breakpoints
1108 could fall in very strange places --- on pages that aren't
1109 executable, or at addresses that are not proper instruction
1110 boundaries. (We do generally let other threads run while we wait
1111 to hit the software single-step breakpoint, and they might
1112 encounter such a corrupted instruction.) One way to work around
1113 this would be to have gdbarch_displaced_step_copy_insn fully
1114 simulate the effect of PC-relative instructions (and return NULL)
1115 on architectures that use software single-stepping.
1116
1117 In non-stop mode, we can have independent and simultaneous step
1118 requests, so more than one thread may need to simultaneously step
1119 over a breakpoint. The current implementation assumes there is
1120 only one scratch space per process. In this case, we have to
1121 serialize access to the scratch space. If thread A wants to step
1122 over a breakpoint, but we are currently waiting for some other
1123 thread to complete a displaced step, we leave thread A stopped and
1124 place it in the displaced_step_request_queue. Whenever a displaced
1125 step finishes, we pick the next thread in the queue and start a new
1126 displaced step operation on it. See displaced_step_prepare and
1127 displaced_step_fixup for details. */
1128
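
/* Editorial sketch, not part of the original infrun.c: the n0-n4
   sequence described above, spelled out as simplified pseudo-C.  All
   example_* helpers are hypothetical stand-ins; in this file the real
   work is split between displaced_step_prepare and
   displaced_step_fixup, defined below.  */
#if 0
static void
example_displaced_step_over_breakpoint (ptid_t thread)
{
  CORE_ADDR original_pc, scratch;

  /* n1: copy the instruction "under" the breakpoint to the scratch
     area, letting the architecture adjust it as needed.  */
  original_pc = example_read_pc (thread);
  scratch = example_copy_insn_to_scratch (thread, original_pc);

  /* n2: single-step the thread at the copied location.  */
  example_write_pc (thread, scratch);
  example_single_step (thread);

  /* n3: fix up registers and memory so the effect matches executing
     the instruction at its original address (including the PC).  */
  example_fixup (thread, original_pc, scratch);

  /* n4: resume the thread normally.  */
  example_resume (thread);
}
#endif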
1129 struct displaced_step_request
1130 {
1131 ptid_t ptid;
1132 struct displaced_step_request *next;
1133 };
1134
1135 /* Per-inferior displaced stepping state. */
1136 struct displaced_step_inferior_state
1137 {
1138 /* Pointer to next in linked list. */
1139 struct displaced_step_inferior_state *next;
1140
1141 /* The process this displaced step state refers to. */
1142 int pid;
1143
1144 /* A queue of pending displaced stepping requests. One entry per
1145 thread that needs to do a displaced step. */
1146 struct displaced_step_request *step_request_queue;
1147
1148 /* If this is not null_ptid, this is the thread carrying out a
1149 displaced single-step in process PID. This thread's state will
1150 require fixing up once it has completed its step. */
1151 ptid_t step_ptid;
1152
1153 /* The architecture the thread had when we stepped it. */
1154 struct gdbarch *step_gdbarch;
1155
1156 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1157 for post-step cleanup. */
1158 struct displaced_step_closure *step_closure;
1159
1160 /* The address of the original instruction, and the copy we
1161 made. */
1162 CORE_ADDR step_original, step_copy;
1163
1164 /* Saved contents of copy area. */
1165 gdb_byte *step_saved_copy;
1166 };
1167
1168 /* The list of states of processes involved in displaced stepping
1169 presently. */
1170 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1171
1172 /* Get the displaced stepping state of process PID. */
1173
1174 static struct displaced_step_inferior_state *
1175 get_displaced_stepping_state (int pid)
1176 {
1177 struct displaced_step_inferior_state *state;
1178
1179 for (state = displaced_step_inferior_states;
1180 state != NULL;
1181 state = state->next)
1182 if (state->pid == pid)
1183 return state;
1184
1185 return NULL;
1186 }
1187
1188 /* Add a new displaced stepping state for process PID to the displaced
1189 stepping state list, or return a pointer to the existing entry
1190 if there already is one. Never returns NULL. */
1191
1192 static struct displaced_step_inferior_state *
1193 add_displaced_stepping_state (int pid)
1194 {
1195 struct displaced_step_inferior_state *state;
1196
1197 for (state = displaced_step_inferior_states;
1198 state != NULL;
1199 state = state->next)
1200 if (state->pid == pid)
1201 return state;
1202
1203 state = xcalloc (1, sizeof (*state));
1204 state->pid = pid;
1205 state->next = displaced_step_inferior_states;
1206 displaced_step_inferior_states = state;
1207
1208 return state;
1209 }
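
/* Editorial sketch, not part of the original infrun.c: the intended
   division of labor between the two routines above.  Starting a
   step-over creates the per-process state on demand; event handling
   only looks it up and bails out when the process never did a
   displaced step.  The function name is hypothetical.  */
#if 0
static void
example_displaced_state_usage (ptid_t ptid)
{
  struct displaced_step_inferior_state *displaced;

  /* When preparing a displaced step, create the state if missing.  */
  displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

  /* When handling a stop event, merely look it up.  */
  displaced = get_displaced_stepping_state (ptid_get_pid (ptid));
  if (displaced == NULL)
    return;	/* No displaced step was ever started in this process.  */
}
#endif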
1210
1211 /* If the inferior is performing a displaced step, and ADDR equals the
1212 starting address of the copy area, return the corresponding
1213 displaced_step_closure. Otherwise, return NULL. */
1214
1215 struct displaced_step_closure*
1216 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1217 {
1218 struct displaced_step_inferior_state *displaced
1219 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1220
1221 /* If checking the mode of displaced instruction in copy area. */
1222 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1223 && (displaced->step_copy == addr))
1224 return displaced->step_closure;
1225
1226 return NULL;
1227 }
1228
1229 /* Remove the displaced stepping state of process PID. */
1230
1231 static void
1232 remove_displaced_stepping_state (int pid)
1233 {
1234 struct displaced_step_inferior_state *it, **prev_next_p;
1235
1236 gdb_assert (pid != 0);
1237
1238 it = displaced_step_inferior_states;
1239 prev_next_p = &displaced_step_inferior_states;
1240 while (it)
1241 {
1242 if (it->pid == pid)
1243 {
1244 *prev_next_p = it->next;
1245 xfree (it);
1246 return;
1247 }
1248
1249 prev_next_p = &it->next;
1250 it = *prev_next_p;
1251 }
1252 }
1253
1254 static void
1255 infrun_inferior_exit (struct inferior *inf)
1256 {
1257 remove_displaced_stepping_state (inf->pid);
1258 }
1259
1260 /* If ON, and the architecture supports it, GDB will use displaced
1261 stepping to step over breakpoints. If OFF, or if the architecture
1262 doesn't support it, GDB will instead use the traditional
1263 hold-and-step approach. If AUTO (which is the default), GDB will
1264 decide which technique to use to step over breakpoints depending on
1265 which of all-stop or non-stop mode is active --- displaced stepping
1266 in non-stop mode; hold-and-step in all-stop mode. */
1267
1268 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1269
1270 static void
1271 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1272 struct cmd_list_element *c,
1273 const char *value)
1274 {
1275 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1276 fprintf_filtered (file,
1277 _("Debugger's willingness to use displaced stepping "
1278 "to step over breakpoints is %s (currently %s).\n"),
1279 value, non_stop ? "on" : "off");
1280 else
1281 fprintf_filtered (file,
1282 _("Debugger's willingness to use displaced stepping "
1283 "to step over breakpoints is %s.\n"), value);
1284 }
1285
1286 /* Return non-zero if displaced stepping can/should be used to step
1287 over breakpoints. */
1288
1289 static int
1290 use_displaced_stepping (struct gdbarch *gdbarch)
1291 {
1292 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1293 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1294 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1295 && find_record_target () == NULL);
1296 }
1297
1298 /* Clean out any stray displaced stepping state. */
1299 static void
1300 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1301 {
1302 /* Indicate that there is no cleanup pending. */
1303 displaced->step_ptid = null_ptid;
1304
1305 if (displaced->step_closure)
1306 {
1307 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1308 displaced->step_closure);
1309 displaced->step_closure = NULL;
1310 }
1311 }
1312
1313 static void
1314 displaced_step_clear_cleanup (void *arg)
1315 {
1316 struct displaced_step_inferior_state *state = arg;
1317
1318 displaced_step_clear (state);
1319 }
1320
1321 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1322 void
1323 displaced_step_dump_bytes (struct ui_file *file,
1324 const gdb_byte *buf,
1325 size_t len)
1326 {
1327 int i;
1328
1329 for (i = 0; i < len; i++)
1330 fprintf_unfiltered (file, "%02x ", buf[i]);
1331 fputs_unfiltered ("\n", file);
1332 }
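
/* Editorial sketch, not part of the original infrun.c: typical use of
   displaced_step_dump_bytes when displaced-step debugging is enabled,
   mirroring the calls made in displaced_step_prepare below.  The
   function name is hypothetical.  */
#if 0
static void
example_dump_saved_copy (struct displaced_step_inferior_state *displaced)
{
  ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);

  if (debug_displaced)
    displaced_step_dump_bytes (gdb_stdlog, displaced->step_saved_copy, len);
}
#endif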
1333
1334 /* Prepare to single-step, using displaced stepping.
1335
1336 Note that we cannot use displaced stepping when we have a signal to
1337 deliver. If we have a signal to deliver and an instruction to step
1338 over, then after the step, there will be no indication from the
1339 target whether the thread entered a signal handler or ignored the
1340 signal and stepped over the instruction successfully --- both cases
1341 result in a simple SIGTRAP. In the first case we mustn't do a
1342 fixup, and in the second case we must --- but we can't tell which.
1343 Comments in the code for 'random signals' in handle_inferior_event
1344 explain how we handle this case instead.
1345
1346 Returns 1 if preparing was successful -- this thread is going to be
1347 stepped now; or 0 if displaced stepping this thread got queued. */
1348 static int
1349 displaced_step_prepare (ptid_t ptid)
1350 {
1351 struct cleanup *old_cleanups, *ignore_cleanups;
1352 struct thread_info *tp = find_thread_ptid (ptid);
1353 struct regcache *regcache = get_thread_regcache (ptid);
1354 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1355 CORE_ADDR original, copy;
1356 ULONGEST len;
1357 struct displaced_step_closure *closure;
1358 struct displaced_step_inferior_state *displaced;
1359 int status;
1360
1361 /* We should never reach this function if the architecture does not
1362 support displaced stepping. */
1363 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1364
1365 /* Disable range stepping while executing in the scratch pad. We
1366 want a single-step even if executing the displaced instruction in
1367 the scratch buffer lands within the stepping range (e.g., a
1368 jump/branch). */
1369 tp->control.may_range_step = 0;
1370
1371 /* We have to displaced step one thread at a time, as we only have
1372 access to a single scratch space per inferior. */
1373
1374 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1375
1376 if (!ptid_equal (displaced->step_ptid, null_ptid))
1377 {
1378 /* Already waiting for a displaced step to finish. Defer this
1379 request and place in queue. */
1380 struct displaced_step_request *req, *new_req;
1381
1382 if (debug_displaced)
1383 fprintf_unfiltered (gdb_stdlog,
1384 "displaced: defering step of %s\n",
1385 target_pid_to_str (ptid));
1386
1387 new_req = xmalloc (sizeof (*new_req));
1388 new_req->ptid = ptid;
1389 new_req->next = NULL;
1390
1391 if (displaced->step_request_queue)
1392 {
1393 for (req = displaced->step_request_queue;
1394 req && req->next;
1395 req = req->next)
1396 ;
1397 req->next = new_req;
1398 }
1399 else
1400 displaced->step_request_queue = new_req;
1401
1402 return 0;
1403 }
1404 else
1405 {
1406 if (debug_displaced)
1407 fprintf_unfiltered (gdb_stdlog,
1408 "displaced: stepping %s now\n",
1409 target_pid_to_str (ptid));
1410 }
1411
1412 displaced_step_clear (displaced);
1413
1414 old_cleanups = save_inferior_ptid ();
1415 inferior_ptid = ptid;
1416
1417 original = regcache_read_pc (regcache);
1418
1419 copy = gdbarch_displaced_step_location (gdbarch);
1420 len = gdbarch_max_insn_length (gdbarch);
1421
1422 /* Save the original contents of the copy area. */
1423 displaced->step_saved_copy = xmalloc (len);
1424 ignore_cleanups = make_cleanup (free_current_contents,
1425 &displaced->step_saved_copy);
1426 status = target_read_memory (copy, displaced->step_saved_copy, len);
1427 if (status != 0)
1428 throw_error (MEMORY_ERROR,
1429 _("Error accessing memory address %s (%s) for "
1430 "displaced-stepping scratch space."),
1431 paddress (gdbarch, copy), safe_strerror (status));
1432 if (debug_displaced)
1433 {
1434 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1435 paddress (gdbarch, copy));
1436 displaced_step_dump_bytes (gdb_stdlog,
1437 displaced->step_saved_copy,
1438 len);
1439 };
1440
1441 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1442 original, copy, regcache);
1443
1444 /* We don't support the fully-simulated case at present. */
1445 gdb_assert (closure);
1446
1447 /* Save the information we need to fix things up if the step
1448 succeeds. */
1449 displaced->step_ptid = ptid;
1450 displaced->step_gdbarch = gdbarch;
1451 displaced->step_closure = closure;
1452 displaced->step_original = original;
1453 displaced->step_copy = copy;
1454
1455 make_cleanup (displaced_step_clear_cleanup, displaced);
1456
1457 /* Resume execution at the copy. */
1458 regcache_write_pc (regcache, copy);
1459
1460 discard_cleanups (ignore_cleanups);
1461
1462 do_cleanups (old_cleanups);
1463
1464 if (debug_displaced)
1465 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1466 paddress (gdbarch, copy));
1467
1468 return 1;
1469 }
1470
1471 static void
1472 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1473 const gdb_byte *myaddr, int len)
1474 {
1475 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1476
1477 inferior_ptid = ptid;
1478 write_memory (memaddr, myaddr, len);
1479 do_cleanups (ptid_cleanup);
1480 }
1481
1482 /* Restore the contents of the copy area for thread PTID. */
1483
1484 static void
1485 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1486 ptid_t ptid)
1487 {
1488 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1489
1490 write_memory_ptid (ptid, displaced->step_copy,
1491 displaced->step_saved_copy, len);
1492 if (debug_displaced)
1493 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1494 target_pid_to_str (ptid),
1495 paddress (displaced->step_gdbarch,
1496 displaced->step_copy));
1497 }
1498
1499 static void
1500 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1501 {
1502 struct cleanup *old_cleanups;
1503 struct displaced_step_inferior_state *displaced
1504 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1505
1506 /* Was any thread of this process doing a displaced step? */
1507 if (displaced == NULL)
1508 return;
1509
1510 /* Was this event for the pid we displaced? */
1511 if (ptid_equal (displaced->step_ptid, null_ptid)
1512 || ! ptid_equal (displaced->step_ptid, event_ptid))
1513 return;
1514
1515 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1516
1517 displaced_step_restore (displaced, displaced->step_ptid);
1518
1519 /* Did the instruction complete successfully? */
1520 if (signal == GDB_SIGNAL_TRAP)
1521 {
1522 /* Fix up the resulting state. */
1523 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1524 displaced->step_closure,
1525 displaced->step_original,
1526 displaced->step_copy,
1527 get_thread_regcache (displaced->step_ptid));
1528 }
1529 else
1530 {
1531 /* Since the instruction didn't complete, all we can do is
1532 relocate the PC. */
1533 struct regcache *regcache = get_thread_regcache (event_ptid);
1534 CORE_ADDR pc = regcache_read_pc (regcache);
1535
1536 pc = displaced->step_original + (pc - displaced->step_copy);
1537 regcache_write_pc (regcache, pc);
1538 }
1539
1540 do_cleanups (old_cleanups);
1541
1542 displaced->step_ptid = null_ptid;
1543
1544 /* Are there any pending displaced stepping requests? If so, run
1545 one now. Leave the state object around, since we're likely to
1546 need it again soon. */
1547 while (displaced->step_request_queue)
1548 {
1549 struct displaced_step_request *head;
1550 ptid_t ptid;
1551 struct regcache *regcache;
1552 struct gdbarch *gdbarch;
1553 CORE_ADDR actual_pc;
1554 struct address_space *aspace;
1555
1556 head = displaced->step_request_queue;
1557 ptid = head->ptid;
1558 displaced->step_request_queue = head->next;
1559 xfree (head);
1560
1561 context_switch (ptid);
1562
1563 regcache = get_thread_regcache (ptid);
1564 actual_pc = regcache_read_pc (regcache);
1565 aspace = get_regcache_aspace (regcache);
1566
1567 if (breakpoint_here_p (aspace, actual_pc))
1568 {
1569 if (debug_displaced)
1570 fprintf_unfiltered (gdb_stdlog,
1571 "displaced: stepping queued %s now\n",
1572 target_pid_to_str (ptid));
1573
1574 displaced_step_prepare (ptid);
1575
1576 gdbarch = get_regcache_arch (regcache);
1577
1578 if (debug_displaced)
1579 {
1580 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1581 gdb_byte buf[4];
1582
1583 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1584 paddress (gdbarch, actual_pc));
1585 read_memory (actual_pc, buf, sizeof (buf));
1586 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1587 }
1588
1589 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1590 displaced->step_closure))
1591 target_resume (ptid, 1, GDB_SIGNAL_0);
1592 else
1593 target_resume (ptid, 0, GDB_SIGNAL_0);
1594
1595 /* Done, we're stepping a thread. */
1596 break;
1597 }
1598 else
1599 {
1600 int step;
1601 struct thread_info *tp = inferior_thread ();
1602
1603 /* The breakpoint we were sitting under has since been
1604 removed. */
1605 tp->control.trap_expected = 0;
1606
1607 /* Go back to what we were trying to do. */
1608 step = currently_stepping (tp);
1609
1610 if (debug_displaced)
1611 fprintf_unfiltered (gdb_stdlog,
1612 "displaced: breakpoint is gone: %s, step(%d)\n",
1613 target_pid_to_str (tp->ptid), step);
1614
1615 target_resume (ptid, step, GDB_SIGNAL_0);
1616 tp->suspend.stop_signal = GDB_SIGNAL_0;
1617
1618 /* This request was discarded. See if there's any other
1619 thread waiting for its turn. */
1620 }
1621 }
1622 }
1623
1624 /* Update global variables holding ptids to hold NEW_PTID if they were
1625 holding OLD_PTID. */
1626 static void
1627 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1628 {
1629 struct displaced_step_request *it;
1630 struct displaced_step_inferior_state *displaced;
1631
1632 if (ptid_equal (inferior_ptid, old_ptid))
1633 inferior_ptid = new_ptid;
1634
1635 if (ptid_equal (singlestep_ptid, old_ptid))
1636 singlestep_ptid = new_ptid;
1637
1638 for (displaced = displaced_step_inferior_states;
1639 displaced;
1640 displaced = displaced->next)
1641 {
1642 if (ptid_equal (displaced->step_ptid, old_ptid))
1643 displaced->step_ptid = new_ptid;
1644
1645 for (it = displaced->step_request_queue; it; it = it->next)
1646 if (ptid_equal (it->ptid, old_ptid))
1647 it->ptid = new_ptid;
1648 }
1649 }
1650
1651 \f
1652 /* Resuming. */
1653
1654 /* Things to clean up if we QUIT out of resume (). */
1655 static void
1656 resume_cleanups (void *ignore)
1657 {
1658 normal_stop ();
1659 }
1660
1661 static const char schedlock_off[] = "off";
1662 static const char schedlock_on[] = "on";
1663 static const char schedlock_step[] = "step";
1664 static const char *const scheduler_enums[] = {
1665 schedlock_off,
1666 schedlock_on,
1667 schedlock_step,
1668 NULL
1669 };
1670 static const char *scheduler_mode = schedlock_off;
1671 static void
1672 show_scheduler_mode (struct ui_file *file, int from_tty,
1673 struct cmd_list_element *c, const char *value)
1674 {
1675 fprintf_filtered (file,
1676 _("Mode for locking scheduler "
1677 "during execution is \"%s\".\n"),
1678 value);
1679 }
1680
1681 static void
1682 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1683 {
1684 if (!target_can_lock_scheduler)
1685 {
1686 scheduler_mode = schedlock_off;
1687 error (_("Target '%s' cannot support this command."), target_shortname);
1688 }
1689 }
1690
1691 /* True if execution commands resume all threads of all processes by
1692 default; otherwise, resume only threads of the current inferior
1693 process. */
1694 int sched_multi = 0;
1695
1696 /* Try to set up software single stepping over the specified location.
1697 Return 1 if target_resume() should use hardware single step.
1698
1699 GDBARCH the current gdbarch.
1700 PC the location to step over. */
1701
1702 static int
1703 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1704 {
1705 int hw_step = 1;
1706
1707 if (execution_direction == EXEC_FORWARD
1708 && gdbarch_software_single_step_p (gdbarch)
1709 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1710 {
1711 hw_step = 0;
1712 /* Do not pull these breakpoints until after a `wait' in
1713 `wait_for_inferior'. */
1714 singlestep_breakpoints_inserted_p = 1;
1715 singlestep_ptid = inferior_ptid;
1716 singlestep_pc = pc;
1717 }
1718 return hw_step;
1719 }
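/* Illustrative sketch (not compiled): what a gdbarch software
   single-step hook conceptually does on targets without hardware
   single-step.  The instruction decoder (example_arch_next_pc) is
   hypothetical; the frame and breakpoint helpers are believed to
   match the APIs used elsewhere in GDB, but treat them as
   assumptions.  */
#if 0
static int
example_arch_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR next_pc;

  /* Hypothetical helper: decode the insn at PC and compute where
     control can go next (fall-through or branch target).  */
  next_pc = example_arch_next_pc (frame, pc);

  /* Plant a single-step breakpoint there; infrun removes it again via
     remove_single_step_breakpoints once the step completes.  */
  insert_single_step_breakpoint (gdbarch, aspace, next_pc);
  return 1;
}
#endif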
1720
1721 /* Return a ptid representing the set of threads that we will proceed,
1722    from the perspective of the user/frontend.  We may actually resume
1723 fewer threads at first, e.g., if a thread is stopped at a
1724 breakpoint that needs stepping-off, but that should not be visible
1725 to the user/frontend, and neither should the frontend/user be
1726 allowed to proceed any of the threads that happen to be stopped for
1727 internal run control handling, if a previous command wanted them
1728 resumed. */
1729
1730 ptid_t
1731 user_visible_resume_ptid (int step)
1732 {
1733 /* By default, resume all threads of all processes. */
1734 ptid_t resume_ptid = RESUME_ALL;
1735
1736 /* Maybe resume only all threads of the current process. */
1737 if (!sched_multi && target_supports_multi_process ())
1738 {
1739 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1740 }
1741
1742 /* Maybe resume a single thread after all. */
1743 if (non_stop)
1744 {
1745 /* With non-stop mode on, threads are always handled
1746 individually. */
1747 resume_ptid = inferior_ptid;
1748 }
1749 else if ((scheduler_mode == schedlock_on)
1750 || (scheduler_mode == schedlock_step
1751 && (step || singlestep_breakpoints_inserted_p)))
1752 {
1753 /* User-settable 'scheduler' mode requires solo thread resume. */
1754 resume_ptid = inferior_ptid;
1755 }
1756
1757 return resume_ptid;
1758 }
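/* Illustrative sketch (not compiled): the narrowing performed above,
   as a caller would observe it.  The equivalence of RESUME_ALL with
   minus_one_ptid and the mapping of sched_multi to the
   "set schedule-multiple" setting are stated here as assumptions.  */
#if 0
static void
example_resume_ptid_cases (void)
{
  /* All-stop, "set scheduler-locking off", target without
     multi-process support: everything is resumed (RESUME_ALL,
     believed to be minus_one_ptid).

     Same, but the target supports multi-process and
     "set schedule-multiple" is off: only the current inferior's
     threads, i.e. pid_to_ptid (ptid_get_pid (inferior_ptid)).

     Non-stop mode, or scheduler locking in effect: just
     inferior_ptid.  */
  ptid_t resume_ptid = user_visible_resume_ptid (0 /* not stepping */);

  target_resume (resume_ptid, 0 /* no step */, GDB_SIGNAL_0);
}
#endif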
1759
1760 /* Resume the inferior, but allow a QUIT. This is useful if the user
1761 wants to interrupt some lengthy single-stepping operation
1762 (for child processes, the SIGINT goes to the inferior, and so
1763 we get a SIGINT random_signal, but for remote debugging and perhaps
1764 other targets, that's not true).
1765
1766 STEP nonzero if we should step (zero to continue instead).
1767 SIG is the signal to give the inferior (zero for none). */
1768 void
1769 resume (int step, enum gdb_signal sig)
1770 {
1771 int should_resume = 1;
1772 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1773 struct regcache *regcache = get_current_regcache ();
1774 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1775 struct thread_info *tp = inferior_thread ();
1776 CORE_ADDR pc = regcache_read_pc (regcache);
1777 struct address_space *aspace = get_regcache_aspace (regcache);
1778
1779 QUIT;
1780
1781 if (current_inferior ()->waiting_for_vfork_done)
1782 {
1783 /* Don't try to single-step a vfork parent that is waiting for
1784 the child to get out of the shared memory region (by exec'ing
1785 or exiting). This is particularly important on software
1786 single-step archs, as the child process would trip on the
1787 software single step breakpoint inserted for the parent
1788 process. Since the parent will not actually execute any
1789 instruction until the child is out of the shared region (such
1790 are vfork's semantics), it is safe to simply continue it.
1791 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1792 the parent, and tell it to `keep_going', which automatically
1793    sets it stepping again.  */
1794 if (debug_infrun)
1795 fprintf_unfiltered (gdb_stdlog,
1796 "infrun: resume : clear step\n");
1797 step = 0;
1798 }
1799
1800 if (debug_infrun)
1801 fprintf_unfiltered (gdb_stdlog,
1802 "infrun: resume (step=%d, signal=%s), "
1803 "trap_expected=%d, current thread [%s] at %s\n",
1804 step, gdb_signal_to_symbol_string (sig),
1805 tp->control.trap_expected,
1806 target_pid_to_str (inferior_ptid),
1807 paddress (gdbarch, pc));
1808
1809 /* Normally, by the time we reach `resume', the breakpoints are either
1810 removed or inserted, as appropriate. The exception is if we're sitting
1811 at a permanent breakpoint; we need to step over it, but permanent
1812 breakpoints can't be removed. So we have to test for it here. */
1813 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1814 {
1815 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1816 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1817 else
1818 error (_("\
1819 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1820 how to step past a permanent breakpoint on this architecture. Try using\n\
1821 a command like `return' or `jump' to continue execution."));
1822 }
1823
1824 /* If we have a breakpoint to step over, make sure to do a single
1825 step only. Same if we have software watchpoints. */
1826 if (tp->control.trap_expected || bpstat_should_step ())
1827 tp->control.may_range_step = 0;
1828
1829 /* If enabled, step over breakpoints by executing a copy of the
1830 instruction at a different address.
1831
1832 We can't use displaced stepping when we have a signal to deliver;
1833 the comments for displaced_step_prepare explain why. The
1834 comments in the handle_inferior event for dealing with 'random
1835 signals' explain what we do instead.
1836
1837    We can't use displaced stepping when we are waiting for a vfork_done
1838    event; displaced stepping breaks the vfork child much like a software
1839    single-step breakpoint does.  */
1840 if (use_displaced_stepping (gdbarch)
1841 && (tp->control.trap_expected
1842 || (step && gdbarch_software_single_step_p (gdbarch)))
1843 && sig == GDB_SIGNAL_0
1844 && !current_inferior ()->waiting_for_vfork_done)
1845 {
1846 struct displaced_step_inferior_state *displaced;
1847
1848 if (!displaced_step_prepare (inferior_ptid))
1849 {
1850      /* Got placed in the displaced stepping queue.  Will be resumed
1851 later when all the currently queued displaced stepping
1852 requests finish. The thread is not executing at this point,
1853 and the call to set_executing will be made later. But we
1854         need to call set_running here, since from the frontend's point of view,
1855 the thread is running. */
1856 set_running (inferior_ptid, 1);
1857 discard_cleanups (old_cleanups);
1858 return;
1859 }
1860
1861 /* Update pc to reflect the new address from which we will execute
1862 instructions due to displaced stepping. */
1863 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1864
1865 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1866 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1867 displaced->step_closure);
1868 }
1869
1870 /* Do we need to do it the hard way, w/temp breakpoints? */
1871 else if (step)
1872 step = maybe_software_singlestep (gdbarch, pc);
1873
1874 /* Currently, our software single-step implementation leads to different
1875 results than hardware single-stepping in one situation: when stepping
1876 into delivering a signal which has an associated signal handler,
1877 hardware single-step will stop at the first instruction of the handler,
1878 while software single-step will simply skip execution of the handler.
1879
1880 For now, this difference in behavior is accepted since there is no
1881 easy way to actually implement single-stepping into a signal handler
1882 without kernel support.
1883
1884 However, there is one scenario where this difference leads to follow-on
1885 problems: if we're stepping off a breakpoint by removing all breakpoints
1886 and then single-stepping. In this case, the software single-step
1887 behavior means that even if there is a *breakpoint* in the signal
1888 handler, GDB still would not stop.
1889
1890 Fortunately, we can at least fix this particular issue. We detect
1891 here the case where we are about to deliver a signal while software
1892 single-stepping with breakpoints removed. In this situation, we
1893 revert the decisions to remove all breakpoints and insert single-
1894 step breakpoints, and instead we install a step-resume breakpoint
1895 at the current address, deliver the signal without stepping, and
1896 once we arrive back at the step-resume breakpoint, actually step
1897 over the breakpoint we originally wanted to step over. */
1898 if (singlestep_breakpoints_inserted_p
1899 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1900 {
1901 /* If we have nested signals or a pending signal is delivered
1902      immediately after a handler returns, we might already have
1903 a step-resume breakpoint set on the earlier handler. We cannot
1904 set another step-resume breakpoint; just continue on until the
1905 original breakpoint is hit. */
1906 if (tp->control.step_resume_breakpoint == NULL)
1907 {
1908 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1909 tp->step_after_step_resume_breakpoint = 1;
1910 }
1911
1912 remove_single_step_breakpoints ();
1913 singlestep_breakpoints_inserted_p = 0;
1914
1915 clear_step_over_info ();
1916 tp->control.trap_expected = 0;
1917
1918 insert_breakpoints ();
1919 }
1920
1921 if (should_resume)
1922 {
1923 ptid_t resume_ptid;
1924
1925 /* If STEP is set, it's a request to use hardware stepping
1926 facilities. But in that case, we should never
1927      use a single-step breakpoint.  */
1928 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1929
1930    /* Decide the set of threads to ask the target to resume.  Start
1931       by assuming everything will be resumed, then narrow the set
1932       by applying increasingly restrictive conditions.  */
1933 resume_ptid = user_visible_resume_ptid (step);
1934
1935 /* Maybe resume a single thread after all. */
1936 if ((step || singlestep_breakpoints_inserted_p)
1937 && tp->control.trap_expected)
1938 {
1939 /* We're allowing a thread to run past a breakpoint it has
1940 hit, by single-stepping the thread with the breakpoint
1941 removed. In which case, we need to single-step only this
1942 thread, and keep others stopped, as they can miss this
1943 breakpoint if allowed to run. */
1944 resume_ptid = inferior_ptid;
1945 }
1946
1947 if (gdbarch_cannot_step_breakpoint (gdbarch))
1948 {
1949 /* Most targets can step a breakpoint instruction, thus
1950 executing it normally. But if this one cannot, just
1951 continue and we will hit it anyway. */
1952 if (step && breakpoint_inserted_here_p (aspace, pc))
1953 step = 0;
1954 }
1955
1956 if (debug_displaced
1957 && use_displaced_stepping (gdbarch)
1958 && tp->control.trap_expected)
1959 {
1960 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1961 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1962 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1963 gdb_byte buf[4];
1964
1965 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1966 paddress (resume_gdbarch, actual_pc));
1967 read_memory (actual_pc, buf, sizeof (buf));
1968 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1969 }
1970
1971 if (tp->control.may_range_step)
1972 {
1973 /* If we're resuming a thread with the PC out of the step
1974 range, then we're doing some nested/finer run control
1975 operation, like stepping the thread out of the dynamic
1976 linker or the displaced stepping scratch pad. We
1977 shouldn't have allowed a range step then. */
1978 gdb_assert (pc_in_thread_step_range (pc, tp));
1979 }
1980
1981 /* Install inferior's terminal modes. */
1982 target_terminal_inferior ();
1983
1984 /* Avoid confusing the next resume, if the next stop/resume
1985 happens to apply to another thread. */
1986 tp->suspend.stop_signal = GDB_SIGNAL_0;
1987
1988 /* Advise target which signals may be handled silently. If we have
1989 removed breakpoints because we are stepping over one (which can
1990 happen only if we are not using displaced stepping), we need to
1991 receive all signals to avoid accidentally skipping a breakpoint
1992 during execution of a signal handler. */
1993 if ((step || singlestep_breakpoints_inserted_p)
1994 && tp->control.trap_expected
1995 && !use_displaced_stepping (gdbarch))
1996 target_pass_signals (0, NULL);
1997 else
1998 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
1999
2000 target_resume (resume_ptid, step, sig);
2001 }
2002
2003 discard_cleanups (old_cleanups);
2004 }
2005 \f
2006 /* Proceeding. */
2007
2008 /* Clear out all variables saying what to do when inferior is continued.
2009 First do this, then set the ones you want, then call `proceed'. */
2010
2011 static void
2012 clear_proceed_status_thread (struct thread_info *tp)
2013 {
2014 if (debug_infrun)
2015 fprintf_unfiltered (gdb_stdlog,
2016 "infrun: clear_proceed_status_thread (%s)\n",
2017 target_pid_to_str (tp->ptid));
2018
2019 tp->control.trap_expected = 0;
2020 tp->control.step_range_start = 0;
2021 tp->control.step_range_end = 0;
2022 tp->control.may_range_step = 0;
2023 tp->control.step_frame_id = null_frame_id;
2024 tp->control.step_stack_frame_id = null_frame_id;
2025 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2026 tp->stop_requested = 0;
2027
2028 tp->control.stop_step = 0;
2029
2030 tp->control.proceed_to_finish = 0;
2031
2032 /* Discard any remaining commands or status from previous stop. */
2033 bpstat_clear (&tp->control.stop_bpstat);
2034 }
2035
2036 static int
2037 clear_proceed_status_callback (struct thread_info *tp, void *data)
2038 {
2039 if (is_exited (tp->ptid))
2040 return 0;
2041
2042 clear_proceed_status_thread (tp);
2043 return 0;
2044 }
2045
2046 void
2047 clear_proceed_status (void)
2048 {
2049 if (!non_stop)
2050 {
2051 /* In all-stop mode, delete the per-thread status of all
2052 threads, even if inferior_ptid is null_ptid, there may be
2053 threads on the list. E.g., we may be launching a new
2054 process, while selecting the executable. */
2055 iterate_over_threads (clear_proceed_status_callback, NULL);
2056 }
2057
2058 if (!ptid_equal (inferior_ptid, null_ptid))
2059 {
2060 struct inferior *inferior;
2061
2062 if (non_stop)
2063 {
2064 /* If in non-stop mode, only delete the per-thread status of
2065 the current thread. */
2066 clear_proceed_status_thread (inferior_thread ());
2067 }
2068
2069 inferior = current_inferior ();
2070 inferior->control.stop_soon = NO_STOP_QUIETLY;
2071 }
2072
2073 stop_after_trap = 0;
2074
2075 clear_step_over_info ();
2076
2077 observer_notify_about_to_proceed ();
2078
2079 if (stop_registers)
2080 {
2081 regcache_xfree (stop_registers);
2082 stop_registers = NULL;
2083 }
2084 }
2085
2086 /* Returns true if TP is still stopped at a breakpoint that needs
2087 stepping-over in order to make progress. If the breakpoint is gone
2088 meanwhile, we can skip the whole step-over dance. */
2089
2090 static int
2091 thread_still_needs_step_over (struct thread_info *tp)
2092 {
2093 if (tp->stepping_over_breakpoint)
2094 {
2095 struct regcache *regcache = get_thread_regcache (tp->ptid);
2096
2097 if (breakpoint_here_p (get_regcache_aspace (regcache),
2098 regcache_read_pc (regcache)))
2099 return 1;
2100
2101 tp->stepping_over_breakpoint = 0;
2102 }
2103
2104 return 0;
2105 }
2106
2107 /* Returns true if scheduler locking applies. STEP indicates whether
2108 we're about to do a step/next-like command to a thread. */
2109
2110 static int
2111 schedlock_applies (int step)
2112 {
2113 return (scheduler_mode == schedlock_on
2114 || (scheduler_mode == schedlock_step
2115 && step));
2116 }
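/* Illustrative sketch (not compiled): the truth table implemented by
   schedlock_applies, spelled out for the three "set scheduler-locking"
   modes.  The assignments poke the file-scope scheduler_mode variable
   directly, which a real caller would never do; this is purely for
   illustration.  */
#if 0
static void
example_schedlock_cases (void)
{
  /* "set scheduler-locking off": neither stepping nor continuing
     locks the scheduler.  */
  scheduler_mode = schedlock_off;
  gdb_assert (!schedlock_applies (0) && !schedlock_applies (1));

  /* "set scheduler-locking on": both do.  */
  scheduler_mode = schedlock_on;
  gdb_assert (schedlock_applies (0) && schedlock_applies (1));

  /* "set scheduler-locking step": only stepping commands do.  */
  scheduler_mode = schedlock_step;
  gdb_assert (!schedlock_applies (0) && schedlock_applies (1));
}
#endif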
2117
2118 /* Look for a thread other than EXCEPT that has previously reported a
2119    breakpoint event, and thus needs a step-over in order to make
2120    progress.  Returns NULL if none is found.  STEP indicates whether
2121 we're about to step the current thread, in order to decide whether
2122 "set scheduler-locking step" applies. */
2123
2124 static struct thread_info *
2125 find_thread_needs_step_over (int step, struct thread_info *except)
2126 {
2127 struct thread_info *tp, *current;
2128
2129 /* With non-stop mode on, threads are always handled individually. */
2130 gdb_assert (! non_stop);
2131
2132 current = inferior_thread ();
2133
2134 /* If scheduler locking applies, we can avoid iterating over all
2135 threads. */
2136 if (schedlock_applies (step))
2137 {
2138 if (except != current
2139 && thread_still_needs_step_over (current))
2140 return current;
2141
2142 return NULL;
2143 }
2144
2145 ALL_THREADS (tp)
2146 {
2147 /* Ignore the EXCEPT thread. */
2148 if (tp == except)
2149 continue;
2150 /* Ignore threads of processes we're not resuming. */
2151 if (!sched_multi
2152 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2153 continue;
2154
2155 if (thread_still_needs_step_over (tp))
2156 return tp;
2157 }
2158
2159 return NULL;
2160 }
2161
2162 /* Basic routine for continuing the program in various fashions.
2163
2164 ADDR is the address to resume at, or -1 for resume where stopped.
2165 SIGGNAL is the signal to give it, or 0 for none,
2166 or -1 for act according to how it stopped.
2167    STEP is nonzero if we should trap after one instruction.
2168 -1 means return after that and print nothing.
2169 You should probably set various step_... variables
2170 before calling here, if you are stepping.
2171
2172 You should call clear_proceed_status before calling proceed. */
2173
2174 void
2175 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2176 {
2177 struct regcache *regcache;
2178 struct gdbarch *gdbarch;
2179 struct thread_info *tp;
2180 CORE_ADDR pc;
2181 struct address_space *aspace;
2182
2183 /* If we're stopped at a fork/vfork, follow the branch set by the
2184 "set follow-fork-mode" command; otherwise, we'll just proceed
2185 resuming the current thread. */
2186 if (!follow_fork ())
2187 {
2188 /* The target for some reason decided not to resume. */
2189 normal_stop ();
2190 if (target_can_async_p ())
2191 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2192 return;
2193 }
2194
2195 /* We'll update this if & when we switch to a new thread. */
2196 previous_inferior_ptid = inferior_ptid;
2197
2198 regcache = get_current_regcache ();
2199 gdbarch = get_regcache_arch (regcache);
2200 aspace = get_regcache_aspace (regcache);
2201 pc = regcache_read_pc (regcache);
2202 tp = inferior_thread ();
2203
2204 if (step > 0)
2205 step_start_function = find_pc_function (pc);
2206 if (step < 0)
2207 stop_after_trap = 1;
2208
2209 /* Fill in with reasonable starting values. */
2210 init_thread_stepping_state (tp);
2211
2212 if (addr == (CORE_ADDR) -1)
2213 {
2214 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2215 && execution_direction != EXEC_REVERSE)
2216 /* There is a breakpoint at the address we will resume at,
2217 step one instruction before inserting breakpoints so that
2218 we do not stop right away (and report a second hit at this
2219 breakpoint).
2220
2221 Note, we don't do this in reverse, because we won't
2222 actually be executing the breakpoint insn anyway.
2223 We'll be (un-)executing the previous instruction. */
2224 tp->stepping_over_breakpoint = 1;
2225 else if (gdbarch_single_step_through_delay_p (gdbarch)
2226 && gdbarch_single_step_through_delay (gdbarch,
2227 get_current_frame ()))
2228 /* We stepped onto an instruction that needs to be stepped
2229 again before re-inserting the breakpoint, do so. */
2230 tp->stepping_over_breakpoint = 1;
2231 }
2232 else
2233 {
2234 regcache_write_pc (regcache, addr);
2235 }
2236
2237 if (debug_infrun)
2238 fprintf_unfiltered (gdb_stdlog,
2239 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2240 paddress (gdbarch, addr),
2241 gdb_signal_to_symbol_string (siggnal), step);
2242
2243 if (non_stop)
2244 /* In non-stop, each thread is handled individually. The context
2245 must already be set to the right thread here. */
2246 ;
2247 else
2248 {
2249 struct thread_info *step_over;
2250
2251 /* In a multi-threaded task we may select another thread and
2252 then continue or step.
2253
2254 But if the old thread was stopped at a breakpoint, it will
2255 immediately cause another breakpoint stop without any
2256 execution (i.e. it will report a breakpoint hit incorrectly).
2257 So we must step over it first.
2258
2259 Look for a thread other than the current (TP) that reported a
2260          breakpoint hit and hasn't been resumed since.  */
2261 step_over = find_thread_needs_step_over (step, tp);
2262 if (step_over != NULL)
2263 {
2264 if (debug_infrun)
2265 fprintf_unfiltered (gdb_stdlog,
2266 "infrun: need to step-over [%s] first\n",
2267 target_pid_to_str (step_over->ptid));
2268
2269 /* Store the prev_pc for the stepping thread too, needed by
2270             switch_back_to_stepped_thread.  */
2271 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2272 switch_to_thread (step_over->ptid);
2273 tp = step_over;
2274 }
2275 }
2276
2277 /* If we need to step over a breakpoint, and we're not using
2278 displaced stepping to do so, insert all breakpoints (watchpoints,
2279 etc.) but the one we're stepping over, step one instruction, and
2280 then re-insert the breakpoint when that step is finished. */
2281 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2282 {
2283 struct regcache *regcache = get_current_regcache ();
2284
2285 set_step_over_info (get_regcache_aspace (regcache),
2286 regcache_read_pc (regcache));
2287 }
2288 else
2289 clear_step_over_info ();
2290
2291 insert_breakpoints ();
2292
2293 tp->control.trap_expected = tp->stepping_over_breakpoint;
2294
2295 if (!non_stop)
2296 {
2297 /* Pass the last stop signal to the thread we're resuming,
2298 irrespective of whether the current thread is the thread that
2299 got the last event or not. This was historically GDB's
2300 behaviour before keeping a stop_signal per thread. */
2301
2302 struct thread_info *last_thread;
2303 ptid_t last_ptid;
2304 struct target_waitstatus last_status;
2305
2306 get_last_target_status (&last_ptid, &last_status);
2307 if (!ptid_equal (inferior_ptid, last_ptid)
2308 && !ptid_equal (last_ptid, null_ptid)
2309 && !ptid_equal (last_ptid, minus_one_ptid))
2310 {
2311 last_thread = find_thread_ptid (last_ptid);
2312 if (last_thread)
2313 {
2314 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2315 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2316 }
2317 }
2318 }
2319
2320 if (siggnal != GDB_SIGNAL_DEFAULT)
2321 tp->suspend.stop_signal = siggnal;
2322 /* If this signal should not be seen by program,
2323 give it zero. Used for debugging signals. */
2324 else if (!signal_program[tp->suspend.stop_signal])
2325 tp->suspend.stop_signal = GDB_SIGNAL_0;
2326
2327 annotate_starting ();
2328
2329 /* Make sure that output from GDB appears before output from the
2330 inferior. */
2331 gdb_flush (gdb_stdout);
2332
2333 /* Refresh prev_pc value just prior to resuming. This used to be
2334 done in stop_stepping, however, setting prev_pc there did not handle
2335 scenarios such as inferior function calls or returning from
2336 a function via the return command. In those cases, the prev_pc
2337 value was not set properly for subsequent commands. The prev_pc value
2338 is used to initialize the starting line number in the ecs. With an
2339 invalid value, the gdb next command ends up stopping at the position
2340 represented by the next line table entry past our start position.
2341 On platforms that generate one line table entry per line, this
2342 is not a problem. However, on the ia64, the compiler generates
2343 extraneous line table entries that do not increase the line number.
2344 When we issue the gdb next command on the ia64 after an inferior call
2345 or a return command, we often end up a few instructions forward, still
2346      within the line we originally started in.
2347
2348 An attempt was made to refresh the prev_pc at the same time the
2349 execution_control_state is initialized (for instance, just before
2350 waiting for an inferior event). But this approach did not work
2351 because of platforms that use ptrace, where the pc register cannot
2352 be read unless the inferior is stopped. At that point, we are not
2353 guaranteed the inferior is stopped and so the regcache_read_pc() call
2354 can fail. Setting the prev_pc value here ensures the value is updated
2355 correctly when the inferior is stopped. */
2356 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2357
2358 /* Reset to normal state. */
2359 init_infwait_state ();
2360
2361 /* Resume inferior. */
2362 resume (tp->control.trap_expected || step || bpstat_should_step (),
2363 tp->suspend.stop_signal);
2364
2365 /* Wait for it to stop (if not standalone)
2366 and in any case decode why it stopped, and act accordingly. */
2367 /* Do this only if we are not using the event loop, or if the target
2368 does not support asynchronous execution. */
2369 if (!target_can_async_p ())
2370 {
2371 wait_for_inferior ();
2372 normal_stop ();
2373 }
2374 }
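/* Illustrative sketch (not compiled): the calling convention that the
   comment above `proceed' describes, in the order an execution command
   would follow it.  The control fields are the ones cleared by
   clear_proceed_status_thread; the specific range addresses are made
   up for illustration.  */
#if 0
static void
example_step_command_skeleton (void)
{
  struct thread_info *tp;

  /* First clear everything, then set only what this command needs.  */
  clear_proceed_status ();

  tp = inferior_thread ();

  /* A "step"-style command would set a stepping range before
     proceeding; a plain "continue" would leave these at zero.  */
  tp->control.step_range_start = 0x4005d0;	/* hypothetical */
  tp->control.step_range_end = 0x4005f0;	/* hypothetical */
  tp->control.step_over_calls = STEP_OVER_ALL;

  /* Resume where we stopped, let proceed pick the stop signal
     handling, and trap after one instruction (stepping).  */
  proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 1);
}
#endif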
2375 \f
2376
2377 /* Start remote-debugging of a machine over a serial link. */
2378
2379 void
2380 start_remote (int from_tty)
2381 {
2382 struct inferior *inferior;
2383
2384 inferior = current_inferior ();
2385 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2386
2387 /* Always go on waiting for the target, regardless of the mode. */
2388 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2389 indicate to wait_for_inferior that a target should timeout if
2390 nothing is returned (instead of just blocking). Because of this,
2391 targets expecting an immediate response need to, internally, set
2392 things up so that the target_wait() is forced to eventually
2393 timeout. */
2394 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2395 differentiate to its caller what the state of the target is after
2396 the initial open has been performed. Here we're assuming that
2397 the target has stopped. It should be possible to eventually have
2398 target_open() return to the caller an indication that the target
2399 is currently running and GDB state should be set to the same as
2400 for an async run. */
2401 wait_for_inferior ();
2402
2403 /* Now that the inferior has stopped, do any bookkeeping like
2404 loading shared libraries. We want to do this before normal_stop,
2405 so that the displayed frame is up to date. */
2406 post_create_inferior (&current_target, from_tty);
2407
2408 normal_stop ();
2409 }
2410
2411 /* Initialize static vars when a new inferior begins. */
2412
2413 void
2414 init_wait_for_inferior (void)
2415 {
2416 /* These are meaningless until the first time through wait_for_inferior. */
2417
2418 breakpoint_init_inferior (inf_starting);
2419
2420 clear_proceed_status ();
2421
2422 target_last_wait_ptid = minus_one_ptid;
2423
2424 previous_inferior_ptid = inferior_ptid;
2425 init_infwait_state ();
2426
2427 /* Discard any skipped inlined frames. */
2428 clear_inline_frame_state (minus_one_ptid);
2429
2430 singlestep_ptid = null_ptid;
2431 singlestep_pc = 0;
2432 }
2433
2434 \f
2435 /* This enum encodes possible reasons for doing a target_wait, so that
2436 wfi can call target_wait in one place. (Ultimately the call will be
2437 moved out of the infinite loop entirely.) */
2438
2439 enum infwait_states
2440 {
2441 infwait_normal_state,
2442 infwait_step_watch_state,
2443 infwait_nonstep_watch_state
2444 };
2445
2446 /* The PTID we'll do a target_wait on.  */
2447 ptid_t waiton_ptid;
2448
2449 /* Current inferior wait state. */
2450 static enum infwait_states infwait_state;
2451
2452 /* Data to be passed around while handling an event. This data is
2453 discarded between events. */
2454 struct execution_control_state
2455 {
2456 ptid_t ptid;
2457 /* The thread that got the event, if this was a thread event; NULL
2458 otherwise. */
2459 struct thread_info *event_thread;
2460
2461 struct target_waitstatus ws;
2462 int stop_func_filled_in;
2463 CORE_ADDR stop_func_start;
2464 CORE_ADDR stop_func_end;
2465 const char *stop_func_name;
2466 int wait_some_more;
2467
2468 /* We were in infwait_step_watch_state or
2469 infwait_nonstep_watch_state state, and the thread reported an
2470 event. */
2471 int stepped_after_stopped_by_watchpoint;
2472
2473 /* True if the event thread hit the single-step breakpoint of
2474 another thread. Thus the event doesn't cause a stop, the thread
2475 needs to be single-stepped past the single-step breakpoint before
2476 we can switch back to the original stepping thread. */
2477 int hit_singlestep_breakpoint;
2478 };
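/* Illustrative sketch (not compiled): the per-event life cycle of an
   execution_control_state, condensed from the loops in
   wait_for_inferior and fetch_inferior_event further below.  */
#if 0
static void
example_event_loop_body (void)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  do
    {
      /* The ecs is discarded between events, so start from zero.  */
      memset (ecs, 0, sizeof (*ecs));

      /* Wait for the target, then decide what the event means.
	 handle_inferior_event fills in ecs->wait_some_more.  */
      ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
      handle_inferior_event (ecs);
    }
  while (ecs->wait_some_more);
}
#endif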
2479
2480 static void handle_inferior_event (struct execution_control_state *ecs);
2481
2482 static void handle_step_into_function (struct gdbarch *gdbarch,
2483 struct execution_control_state *ecs);
2484 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2485 struct execution_control_state *ecs);
2486 static void handle_signal_stop (struct execution_control_state *ecs);
2487 static void check_exception_resume (struct execution_control_state *,
2488 struct frame_info *);
2489
2490 static void stop_stepping (struct execution_control_state *ecs);
2491 static void prepare_to_wait (struct execution_control_state *ecs);
2492 static void keep_going (struct execution_control_state *ecs);
2493 static void process_event_stop_test (struct execution_control_state *ecs);
2494 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2495
2496 /* Callback for iterate_over_threads.  If the thread is stopped, but
2497 the user/frontend doesn't know about that yet, go through
2498 normal_stop, as if the thread had just stopped now. ARG points at
2499 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2500 ptid_is_pid(PTID) is true, applies to all threads of the process
2501    pointed at by PTID.  Otherwise, apply only to the thread pointed at by
2502 PTID. */
2503
2504 static int
2505 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2506 {
2507 ptid_t ptid = * (ptid_t *) arg;
2508
2509 if ((ptid_equal (info->ptid, ptid)
2510 || ptid_equal (minus_one_ptid, ptid)
2511 || (ptid_is_pid (ptid)
2512 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2513 && is_running (info->ptid)
2514 && !is_executing (info->ptid))
2515 {
2516 struct cleanup *old_chain;
2517 struct execution_control_state ecss;
2518 struct execution_control_state *ecs = &ecss;
2519
2520 memset (ecs, 0, sizeof (*ecs));
2521
2522 old_chain = make_cleanup_restore_current_thread ();
2523
2524 overlay_cache_invalid = 1;
2525 /* Flush target cache before starting to handle each event.
2526 Target was running and cache could be stale. This is just a
2527 heuristic. Running threads may modify target memory, but we
2528 don't get any event. */
2529 target_dcache_invalidate ();
2530
2531 /* Go through handle_inferior_event/normal_stop, so we always
2532 have consistent output as if the stop event had been
2533 reported. */
2534 ecs->ptid = info->ptid;
2535 ecs->event_thread = find_thread_ptid (info->ptid);
2536 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2537 ecs->ws.value.sig = GDB_SIGNAL_0;
2538
2539 handle_inferior_event (ecs);
2540
2541 if (!ecs->wait_some_more)
2542 {
2543 struct thread_info *tp;
2544
2545 normal_stop ();
2546
2547 /* Finish off the continuations. */
2548 tp = inferior_thread ();
2549 do_all_intermediate_continuations_thread (tp, 1);
2550 do_all_continuations_thread (tp, 1);
2551 }
2552
2553 do_cleanups (old_chain);
2554 }
2555
2556 return 0;
2557 }
2558
2559 /* This function is attached as a "thread_stop_requested" observer.
2560 Cleanup local state that assumed the PTID was to be resumed, and
2561 report the stop to the frontend. */
2562
2563 static void
2564 infrun_thread_stop_requested (ptid_t ptid)
2565 {
2566 struct displaced_step_inferior_state *displaced;
2567
2568 /* PTID was requested to stop. Remove it from the displaced
2569 stepping queue, so we don't try to resume it automatically. */
2570
2571 for (displaced = displaced_step_inferior_states;
2572 displaced;
2573 displaced = displaced->next)
2574 {
2575 struct displaced_step_request *it, **prev_next_p;
2576
2577 it = displaced->step_request_queue;
2578 prev_next_p = &displaced->step_request_queue;
2579 while (it)
2580 {
2581 if (ptid_match (it->ptid, ptid))
2582 {
2583 *prev_next_p = it->next;
2584 it->next = NULL;
2585 xfree (it);
2586 }
2587 else
2588 {
2589 prev_next_p = &it->next;
2590 }
2591
2592 it = *prev_next_p;
2593 }
2594 }
2595
2596 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2597 }
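/* Illustrative sketch (not compiled): the `prev_next_p' idiom used in
   the unlinking loop above, reduced to a generic singly-linked list.
   Keeping a pointer to the previous node's `next' field lets the loop
   remove a matching node without special-casing the list head.  The
   example_node type is invented for this sketch.  */
#if 0
struct example_node
{
  struct example_node *next;
  int key;
};

static void
example_unlink_matching (struct example_node **head, int key)
{
  struct example_node **prev_next_p = head;
  struct example_node *it = *head;

  while (it)
    {
      if (it->key == key)
	{
	  *prev_next_p = it->next;	/* Splice IT out of the list.  */
	  it->next = NULL;
	  xfree (it);
	}
      else
	prev_next_p = &it->next;	/* Keep IT; advance the anchor.  */

      it = *prev_next_p;
    }
}
#endif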
2598
2599 static void
2600 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2601 {
2602 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2603 nullify_last_target_wait_ptid ();
2604 }
2605
2606 /* Callback for iterate_over_threads. */
2607
2608 static int
2609 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2610 {
2611 if (is_exited (info->ptid))
2612 return 0;
2613
2614 delete_step_resume_breakpoint (info);
2615 delete_exception_resume_breakpoint (info);
2616 return 0;
2617 }
2618
2619 /* In all-stop, delete the step resume breakpoint of any thread that
2620 had one. In non-stop, delete the step resume breakpoint of the
2621 thread that just stopped. */
2622
2623 static void
2624 delete_step_thread_step_resume_breakpoint (void)
2625 {
2626 if (!target_has_execution
2627 || ptid_equal (inferior_ptid, null_ptid))
2628 /* If the inferior has exited, we have already deleted the step
2629 resume breakpoints out of GDB's lists. */
2630 return;
2631
2632 if (non_stop)
2633 {
2634 /* If in non-stop mode, only delete the step-resume or
2635 longjmp-resume breakpoint of the thread that just stopped
2636 stepping. */
2637 struct thread_info *tp = inferior_thread ();
2638
2639 delete_step_resume_breakpoint (tp);
2640 delete_exception_resume_breakpoint (tp);
2641 }
2642 else
2643 /* In all-stop mode, delete all step-resume and longjmp-resume
2644 breakpoints of any thread that had them. */
2645 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2646 }
2647
2648 /* A cleanup wrapper. */
2649
2650 static void
2651 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2652 {
2653 delete_step_thread_step_resume_breakpoint ();
2654 }
2655
2656 /* Pretty print the results of target_wait, for debugging purposes. */
2657
2658 static void
2659 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2660 const struct target_waitstatus *ws)
2661 {
2662 char *status_string = target_waitstatus_to_string (ws);
2663 struct ui_file *tmp_stream = mem_fileopen ();
2664 char *text;
2665
2666 /* The text is split over several lines because it was getting too long.
2667 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2668 output as a unit; we want only one timestamp printed if debug_timestamp
2669 is set. */
2670
2671 fprintf_unfiltered (tmp_stream,
2672 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2673 if (ptid_get_pid (waiton_ptid) != -1)
2674 fprintf_unfiltered (tmp_stream,
2675 " [%s]", target_pid_to_str (waiton_ptid));
2676 fprintf_unfiltered (tmp_stream, ", status) =\n");
2677 fprintf_unfiltered (tmp_stream,
2678 "infrun: %d [%s],\n",
2679 ptid_get_pid (result_ptid),
2680 target_pid_to_str (result_ptid));
2681 fprintf_unfiltered (tmp_stream,
2682 "infrun: %s\n",
2683 status_string);
2684
2685 text = ui_file_xstrdup (tmp_stream, NULL);
2686
2687 /* This uses %s in part to handle %'s in the text, but also to avoid
2688 a gcc error: the format attribute requires a string literal. */
2689 fprintf_unfiltered (gdb_stdlog, "%s", text);
2690
2691 xfree (status_string);
2692 xfree (text);
2693 ui_file_delete (tmp_stream);
2694 }
2695
2696 /* Prepare and stabilize the inferior for detaching it. E.g.,
2697 detaching while a thread is displaced stepping is a recipe for
2698 crashing it, as nothing would readjust the PC out of the scratch
2699 pad. */
2700
2701 void
2702 prepare_for_detach (void)
2703 {
2704 struct inferior *inf = current_inferior ();
2705 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2706 struct cleanup *old_chain_1;
2707 struct displaced_step_inferior_state *displaced;
2708
2709 displaced = get_displaced_stepping_state (inf->pid);
2710
2711 /* Is any thread of this process displaced stepping? If not,
2712 there's nothing else to do. */
2713 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2714 return;
2715
2716 if (debug_infrun)
2717 fprintf_unfiltered (gdb_stdlog,
2718 "displaced-stepping in-process while detaching");
2719
2720 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2721 inf->detaching = 1;
2722
2723 while (!ptid_equal (displaced->step_ptid, null_ptid))
2724 {
2725 struct cleanup *old_chain_2;
2726 struct execution_control_state ecss;
2727 struct execution_control_state *ecs;
2728
2729 ecs = &ecss;
2730 memset (ecs, 0, sizeof (*ecs));
2731
2732 overlay_cache_invalid = 1;
2733 /* Flush target cache before starting to handle each event.
2734 Target was running and cache could be stale. This is just a
2735 heuristic. Running threads may modify target memory, but we
2736 don't get any event. */
2737 target_dcache_invalidate ();
2738
2739 if (deprecated_target_wait_hook)
2740 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2741 else
2742 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2743
2744 if (debug_infrun)
2745 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2746
2747 /* If an error happens while handling the event, propagate GDB's
2748 knowledge of the executing state to the frontend/user running
2749 state. */
2750 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2751 &minus_one_ptid);
2752
2753       /* Now figure out what to do with the result of the wait.  */
2754 handle_inferior_event (ecs);
2755
2756 /* No error, don't finish the state yet. */
2757 discard_cleanups (old_chain_2);
2758
2759 /* Breakpoints and watchpoints are not installed on the target
2760 at this point, and signals are passed directly to the
2761 inferior, so this must mean the process is gone. */
2762 if (!ecs->wait_some_more)
2763 {
2764 discard_cleanups (old_chain_1);
2765 error (_("Program exited while detaching"));
2766 }
2767 }
2768
2769 discard_cleanups (old_chain_1);
2770 }
2771
2772 /* Wait for control to return from inferior to debugger.
2773
2774 If inferior gets a signal, we may decide to start it up again
2775 instead of returning. That is why there is a loop in this function.
2776 When this function actually returns it means the inferior
2777 should be left stopped and GDB should read more commands. */
2778
2779 void
2780 wait_for_inferior (void)
2781 {
2782 struct cleanup *old_cleanups;
2783
2784 if (debug_infrun)
2785 fprintf_unfiltered
2786 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2787
2788 old_cleanups =
2789 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2790
2791 while (1)
2792 {
2793 struct execution_control_state ecss;
2794 struct execution_control_state *ecs = &ecss;
2795 struct cleanup *old_chain;
2796
2797 memset (ecs, 0, sizeof (*ecs));
2798
2799 overlay_cache_invalid = 1;
2800
2801 /* Flush target cache before starting to handle each event.
2802 Target was running and cache could be stale. This is just a
2803 heuristic. Running threads may modify target memory, but we
2804 don't get any event. */
2805 target_dcache_invalidate ();
2806
2807 if (deprecated_target_wait_hook)
2808 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2809 else
2810 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2811
2812 if (debug_infrun)
2813 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2814
2815 /* If an error happens while handling the event, propagate GDB's
2816 knowledge of the executing state to the frontend/user running
2817 state. */
2818 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2819
2820       /* Now figure out what to do with the result of the wait.  */
2821 handle_inferior_event (ecs);
2822
2823 /* No error, don't finish the state yet. */
2824 discard_cleanups (old_chain);
2825
2826 if (!ecs->wait_some_more)
2827 break;
2828 }
2829
2830 do_cleanups (old_cleanups);
2831 }
2832
2833 /* Asynchronous version of wait_for_inferior. It is called by the
2834 event loop whenever a change of state is detected on the file
2835 descriptor corresponding to the target. It can be called more than
2836 once to complete a single execution command. In such cases we need
2837 to keep the state in a global variable ECSS. If it is the last time
2838 that this function is called for a single execution command, then
2839 report to the user that the inferior has stopped, and do the
2840 necessary cleanups. */
2841
2842 void
2843 fetch_inferior_event (void *client_data)
2844 {
2845 struct execution_control_state ecss;
2846 struct execution_control_state *ecs = &ecss;
2847 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2848 struct cleanup *ts_old_chain;
2849 int was_sync = sync_execution;
2850 int cmd_done = 0;
2851
2852 memset (ecs, 0, sizeof (*ecs));
2853
2854 /* We're handling a live event, so make sure we're doing live
2855 debugging. If we're looking at traceframes while the target is
2856 running, we're going to need to get back to that mode after
2857 handling the event. */
2858 if (non_stop)
2859 {
2860 make_cleanup_restore_current_traceframe ();
2861 set_current_traceframe (-1);
2862 }
2863
2864 if (non_stop)
2865 /* In non-stop mode, the user/frontend should not notice a thread
2866        switch due to internal events.  Make sure we revert to the
2867 user selected thread and frame after handling the event and
2868 running any breakpoint commands. */
2869 make_cleanup_restore_current_thread ();
2870
2871 overlay_cache_invalid = 1;
2872 /* Flush target cache before starting to handle each event. Target
2873 was running and cache could be stale. This is just a heuristic.
2874 Running threads may modify target memory, but we don't get any
2875 event. */
2876 target_dcache_invalidate ();
2877
2878 make_cleanup_restore_integer (&execution_direction);
2879 execution_direction = target_execution_direction ();
2880
2881 if (deprecated_target_wait_hook)
2882 ecs->ptid =
2883 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2884 else
2885 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2886
2887 if (debug_infrun)
2888 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2889
2890 /* If an error happens while handling the event, propagate GDB's
2891 knowledge of the executing state to the frontend/user running
2892 state. */
2893 if (!non_stop)
2894 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2895 else
2896 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2897
2898   /* This cleanup runs before the make_cleanup_restore_current_thread
2899      above, so that it still applies to the thread that threw the exception.  */
2900 make_bpstat_clear_actions_cleanup ();
2901
2902   /* Now figure out what to do with the result of the wait.  */
2903 handle_inferior_event (ecs);
2904
2905 if (!ecs->wait_some_more)
2906 {
2907 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2908
2909 delete_step_thread_step_resume_breakpoint ();
2910
2911 /* We may not find an inferior if this was a process exit. */
2912 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2913 normal_stop ();
2914
2915 if (target_has_execution
2916 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2917 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2918 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2919 && ecs->event_thread->step_multi
2920 && ecs->event_thread->control.stop_step)
2921 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2922 else
2923 {
2924 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2925 cmd_done = 1;
2926 }
2927 }
2928
2929 /* No error, don't finish the thread states yet. */
2930 discard_cleanups (ts_old_chain);
2931
2932 /* Revert thread and frame. */
2933 do_cleanups (old_chain);
2934
2935 /* If the inferior was in sync execution mode, and now isn't,
2936 restore the prompt (a synchronous execution command has finished,
2937 and we're ready for input). */
2938 if (interpreter_async && was_sync && !sync_execution)
2939 display_gdb_prompt (0);
2940
2941 if (cmd_done
2942 && !was_sync
2943 && exec_done_display_p
2944 && (ptid_equal (inferior_ptid, null_ptid)
2945 || !is_running (inferior_ptid)))
2946 printf_unfiltered (_("completed.\n"));
2947 }
2948
2949 /* Record the frame and location we're currently stepping through. */
2950 void
2951 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2952 {
2953 struct thread_info *tp = inferior_thread ();
2954
2955 tp->control.step_frame_id = get_frame_id (frame);
2956 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2957
2958 tp->current_symtab = sal.symtab;
2959 tp->current_line = sal.line;
2960 }
2961
2962 /* Clear context switchable stepping state. */
2963
2964 void
2965 init_thread_stepping_state (struct thread_info *tss)
2966 {
2967 tss->stepping_over_breakpoint = 0;
2968 tss->step_after_step_resume_breakpoint = 0;
2969 }
2970
2971 /* Set the cached copy of the last ptid/waitstatus. */
2972
2973 static void
2974 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2975 {
2976 target_last_wait_ptid = ptid;
2977 target_last_waitstatus = status;
2978 }
2979
2980 /* Return the cached copy of the last pid/waitstatus returned by
2981 target_wait()/deprecated_target_wait_hook(). The data is actually
2982 cached by handle_inferior_event(), which gets called immediately
2983 after target_wait()/deprecated_target_wait_hook(). */
2984
2985 void
2986 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2987 {
2988 *ptidp = target_last_wait_ptid;
2989 *status = target_last_waitstatus;
2990 }
2991
2992 void
2993 nullify_last_target_wait_ptid (void)
2994 {
2995 target_last_wait_ptid = minus_one_ptid;
2996 }
2997
2998 /* Switch thread contexts. */
2999
3000 static void
3001 context_switch (ptid_t ptid)
3002 {
3003 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3004 {
3005 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3006 target_pid_to_str (inferior_ptid));
3007 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3008 target_pid_to_str (ptid));
3009 }
3010
3011 switch_to_thread (ptid);
3012 }
3013
3014 static void
3015 adjust_pc_after_break (struct execution_control_state *ecs)
3016 {
3017 struct regcache *regcache;
3018 struct gdbarch *gdbarch;
3019 struct address_space *aspace;
3020 CORE_ADDR breakpoint_pc, decr_pc;
3021
3022 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3023 we aren't, just return.
3024
3025 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3026 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3027 implemented by software breakpoints should be handled through the normal
3028 breakpoint layer.
3029
3030 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3031 different signals (SIGILL or SIGEMT for instance), but it is less
3032 clear where the PC is pointing afterwards. It may not match
3033 gdbarch_decr_pc_after_break. I don't know any specific target that
3034 generates these signals at breakpoints (the code has been in GDB since at
3035 least 1992) so I can not guess how to handle them here.
3036
3037 In earlier versions of GDB, a target with
3038 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3039 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3040 target with both of these set in GDB history, and it seems unlikely to be
3041 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3042
3043 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3044 return;
3045
3046 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3047 return;
3048
3049 /* In reverse execution, when a breakpoint is hit, the instruction
3050 under it has already been de-executed. The reported PC always
3051 points at the breakpoint address, so adjusting it further would
3052 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3053 architecture:
3054
3055 B1 0x08000000 : INSN1
3056 B2 0x08000001 : INSN2
3057 0x08000002 : INSN3
3058 PC -> 0x08000003 : INSN4
3059
3060 Say you're stopped at 0x08000003 as above. Reverse continuing
3061 from that point should hit B2 as below. Reading the PC when the
3062 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3063 been de-executed already.
3064
3065 B1 0x08000000 : INSN1
3066 B2 PC -> 0x08000001 : INSN2
3067 0x08000002 : INSN3
3068 0x08000003 : INSN4
3069
3070 We can't apply the same logic as for forward execution, because
3071 we would wrongly adjust the PC to 0x08000000, since there's a
3072 breakpoint at PC - 1. We'd then report a hit on B1, although
3073 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3074 behaviour. */
3075 if (execution_direction == EXEC_REVERSE)
3076 return;
3077
3078 /* If this target does not decrement the PC after breakpoints, then
3079 we have nothing to do. */
3080 regcache = get_thread_regcache (ecs->ptid);
3081 gdbarch = get_regcache_arch (regcache);
3082
3083 decr_pc = target_decr_pc_after_break (gdbarch);
3084 if (decr_pc == 0)
3085 return;
3086
3087 aspace = get_regcache_aspace (regcache);
3088
3089 /* Find the location where (if we've hit a breakpoint) the
3090 breakpoint would be. */
3091 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3092
3093 /* Check whether there actually is a software breakpoint inserted at
3094 that location.
3095
3096 If in non-stop mode, a race condition is possible where we've
3097 removed a breakpoint, but stop events for that breakpoint were
3098 already queued and arrive later. To suppress those spurious
3099 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3100 and retire them after a number of stop events are reported. */
3101 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3102 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3103 {
3104 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3105
3106 if (record_full_is_used ())
3107 record_full_gdb_operation_disable_set ();
3108
3109 /* When using hardware single-step, a SIGTRAP is reported for both
3110 a completed single-step and a software breakpoint. Need to
3111 differentiate between the two, as the latter needs adjusting
3112 but the former does not.
3113
3114 The SIGTRAP can be due to a completed hardware single-step only if
3115 - we didn't insert software single-step breakpoints
3116 - the thread to be examined is still the current thread
3117 - this thread is currently being stepped
3118
3119 If any of these events did not occur, we must have stopped due
3120 to hitting a software breakpoint, and have to back up to the
3121 breakpoint address.
3122
3123 As a special case, we could have hardware single-stepped a
3124 software breakpoint. In this case (prev_pc == breakpoint_pc),
3125 we also need to back up to the breakpoint address. */
3126
3127 if (singlestep_breakpoints_inserted_p
3128 || !ptid_equal (ecs->ptid, inferior_ptid)
3129 || !currently_stepping (ecs->event_thread)
3130 || ecs->event_thread->prev_pc == breakpoint_pc)
3131 regcache_write_pc (regcache, breakpoint_pc);
3132
3133 do_cleanups (old_cleanups);
3134 }
3135 }
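/* Illustrative sketch (not compiled): the core arithmetic performed
   above, shown for an architecture whose breakpoint instruction leaves
   the PC one byte past the breakpoint (decr_pc_after_break == 1, as on
   x86 with its one-byte trap instruction).  All helpers used here also
   appear in adjust_pc_after_break itself.  */
#if 0
static void
example_decr_pc_adjustment (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct address_space *aspace = get_regcache_aspace (regcache);
  CORE_ADDR decr_pc = target_decr_pc_after_break (gdbarch);
  CORE_ADDR reported_pc = regcache_read_pc (regcache);
  CORE_ADDR breakpoint_pc = reported_pc - decr_pc;

  /* If a software breakpoint is inserted at REPORTED_PC - DECR_PC,
     the SIGTRAP came from that breakpoint, so wind the PC back to the
     breakpoint address instead of leaving it one insn past it.  */
  if (decr_pc != 0
      && software_breakpoint_inserted_here_p (aspace, breakpoint_pc))
    regcache_write_pc (regcache, breakpoint_pc);
}
#endif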
3136
3137 static void
3138 init_infwait_state (void)
3139 {
3140 waiton_ptid = pid_to_ptid (-1);
3141 infwait_state = infwait_normal_state;
3142 }
3143
3144 static int
3145 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3146 {
3147 for (frame = get_prev_frame (frame);
3148 frame != NULL;
3149 frame = get_prev_frame (frame))
3150 {
3151 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3152 return 1;
3153 if (get_frame_type (frame) != INLINE_FRAME)
3154 break;
3155 }
3156
3157 return 0;
3158 }
3159
3160 /* Auxiliary function that handles syscall entry/return events.
3161 It returns 1 if the inferior should keep going (and GDB
3162 should ignore the event), or 0 if the event deserves to be
3163 processed. */
3164
3165 static int
3166 handle_syscall_event (struct execution_control_state *ecs)
3167 {
3168 struct regcache *regcache;
3169 int syscall_number;
3170
3171 if (!ptid_equal (ecs->ptid, inferior_ptid))
3172 context_switch (ecs->ptid);
3173
3174 regcache = get_thread_regcache (ecs->ptid);
3175 syscall_number = ecs->ws.value.syscall_number;
3176 stop_pc = regcache_read_pc (regcache);
3177
3178 if (catch_syscall_enabled () > 0
3179 && catching_syscall_number (syscall_number) > 0)
3180 {
3181 if (debug_infrun)
3182 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3183 syscall_number);
3184
3185 ecs->event_thread->control.stop_bpstat
3186 = bpstat_stop_status (get_regcache_aspace (regcache),
3187 stop_pc, ecs->ptid, &ecs->ws);
3188
3189 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3190 {
3191 /* Catchpoint hit. */
3192 return 0;
3193 }
3194 }
3195
3196 /* If no catchpoint triggered for this, then keep going. */
3197 keep_going (ecs);
3198 return 1;
3199 }
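/* Illustrative sketch (not compiled): the shape of the wait status a
   target backend hands to the function above when the inferior enters
   a system call.  The syscall number is made up; real backends map the
   target's raw number into GDB's canonical numbering before reporting
   it.  */
#if 0
static void
example_report_syscall_entry (struct target_waitstatus *ws)
{
  ws->kind = TARGET_WAITKIND_SYSCALL_ENTRY;
  ws->value.syscall_number = 1;	/* hypothetical syscall number */
}
#endif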
3200
3201 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3202
3203 static void
3204 fill_in_stop_func (struct gdbarch *gdbarch,
3205 struct execution_control_state *ecs)
3206 {
3207 if (!ecs->stop_func_filled_in)
3208 {
3209 /* Don't care about return value; stop_func_start and stop_func_name
3210 will both be 0 if it doesn't work. */
3211 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3212 &ecs->stop_func_start, &ecs->stop_func_end);
3213 ecs->stop_func_start
3214 += gdbarch_deprecated_function_start_offset (gdbarch);
3215
3216 if (gdbarch_skip_entrypoint_p (gdbarch))
3217 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3218 ecs->stop_func_start);
3219
3220 ecs->stop_func_filled_in = 1;
3221 }
3222 }
3223
3224
3225 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3226
3227 static enum stop_kind
3228 get_inferior_stop_soon (ptid_t ptid)
3229 {
3230 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3231
3232 gdb_assert (inf != NULL);
3233 return inf->control.stop_soon;
3234 }
3235
3236 /* Given an execution control state that has been freshly filled in by
3237 an event from the inferior, figure out what it means and take
3238 appropriate action.
3239
3240 The alternatives are:
3241
3242 1) stop_stepping and return; to really stop and return to the
3243 debugger.
3244
3245 2) keep_going and return; to wait for the next event (set
3246 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3247 once). */
3248
3249 static void
3250 handle_inferior_event (struct execution_control_state *ecs)
3251 {
3252 enum stop_kind stop_soon;
3253
3254 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3255 {
3256 /* We had an event in the inferior, but we are not interested in
3257 handling it at this level. The lower layers have already
3258 done what needs to be done, if anything.
3259
3260 One of the possible circumstances for this is when the
3261 inferior produces output for the console. The inferior has
3262 not stopped, and we are ignoring the event. Another possible
3263 circumstance is any event which the lower level knows will be
3264 reported multiple times without an intervening resume. */
3265 if (debug_infrun)
3266 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3267 prepare_to_wait (ecs);
3268 return;
3269 }
3270
3271 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3272 && target_can_async_p () && !sync_execution)
3273 {
3274 /* There were no unwaited-for children left in the target, but,
3275 we're not synchronously waiting for events either. Just
3276 ignore. Otherwise, if we were running a synchronous
3277 execution command, we need to cancel it and give the user
3278 back the terminal. */
3279 if (debug_infrun)
3280 fprintf_unfiltered (gdb_stdlog,
3281 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3282 prepare_to_wait (ecs);
3283 return;
3284 }
3285
3286 /* Cache the last pid/waitstatus. */
3287 set_last_target_status (ecs->ptid, ecs->ws);
3288
3289 /* Always clear state belonging to the previous time we stopped. */
3290 stop_stack_dummy = STOP_NONE;
3291
3292 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3293 {
3294 /* No unwaited-for children left. IOW, all resumed children
3295 have exited. */
3296 if (debug_infrun)
3297 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3298
3299 stop_print_frame = 0;
3300 stop_stepping (ecs);
3301 return;
3302 }
3303
3304 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3305 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3306 {
3307 ecs->event_thread = find_thread_ptid (ecs->ptid);
3308 /* If it's a new thread, add it to the thread database. */
3309 if (ecs->event_thread == NULL)
3310 ecs->event_thread = add_thread (ecs->ptid);
3311
3312 /* Disable range stepping. If the next step request could use a
3313         range, it will end up re-enabled then.  */
3314 ecs->event_thread->control.may_range_step = 0;
3315 }
3316
3317 /* Dependent on valid ECS->EVENT_THREAD. */
3318 adjust_pc_after_break (ecs);
3319
3320 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3321 reinit_frame_cache ();
3322
3323 breakpoint_retire_moribund ();
3324
3325 /* First, distinguish signals caused by the debugger from signals
3326 that have to do with the program's own actions. Note that
3327 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3328 on the operating system version. Here we detect when a SIGILL or
3329 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3330 something similar for SIGSEGV, since a SIGSEGV will be generated
3331 when we're trying to execute a breakpoint instruction on a
3332 non-executable stack. This happens for call dummy breakpoints
3333 for architectures like SPARC that place call dummies on the
3334 stack. */
3335 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3336 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3337 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3338 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3339 {
3340 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3341
3342 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3343 regcache_read_pc (regcache)))
3344 {
3345 if (debug_infrun)
3346 fprintf_unfiltered (gdb_stdlog,
3347 "infrun: Treating signal as SIGTRAP\n");
3348 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3349 }
3350 }
3351
3352 /* Mark the non-executing threads accordingly. In all-stop, all
3353 threads of all processes are stopped when we get any event
3354 reported. In non-stop mode, only the event thread stops. If
3355 we're handling a process exit in non-stop mode, there's nothing
3356 to do, as threads of the dead process are gone, and threads of
3357 any other process were left running. */
3358 if (!non_stop)
3359 set_executing (minus_one_ptid, 0);
3360 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3361 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3362 set_executing (ecs->ptid, 0);
3363
3364 switch (infwait_state)
3365 {
3366 case infwait_normal_state:
3367 if (debug_infrun)
3368 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3369 break;
3370
3371 case infwait_step_watch_state:
3372 if (debug_infrun)
3373 fprintf_unfiltered (gdb_stdlog,
3374 "infrun: infwait_step_watch_state\n");
3375
3376 ecs->stepped_after_stopped_by_watchpoint = 1;
3377 break;
3378
3379 case infwait_nonstep_watch_state:
3380 if (debug_infrun)
3381 fprintf_unfiltered (gdb_stdlog,
3382 "infrun: infwait_nonstep_watch_state\n");
3383 insert_breakpoints ();
3384
3385 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3386 handle things like signals arriving and other things happening
3387 in combination correctly? */
3388 ecs->stepped_after_stopped_by_watchpoint = 1;
3389 break;
3390
3391 default:
3392 internal_error (__FILE__, __LINE__, _("bad switch"));
3393 }
3394
3395 infwait_state = infwait_normal_state;
3396 waiton_ptid = pid_to_ptid (-1);
3397
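/* Dispatch on the kind of event reported. Each case below either
stops (stop_stepping and return), resumes the target and waits for
the next event, or hands off to handle_signal_stop /
process_event_stop_test for further analysis. */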
3398 switch (ecs->ws.kind)
3399 {
3400 case TARGET_WAITKIND_LOADED:
3401 if (debug_infrun)
3402 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3403 if (!ptid_equal (ecs->ptid, inferior_ptid))
3404 context_switch (ecs->ptid);
3405 /* Ignore gracefully during startup of the inferior, as it might
3406 be the shell which has just loaded some objects; otherwise
3407 add the symbols for the newly loaded objects. Also ignore at
3408 the beginning of an attach or remote session; we will query
3409 the full list of libraries once the connection is
3410 established. */
3411
3412 stop_soon = get_inferior_stop_soon (ecs->ptid);
3413 if (stop_soon == NO_STOP_QUIETLY)
3414 {
3415 struct regcache *regcache;
3416
3417 regcache = get_thread_regcache (ecs->ptid);
3418
3419 handle_solib_event ();
3420
3421 ecs->event_thread->control.stop_bpstat
3422 = bpstat_stop_status (get_regcache_aspace (regcache),
3423 stop_pc, ecs->ptid, &ecs->ws);
3424
3425 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3426 {
3427 /* A catchpoint triggered. */
3428 process_event_stop_test (ecs);
3429 return;
3430 }
3431
3432 /* If requested, stop when the dynamic linker notifies
3433 gdb of events. This allows the user to get control
3434 and place breakpoints in initializer routines for
3435 dynamically loaded objects (among other things). */
3436 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3437 if (stop_on_solib_events)
3438 {
3439 /* Make sure we print "Stopped due to solib-event" in
3440 normal_stop. */
3441 stop_print_frame = 1;
3442
3443 stop_stepping (ecs);
3444 return;
3445 }
3446 }
3447
3448 /* If we are skipping through a shell, or through shared library
3449 loading that we aren't interested in, resume the program. If
3450 we're running the program normally, also resume. */
3451 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3452 {
3453 /* Loading of shared libraries might have changed breakpoint
3454 addresses. Make sure new breakpoints are inserted. */
3455 if (stop_soon == NO_STOP_QUIETLY
3456 && !breakpoints_always_inserted_mode ())
3457 insert_breakpoints ();
3458 resume (0, GDB_SIGNAL_0);
3459 prepare_to_wait (ecs);
3460 return;
3461 }
3462
3463 /* But stop if we're attaching or setting up a remote
3464 connection. */
3465 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3466 || stop_soon == STOP_QUIETLY_REMOTE)
3467 {
3468 if (debug_infrun)
3469 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3470 stop_stepping (ecs);
3471 return;
3472 }
3473
3474 internal_error (__FILE__, __LINE__,
3475 _("unhandled stop_soon: %d"), (int) stop_soon);
3476
3477 case TARGET_WAITKIND_SPURIOUS:
3478 if (debug_infrun)
3479 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3480 if (!ptid_equal (ecs->ptid, inferior_ptid))
3481 context_switch (ecs->ptid);
3482 resume (0, GDB_SIGNAL_0);
3483 prepare_to_wait (ecs);
3484 return;
3485
3486 case TARGET_WAITKIND_EXITED:
3487 case TARGET_WAITKIND_SIGNALLED:
3488 if (debug_infrun)
3489 {
3490 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3491 fprintf_unfiltered (gdb_stdlog,
3492 "infrun: TARGET_WAITKIND_EXITED\n");
3493 else
3494 fprintf_unfiltered (gdb_stdlog,
3495 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3496 }
3497
3498 inferior_ptid = ecs->ptid;
3499 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3500 set_current_program_space (current_inferior ()->pspace);
3501 handle_vfork_child_exec_or_exit (0);
3502 target_terminal_ours (); /* Must do this before mourn anyway. */
3503
3504 /* Clear any previous state of convenience variables. */
3505 clear_exit_convenience_vars ();
3506
3507 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3508 {
3509 /* Record the exit code in the convenience variable $_exitcode, so
3510 that the user can inspect this again later. */
3511 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3512 (LONGEST) ecs->ws.value.integer);
3513
3514 /* Also record this in the inferior itself. */
3515 current_inferior ()->has_exit_code = 1;
3516 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3517
3518 print_exited_reason (ecs->ws.value.integer);
3519 }
3520 else
3521 {
3522 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3523 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3524
3525 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3526 {
3527 /* Set the value of the internal variable $_exitsignal,
3528 which holds the signal uncaught by the inferior. */
3529 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3530 gdbarch_gdb_signal_to_target (gdbarch,
3531 ecs->ws.value.sig));
3532 }
3533 else
3534 {
3535 /* We don't have access to the target's method used for
3536 converting between signal numbers (GDB's internal
3537 representation <-> target's representation).
3538 Therefore, we cannot do a good job at displaying this
3539 information to the user. It's better to just warn
3540 her about it (if infrun debugging is enabled), and
3541 give up. */
3542 if (debug_infrun)
3543 fprintf_filtered (gdb_stdlog, _("\
3544 Cannot fill $_exitsignal with the correct signal number.\n"));
3545 }
3546
3547 print_signal_exited_reason (ecs->ws.value.sig);
3548 }
3549
3550 gdb_flush (gdb_stdout);
3551 target_mourn_inferior ();
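/* The inferior is gone; drop the single-step breakpoint
bookkeeping that referred to it. */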
3552 singlestep_breakpoints_inserted_p = 0;
3553 cancel_single_step_breakpoints ();
3554 stop_print_frame = 0;
3555 stop_stepping (ecs);
3556 return;
3557
3558 /* The following are the only cases in which we keep going;
3559 the above cases end in a continue or goto. */
3560 case TARGET_WAITKIND_FORKED:
3561 case TARGET_WAITKIND_VFORKED:
3562 if (debug_infrun)
3563 {
3564 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3565 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3566 else
3567 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3568 }
3569
3570 /* Check whether the inferior is displaced stepping. */
3571 {
3572 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3573 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3574 struct displaced_step_inferior_state *displaced
3575 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3576
3577 /* If this process has displaced stepping state, and thread
3578 ecs->ptid is the one that is displaced stepping, fix that up here. */
3579 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3580 {
3581 struct inferior *parent_inf
3582 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3583 struct regcache *child_regcache;
3584 CORE_ADDR parent_pc;
3585
3586 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3587 indicating that the displaced stepping of the syscall instruction
3588 has been done. Perform cleanup for the parent process here. Note
3589 that this operation also cleans up the child process for vfork,
3590 because their pages are shared. */
3591 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3592
3593 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3594 {
3595 /* Restore scratch pad for child process. */
3596 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3597 }
3598
3599 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3600 the child's PC is also within the scratchpad. Set the child's PC
3601 to the parent's PC value, which has already been fixed up.
3602 FIXME: we use the parent's aspace here, although we're touching
3603 the child, because the child hasn't been added to the inferior
3604 list yet at this point. */
3605
3606 child_regcache
3607 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3608 gdbarch,
3609 parent_inf->aspace);
3610 /* Read PC value of parent process. */
3611 parent_pc = regcache_read_pc (regcache);
3612
3613 if (debug_displaced)
3614 fprintf_unfiltered (gdb_stdlog,
3615 "displaced: write child pc from %s to %s\n",
3616 paddress (gdbarch,
3617 regcache_read_pc (child_regcache)),
3618 paddress (gdbarch, parent_pc));
3619
3620 regcache_write_pc (child_regcache, parent_pc);
3621 }
3622 }
3623
3624 if (!ptid_equal (ecs->ptid, inferior_ptid))
3625 context_switch (ecs->ptid);
3626
3627 /* Immediately detach breakpoints from the child before there's
3628 any chance of letting the user delete breakpoints from the
3629 breakpoint lists. If we don't do this early, it's easy to
3630 leave left-over traps in the child, viz: "break foo; catch
3631 fork; c; <fork>; del; c; <child calls foo>". We only follow
3632 the fork on the last `continue', and by that time the
3633 breakpoint at "foo" is long gone from the breakpoint table.
3634 If we vforked, then we don't need to unpatch here, since both
3635 parent and child are sharing the same memory pages; we'll
3636 need to unpatch at follow/detach time instead to be certain
3637 that new breakpoints added between catchpoint hit time and
3638 vfork follow are detached. */
3639 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3640 {
3641 /* This won't actually modify the breakpoint list, but will
3642 physically remove the breakpoints from the child. */
3643 detach_breakpoints (ecs->ws.value.related_pid);
3644 }
3645
3646 if (singlestep_breakpoints_inserted_p)
3647 {
3648 /* Pull the single step breakpoints out of the target. */
3649 remove_single_step_breakpoints ();
3650 singlestep_breakpoints_inserted_p = 0;
3651 }
3652
3653 /* In case the event is caught by a catchpoint, remember that
3654 the event is to be followed at the next resume of the thread,
3655 and not immediately. */
3656 ecs->event_thread->pending_follow = ecs->ws;
3657
3658 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3659
3660 ecs->event_thread->control.stop_bpstat
3661 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3662 stop_pc, ecs->ptid, &ecs->ws);
3663
3664 /* If no catchpoint triggered for this, then keep going. Note
3665 that we're interested in knowing the bpstat actually causes a
3666 stop, not just if it may explain the signal. Software
3667 watchpoints, for example, always appear in the bpstat. */
3668 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3669 {
3670 ptid_t parent;
3671 ptid_t child;
3672 int should_resume;
3673 int follow_child
3674 = (follow_fork_mode_string == follow_fork_mode_child);
3675
3676 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3677
3678 should_resume = follow_fork ();
3679
3680 parent = ecs->ptid;
3681 child = ecs->ws.value.related_pid;
3682
3683 /* In non-stop mode, also resume the other branch. */
3684 if (non_stop && !detach_fork)
3685 {
3686 if (follow_child)
3687 switch_to_thread (parent);
3688 else
3689 switch_to_thread (child);
3690
3691 ecs->event_thread = inferior_thread ();
3692 ecs->ptid = inferior_ptid;
3693 keep_going (ecs);
3694 }
3695
3696 if (follow_child)
3697 switch_to_thread (child);
3698 else
3699 switch_to_thread (parent);
3700
3701 ecs->event_thread = inferior_thread ();
3702 ecs->ptid = inferior_ptid;
3703
3704 if (should_resume)
3705 keep_going (ecs);
3706 else
3707 stop_stepping (ecs);
3708 return;
3709 }
3710 process_event_stop_test (ecs);
3711 return;
3712
3713 case TARGET_WAITKIND_VFORK_DONE:
3714 /* Done with the shared memory region. Re-insert breakpoints in
3715 the parent, and keep going. */
3716
3717 if (debug_infrun)
3718 fprintf_unfiltered (gdb_stdlog,
3719 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3720
3721 if (!ptid_equal (ecs->ptid, inferior_ptid))
3722 context_switch (ecs->ptid);
3723
3724 current_inferior ()->waiting_for_vfork_done = 0;
3725 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3726 /* This also takes care of reinserting breakpoints in the
3727 previously locked inferior. */
3728 keep_going (ecs);
3729 return;
3730
3731 case TARGET_WAITKIND_EXECD:
3732 if (debug_infrun)
3733 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3734
3735 if (!ptid_equal (ecs->ptid, inferior_ptid))
3736 context_switch (ecs->ptid);
3737
3738 singlestep_breakpoints_inserted_p = 0;
3739 cancel_single_step_breakpoints ();
3740
3741 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3742
3743 /* Do whatever is necessary to the parent branch of the vfork. */
3744 handle_vfork_child_exec_or_exit (1);
3745
3746 /* This causes the eventpoints and symbol table to be reset.
3747 Must do this now, before trying to determine whether to
3748 stop. */
3749 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3750
3751 ecs->event_thread->control.stop_bpstat
3752 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3753 stop_pc, ecs->ptid, &ecs->ws);
3754
3755 /* Note that this may be referenced from inside
3756 bpstat_stop_status above, through inferior_has_execd. */
3757 xfree (ecs->ws.value.execd_pathname);
3758 ecs->ws.value.execd_pathname = NULL;
3759
3760 /* If no catchpoint triggered for this, then keep going. */
3761 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3762 {
3763 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3764 keep_going (ecs);
3765 return;
3766 }
3767 process_event_stop_test (ecs);
3768 return;
3769
3770 /* Be careful not to try to gather much state about a thread
3771 that's in a syscall. It's frequently a losing proposition. */
3772 case TARGET_WAITKIND_SYSCALL_ENTRY:
3773 if (debug_infrun)
3774 fprintf_unfiltered (gdb_stdlog,
3775 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3776 /* Getting the current syscall number. */
3777 if (handle_syscall_event (ecs) == 0)
3778 process_event_stop_test (ecs);
3779 return;
3780
3781 /* Before examining the threads further, step this thread to
3782 get it entirely out of the syscall. (We get notice of the
3783 event when the thread is just on the verge of exiting a
3784 syscall. Stepping one instruction seems to get it back
3785 into user code.) */
3786 case TARGET_WAITKIND_SYSCALL_RETURN:
3787 if (debug_infrun)
3788 fprintf_unfiltered (gdb_stdlog,
3789 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3790 if (handle_syscall_event (ecs) == 0)
3791 process_event_stop_test (ecs);
3792 return;
3793
3794 case TARGET_WAITKIND_STOPPED:
3795 if (debug_infrun)
3796 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3797 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3798 handle_signal_stop (ecs);
3799 return;
3800
3801 case TARGET_WAITKIND_NO_HISTORY:
3802 if (debug_infrun)
3803 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3804 /* Reverse execution: target ran out of history info. */
3805
3806 /* Pull the single step breakpoints out of the target. */
3807 if (singlestep_breakpoints_inserted_p)
3808 {
3809 if (!ptid_equal (ecs->ptid, inferior_ptid))
3810 context_switch (ecs->ptid);
3811 remove_single_step_breakpoints ();
3812 singlestep_breakpoints_inserted_p = 0;
3813 }
3814 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3815 print_no_history_reason ();
3816 stop_stepping (ecs);
3817 return;
3818 }
3819 }
3820
3821 /* Come here when the program has stopped with a signal. */
3822
3823 static void
3824 handle_signal_stop (struct execution_control_state *ecs)
3825 {
3826 struct frame_info *frame;
3827 struct gdbarch *gdbarch;
3828 int stopped_by_watchpoint;
3829 enum stop_kind stop_soon;
3830 int random_signal;
3831
3832 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3833
3834 /* Do we need to clean up the state of a thread that has
3835 completed a displaced single-step? (Doing so usually affects
3836 the PC, so do it here, before we set stop_pc.) */
3837 displaced_step_fixup (ecs->ptid,
3838 ecs->event_thread->suspend.stop_signal);
3839
3840 /* If we either finished a single-step or hit a breakpoint, but
3841 the user wanted this thread to be stopped, pretend we got a
3842 SIG0 (generic unsignaled stop). */
3843 if (ecs->event_thread->stop_requested
3844 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3845 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3846
3847 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3848
3849 if (debug_infrun)
3850 {
3851 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3852 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3853 struct cleanup *old_chain = save_inferior_ptid ();
3854
3855 inferior_ptid = ecs->ptid;
3856
3857 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3858 paddress (gdbarch, stop_pc));
3859 if (target_stopped_by_watchpoint ())
3860 {
3861 CORE_ADDR addr;
3862
3863 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3864
3865 if (target_stopped_data_address (&current_target, &addr))
3866 fprintf_unfiltered (gdb_stdlog,
3867 "infrun: stopped data address = %s\n",
3868 paddress (gdbarch, addr));
3869 else
3870 fprintf_unfiltered (gdb_stdlog,
3871 "infrun: (no data address available)\n");
3872 }
3873
3874 do_cleanups (old_chain);
3875 }
3876
3877 /* This originates from start_remote(), start_inferior() and
3878 shared library hook functions. */
3879 stop_soon = get_inferior_stop_soon (ecs->ptid);
3880 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3881 {
3882 if (!ptid_equal (ecs->ptid, inferior_ptid))
3883 context_switch (ecs->ptid);
3884 if (debug_infrun)
3885 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3886 stop_print_frame = 1;
3887 stop_stepping (ecs);
3888 return;
3889 }
3890
3891 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3892 && stop_after_trap)
3893 {
3894 if (!ptid_equal (ecs->ptid, inferior_ptid))
3895 context_switch (ecs->ptid);
3896 if (debug_infrun)
3897 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3898 stop_print_frame = 0;
3899 stop_stepping (ecs);
3900 return;
3901 }
3902
3903 /* This originates from attach_command(). We need to overwrite
3904 the stop_signal here, because some kernels don't ignore a
3905 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3906 See more comments in inferior.h. On the other hand, if we
3907 get a non-SIGSTOP, report it to the user - assume the backend
3908 will handle the SIGSTOP if it should show up later.
3909
3910 Also consider that the attach is complete when we see a
3911 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3912 target extended-remote report it instead of a SIGSTOP
3913 (e.g. gdbserver). We already rely on SIGTRAP being our
3914 signal, so this is no exception.
3915
3916 Also consider that the attach is complete when we see a
3917 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3918 the target to stop all threads of the inferior, in case the
3919 low level attach operation doesn't stop them implicitly. If
3920 they weren't stopped implicitly, then the stub will report a
3921 GDB_SIGNAL_0, meaning: stopped for no particular reason
3922 other than GDB's request. */
3923 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3924 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3925 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3926 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3927 {
3928 stop_print_frame = 1;
3929 stop_stepping (ecs);
3930 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3931 return;
3932 }
3933
3934 /* See if something interesting happened to the non-current thread. If
3935 so, then switch to that thread. */
3936 if (!ptid_equal (ecs->ptid, inferior_ptid))
3937 {
3938 if (debug_infrun)
3939 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3940
3941 context_switch (ecs->ptid);
3942
3943 if (deprecated_context_hook)
3944 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3945 }
3946
3947 /* At this point, get hold of the now-current thread's frame. */
3948 frame = get_current_frame ();
3949 gdbarch = get_frame_arch (frame);
3950
3951 /* Pull the single step breakpoints out of the target. */
3952 if (singlestep_breakpoints_inserted_p)
3953 {
3954 /* However, before doing so, if this single-step breakpoint was
3955 actually for another thread, set this thread up for moving
3956 past it. */
3957 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3958 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3959 {
3960 struct regcache *regcache;
3961 struct address_space *aspace;
3962 CORE_ADDR pc;
3963
3964 regcache = get_thread_regcache (ecs->ptid);
3965 aspace = get_regcache_aspace (regcache);
3966 pc = regcache_read_pc (regcache);
3967 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3968 {
3969 if (debug_infrun)
3970 {
3971 fprintf_unfiltered (gdb_stdlog,
3972 "infrun: [%s] hit step over single-step"
3973 " breakpoint of [%s]\n",
3974 target_pid_to_str (ecs->ptid),
3975 target_pid_to_str (singlestep_ptid));
3976 }
3977 ecs->hit_singlestep_breakpoint = 1;
3978 }
3979 }
3980
3981 remove_single_step_breakpoints ();
3982 singlestep_breakpoints_inserted_p = 0;
3983 }
3984
3985 if (ecs->stepped_after_stopped_by_watchpoint)
3986 stopped_by_watchpoint = 0;
3987 else
3988 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3989
3990 /* If necessary, step over this watchpoint. We'll be back to display
3991 it in a moment. */
3992 if (stopped_by_watchpoint
3993 && (target_have_steppable_watchpoint
3994 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3995 {
3996 /* At this point, we are stopped at an instruction which has
3997 attempted to write to a piece of memory under control of
3998 a watchpoint. The instruction hasn't actually executed
3999 yet. If we were to evaluate the watchpoint expression
4000 now, we would get the old value, and therefore no change
4001 would seem to have occurred.
4002
4003 In order to make watchpoints work `right', we really need
4004 to complete the memory write, and then evaluate the
4005 watchpoint expression. We do this by single-stepping the
4006 target.
4007
4008 It may not be necessary to disable the watchpoint to step over
4009 it. For example, the PA can (with some kernel cooperation)
4010 single step over a watchpoint without disabling the watchpoint.
4011
4012 It is far more common to need to disable a watchpoint to step
4013 the inferior over it. If we have non-steppable watchpoints,
4014 we must disable the current watchpoint; it's simplest to
4015 disable all watchpoints and breakpoints. */
4016 int hw_step = 1;
4017
4018 if (!target_have_steppable_watchpoint)
4019 {
4020 remove_breakpoints ();
4021 /* See comment in resume why we need to stop bypassing signals
4022 while breakpoints have been removed. */
4023 target_pass_signals (0, NULL);
4024 }
4025 /* Single step */
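/* MAYBE_SOFTWARE_SINGLESTEP is expected to return zero once it has
set up software single-step breakpoints; HW_STEP then tells
target_resume whether a hardware single-step is still wanted. */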
4026 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4027 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4028 waiton_ptid = ecs->ptid;
4029 if (target_have_steppable_watchpoint)
4030 infwait_state = infwait_step_watch_state;
4031 else
4032 infwait_state = infwait_nonstep_watch_state;
4033 prepare_to_wait (ecs);
4034 return;
4035 }
4036
4037 ecs->event_thread->stepping_over_breakpoint = 0;
4038 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4039 ecs->event_thread->control.stop_step = 0;
4040 stop_print_frame = 1;
4041 stopped_by_random_signal = 0;
4042
4043 /* Hide inlined functions starting here, unless we just performed stepi or
4044 nexti. After stepi and nexti, always show the innermost frame (not any
4045 inline function call sites). */
4046 if (ecs->event_thread->control.step_range_end != 1)
4047 {
4048 struct address_space *aspace =
4049 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4050
4051 /* skip_inline_frames is expensive, so we avoid it if we can
4052 determine that the address is one where functions cannot have
4053 been inlined. This improves performance with inferiors that
4054 load a lot of shared libraries, because the solib event
4055 breakpoint is defined as the address of a function (i.e. not
4056 inline). Note that we have to check the previous PC as well
4057 as the current one to catch cases when we have just
4058 single-stepped off a breakpoint prior to reinstating it.
4059 Note that we're assuming that the code we single-step to is
4060 not inline, but that's not definitive: there's nothing
4061 preventing the event breakpoint function from containing
4062 inlined code, and the single-step ending up there. If the
4063 user had set a breakpoint on that inlined code, the missing
4064 skip_inline_frames call would break things. Fortunately
4065 that's an extremely unlikely scenario. */
4066 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4067 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4068 && ecs->event_thread->control.trap_expected
4069 && pc_at_non_inline_function (aspace,
4070 ecs->event_thread->prev_pc,
4071 &ecs->ws)))
4072 {
4073 skip_inline_frames (ecs->ptid);
4074
4075 /* Re-fetch current thread's frame in case that invalidated
4076 the frame cache. */
4077 frame = get_current_frame ();
4078 gdbarch = get_frame_arch (frame);
4079 }
4080 }
4081
4082 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4083 && ecs->event_thread->control.trap_expected
4084 && gdbarch_single_step_through_delay_p (gdbarch)
4085 && currently_stepping (ecs->event_thread))
4086 {
4087 /* We're trying to step off a breakpoint. Turns out that we're
4088 also on an instruction that needs to be stepped multiple
4089 times before it's been fully executed. E.g., architectures
4090 with a delay slot. It needs to be stepped twice, once for
4091 the instruction and once for the delay slot. */
4092 int step_through_delay
4093 = gdbarch_single_step_through_delay (gdbarch, frame);
4094
4095 if (debug_infrun && step_through_delay)
4096 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4097 if (ecs->event_thread->control.step_range_end == 0
4098 && step_through_delay)
4099 {
4100 /* The user issued a continue when stopped at a breakpoint.
4101 Set up for another trap and get out of here. */
4102 ecs->event_thread->stepping_over_breakpoint = 1;
4103 keep_going (ecs);
4104 return;
4105 }
4106 else if (step_through_delay)
4107 {
4108 /* The user issued a step when stopped at a breakpoint.
4109 Maybe we should stop, maybe we should not - the delay
4110 slot *might* correspond to a line of source. In any
4111 case, don't decide that here, just set
4112 ecs->stepping_over_breakpoint, making sure we
4113 single-step again before breakpoints are re-inserted. */
4114 ecs->event_thread->stepping_over_breakpoint = 1;
4115 }
4116 }
4117
4118 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4119 handles this event. */
4120 ecs->event_thread->control.stop_bpstat
4121 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4122 stop_pc, ecs->ptid, &ecs->ws);
4123
4124 /* The following is in case a breakpoint condition called a
4125 function. */
4126 stop_print_frame = 1;
4127
4128 /* This is where we handle "moribund" watchpoints. Unlike
4129 software breakpoints traps, hardware watchpoint traps are
4130 always distinguishable from random traps. If no high-level
4131 watchpoint is associated with the reported stop data address
4132 anymore, then the bpstat does not explain the signal ---
4133 simply make sure to ignore it if `stopped_by_watchpoint' is
4134 set. */
4135
4136 if (debug_infrun
4137 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4138 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4139 GDB_SIGNAL_TRAP)
4140 && stopped_by_watchpoint)
4141 fprintf_unfiltered (gdb_stdlog,
4142 "infrun: no user watchpoint explains "
4143 "watchpoint SIGTRAP, ignoring\n");
4144
4145 /* NOTE: cagney/2003-03-29: These checks for a random signal
4146 at one stage in the past included checks for an inferior
4147 function call's call dummy's return breakpoint. The original
4148 comment, that went with the test, read:
4149
4150 ``End of a stack dummy. Some systems (e.g. Sony news) give
4151 another signal besides SIGTRAP, so check here as well as
4152 above.''
4153
4154 If someone ever tries to get call dummies on a
4155 non-executable stack to work (where the target would stop
4156 with something like a SIGSEGV), then those tests might need
4157 to be re-instated. Given, however, that the tests were only
4158 enabled when momentary breakpoints were not being used, I
4159 suspect that it won't be the case.
4160
4161 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4162 be necessary for call dummies on a non-executable stack on
4163 SPARC. */
4164
4165 /* See if the breakpoints module can explain the signal. */
4166 random_signal
4167 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4168 ecs->event_thread->suspend.stop_signal);
4169
4170 /* If not, perhaps stepping/nexting can. */
4171 if (random_signal)
4172 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4173 && currently_stepping (ecs->event_thread));
4174
4175 /* Perhaps the thread hit a single-step breakpoint of _another_
4176 thread. Single-step breakpoints are transparent to the
4177 breakpoints module. */
4178 if (random_signal)
4179 random_signal = !ecs->hit_singlestep_breakpoint;
4180
4181 /* No? Perhaps we got a moribund watchpoint. */
4182 if (random_signal)
4183 random_signal = !stopped_by_watchpoint;
4184
4185 /* For the program's own signals, act according to
4186 the signal handling tables. */
4187
4188 if (random_signal)
4189 {
4190 /* Signal not for debugging purposes. */
4191 int printed = 0;
4192 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4193 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4194
4195 if (debug_infrun)
4196 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4197 gdb_signal_to_symbol_string (stop_signal));
4198
4199 stopped_by_random_signal = 1;
4200
4201 if (signal_print[ecs->event_thread->suspend.stop_signal])
4202 {
4203 printed = 1;
4204 target_terminal_ours_for_output ();
4205 print_signal_received_reason
4206 (ecs->event_thread->suspend.stop_signal);
4207 }
4208 /* Always stop on signals if we're either just gaining control
4209 of the program, or the user explicitly requested this thread
4210 to remain stopped. */
4211 if (stop_soon != NO_STOP_QUIETLY
4212 || ecs->event_thread->stop_requested
4213 || (!inf->detaching
4214 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4215 {
4216 stop_stepping (ecs);
4217 return;
4218 }
4219 /* If not going to stop, give terminal back
4220 if we took it away. */
4221 else if (printed)
4222 target_terminal_inferior ();
4223
4224 /* Clear the signal if it should not be passed. */
4225 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4226 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4227
4228 if (ecs->event_thread->prev_pc == stop_pc
4229 && ecs->event_thread->control.trap_expected
4230 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4231 {
4232 /* We were just starting a new sequence, attempting to
4233 single-step off of a breakpoint and expecting a SIGTRAP.
4234 Instead this signal arrives. This signal will take us out
4235 of the stepping range so GDB needs to remember to, when
4236 the signal handler returns, resume stepping off that
4237 breakpoint. */
4238 /* To simplify things, "continue" is forced to use the same
4239 code paths as single-step - set a breakpoint at the
4240 signal return address and then, once hit, step off that
4241 breakpoint. */
4242 if (debug_infrun)
4243 fprintf_unfiltered (gdb_stdlog,
4244 "infrun: signal arrived while stepping over "
4245 "breakpoint\n");
4246
4247 insert_hp_step_resume_breakpoint_at_frame (frame);
4248 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4249 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4250 ecs->event_thread->control.trap_expected = 0;
4251
4252 /* If we were nexting/stepping some other thread, switch to
4253 it, so that we don't continue it, losing control. */
4254 if (!switch_back_to_stepped_thread (ecs))
4255 keep_going (ecs);
4256 return;
4257 }
4258
4259 if (ecs->event_thread->control.step_range_end != 0
4260 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4261 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4262 && frame_id_eq (get_stack_frame_id (frame),
4263 ecs->event_thread->control.step_stack_frame_id)
4264 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4265 {
4266 /* The inferior is about to take a signal that will take it
4267 out of the single step range. Set a breakpoint at the
4268 current PC (which is presumably where the signal handler
4269 will eventually return) and then allow the inferior to
4270 run free.
4271
4272 Note that this is only needed for a signal delivered
4273 while in the single-step range. Nested signals aren't a
4274 problem as they eventually all return. */
4275 if (debug_infrun)
4276 fprintf_unfiltered (gdb_stdlog,
4277 "infrun: signal may take us out of "
4278 "single-step range\n");
4279
4280 insert_hp_step_resume_breakpoint_at_frame (frame);
4281 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4282 ecs->event_thread->control.trap_expected = 0;
4283 keep_going (ecs);
4284 return;
4285 }
4286
4287 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4288 when either there's a nested signal, or when there's a
4289 pending signal enabled just as the signal handler returns
4290 (leaving the inferior at the step-resume-breakpoint without
4291 actually executing it). Either way continue until the
4292 breakpoint is really hit. */
4293
4294 if (!switch_back_to_stepped_thread (ecs))
4295 {
4296 if (debug_infrun)
4297 fprintf_unfiltered (gdb_stdlog,
4298 "infrun: random signal, keep going\n");
4299
4300 keep_going (ecs);
4301 }
4302 return;
4303 }
4304
4305 process_event_stop_test (ecs);
4306 }
4307
4308 /* Come here when we've got some debug event / signal we can explain
4309 (IOW, not a random signal), and test whether it should cause a
4310 stop, or whether we should resume the inferior (transparently).
4311 E.g., could be a breakpoint whose condition evaluates false; we
4312 could be still stepping within the line; etc. */
4313
4314 static void
4315 process_event_stop_test (struct execution_control_state *ecs)
4316 {
4317 struct symtab_and_line stop_pc_sal;
4318 struct frame_info *frame;
4319 struct gdbarch *gdbarch;
4320 CORE_ADDR jmp_buf_pc;
4321 struct bpstat_what what;
4322
4323 /* Handle cases caused by hitting a breakpoint. */
4324
4325 frame = get_current_frame ();
4326 gdbarch = get_frame_arch (frame);
4327
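/* bpstat_what folds the whole bpstat list into the single
highest-priority action to take for this stop. */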
4328 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4329
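/* Record that this stop is at an inferior-function-call dummy
breakpoint, so that later stop handling can pop the dummy frame. */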
4330 if (what.call_dummy)
4331 {
4332 stop_stack_dummy = what.call_dummy;
4333 }
4334
4335 /* If we hit an internal event that triggers symbol changes, the
4336 current frame will be invalidated within bpstat_what (e.g., if we
4337 hit an internal solib event). Re-fetch it. */
4338 frame = get_current_frame ();
4339 gdbarch = get_frame_arch (frame);
4340
4341 switch (what.main_action)
4342 {
4343 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4344 /* If we hit the breakpoint at longjmp while stepping, we
4345 install a momentary breakpoint at the target of the
4346 jmp_buf. */
4347
4348 if (debug_infrun)
4349 fprintf_unfiltered (gdb_stdlog,
4350 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4351
4352 ecs->event_thread->stepping_over_breakpoint = 1;
4353
4354 if (what.is_longjmp)
4355 {
4356 struct value *arg_value;
4357
4358 /* If we set the longjmp breakpoint via a SystemTap probe,
4359 then use it to extract the arguments. The destination PC
4360 is the third argument to the probe. */
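/* Probe arguments are numbered from zero, hence index 2 below. */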
4361 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4362 if (arg_value)
4363 jmp_buf_pc = value_as_address (arg_value);
4364 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4365 || !gdbarch_get_longjmp_target (gdbarch,
4366 frame, &jmp_buf_pc))
4367 {
4368 if (debug_infrun)
4369 fprintf_unfiltered (gdb_stdlog,
4370 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4371 "(!gdbarch_get_longjmp_target)\n");
4372 keep_going (ecs);
4373 return;
4374 }
4375
4376 /* Insert a breakpoint at resume address. */
4377 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4378 }
4379 else
4380 check_exception_resume (ecs, frame);
4381 keep_going (ecs);
4382 return;
4383
4384 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4385 {
4386 struct frame_info *init_frame;
4387
4388 /* There are several cases to consider.
4389
4390 1. The initiating frame no longer exists. In this case we
4391 must stop, because the exception or longjmp has gone too
4392 far.
4393
4394 2. The initiating frame exists, and is the same as the
4395 current frame. We stop, because the exception or longjmp
4396 has been caught.
4397
4398 3. The initiating frame exists and is different from the
4399 current frame. This means the exception or longjmp has
4400 been caught beneath the initiating frame, so keep going.
4401
4402 4. The longjmp breakpoint has been placed just to protect
4403 against stale dummy frames and the user is not interested in
4404 stopping around longjmps. */
4405
4406 if (debug_infrun)
4407 fprintf_unfiltered (gdb_stdlog,
4408 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4409
4410 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4411 != NULL);
4412 delete_exception_resume_breakpoint (ecs->event_thread);
4413
4414 if (what.is_longjmp)
4415 {
4416 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4417
4418 if (!frame_id_p (ecs->event_thread->initiating_frame))
4419 {
4420 /* Case 4. */
4421 keep_going (ecs);
4422 return;
4423 }
4424 }
4425
4426 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4427
4428 if (init_frame)
4429 {
4430 struct frame_id current_id
4431 = get_frame_id (get_current_frame ());
4432 if (frame_id_eq (current_id,
4433 ecs->event_thread->initiating_frame))
4434 {
4435 /* Case 2. Fall through. */
4436 }
4437 else
4438 {
4439 /* Case 3. */
4440 keep_going (ecs);
4441 return;
4442 }
4443 }
4444
4445 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4446 exists. */
4447 delete_step_resume_breakpoint (ecs->event_thread);
4448
4449 ecs->event_thread->control.stop_step = 1;
4450 print_end_stepping_range_reason ();
4451 stop_stepping (ecs);
4452 }
4453 return;
4454
4455 case BPSTAT_WHAT_SINGLE:
4456 if (debug_infrun)
4457 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4458 ecs->event_thread->stepping_over_breakpoint = 1;
4459 /* Still need to check other stuff, at least the case where we
4460 are stepping and step out of the right range. */
4461 break;
4462
4463 case BPSTAT_WHAT_STEP_RESUME:
4464 if (debug_infrun)
4465 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4466
4467 delete_step_resume_breakpoint (ecs->event_thread);
4468 if (ecs->event_thread->control.proceed_to_finish
4469 && execution_direction == EXEC_REVERSE)
4470 {
4471 struct thread_info *tp = ecs->event_thread;
4472
4473 /* We are finishing a function in reverse, and just hit the
4474 step-resume breakpoint at the start address of the
4475 function, and we're almost there -- just need to back up
4476 by one more single-step, which should take us back to the
4477 function call. */
4478 tp->control.step_range_start = tp->control.step_range_end = 1;
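/* A step range of [1, 1] requests a single-instruction step; it is
the same marker the stepi/nexti checks elsewhere in this function
look for. */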
4479 keep_going (ecs);
4480 return;
4481 }
4482 fill_in_stop_func (gdbarch, ecs);
4483 if (stop_pc == ecs->stop_func_start
4484 && execution_direction == EXEC_REVERSE)
4485 {
4486 /* We are stepping over a function call in reverse, and just
4487 hit the step-resume breakpoint at the start address of
4488 the function. Go back to single-stepping, which should
4489 take us back to the function call. */
4490 ecs->event_thread->stepping_over_breakpoint = 1;
4491 keep_going (ecs);
4492 return;
4493 }
4494 break;
4495
4496 case BPSTAT_WHAT_STOP_NOISY:
4497 if (debug_infrun)
4498 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4499 stop_print_frame = 1;
4500
4501 /* Assume the thread stopped for a breakpoint. We'll still check
4502 whether a/the breakpoint is there when the thread is next
4503 resumed. */
4504 ecs->event_thread->stepping_over_breakpoint = 1;
4505
4506 stop_stepping (ecs);
4507 return;
4508
4509 case BPSTAT_WHAT_STOP_SILENT:
4510 if (debug_infrun)
4511 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4512 stop_print_frame = 0;
4513
4514 /* Assume the thread stopped for a breakpoint. We'll still check
4515 whether a/the breakpoint is there when the thread is next
4516 resumed. */
4517 ecs->event_thread->stepping_over_breakpoint = 1;
4518 stop_stepping (ecs);
4519 return;
4520
4521 case BPSTAT_WHAT_HP_STEP_RESUME:
4522 if (debug_infrun)
4523 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4524
4525 delete_step_resume_breakpoint (ecs->event_thread);
4526 if (ecs->event_thread->step_after_step_resume_breakpoint)
4527 {
4528 /* Back when the step-resume breakpoint was inserted, we
4529 were trying to single-step off a breakpoint. Go back to
4530 doing that. */
4531 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4532 ecs->event_thread->stepping_over_breakpoint = 1;
4533 keep_going (ecs);
4534 return;
4535 }
4536 break;
4537
4538 case BPSTAT_WHAT_KEEP_CHECKING:
4539 break;
4540 }
4541
4542 /* We come here if we hit a breakpoint but should not stop for it.
4543 Possibly we also were stepping and should stop for that. So fall
4544 through and test for stepping. But, if not stepping, do not
4545 stop. */
4546
4547 /* In all-stop mode, if we're currently stepping but have stopped in
4548 some other thread, we need to switch back to the stepped thread. */
4549 if (switch_back_to_stepped_thread (ecs))
4550 return;
4551
4552 if (ecs->event_thread->control.step_resume_breakpoint)
4553 {
4554 if (debug_infrun)
4555 fprintf_unfiltered (gdb_stdlog,
4556 "infrun: step-resume breakpoint is inserted\n");
4557
4558 /* Having a step-resume breakpoint overrides anything
4559 else having to do with stepping commands until
4560 that breakpoint is reached. */
4561 keep_going (ecs);
4562 return;
4563 }
4564
4565 if (ecs->event_thread->control.step_range_end == 0)
4566 {
4567 if (debug_infrun)
4568 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4569 /* Likewise if we aren't even stepping. */
4570 keep_going (ecs);
4571 return;
4572 }
4573
4574 /* Re-fetch current thread's frame in case the code above caused
4575 the frame cache to be re-initialized, making our FRAME variable
4576 a dangling pointer. */
4577 frame = get_current_frame ();
4578 gdbarch = get_frame_arch (frame);
4579 fill_in_stop_func (gdbarch, ecs);
4580
4581 /* If stepping through a line, keep going if still within it.
4582
4583 Note that step_range_end is the address of the first instruction
4584 beyond the step range, and NOT the address of the last instruction
4585 within it!
4586
4587 Note also that during reverse execution, we may be stepping
4588 through a function epilogue and therefore must detect when
4589 the current-frame changes in the middle of a line. */
4590
4591 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4592 && (execution_direction != EXEC_REVERSE
4593 || frame_id_eq (get_frame_id (frame),
4594 ecs->event_thread->control.step_frame_id)))
4595 {
4596 if (debug_infrun)
4597 fprintf_unfiltered
4598 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4599 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4600 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4601
4602 /* Tentatively re-enable range stepping; `resume' disables it if
4603 necessary (e.g., if we're stepping over a breakpoint or we
4604 have software watchpoints). */
4605 ecs->event_thread->control.may_range_step = 1;
4606
4607 /* When stepping backward, stop at beginning of line range
4608 (unless it's the function entry point, in which case
4609 keep going back to the call point). */
4610 if (stop_pc == ecs->event_thread->control.step_range_start
4611 && stop_pc != ecs->stop_func_start
4612 && execution_direction == EXEC_REVERSE)
4613 {
4614 ecs->event_thread->control.stop_step = 1;
4615 print_end_stepping_range_reason ();
4616 stop_stepping (ecs);
4617 }
4618 else
4619 keep_going (ecs);
4620
4621 return;
4622 }
4623
4624 /* We stepped out of the stepping range. */
4625
4626 /* If we are stepping at the source level and entered the runtime
4627 loader dynamic symbol resolution code...
4628
4629 EXEC_FORWARD: we keep on single stepping until we exit the run
4630 time loader code and reach the callee's address.
4631
4632 EXEC_REVERSE: we've already executed the callee (backward), and
4633 the runtime loader code is handled just like any other
4634 undebuggable function call. Now we need only keep stepping
4635 backward through the trampoline code, and that's handled further
4636 down, so there is nothing for us to do here. */
4637
4638 if (execution_direction != EXEC_REVERSE
4639 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4640 && in_solib_dynsym_resolve_code (stop_pc))
4641 {
4642 CORE_ADDR pc_after_resolver =
4643 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4644
4645 if (debug_infrun)
4646 fprintf_unfiltered (gdb_stdlog,
4647 "infrun: stepped into dynsym resolve code\n");
4648
4649 if (pc_after_resolver)
4650 {
4651 /* Set up a step-resume breakpoint at the address
4652 indicated by SKIP_SOLIB_RESOLVER. */
4653 struct symtab_and_line sr_sal;
4654
4655 init_sal (&sr_sal);
4656 sr_sal.pc = pc_after_resolver;
4657 sr_sal.pspace = get_frame_program_space (frame);
4658
4659 insert_step_resume_breakpoint_at_sal (gdbarch,
4660 sr_sal, null_frame_id);
4661 }
4662
4663 keep_going (ecs);
4664 return;
4665 }
4666
4667 if (ecs->event_thread->control.step_range_end != 1
4668 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4669 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4670 && get_frame_type (frame) == SIGTRAMP_FRAME)
4671 {
4672 if (debug_infrun)
4673 fprintf_unfiltered (gdb_stdlog,
4674 "infrun: stepped into signal trampoline\n");
4675 /* The inferior, while doing a "step" or "next", has ended up in
4676 a signal trampoline (either by a signal being delivered or by
4677 the signal handler returning). Just single-step until the
4678 inferior leaves the trampoline (either by calling the handler
4679 or returning). */
4680 keep_going (ecs);
4681 return;
4682 }
4683
4684 /* If we're in the return path from a shared library trampoline,
4685 we want to proceed through the trampoline when stepping. */
4686 /* macro/2012-04-25: This needs to come before the subroutine
4687 call check below as on some targets return trampolines look
4688 like subroutine calls (MIPS16 return thunks). */
4689 if (gdbarch_in_solib_return_trampoline (gdbarch,
4690 stop_pc, ecs->stop_func_name)
4691 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4692 {
4693 /* Determine where this trampoline returns. */
4694 CORE_ADDR real_stop_pc;
4695
4696 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4697
4698 if (debug_infrun)
4699 fprintf_unfiltered (gdb_stdlog,
4700 "infrun: stepped into solib return tramp\n");
4701
4702 /* Only proceed through if we know where it's going. */
4703 if (real_stop_pc)
4704 {
4705 /* And put the step-breakpoint there and go until there. */
4706 struct symtab_and_line sr_sal;
4707
4708 init_sal (&sr_sal); /* initialize to zeroes */
4709 sr_sal.pc = real_stop_pc;
4710 sr_sal.section = find_pc_overlay (sr_sal.pc);
4711 sr_sal.pspace = get_frame_program_space (frame);
4712
4713 /* Do not specify what the fp should be when we stop since
4714 on some machines the prologue is where the new fp value
4715 is established. */
4716 insert_step_resume_breakpoint_at_sal (gdbarch,
4717 sr_sal, null_frame_id);
4718
4719 /* Restart without fiddling with the step ranges or
4720 other state. */
4721 keep_going (ecs);
4722 return;
4723 }
4724 }
4725
4726 /* Check for subroutine calls. The check for the current frame
4727 equalling the step ID is not necessary - the check of the
4728 previous frame's ID is sufficient - but it is a common case and
4729 cheaper than checking the previous frame's ID.
4730
4731 NOTE: frame_id_eq will never report two invalid frame IDs as
4732 being equal, so to get into this block, both the current and
4733 previous frame must have valid frame IDs. */
4734 /* The outer_frame_id check is a heuristic to detect stepping
4735 through startup code. If we step over an instruction which
4736 sets the stack pointer from an invalid value to a valid value,
4737 we may detect that as a subroutine call from the mythical
4738 "outermost" function. This could be fixed by marking
4739 outermost frames as !stack_p,code_p,special_p. Then the
4740 initial outermost frame, before sp was valid, would
4741 have code_addr == &_start. See the comment in frame_id_eq
4742 for more. */
4743 if (!frame_id_eq (get_stack_frame_id (frame),
4744 ecs->event_thread->control.step_stack_frame_id)
4745 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4746 ecs->event_thread->control.step_stack_frame_id)
4747 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4748 outer_frame_id)
4749 || step_start_function != find_pc_function (stop_pc))))
4750 {
4751 CORE_ADDR real_stop_pc;
4752
4753 if (debug_infrun)
4754 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4755
4756 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4757 || ((ecs->event_thread->control.step_range_end == 1)
4758 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4759 ecs->stop_func_start)))
4760 {
4761 /* I presume that step_over_calls is only 0 when we're
4762 supposed to be stepping at the assembly language level
4763 ("stepi"). Just stop. */
4764 /* Also, maybe we just did a "nexti" inside a prologue, so we
4765 thought it was a subroutine call but it was not. Stop as
4766 well. FENN */
4767 /* And this works the same backward as frontward. MVS */
4768 ecs->event_thread->control.stop_step = 1;
4769 print_end_stepping_range_reason ();
4770 stop_stepping (ecs);
4771 return;
4772 }
4773
4774 /* Reverse stepping through solib trampolines. */
4775
4776 if (execution_direction == EXEC_REVERSE
4777 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4778 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4779 || (ecs->stop_func_start == 0
4780 && in_solib_dynsym_resolve_code (stop_pc))))
4781 {
4782 /* Any solib trampoline code can be handled in reverse
4783 by simply continuing to single-step. We have already
4784 executed the solib function (backwards), and a few
4785 steps will take us back through the trampoline to the
4786 caller. */
4787 keep_going (ecs);
4788 return;
4789 }
4790
4791 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4792 {
4793 /* We're doing a "next".
4794
4795 Normal (forward) execution: set a breakpoint at the
4796 callee's return address (the address at which the caller
4797 will resume).
4798
4799 Reverse (backward) execution: set the step-resume
4800 breakpoint at the start of the function that we just
4801 stepped into (backwards), and continue to there. When we
4802 get there, we'll need to single-step back to the caller. */
4803
4804 if (execution_direction == EXEC_REVERSE)
4805 {
4806 /* If we're already at the start of the function, we've either
4807 just stepped backward into a single instruction function,
4808 or stepped back out of a signal handler to the first instruction
4809 of the function. Just keep going, which will single-step back
4810 to the caller. */
4811 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4812 {
4813 struct symtab_and_line sr_sal;
4814
4815 /* Normal function call return (static or dynamic). */
4816 init_sal (&sr_sal);
4817 sr_sal.pc = ecs->stop_func_start;
4818 sr_sal.pspace = get_frame_program_space (frame);
4819 insert_step_resume_breakpoint_at_sal (gdbarch,
4820 sr_sal, null_frame_id);
4821 }
4822 }
4823 else
4824 insert_step_resume_breakpoint_at_caller (frame);
4825
4826 keep_going (ecs);
4827 return;
4828 }
4829
4830 /* If we are in a function call trampoline (a stub between the
4831 calling routine and the real function), locate the real
4832 function. That's what tells us (a) whether we want to step
4833 into it at all, and (b) what prologue we want to run to the
4834 end of, if we do step into it. */
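/* Try the language-specific trampoline skipper first, then fall
back to the architecture's generic one. */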
4835 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4836 if (real_stop_pc == 0)
4837 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4838 if (real_stop_pc != 0)
4839 ecs->stop_func_start = real_stop_pc;
4840
4841 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4842 {
4843 struct symtab_and_line sr_sal;
4844
4845 init_sal (&sr_sal);
4846 sr_sal.pc = ecs->stop_func_start;
4847 sr_sal.pspace = get_frame_program_space (frame);
4848
4849 insert_step_resume_breakpoint_at_sal (gdbarch,
4850 sr_sal, null_frame_id);
4851 keep_going (ecs);
4852 return;
4853 }
4854
4855 /* If we have line number information for the function we are
4856 thinking of stepping into and the function isn't on the skip
4857 list, step into it.
4858
4859 If there are several symtabs at that PC (e.g. with include
4860 files), just want to know whether *any* of them have line
4861 numbers. find_pc_line handles this. */
4862 {
4863 struct symtab_and_line tmp_sal;
4864
4865 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4866 if (tmp_sal.line != 0
4867 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4868 &tmp_sal))
4869 {
4870 if (execution_direction == EXEC_REVERSE)
4871 handle_step_into_function_backward (gdbarch, ecs);
4872 else
4873 handle_step_into_function (gdbarch, ecs);
4874 return;
4875 }
4876 }
4877
4878 /* If we have no line number and the step-stop-if-no-debug is
4879 set, we stop the step so that the user has a chance to switch
4880 to assembly mode. */
4881 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4882 && step_stop_if_no_debug)
4883 {
4884 ecs->event_thread->control.stop_step = 1;
4885 print_end_stepping_range_reason ();
4886 stop_stepping (ecs);
4887 return;
4888 }
4889
4890 if (execution_direction == EXEC_REVERSE)
4891 {
4892 /* If we're already at the start of the function, we've either just
4893 stepped backward into a single instruction function without line
4894 number info, or stepped back out of a signal handler to the first
4895 instruction of the function without line number info. Just keep
4896 going, which will single-step back to the caller. */
4897 if (ecs->stop_func_start != stop_pc)
4898 {
4899 /* Set a breakpoint at callee's start address.
4900 From there we can step once and be back in the caller. */
4901 struct symtab_and_line sr_sal;
4902
4903 init_sal (&sr_sal);
4904 sr_sal.pc = ecs->stop_func_start;
4905 sr_sal.pspace = get_frame_program_space (frame);
4906 insert_step_resume_breakpoint_at_sal (gdbarch,
4907 sr_sal, null_frame_id);
4908 }
4909 }
4910 else
4911 /* Set a breakpoint at callee's return address (the address
4912 at which the caller will resume). */
4913 insert_step_resume_breakpoint_at_caller (frame);
4914
4915 keep_going (ecs);
4916 return;
4917 }
4918
4919 /* Reverse stepping through solib trampolines. */
4920
4921 if (execution_direction == EXEC_REVERSE
4922 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4923 {
4924 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4925 || (ecs->stop_func_start == 0
4926 && in_solib_dynsym_resolve_code (stop_pc)))
4927 {
4928 /* Any solib trampoline code can be handled in reverse
4929 by simply continuing to single-step. We have already
4930 executed the solib function (backwards), and a few
4931 steps will take us back through the trampoline to the
4932 caller. */
4933 keep_going (ecs);
4934 return;
4935 }
4936 else if (in_solib_dynsym_resolve_code (stop_pc))
4937 {
4938 /* Stepped backward into the solib dynsym resolver.
4939 Set a breakpoint at its start and continue, then
4940 one more step will take us out. */
4941 struct symtab_and_line sr_sal;
4942
4943 init_sal (&sr_sal);
4944 sr_sal.pc = ecs->stop_func_start;
4945 sr_sal.pspace = get_frame_program_space (frame);
4946 insert_step_resume_breakpoint_at_sal (gdbarch,
4947 sr_sal, null_frame_id);
4948 keep_going (ecs);
4949 return;
4950 }
4951 }
4952
4953 stop_pc_sal = find_pc_line (stop_pc, 0);
4954
4955 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4956 the trampoline processing logic; however, there are some trampolines
4957 that have no names, so we should do trampoline handling first. */
4958 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4959 && ecs->stop_func_name == NULL
4960 && stop_pc_sal.line == 0)
4961 {
4962 if (debug_infrun)
4963 fprintf_unfiltered (gdb_stdlog,
4964 "infrun: stepped into undebuggable function\n");
4965
4966 /* The inferior just stepped into, or returned to, an
4967 undebuggable function (where there is no debugging information
4968 and no line number corresponding to the address where the
4969 inferior stopped). Since we want to skip this kind of code,
4970 we keep going until the inferior returns from this
4971 function - unless the user has asked us not to (via
4972 set step-mode) or we no longer know how to get back
4973 to the call site. */
4974 if (step_stop_if_no_debug
4975 || !frame_id_p (frame_unwind_caller_id (frame)))
4976 {
4977 /* If we have no line number and the step-stop-if-no-debug
4978 is set, we stop the step so that the user has a chance to
4979 switch to assembly mode. */
4980 ecs->event_thread->control.stop_step = 1;
4981 print_end_stepping_range_reason ();
4982 stop_stepping (ecs);
4983 return;
4984 }
4985 else
4986 {
4987 /* Set a breakpoint at callee's return address (the address
4988 at which the caller will resume). */
4989 insert_step_resume_breakpoint_at_caller (frame);
4990 keep_going (ecs);
4991 return;
4992 }
4993 }
4994
4995 if (ecs->event_thread->control.step_range_end == 1)
4996 {
4997 /* It is stepi or nexti. We always want to stop stepping after
4998 one instruction. */
4999 if (debug_infrun)
5000 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5001 ecs->event_thread->control.stop_step = 1;
5002 print_end_stepping_range_reason ();
5003 stop_stepping (ecs);
5004 return;
5005 }
5006
5007 if (stop_pc_sal.line == 0)
5008 {
5009 /* We have no line number information. That means we should stop
5010 stepping (does this always happen right after one instruction,
5011 when we do "s" in a function with no line numbers,
5012 or can this happen as a result of a return or longjmp?). */
5013 if (debug_infrun)
5014 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5015 ecs->event_thread->control.stop_step = 1;
5016 print_end_stepping_range_reason ();
5017 stop_stepping (ecs);
5018 return;
5019 }
5020
5021 /* Look for "calls" to inlined functions, part one. If the inline
5022 frame machinery detected some skipped call sites, we have entered
5023 a new inline function. */
5024
5025 if (frame_id_eq (get_frame_id (get_current_frame ()),
5026 ecs->event_thread->control.step_frame_id)
5027 && inline_skipped_frames (ecs->ptid))
5028 {
5029 struct symtab_and_line call_sal;
5030
5031 if (debug_infrun)
5032 fprintf_unfiltered (gdb_stdlog,
5033 "infrun: stepped into inlined function\n");
5034
5035 find_frame_sal (get_current_frame (), &call_sal);
5036
5037 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5038 {
5039 /* For "step", we're going to stop. But if the call site
5040 for this inlined function is on the same source line as
5041 we were previously stepping, go down into the function
5042 first. Otherwise stop at the call site. */
5043
5044 if (call_sal.line == ecs->event_thread->current_line
5045 && call_sal.symtab == ecs->event_thread->current_symtab)
5046 step_into_inline_frame (ecs->ptid);
5047
5048 ecs->event_thread->control.stop_step = 1;
5049 print_end_stepping_range_reason ();
5050 stop_stepping (ecs);
5051 return;
5052 }
5053 else
5054 {
5055 /* For "next", we should stop at the call site if it is on a
5056 different source line. Otherwise continue through the
5057 inlined function. */
5058 if (call_sal.line == ecs->event_thread->current_line
5059 && call_sal.symtab == ecs->event_thread->current_symtab)
5060 keep_going (ecs);
5061 else
5062 {
5063 ecs->event_thread->control.stop_step = 1;
5064 print_end_stepping_range_reason ();
5065 stop_stepping (ecs);
5066 }
5067 return;
5068 }
5069 }
5070
5071 /* Look for "calls" to inlined functions, part two. If we are still
5072 in the same real function we were stepping through, but we have
5073 to go further up to find the exact frame ID, we are stepping
5074 through a more inlined call beyond its call site. */
5075
5076 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5077 && !frame_id_eq (get_frame_id (get_current_frame ()),
5078 ecs->event_thread->control.step_frame_id)
5079 && stepped_in_from (get_current_frame (),
5080 ecs->event_thread->control.step_frame_id))
5081 {
5082 if (debug_infrun)
5083 fprintf_unfiltered (gdb_stdlog,
5084 "infrun: stepping through inlined function\n");
5085
5086 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5087 keep_going (ecs);
5088 else
5089 {
5090 ecs->event_thread->control.stop_step = 1;
5091 print_end_stepping_range_reason ();
5092 stop_stepping (ecs);
5093 }
5094 return;
5095 }
5096
5097 if ((stop_pc == stop_pc_sal.pc)
5098 && (ecs->event_thread->current_line != stop_pc_sal.line
5099 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5100 {
5101 /* We are at the start of a different line. So stop. Note that
5102 we don't stop if we step into the middle of a different line.
5103 That is said to make things like for (;;) statements work
5104 better. */
5105 if (debug_infrun)
5106 fprintf_unfiltered (gdb_stdlog,
5107 "infrun: stepped to a different line\n");
5108 ecs->event_thread->control.stop_step = 1;
5109 print_end_stepping_range_reason ();
5110 stop_stepping (ecs);
5111 return;
5112 }
5113
5114 /* We aren't done stepping.
5115
5116 Optimize by setting the stepping range to the line.
5117 (We might not be in the original line, but if we entered a
5118 new line in mid-statement, we continue stepping. This makes
5119 things like for(;;) statements work better.) */
5120
5121 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5122 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5123 ecs->event_thread->control.may_range_step = 1;
5124 set_step_info (frame, stop_pc_sal);
5125
5126 if (debug_infrun)
5127 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5128 keep_going (ecs);
5129 }
5130
5131 /* In all-stop mode, if we're currently stepping but have stopped in
5132 some other thread, we may need to switch back to the stepped
5133 thread. Returns true if we set the inferior running, false if we left
5134 it stopped (and the event needs further processing). */
5135
5136 static int
5137 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5138 {
5139 if (!non_stop)
5140 {
5141 struct thread_info *tp;
5142 struct thread_info *stepping_thread;
5143 struct thread_info *step_over;
5144
5145 /* If any thread is blocked on some internal breakpoint, and we
5146 simply need to step over that breakpoint to get it going
5147 again, do that first. */
5148
5149 /* However, if we see an event for the stepping thread, then we
5150 know all other threads have been moved past their breakpoints
5151 already. Let the caller check whether the step is finished,
5152 etc., before deciding to move it past a breakpoint. */
5153 if (ecs->event_thread->control.step_range_end != 0)
5154 return 0;
5155
5156 /* Check if the current thread is blocked on an incomplete
5157 step-over, interrupted by a random signal. */
5158 if (ecs->event_thread->control.trap_expected
5159 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5160 {
5161 if (debug_infrun)
5162 {
5163 fprintf_unfiltered (gdb_stdlog,
5164 "infrun: need to finish step-over of [%s]\n",
5165 target_pid_to_str (ecs->event_thread->ptid));
5166 }
5167 keep_going (ecs);
5168 return 1;
5169 }
5170
5171 /* Check if the current thread is blocked by a single-step
5172 breakpoint of another thread. */
5173 if (ecs->hit_singlestep_breakpoint)
5174 {
5175 if (debug_infrun)
5176 {
5177 fprintf_unfiltered (gdb_stdlog,
5178 "infrun: need to step [%s] over single-step "
5179 "breakpoint\n",
5180 target_pid_to_str (ecs->ptid));
5181 }
5182 keep_going (ecs);
5183 return 1;
5184 }
5185
5186 /* Otherwise, we no longer expect a trap in the current thread.
5187 Clear the trap_expected flag before switching back -- this is
5188 what keep_going does as well, if we call it. */
5189 ecs->event_thread->control.trap_expected = 0;
5190
5191 /* If scheduler locking applies even if not stepping, there's no
5192 need to walk over threads. Above we've checked whether the
5193 current thread is stepping. If some other thread not the
5194 event thread is stepping, then it must be that scheduler
5195 locking is not in effect. */
5196 if (schedlock_applies (0))
5197 return 0;
5198
5199 /* Look for the stepping/nexting thread, and check whether any
5200 thread other than the stepping thread needs to start a
5201 step-over. Do all step-overs before actually proceeding with
5202 step/next/etc. */
5203 stepping_thread = NULL;
5204 step_over = NULL;
5205 ALL_THREADS (tp)
5206 {
5207 /* Ignore threads of processes we're not resuming. */
5208 if (!sched_multi
5209 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5210 continue;
5211
5212 /* When stepping over a breakpoint, we lock all threads
5213 except the one that needs to move past the breakpoint.
5214 If a non-event thread has this set, the "incomplete
5215 step-over" check above should have caught it earlier. */
5216 gdb_assert (!tp->control.trap_expected);
5217
5218 /* Did we find the stepping thread? */
5219 if (tp->control.step_range_end)
5220 {
5221 /* Yep. There should only be one, though. */
5222 gdb_assert (stepping_thread == NULL);
5223
5224 /* The event thread is handled at the top, before we
5225 enter this loop. */
5226 gdb_assert (tp != ecs->event_thread);
5227
5228 /* If some thread other than the event thread is
5229 stepping, then scheduler locking can't be in effect,
5230 otherwise we wouldn't have resumed the current event
5231 thread in the first place. */
5232 gdb_assert (!schedlock_applies (1));
5233
5234 stepping_thread = tp;
5235 }
5236 else if (thread_still_needs_step_over (tp))
5237 {
5238 step_over = tp;
5239
5240 /* At the top we've returned early if the event thread
5241 is stepping. If some thread other than the event
5242 thread is stepping, then scheduler locking can't be
5243 in effect, and we can resume this thread. No need to
5244 keep looking for the stepping thread then. */
5245 break;
5246 }
5247 }
5248
5249 if (step_over != NULL)
5250 {
5251 tp = step_over;
5252 if (debug_infrun)
5253 {
5254 fprintf_unfiltered (gdb_stdlog,
5255 "infrun: need to step-over [%s]\n",
5256 target_pid_to_str (tp->ptid));
5257 }
5258
5259 /* Only the stepping thread should have this set. */
5260 gdb_assert (tp->control.step_range_end == 0);
5261
5262 ecs->ptid = tp->ptid;
5263 ecs->event_thread = tp;
5264 switch_to_thread (ecs->ptid);
5265 keep_going (ecs);
5266 return 1;
5267 }
5268
5269 if (stepping_thread != NULL)
5270 {
5271 struct frame_info *frame;
5272 struct gdbarch *gdbarch;
5273
5274 tp = stepping_thread;
5275
5276 /* If the stepping thread exited, then don't try to switch
5277 back and resume it, which could fail in several different
5278 ways depending on the target. Instead, just keep going.
5279
5280 We can find a stepping dead thread in the thread list in
5281 two cases:
5282
5283 - The target supports thread exit events, and when the
5284 target tries to delete the thread from the thread list,
5285 inferior_ptid pointed at the exiting thread. In that
5286 case, calling delete_thread does not really remove the
5287 thread from the list; instead, the thread is left listed,
5288 with 'exited' state.
5289
5290 - The target's debug interface does not support thread
5291 exit events, and so we have no idea whatsoever if the
5292 previously stepping thread is still alive. For that
5293 reason, we need to synchronously query the target
5294 now. */
5295 if (is_exited (tp->ptid)
5296 || !target_thread_alive (tp->ptid))
5297 {
5298 if (debug_infrun)
5299 fprintf_unfiltered (gdb_stdlog,
5300 "infrun: not switching back to "
5301 "stepped thread, it has vanished\n");
5302
5303 delete_thread (tp->ptid);
5304 keep_going (ecs);
5305 return 1;
5306 }
5307
5308 if (debug_infrun)
5309 fprintf_unfiltered (gdb_stdlog,
5310 "infrun: switching back to stepped thread\n");
5311
5312 ecs->event_thread = tp;
5313 ecs->ptid = tp->ptid;
5314 context_switch (ecs->ptid);
5315
5316 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5317 frame = get_current_frame ();
5318 gdbarch = get_frame_arch (frame);
5319
5320 /* If the PC of the thread we were trying to single-step has
5321 changed, then that thread has trapped or been signaled,
5322 but the event has not been reported to GDB yet. Re-poll
5323 the target looking for this particular thread's event
5324 (i.e. temporarily enable schedlock) by:
5325
5326 - setting a break at the current PC
5327 - resuming that particular thread, only (by setting
5328 trap expected)
5329
5330 This prevents us from continuously moving the single-step
5331 breakpoint forward, one instruction at a time, and
5332 overstepping. */
5333
5334 if (gdbarch_software_single_step_p (gdbarch)
5335 && stop_pc != tp->prev_pc)
5336 {
5337 if (debug_infrun)
5338 fprintf_unfiltered (gdb_stdlog,
5339 "infrun: expected thread advanced also\n");
5340
5341 insert_single_step_breakpoint (get_frame_arch (frame),
5342 get_frame_address_space (frame),
5343 stop_pc);
5344 singlestep_breakpoints_inserted_p = 1;
5345 ecs->event_thread->control.trap_expected = 1;
5346 singlestep_ptid = inferior_ptid;
5347 singlestep_pc = stop_pc;
5348
5349 resume (0, GDB_SIGNAL_0);
5350 prepare_to_wait (ecs);
5351 }
5352 else
5353 {
5354 if (debug_infrun)
5355 fprintf_unfiltered (gdb_stdlog,
5356 "infrun: expected thread still "
5357 "hasn't advanced\n");
5358 keep_going (ecs);
5359 }
5360
5361 return 1;
5362 }
5363 }
5364 return 0;
5365 }
5366
5367 /* Is thread TP in the middle of single-stepping? */
5368
5369 static int
5370 currently_stepping (struct thread_info *tp)
5371 {
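/* A thread counts as stepping if it is inside an active step range
   with no step-resume breakpoint outstanding, if it is expected to
   trap while stepping over a breakpoint, or if a watchpoint requires
   single-stepping (see bpstat_should_step).  */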
5372 return ((tp->control.step_range_end
5373 && tp->control.step_resume_breakpoint == NULL)
5374 || tp->control.trap_expected
5375 || bpstat_should_step ());
5376 }
5377
5378 /* Inferior has stepped into a subroutine call with source code that
5379 we should not step over. Step to the first line of code in
5380 it. */
5381
5382 static void
5383 handle_step_into_function (struct gdbarch *gdbarch,
5384 struct execution_control_state *ecs)
5385 {
5386 struct symtab *s;
5387 struct symtab_and_line stop_func_sal, sr_sal;
5388
5389 fill_in_stop_func (gdbarch, ecs);
5390
5391 s = find_pc_symtab (stop_pc);
5392 if (s && s->language != language_asm)
5393 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5394 ecs->stop_func_start);
5395
5396 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5397 /* Use the step_resume_break to step until the end of the prologue,
5398 even if that involves jumps (as it seems to on the vax under
5399 4.2). */
5400 /* If the prologue ends in the middle of a source line, continue to
5401 the end of that source line (if it is still within the function).
5402 Otherwise, just go to end of prologue. */
5403 if (stop_func_sal.end
5404 && stop_func_sal.pc != ecs->stop_func_start
5405 && stop_func_sal.end < ecs->stop_func_end)
5406 ecs->stop_func_start = stop_func_sal.end;
5407
5408 /* Architectures which require breakpoint adjustment might not be able
5409 to place a breakpoint at the computed address. If so, the test
5410 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5411 ecs->stop_func_start to an address at which a breakpoint may be
5412 legitimately placed.
5413
5414 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5415 made, GDB will enter an infinite loop when stepping through
5416 optimized code consisting of VLIW instructions which contain
5417 subinstructions corresponding to different source lines. On
5418 FR-V, it's not permitted to place a breakpoint on any but the
5419 first subinstruction of a VLIW instruction. When a breakpoint is
5420 set, GDB will adjust the breakpoint address to the beginning of
5421 the VLIW instruction. Thus, we need to make the corresponding
5422 adjustment here when computing the stop address. */
5423
5424 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5425 {
5426 ecs->stop_func_start
5427 = gdbarch_adjust_breakpoint_address (gdbarch,
5428 ecs->stop_func_start);
5429 }
5430
5431 if (ecs->stop_func_start == stop_pc)
5432 {
5433 /* We are already there: stop now. */
5434 ecs->event_thread->control.stop_step = 1;
5435 print_end_stepping_range_reason ();
5436 stop_stepping (ecs);
5437 return;
5438 }
5439 else
5440 {
5441 /* Put the step-breakpoint there and go until there. */
5442 init_sal (&sr_sal); /* initialize to zeroes */
5443 sr_sal.pc = ecs->stop_func_start;
5444 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5445 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5446
5447 /* Do not specify what the fp should be when we stop since on
5448 some machines the prologue is where the new fp value is
5449 established. */
5450 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5451
5452 /* And make sure stepping stops right away then. */
5453 ecs->event_thread->control.step_range_end
5454 = ecs->event_thread->control.step_range_start;
5455 }
5456 keep_going (ecs);
5457 }
5458
5459 /* Inferior has stepped backward into a subroutine call with source
5460 code that we should not step over. Step to the beginning of the
5461 last line of code in it. */
5462
5463 static void
5464 handle_step_into_function_backward (struct gdbarch *gdbarch,
5465 struct execution_control_state *ecs)
5466 {
5467 struct symtab *s;
5468 struct symtab_and_line stop_func_sal;
5469
5470 fill_in_stop_func (gdbarch, ecs);
5471
5472 s = find_pc_symtab (stop_pc);
5473 if (s && s->language != language_asm)
5474 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5475 ecs->stop_func_start);
5476
5477 stop_func_sal = find_pc_line (stop_pc, 0);
5478
5479 /* OK, we're just going to keep stepping here. */
5480 if (stop_func_sal.pc == stop_pc)
5481 {
5482 /* We're there already. Just stop stepping now. */
5483 ecs->event_thread->control.stop_step = 1;
5484 print_end_stepping_range_reason ();
5485 stop_stepping (ecs);
5486 }
5487 else
5488 {
5489 /* Else just reset the step range and keep going.
5490 No step-resume breakpoint; they don't work for
5491 epilogues, which can have multiple entry paths. */
5492 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5493 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5494 keep_going (ecs);
5495 }
5496 return;
5497 }
5498
5499 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5500 This is used both to skip over functions and to skip over other code (such as trampolines). */
5501
5502 static void
5503 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5504 struct symtab_and_line sr_sal,
5505 struct frame_id sr_id,
5506 enum bptype sr_type)
5507 {
5508 /* There should never be more than one step-resume or longjmp-resume
5509 breakpoint per thread, so we should never be setting a new
5510 step_resume_breakpoint when one is already active. */
5511 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5512 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5513
5514 if (debug_infrun)
5515 fprintf_unfiltered (gdb_stdlog,
5516 "infrun: inserting step-resume breakpoint at %s\n",
5517 paddress (gdbarch, sr_sal.pc));
5518
5519 inferior_thread ()->control.step_resume_breakpoint
5520 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5521 }
5522
5523 void
5524 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5525 struct symtab_and_line sr_sal,
5526 struct frame_id sr_id)
5527 {
5528 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5529 sr_sal, sr_id,
5530 bp_step_resume);
5531 }
5532
5533 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5534 This is used to skip a potential signal handler.
5535
5536 This is called with the interrupted function's frame. The signal
5537 handler, when it returns, will resume the interrupted function at
5538 RETURN_FRAME.pc. */
5539
5540 static void
5541 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5542 {
5543 struct symtab_and_line sr_sal;
5544 struct gdbarch *gdbarch;
5545
5546 gdb_assert (return_frame != NULL);
5547 init_sal (&sr_sal); /* initialize to zeros */
5548
5549 gdbarch = get_frame_arch (return_frame);
5550 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5551 sr_sal.section = find_pc_overlay (sr_sal.pc);
5552 sr_sal.pspace = get_frame_program_space (return_frame);
5553
5554 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5555 get_stack_frame_id (return_frame),
5556 bp_hp_step_resume);
5557 }
5558
5559 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5560 is used to skip a function after stepping into it (for "next" or if
5561 the called function has no debugging information).
5562
5563 The current function has almost always been reached by single
5564 stepping a call or return instruction. NEXT_FRAME belongs to the
5565 current function, and the breakpoint will be set at the caller's
5566 resume address.
5567
5568 This is a separate function rather than reusing
5569 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5570 get_prev_frame, which may stop prematurely (see the implementation
5571 of frame_unwind_caller_id for an example). */
5572
5573 static void
5574 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5575 {
5576 struct symtab_and_line sr_sal;
5577 struct gdbarch *gdbarch;
5578
5579 /* We shouldn't have gotten here if we don't know where the call site
5580 is. */
5581 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5582
5583 init_sal (&sr_sal); /* initialize to zeros */
5584
5585 gdbarch = frame_unwind_caller_arch (next_frame);
5586 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5587 frame_unwind_caller_pc (next_frame));
5588 sr_sal.section = find_pc_overlay (sr_sal.pc);
5589 sr_sal.pspace = frame_unwind_program_space (next_frame);
5590
5591 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5592 frame_unwind_caller_id (next_frame));
5593 }
5594
5595 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5596 new breakpoint at the target of a jmp_buf. The handling of
5597 longjmp-resume uses the same mechanisms used for handling
5598 "step-resume" breakpoints. */
5599
5600 static void
5601 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5602 {
5603 /* There should never be more than one longjmp-resume breakpoint per
5604 thread, so we should never be setting a new
5605 longjmp_resume_breakpoint when one is already active. */
5606 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5607
5608 if (debug_infrun)
5609 fprintf_unfiltered (gdb_stdlog,
5610 "infrun: inserting longjmp-resume breakpoint at %s\n",
5611 paddress (gdbarch, pc));
5612
5613 inferior_thread ()->control.exception_resume_breakpoint =
5614 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5615 }
5616
5617 /* Insert an exception resume breakpoint. TP is the thread throwing
5618 the exception. The block B is the block of the unwinder debug hook
5619 function. FRAME is the frame corresponding to the call to this
5620 function. SYM is the symbol of the function argument holding the
5621 target PC of the exception. */
5622
5623 static void
5624 insert_exception_resume_breakpoint (struct thread_info *tp,
5625 struct block *b,
5626 struct frame_info *frame,
5627 struct symbol *sym)
5628 {
5629 volatile struct gdb_exception e;
5630
5631 /* We want to ignore errors here. */
5632 TRY_CATCH (e, RETURN_MASK_ERROR)
5633 {
5634 struct symbol *vsym;
5635 struct value *value;
5636 CORE_ADDR handler;
5637 struct breakpoint *bp;
5638
5639 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5640 value = read_var_value (vsym, frame);
5641 /* If the value was optimized out, revert to the old behavior. */
5642 if (! value_optimized_out (value))
5643 {
5644 handler = value_as_address (value);
5645
5646 if (debug_infrun)
5647 fprintf_unfiltered (gdb_stdlog,
5648 "infrun: exception resume at %lx\n",
5649 (unsigned long) handler);
5650
5651 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5652 handler, bp_exception_resume);
5653
5654 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5655 frame = NULL;
5656
5657 bp->thread = tp->num;
5658 inferior_thread ()->control.exception_resume_breakpoint = bp;
5659 }
5660 }
5661 }
5662
5663 /* A helper for check_exception_resume that sets an
5664 exception-breakpoint based on a SystemTap probe. */
5665
5666 static void
5667 insert_exception_resume_from_probe (struct thread_info *tp,
5668 const struct bound_probe *probe,
5669 struct frame_info *frame)
5670 {
5671 struct value *arg_value;
5672 CORE_ADDR handler;
5673 struct breakpoint *bp;
5674
5675 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5676 if (!arg_value)
5677 return;
5678
5679 handler = value_as_address (arg_value);
5680
5681 if (debug_infrun)
5682 fprintf_unfiltered (gdb_stdlog,
5683 "infrun: exception resume at %s\n",
5684 paddress (get_objfile_arch (probe->objfile),
5685 handler));
5686
5687 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5688 handler, bp_exception_resume);
5689 bp->thread = tp->num;
5690 inferior_thread ()->control.exception_resume_breakpoint = bp;
5691 }
5692
5693 /* This is called when an exception has been intercepted. Check to
5694 see whether the exception's destination is of interest, and if so,
5695 set an exception resume breakpoint there. */
5696
5697 static void
5698 check_exception_resume (struct execution_control_state *ecs,
5699 struct frame_info *frame)
5700 {
5701 volatile struct gdb_exception e;
5702 struct bound_probe probe;
5703 struct symbol *func;
5704
5705 /* First see if this exception unwinding breakpoint was set via a
5706 SystemTap probe point. If so, the probe has two arguments: the
5707 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5708 set a breakpoint there. */
5709 probe = find_probe_by_pc (get_frame_pc (frame));
5710 if (probe.probe)
5711 {
5712 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5713 return;
5714 }
5715
5716 func = get_frame_function (frame);
5717 if (!func)
5718 return;
5719
5720 TRY_CATCH (e, RETURN_MASK_ERROR)
5721 {
5722 struct block *b;
5723 struct block_iterator iter;
5724 struct symbol *sym;
5725 int argno = 0;
5726
5727 /* The exception breakpoint is a thread-specific breakpoint on
5728 the unwinder's debug hook, declared as:
5729
5730 void _Unwind_DebugHook (void *cfa, void *handler);
5731
5732 The CFA argument indicates the frame to which control is
5733 about to be transferred. HANDLER is the destination PC.
5734
5735 We ignore the CFA and set a temporary breakpoint at HANDLER.
5736 This is not extremely efficient but it avoids issues in gdb
5737 with computing the DWARF CFA, and it also works even in weird
5738 cases such as throwing an exception from inside a signal
5739 handler. */
5740
5741 b = SYMBOL_BLOCK_VALUE (func);
5742 ALL_BLOCK_SYMBOLS (b, iter, sym)
5743 {
5744 if (!SYMBOL_IS_ARGUMENT (sym))
5745 continue;
5746
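/* Skip the first argument (the CFA); the second argument is the
   handler PC we want.  */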
5747 if (argno == 0)
5748 ++argno;
5749 else
5750 {
5751 insert_exception_resume_breakpoint (ecs->event_thread,
5752 b, frame, sym);
5753 break;
5754 }
5755 }
5756 }
5757 }
5758
5759 static void
5760 stop_stepping (struct execution_control_state *ecs)
5761 {
5762 if (debug_infrun)
5763 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5764
5765 clear_step_over_info ();
5766
5767 /* Let callers know we don't want to wait for the inferior anymore. */
5768 ecs->wait_some_more = 0;
5769 }
5770
5771 /* Called when we should continue running the inferior, because the
5772 current event doesn't cause a user visible stop. This does the
5773 resuming part; waiting for the next event is done elsewhere. */
5774
5775 static void
5776 keep_going (struct execution_control_state *ecs)
5777 {
5778 /* Make sure normal_stop is called if we get a QUIT handled before
5779 reaching resume. */
5780 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5781
5782 /* Save the pc before execution, to compare with pc after stop. */
5783 ecs->event_thread->prev_pc
5784 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5785
5786 if (ecs->event_thread->control.trap_expected
5787 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5788 {
5789 /* We haven't yet gotten our trap, and either intercepted a
5790 non-signal event (e.g., a fork) or took a signal which we
5791 are supposed to pass through to the inferior. Simply
5792 continue. */
5793 discard_cleanups (old_cleanups);
5794 resume (currently_stepping (ecs->event_thread),
5795 ecs->event_thread->suspend.stop_signal);
5796 }
5797 else
5798 {
5799 volatile struct gdb_exception e;
5800 struct regcache *regcache = get_current_regcache ();
5801
5802 /* Either the trap was not expected, but we are continuing
5803 anyway (if we got a signal, the user asked it be passed to
5804 the child)
5805 -- or --
5806 We got our expected trap, but decided we should resume from
5807 it.
5808
5809 We're going to run this baby now!
5810
5811 Note that insert_breakpoints won't try to re-insert
5812 already inserted breakpoints. Therefore, we don't
5813 care if breakpoints were already inserted, or not. */
5814
5815 /* If we need to step over a breakpoint, and we're not using
5816 displaced stepping to do so, insert all breakpoints
5817 (watchpoints, etc.) but the one we're stepping over, step one
5818 instruction, and then re-insert the breakpoint when that step
5819 is finished. */
5820 if ((ecs->hit_singlestep_breakpoint
5821 || thread_still_needs_step_over (ecs->event_thread))
5822 && !use_displaced_stepping (get_regcache_arch (regcache)))
5823 {
5824 set_step_over_info (get_regcache_aspace (regcache),
5825 regcache_read_pc (regcache));
5826 }
5827 else
5828 clear_step_over_info ();
5829
5830 /* Stop stepping if inserting breakpoints fails. */
5831 TRY_CATCH (e, RETURN_MASK_ERROR)
5832 {
5833 insert_breakpoints ();
5834 }
5835 if (e.reason < 0)
5836 {
5837 exception_print (gdb_stderr, e);
5838 stop_stepping (ecs);
5839 return;
5840 }
5841
5842 ecs->event_thread->control.trap_expected
5843 = (ecs->event_thread->stepping_over_breakpoint
5844 || ecs->hit_singlestep_breakpoint);
5845
5846 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5847 explicitly specifies that such a signal should be delivered
5848 to the target program). Typically, that would occur when a
5849 user is debugging a target monitor on a simulator: the target
5850 monitor sets a breakpoint; the simulator encounters this
5851 breakpoint and halts the simulation handing control to GDB;
5852 GDB, noting that the stop address doesn't map to any known
5853 breakpoint, returns control back to the simulator; the
5854 simulator then delivers the hardware equivalent of a
5855 GDB_SIGNAL_TRAP to the program being debugged. */
5856 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5857 && !signal_program[ecs->event_thread->suspend.stop_signal])
5858 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5859
5860 discard_cleanups (old_cleanups);
5861 resume (currently_stepping (ecs->event_thread),
5862 ecs->event_thread->suspend.stop_signal);
5863 }
5864
5865 prepare_to_wait (ecs);
5866 }
5867
5868 /* This function normally comes after a resume, before
5869 handle_inferior_event exits. It takes care of any last bits of
5870 housekeeping, and sets the all-important wait_some_more flag. */
5871
5872 static void
5873 prepare_to_wait (struct execution_control_state *ecs)
5874 {
5875 if (debug_infrun)
5876 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5877
5878 /* This is the old end of the while loop. Let everybody know we
5879 want to wait for the inferior some more and get called again
5880 soon. */
5881 ecs->wait_some_more = 1;
5882 }
5883
5884 /* Several print_*_reason functions to print why the inferior has stopped.
5885 We always print something when the inferior exits or receives a signal.
5886 The rest of the cases are dealt with later on in normal_stop and
5887 print_it_typical. Ideally there should be a call to one of these
5888 print_*_reason functions from handle_inferior_event each time
5889 stop_stepping is called. */
5890
5891 /* Print why the inferior has stopped.
5892 We are done with a step/next/si/ni command, so print why the inferior
5893 has stopped. Only MI-like interpreters print a reason here, and only
5894 if we are not in the middle of doing a "step n" operation for n > 1. */
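/* In MI output this shows up as the stop reason, e.g.
   *stopped,reason="end-stepping-range".  */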
5895
5896 static void
5897 print_end_stepping_range_reason (void)
5898 {
5899 if ((!inferior_thread ()->step_multi
5900 || !inferior_thread ()->control.stop_step)
5901 && ui_out_is_mi_like_p (current_uiout))
5902 ui_out_field_string (current_uiout, "reason",
5903 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5904 }
5905
5906 /* The inferior was terminated by a signal; print why it stopped. */
5907
5908 static void
5909 print_signal_exited_reason (enum gdb_signal siggnal)
5910 {
5911 struct ui_out *uiout = current_uiout;
5912
5913 annotate_signalled ();
5914 if (ui_out_is_mi_like_p (uiout))
5915 ui_out_field_string
5916 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5917 ui_out_text (uiout, "\nProgram terminated with signal ");
5918 annotate_signal_name ();
5919 ui_out_field_string (uiout, "signal-name",
5920 gdb_signal_to_name (siggnal));
5921 annotate_signal_name_end ();
5922 ui_out_text (uiout, ", ");
5923 annotate_signal_string ();
5924 ui_out_field_string (uiout, "signal-meaning",
5925 gdb_signal_to_string (siggnal));
5926 annotate_signal_string_end ();
5927 ui_out_text (uiout, ".\n");
5928 ui_out_text (uiout, "The program no longer exists.\n");
5929 }
5930
5931 /* The inferior program is finished; print why it stopped. */
5932
5933 static void
5934 print_exited_reason (int exitstatus)
5935 {
5936 struct inferior *inf = current_inferior ();
5937 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5938 struct ui_out *uiout = current_uiout;
5939
5940 annotate_exited (exitstatus);
5941 if (exitstatus)
5942 {
5943 if (ui_out_is_mi_like_p (uiout))
5944 ui_out_field_string (uiout, "reason",
5945 async_reason_lookup (EXEC_ASYNC_EXITED));
5946 ui_out_text (uiout, "[Inferior ");
5947 ui_out_text (uiout, plongest (inf->num));
5948 ui_out_text (uiout, " (");
5949 ui_out_text (uiout, pidstr);
5950 ui_out_text (uiout, ") exited with code ");
5951 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5952 ui_out_text (uiout, "]\n");
5953 }
5954 else
5955 {
5956 if (ui_out_is_mi_like_p (uiout))
5957 ui_out_field_string
5958 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5959 ui_out_text (uiout, "[Inferior ");
5960 ui_out_text (uiout, plongest (inf->num));
5961 ui_out_text (uiout, " (");
5962 ui_out_text (uiout, pidstr);
5963 ui_out_text (uiout, ") exited normally]\n");
5964 }
5965 /* Support the --return-child-result option. */
5966 return_child_result_value = exitstatus;
5967 }
5968
5969 /* A signal was received; print why the inferior has stopped. The signal
5970 table tells us to print about it. */
5971
5972 static void
5973 print_signal_received_reason (enum gdb_signal siggnal)
5974 {
5975 struct ui_out *uiout = current_uiout;
5976
5977 annotate_signal ();
5978
5979 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5980 {
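/* The thread stopped without receiving a signal; just report which
   thread stopped.  */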
5981 struct thread_info *t = inferior_thread ();
5982
5983 ui_out_text (uiout, "\n[");
5984 ui_out_field_string (uiout, "thread-name",
5985 target_pid_to_str (t->ptid));
5986 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5987 ui_out_text (uiout, " stopped");
5988 }
5989 else
5990 {
5991 ui_out_text (uiout, "\nProgram received signal ");
5992 annotate_signal_name ();
5993 if (ui_out_is_mi_like_p (uiout))
5994 ui_out_field_string
5995 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5996 ui_out_field_string (uiout, "signal-name",
5997 gdb_signal_to_name (siggnal));
5998 annotate_signal_name_end ();
5999 ui_out_text (uiout, ", ");
6000 annotate_signal_string ();
6001 ui_out_field_string (uiout, "signal-meaning",
6002 gdb_signal_to_string (siggnal));
6003 annotate_signal_string_end ();
6004 }
6005 ui_out_text (uiout, ".\n");
6006 }
6007
6008 /* Reverse execution: target ran out of history info; print why the inferior
6009 has stopped. */
6010
6011 static void
6012 print_no_history_reason (void)
6013 {
6014 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
6015 }
6016
6017 /* Print current location without a level number, if we have changed
6018 functions or hit a breakpoint. Print source line if we have one.
6019 bpstat_print contains the logic deciding in detail what to print,
6020 based on the event(s) that just occurred. */
6021
6022 void
6023 print_stop_event (struct target_waitstatus *ws)
6024 {
6025 int bpstat_ret;
6026 int source_flag;
6027 int do_frame_printing = 1;
6028 struct thread_info *tp = inferior_thread ();
6029
6030 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6031 switch (bpstat_ret)
6032 {
6033 case PRINT_UNKNOWN:
6034 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6035 should) carry around the function and does (or should) use
6036 that when doing a frame comparison. */
6037 if (tp->control.stop_step
6038 && frame_id_eq (tp->control.step_frame_id,
6039 get_frame_id (get_current_frame ()))
6040 && step_start_function == find_pc_function (stop_pc))
6041 {
6042 /* Finished step, just print source line. */
6043 source_flag = SRC_LINE;
6044 }
6045 else
6046 {
6047 /* Print location and source line. */
6048 source_flag = SRC_AND_LOC;
6049 }
6050 break;
6051 case PRINT_SRC_AND_LOC:
6052 /* Print location and source line. */
6053 source_flag = SRC_AND_LOC;
6054 break;
6055 case PRINT_SRC_ONLY:
6056 source_flag = SRC_LINE;
6057 break;
6058 case PRINT_NOTHING:
6059 /* Something bogus. */
6060 source_flag = SRC_LINE;
6061 do_frame_printing = 0;
6062 break;
6063 default:
6064 internal_error (__FILE__, __LINE__, _("Unknown value."));
6065 }
6066
6067 /* The behavior of this routine with respect to the source
6068 flag is:
6069 SRC_LINE: Print only source line
6070 LOCATION: Print only location
6071 SRC_AND_LOC: Print location and source line. */
6072 if (do_frame_printing)
6073 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6074
6075 /* Display the auto-display expressions. */
6076 do_displays ();
6077 }
6078
6079 /* Here to return control to GDB when the inferior stops for real.
6080 Print appropriate messages, remove breakpoints, give terminal our modes.
6081
6082 STOP_PRINT_FRAME nonzero means print the executing frame
6083 (pc, function, args, file, line number and line text).
6084 BREAKPOINTS_FAILED nonzero means stop was due to error
6085 attempting to insert breakpoints. */
6086
6087 void
6088 normal_stop (void)
6089 {
6090 struct target_waitstatus last;
6091 ptid_t last_ptid;
6092 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6093
6094 get_last_target_status (&last_ptid, &last);
6095
6096 /* If an exception is thrown from this point on, make sure to
6097 propagate GDB's knowledge of the executing state to the
6098 frontend/user running state. A QUIT is an easy exception to see
6099 here, so do this before any filtered output. */
6100 if (!non_stop)
6101 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6102 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6103 && last.kind != TARGET_WAITKIND_EXITED
6104 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6105 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6106
6107 /* As with the notification of thread events, we want to delay
6108 notifying the user that we've switched thread context until
6109 the inferior actually stops.
6110
6111 There's no point in saying anything if the inferior has exited.
6112 Note that SIGNALLED here means "exited with a signal", not
6113 "received a signal".
6114
6115 Also skip saying anything in non-stop mode. In that mode, as we
6116 don't want GDB to switch threads behind the user's back, to avoid
6117 races where the user is typing a command to apply to thread x,
6118 but GDB switches to thread y before the user finishes entering
6119 the command, fetch_inferior_event installs a cleanup to restore
6120 the current thread back to the thread the user had selected right
6121 after this event is handled, so we're not really switching, only
6122 informing of a stop. */
6123 if (!non_stop
6124 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6125 && target_has_execution
6126 && last.kind != TARGET_WAITKIND_SIGNALLED
6127 && last.kind != TARGET_WAITKIND_EXITED
6128 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6129 {
6130 target_terminal_ours_for_output ();
6131 printf_filtered (_("[Switching to %s]\n"),
6132 target_pid_to_str (inferior_ptid));
6133 annotate_thread_changed ();
6134 previous_inferior_ptid = inferior_ptid;
6135 }
6136
6137 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6138 {
6139 gdb_assert (sync_execution || !target_can_async_p ());
6140
6141 target_terminal_ours_for_output ();
6142 printf_filtered (_("No unwaited-for children left.\n"));
6143 }
6144
6145 if (!breakpoints_always_inserted_mode () && target_has_execution)
6146 {
6147 if (remove_breakpoints ())
6148 {
6149 target_terminal_ours_for_output ();
6150 printf_filtered (_("Cannot remove breakpoints because "
6151 "program is no longer writable.\nFurther "
6152 "execution is probably impossible.\n"));
6153 }
6154 }
6155
6156 /* If an auto-display called a function and that got a signal,
6157 delete that auto-display to avoid an infinite recursion. */
6158
6159 if (stopped_by_random_signal)
6160 disable_current_display ();
6161
6162 /* Don't print a message if in the middle of doing a "step n"
6163 operation for n > 1 */
6164 if (target_has_execution
6165 && last.kind != TARGET_WAITKIND_SIGNALLED
6166 && last.kind != TARGET_WAITKIND_EXITED
6167 && inferior_thread ()->step_multi
6168 && inferior_thread ()->control.stop_step)
6169 goto done;
6170
6171 target_terminal_ours ();
6172 async_enable_stdin ();
6173
6174 /* Set the current source location. This will also happen if we
6175 display the frame below, but the current SAL will be incorrect
6176 during a user hook-stop function. */
6177 if (has_stack_frames () && !stop_stack_dummy)
6178 set_current_sal_from_frame (get_current_frame (), 1);
6179
6180 /* Let the user/frontend see the threads as stopped. */
6181 do_cleanups (old_chain);
6182
6183 /* Look up the hook_stop and run it (CLI internally handles problem
6184 of stop_command's pre-hook not existing). */
6185 if (stop_command)
6186 catch_errors (hook_stop_stub, stop_command,
6187 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6188
6189 if (!has_stack_frames ())
6190 goto done;
6191
6192 if (last.kind == TARGET_WAITKIND_SIGNALLED
6193 || last.kind == TARGET_WAITKIND_EXITED)
6194 goto done;
6195
6196 /* Select innermost stack frame - i.e., current frame is frame 0,
6197 and current location is based on that.
6198 Don't do this on return from a stack dummy routine,
6199 or if the program has exited. */
6200
6201 if (!stop_stack_dummy)
6202 {
6203 select_frame (get_current_frame ());
6204
6205 /* If --batch-silent is enabled then there's no need to print the current
6206 source location, and trying to do so risks an error message about
6207 missing source files. */
6208 if (stop_print_frame && !batch_silent)
6209 print_stop_event (&last);
6210 }
6211
6212 /* Save the function value return registers, if we care.
6213 We might be about to restore their previous contents. */
6214 if (inferior_thread ()->control.proceed_to_finish
6215 && execution_direction != EXEC_REVERSE)
6216 {
6217 /* This should not be necessary. */
6218 if (stop_registers)
6219 regcache_xfree (stop_registers);
6220
6221 /* NB: The copy goes through to the target picking up the value of
6222 all the registers. */
6223 stop_registers = regcache_dup (get_current_regcache ());
6224 }
6225
6226 if (stop_stack_dummy == STOP_STACK_DUMMY)
6227 {
6228 /* Pop the empty frame that contains the stack dummy.
6229 This also restores inferior state prior to the call
6230 (struct infcall_suspend_state). */
6231 struct frame_info *frame = get_current_frame ();
6232
6233 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6234 frame_pop (frame);
6235 /* frame_pop() calls reinit_frame_cache as the last thing it
6236 does which means there's currently no selected frame. We
6237 don't need to re-establish a selected frame if the dummy call
6238 returns normally, that will be done by
6239 restore_infcall_control_state. However, we do have to handle
6240 the case where the dummy call is returning after being
6241 stopped (e.g. the dummy call previously hit a breakpoint).
6242 We can't know which case we have so just always re-establish
6243 a selected frame here. */
6244 select_frame (get_current_frame ());
6245 }
6246
6247 done:
6248 annotate_stopped ();
6249
6250 /* Suppress the stop observer if we're in the middle of:
6251
6252 - a step n (n > 1), as there are still more steps to be done.
6253
6254 - a "finish" command, as the observer will be called in
6255 finish_command_continuation, so it can include the inferior
6256 function's return value.
6257
6258 - calling an inferior function, as we pretend the inferior didn't
6259 run at all. The return value of the call is handled by the
6260 expression evaluator, through call_function_by_hand. */
6261
6262 if (!target_has_execution
6263 || last.kind == TARGET_WAITKIND_SIGNALLED
6264 || last.kind == TARGET_WAITKIND_EXITED
6265 || last.kind == TARGET_WAITKIND_NO_RESUMED
6266 || (!(inferior_thread ()->step_multi
6267 && inferior_thread ()->control.stop_step)
6268 && !(inferior_thread ()->control.stop_bpstat
6269 && inferior_thread ()->control.proceed_to_finish)
6270 && !inferior_thread ()->control.in_infcall))
6271 {
6272 if (!ptid_equal (inferior_ptid, null_ptid))
6273 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6274 stop_print_frame);
6275 else
6276 observer_notify_normal_stop (NULL, stop_print_frame);
6277 }
6278
6279 if (target_has_execution)
6280 {
6281 if (last.kind != TARGET_WAITKIND_SIGNALLED
6282 && last.kind != TARGET_WAITKIND_EXITED)
6283 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6284 Delete any breakpoint that is to be deleted at the next stop. */
6285 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6286 }
6287
6288 /* Try to get rid of automatically added inferiors that are no
6289 longer needed. Keeping those around slows down things linearly.
6290 Note that this never removes the current inferior. */
6291 prune_inferiors ();
6292 }
6293
6294 static int
6295 hook_stop_stub (void *cmd)
6296 {
6297 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6298 return (0);
6299 }
6300 \f
6301 int
6302 signal_stop_state (int signo)
6303 {
6304 return signal_stop[signo];
6305 }
6306
6307 int
6308 signal_print_state (int signo)
6309 {
6310 return signal_print[signo];
6311 }
6312
6313 int
6314 signal_pass_state (int signo)
6315 {
6316 return signal_program[signo];
6317 }
6318
6319 static void
6320 signal_cache_update (int signo)
6321 {
6322 if (signo == -1)
6323 {
6324 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6325 signal_cache_update (signo);
6326
6327 return;
6328 }
6329
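/* A signal may be passed straight through to the target only if GDB
   neither stops for it, prints it, nor catches it, and the user has
   not blocked it from reaching the program.  */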
6330 signal_pass[signo] = (signal_stop[signo] == 0
6331 && signal_print[signo] == 0
6332 && signal_program[signo] == 1
6333 && signal_catch[signo] == 0);
6334 }
6335
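/* The next three functions update one entry of the signal_stop,
   signal_print, and signal_program tables respectively, refresh the
   cached signal_pass entry, and return the previous setting.  */
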
6336 int
6337 signal_stop_update (int signo, int state)
6338 {
6339 int ret = signal_stop[signo];
6340
6341 signal_stop[signo] = state;
6342 signal_cache_update (signo);
6343 return ret;
6344 }
6345
6346 int
6347 signal_print_update (int signo, int state)
6348 {
6349 int ret = signal_print[signo];
6350
6351 signal_print[signo] = state;
6352 signal_cache_update (signo);
6353 return ret;
6354 }
6355
6356 int
6357 signal_pass_update (int signo, int state)
6358 {
6359 int ret = signal_program[signo];
6360
6361 signal_program[signo] = state;
6362 signal_cache_update (signo);
6363 return ret;
6364 }
6365
6366 /* Update the global 'signal_catch' from INFO and notify the
6367 target. */
6368
6369 void
6370 signal_catch_update (const unsigned int *info)
6371 {
6372 int i;
6373
6374 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6375 signal_catch[i] = info[i] > 0;
6376 signal_cache_update (-1);
6377 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6378 }
6379
6380 static void
6381 sig_print_header (void)
6382 {
6383 printf_filtered (_("Signal Stop\tPrint\tPass "
6384 "to program\tDescription\n"));
6385 }
6386
6387 static void
6388 sig_print_info (enum gdb_signal oursig)
6389 {
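/* Pad the signal name to a fixed-width column so that the Stop,
   Print, and Pass columns line up.  */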
6390 const char *name = gdb_signal_to_name (oursig);
6391 int name_padding = 13 - strlen (name);
6392
6393 if (name_padding <= 0)
6394 name_padding = 0;
6395
6396 printf_filtered ("%s", name);
6397 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6398 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6399 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6400 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6401 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6402 }
6403
6404 /* Specify how various signals in the inferior should be handled. */
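/* For example, from the CLI:

   (gdb) handle SIGUSR1 nostop noprint pass
   (gdb) handle 14-15 stop print

   Keyword actions accumulate and apply to every signal named on the
   line; plain numbers and ranges use GDB's own signal numbering (see
   gdb_signal_from_command below).  */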
6405
6406 static void
6407 handle_command (char *args, int from_tty)
6408 {
6409 char **argv;
6410 int digits, wordlen;
6411 int sigfirst, signum, siglast;
6412 enum gdb_signal oursig;
6413 int allsigs;
6414 int nsigs;
6415 unsigned char *sigs;
6416 struct cleanup *old_chain;
6417
6418 if (args == NULL)
6419 {
6420 error_no_arg (_("signal to handle"));
6421 }
6422
6423 /* Allocate and zero an array of flags for which signals to handle. */
6424
6425 nsigs = (int) GDB_SIGNAL_LAST;
6426 sigs = (unsigned char *) alloca (nsigs);
6427 memset (sigs, 0, nsigs);
6428
6429 /* Break the command line up into args. */
6430
6431 argv = gdb_buildargv (args);
6432 old_chain = make_cleanup_freeargv (argv);
6433
6434 /* Walk through the args, looking for signal oursigs, signal names, and
6435 actions. Signal numbers and signal names may be interspersed with
6436 actions, with the actions being performed for all signals cumulatively
6437 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6438
6439 while (*argv != NULL)
6440 {
6441 wordlen = strlen (*argv);
6442 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6443 {;
6444 }
6445 allsigs = 0;
6446 sigfirst = siglast = -1;
6447
6448 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6449 {
6450 /* Apply action to all signals except those used by the
6451 debugger. Silently skip those. */
6452 allsigs = 1;
6453 sigfirst = 0;
6454 siglast = nsigs - 1;
6455 }
6456 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6457 {
6458 SET_SIGS (nsigs, sigs, signal_stop);
6459 SET_SIGS (nsigs, sigs, signal_print);
6460 }
6461 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6462 {
6463 UNSET_SIGS (nsigs, sigs, signal_program);
6464 }
6465 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6466 {
6467 SET_SIGS (nsigs, sigs, signal_print);
6468 }
6469 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6470 {
6471 SET_SIGS (nsigs, sigs, signal_program);
6472 }
6473 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6474 {
6475 UNSET_SIGS (nsigs, sigs, signal_stop);
6476 }
6477 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6478 {
6479 SET_SIGS (nsigs, sigs, signal_program);
6480 }
6481 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6482 {
6483 UNSET_SIGS (nsigs, sigs, signal_print);
6484 UNSET_SIGS (nsigs, sigs, signal_stop);
6485 }
6486 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6487 {
6488 UNSET_SIGS (nsigs, sigs, signal_program);
6489 }
6490 else if (digits > 0)
6491 {
6492 /* It is numeric. The numeric signal refers to our own
6493 internal signal numbering from target.h, not to host/target
6494 signal number. This is a feature; users really should be
6495 using symbolic names anyway, and the common ones like
6496 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6497
6498 sigfirst = siglast = (int)
6499 gdb_signal_from_command (atoi (*argv));
6500 if ((*argv)[digits] == '-')
6501 {
6502 siglast = (int)
6503 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6504 }
6505 if (sigfirst > siglast)
6506 {
6507 /* Bet he didn't figure we'd think of this case... */
6508 signum = sigfirst;
6509 sigfirst = siglast;
6510 siglast = signum;
6511 }
6512 }
6513 else
6514 {
6515 oursig = gdb_signal_from_name (*argv);
6516 if (oursig != GDB_SIGNAL_UNKNOWN)
6517 {
6518 sigfirst = siglast = (int) oursig;
6519 }
6520 else
6521 {
6522 /* Not a number and not a recognized flag word => complain. */
6523 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6524 }
6525 }
6526
6527 /* If any signal numbers or symbol names were found, set flags for
6528 which signals to apply actions to. */
6529
6530 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6531 {
6532 switch ((enum gdb_signal) signum)
6533 {
6534 case GDB_SIGNAL_TRAP:
6535 case GDB_SIGNAL_INT:
6536 if (!allsigs && !sigs[signum])
6537 {
6538 if (query (_("%s is used by the debugger.\n\
6539 Are you sure you want to change it? "),
6540 gdb_signal_to_name ((enum gdb_signal) signum)))
6541 {
6542 sigs[signum] = 1;
6543 }
6544 else
6545 {
6546 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6547 gdb_flush (gdb_stdout);
6548 }
6549 }
6550 break;
6551 case GDB_SIGNAL_0:
6552 case GDB_SIGNAL_DEFAULT:
6553 case GDB_SIGNAL_UNKNOWN:
6554 /* Make sure that "all" doesn't print these. */
6555 break;
6556 default:
6557 sigs[signum] = 1;
6558 break;
6559 }
6560 }
6561
6562 argv++;
6563 }
6564
6565 for (signum = 0; signum < nsigs; signum++)
6566 if (sigs[signum])
6567 {
6568 signal_cache_update (-1);
6569 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6570 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6571
6572 if (from_tty)
6573 {
6574 /* Show the results. */
6575 sig_print_header ();
6576 for (; signum < nsigs; signum++)
6577 if (sigs[signum])
6578 sig_print_info (signum);
6579 }
6580
6581 break;
6582 }
6583
6584 do_cleanups (old_chain);
6585 }
6586
6587 /* Complete the "handle" command. */
6588
6589 static VEC (char_ptr) *
6590 handle_completer (struct cmd_list_element *ignore,
6591 const char *text, const char *word)
6592 {
6593 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6594 static const char * const keywords[] =
6595 {
6596 "all",
6597 "stop",
6598 "ignore",
6599 "print",
6600 "pass",
6601 "nostop",
6602 "noignore",
6603 "noprint",
6604 "nopass",
6605 NULL,
6606 };
6607
6608 vec_signals = signal_completer (ignore, text, word);
6609 vec_keywords = complete_on_enum (keywords, word, word);
6610
6611 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6612 VEC_free (char_ptr, vec_signals);
6613 VEC_free (char_ptr, vec_keywords);
6614 return return_val;
6615 }
6616
6617 static void
6618 xdb_handle_command (char *args, int from_tty)
6619 {
6620 char **argv;
6621 struct cleanup *old_chain;
6622
6623 if (args == NULL)
6624 error_no_arg (_("xdb command"));
6625
6626 /* Break the command line up into args. */
6627
6628 argv = gdb_buildargv (args);
6629 old_chain = make_cleanup_freeargv (argv);
6630 if (argv[1] != (char *) NULL)
6631 {
6632 char *argBuf;
6633 int bufLen;
6634
6635 bufLen = strlen (argv[0]) + 20;
6636 argBuf = (char *) xmalloc (bufLen);
6637 if (argBuf)
6638 {
6639 int validFlag = 1;
6640 enum gdb_signal oursig;
6641
6642 oursig = gdb_signal_from_name (argv[0]);
6643 memset (argBuf, 0, bufLen);
6644 if (strcmp (argv[1], "Q") == 0)
6645 sprintf (argBuf, "%s %s", argv[0], "noprint");
6646 else
6647 {
6648 if (strcmp (argv[1], "s") == 0)
6649 {
6650 if (!signal_stop[oursig])
6651 sprintf (argBuf, "%s %s", argv[0], "stop");
6652 else
6653 sprintf (argBuf, "%s %s", argv[0], "nostop");
6654 }
6655 else if (strcmp (argv[1], "i") == 0)
6656 {
6657 if (!signal_program[oursig])
6658 sprintf (argBuf, "%s %s", argv[0], "pass");
6659 else
6660 sprintf (argBuf, "%s %s", argv[0], "nopass");
6661 }
6662 else if (strcmp (argv[1], "r") == 0)
6663 {
6664 if (!signal_print[oursig])
6665 sprintf (argBuf, "%s %s", argv[0], "print");
6666 else
6667 sprintf (argBuf, "%s %s", argv[0], "noprint");
6668 }
6669 else
6670 validFlag = 0;
6671 }
6672 if (validFlag)
6673 handle_command (argBuf, from_tty);
6674 else
6675 printf_filtered (_("Invalid signal handling flag.\n"));
6676 if (argBuf)
6677 xfree (argBuf);
6678 }
6679 }
6680 do_cleanups (old_chain);
6681 }
6682
6683 enum gdb_signal
6684 gdb_signal_from_command (int num)
6685 {
6686 if (num >= 1 && num <= 15)
6687 return (enum gdb_signal) num;
6688 error (_("Only signals 1-15 are valid as numeric signals.\n\
6689 Use \"info signals\" for a list of symbolic signals."));
6690 }
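/* Illustrative note (assuming the usual gdb_signal numbering, where
   the first fifteen enum values mirror the classic Unix signal
   numbers, e.g. GDB_SIGNAL_HUP == 1 and GDB_SIGNAL_TRAP == 5):
   "handle 1-5 print" therefore acts on GDB_SIGNAL_HUP through
   GDB_SIGNAL_TRAP, while anything outside 1-15 must be named
   symbolically, e.g. "handle SIGSEGV nostop".  */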
6691
6692 /* Print current contents of the tables set by the handle command.
6693 It is possible we should just be printing signals actually used
6694 by the current target (but for things to work right when switching
6695 targets, all signals should be in the signal tables). */
6696
6697 static void
6698 signals_info (char *signum_exp, int from_tty)
6699 {
6700 enum gdb_signal oursig;
6701
6702 sig_print_header ();
6703
6704 if (signum_exp)
6705 {
6706 /* First see if this is a symbol name. */
6707 oursig = gdb_signal_from_name (signum_exp);
6708 if (oursig == GDB_SIGNAL_UNKNOWN)
6709 {
6710 /* No, try numeric. */
6711 oursig =
6712 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6713 }
6714 sig_print_info (oursig);
6715 return;
6716 }
6717
6718 printf_filtered ("\n");
6719 /* These ugly casts brought to you by the native VAX compiler. */
6720 for (oursig = GDB_SIGNAL_FIRST;
6721 (int) oursig < (int) GDB_SIGNAL_LAST;
6722 oursig = (enum gdb_signal) ((int) oursig + 1))
6723 {
6724 QUIT;
6725
6726 if (oursig != GDB_SIGNAL_UNKNOWN
6727 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6728 sig_print_info (oursig);
6729 }
6730
6731 printf_filtered (_("\nUse the \"handle\" command "
6732 "to change these tables.\n"));
6733 }
6734
6735 /* Check if it makes sense to read $_siginfo from the current thread
6736 at this point. If not, throw an error. */
6737
6738 static void
6739 validate_siginfo_access (void)
6740 {
6741 /* No current inferior, no siginfo. */
6742 if (ptid_equal (inferior_ptid, null_ptid))
6743 error (_("No thread selected."));
6744
6745 /* Don't try to read from a dead thread. */
6746 if (is_exited (inferior_ptid))
6747 error (_("The current thread has terminated"));
6748
6749 /* ... or from a spinning thread. */
6750 if (is_running (inferior_ptid))
6751 error (_("Selected thread is running."));
6752 }
6753
6754 /* The $_siginfo convenience variable is a bit special. We don't know
6755 for sure the type of the value until we actually have a chance to
6756 fetch the data. The type can change depending on gdbarch, so it is
6757 also dependent on which thread you have selected. We handle this by:
6758
6759 1. making $_siginfo be an internalvar that creates a new value on
6760 access.
6761
6762 2. making the value of $_siginfo be an lval_computed value. */
6763
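/* For example, on a target whose gdbarch supplies a siginfo type
   (such as GNU/Linux), the user can read or even modify the pending
   signal information of the selected thread:

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr
       (gdb) set $_siginfo.si_signo = 0

   The field names above follow the Linux siginfo layout and are shown
   only as an illustration; the actual type comes from
   gdbarch_get_siginfo_type.  */
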
6764 /* This function implements the lval_computed support for reading a
6765 $_siginfo value. */
6766
6767 static void
6768 siginfo_value_read (struct value *v)
6769 {
6770 LONGEST transferred;
6771
6772 validate_siginfo_access ();
6773
6774 transferred =
6775 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6776 NULL,
6777 value_contents_all_raw (v),
6778 value_offset (v),
6779 TYPE_LENGTH (value_type (v)));
6780
6781 if (transferred != TYPE_LENGTH (value_type (v)))
6782 error (_("Unable to read siginfo"));
6783 }
6784
6785 /* This function implements the lval_computed support for writing a
6786 $_siginfo value. */
6787
6788 static void
6789 siginfo_value_write (struct value *v, struct value *fromval)
6790 {
6791 LONGEST transferred;
6792
6793 validate_siginfo_access ();
6794
6795 transferred = target_write (&current_target,
6796 TARGET_OBJECT_SIGNAL_INFO,
6797 NULL,
6798 value_contents_all_raw (fromval),
6799 value_offset (v),
6800 TYPE_LENGTH (value_type (fromval)));
6801
6802 if (transferred != TYPE_LENGTH (value_type (fromval)))
6803 error (_("Unable to write siginfo"));
6804 }
6805
6806 static const struct lval_funcs siginfo_value_funcs =
6807 {
6808 siginfo_value_read,
6809 siginfo_value_write
6810 };
6811
6812 /* Return a new value with the correct type for the siginfo object of
6813 the current thread using architecture GDBARCH. Return a void value
6814 if there's no object available. */
6815
6816 static struct value *
6817 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6818 void *ignore)
6819 {
6820 if (target_has_stack
6821 && !ptid_equal (inferior_ptid, null_ptid)
6822 && gdbarch_get_siginfo_type_p (gdbarch))
6823 {
6824 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6825
6826 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6827 }
6828
6829 return allocate_value (builtin_type (gdbarch)->builtin_void);
6830 }
6831
6832 \f
6833 /* infcall_suspend_state contains state about the program itself like its
6834 registers and any signal it received when it last stopped.
6835 This state must be restored regardless of how the inferior function call
6836 ends (either successfully, or after it hits a breakpoint or signal)
6837 if the program is to properly continue where it left off. */
6838
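/* A simplified sketch of the intended usage (the real caller is the
   inferior function call code in infcall.c, which handles many more
   error paths):

       struct infcall_suspend_state *caller_state
         = save_infcall_suspend_state ();
       struct cleanup *chain
         = make_cleanup_restore_infcall_suspend_state (caller_state);
       ... push the dummy frame and run the inferior function ...
       do_cleanups (chain);   -- registers, stop_pc and siginfo restored

   Illustrative only; see infcall.c for the authoritative sequence.  */
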
6839 struct infcall_suspend_state
6840 {
6841 struct thread_suspend_state thread_suspend;
6842 #if 0 /* Currently unused and empty structures are not valid C. */
6843 struct inferior_suspend_state inferior_suspend;
6844 #endif
6845
6846 /* Other fields: */
6847 CORE_ADDR stop_pc;
6848 struct regcache *registers;
6849
6850 /* Format of SIGINFO_DATA or NULL if it is not present. */
6851 struct gdbarch *siginfo_gdbarch;
6852
6853 /* The format of this buffer depends on SIGINFO_GDBARCH; it has a
6854 length of TYPE_LENGTH (gdbarch_get_siginfo_type ()) and its contents
6855 are only valid for that gdbarch. */
6856 gdb_byte *siginfo_data;
6857 };
6858
6859 struct infcall_suspend_state *
6860 save_infcall_suspend_state (void)
6861 {
6862 struct infcall_suspend_state *inf_state;
6863 struct thread_info *tp = inferior_thread ();
6864 #if 0
6865 struct inferior *inf = current_inferior ();
6866 #endif
6867 struct regcache *regcache = get_current_regcache ();
6868 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6869 gdb_byte *siginfo_data = NULL;
6870
6871 if (gdbarch_get_siginfo_type_p (gdbarch))
6872 {
6873 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6874 size_t len = TYPE_LENGTH (type);
6875 struct cleanup *back_to;
6876
6877 siginfo_data = xmalloc (len);
6878 back_to = make_cleanup (xfree, siginfo_data);
6879
6880 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6881 siginfo_data, 0, len) == len)
6882 discard_cleanups (back_to);
6883 else
6884 {
6885 /* Errors ignored. */
6886 do_cleanups (back_to);
6887 siginfo_data = NULL;
6888 }
6889 }
6890
6891 inf_state = XCNEW (struct infcall_suspend_state);
6892
6893 if (siginfo_data)
6894 {
6895 inf_state->siginfo_gdbarch = gdbarch;
6896 inf_state->siginfo_data = siginfo_data;
6897 }
6898
6899 inf_state->thread_suspend = tp->suspend;
6900 #if 0 /* Currently unused and empty structures are not valid C. */
6901 inf_state->inferior_suspend = inf->suspend;
6902 #endif
6903
6904 /* run_inferior_call will not use the signal due to its `proceed' call with
6905 GDB_SIGNAL_0 anyway. */
6906 tp->suspend.stop_signal = GDB_SIGNAL_0;
6907
6908 inf_state->stop_pc = stop_pc;
6909
6910 inf_state->registers = regcache_dup (regcache);
6911
6912 return inf_state;
6913 }
6914
6915 /* Restore inferior session state to INF_STATE. */
6916
6917 void
6918 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6919 {
6920 struct thread_info *tp = inferior_thread ();
6921 #if 0
6922 struct inferior *inf = current_inferior ();
6923 #endif
6924 struct regcache *regcache = get_current_regcache ();
6925 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6926
6927 tp->suspend = inf_state->thread_suspend;
6928 #if 0 /* Currently unused and empty structures are not valid C. */
6929 inf->suspend = inf_state->inferior_suspend;
6930 #endif
6931
6932 stop_pc = inf_state->stop_pc;
6933
6934 if (inf_state->siginfo_gdbarch == gdbarch)
6935 {
6936 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6937
6938 /* Errors ignored. */
6939 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6940 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6941 }
6942
6943 /* The inferior can be gone if the user types "print exit(0)"
6944 (and perhaps other times). */
6945 if (target_has_execution)
6946 /* NB: The register write goes through to the target. */
6947 regcache_cpy (regcache, inf_state->registers);
6948
6949 discard_infcall_suspend_state (inf_state);
6950 }
6951
6952 static void
6953 do_restore_infcall_suspend_state_cleanup (void *state)
6954 {
6955 restore_infcall_suspend_state (state);
6956 }
6957
6958 struct cleanup *
6959 make_cleanup_restore_infcall_suspend_state
6960 (struct infcall_suspend_state *inf_state)
6961 {
6962 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6963 }
6964
6965 void
6966 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6967 {
6968 regcache_xfree (inf_state->registers);
6969 xfree (inf_state->siginfo_data);
6970 xfree (inf_state);
6971 }
6972
6973 struct regcache *
6974 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6975 {
6976 return inf_state->registers;
6977 }
6978
6979 /* infcall_control_state contains state regarding gdb's control of the
6980 inferior itself like stepping control. It also contains session state like
6981 the user's currently selected frame. */
6982
6983 struct infcall_control_state
6984 {
6985 struct thread_control_state thread_control;
6986 struct inferior_control_state inferior_control;
6987
6988 /* Other fields: */
6989 enum stop_stack_kind stop_stack_dummy;
6990 int stopped_by_random_signal;
6991 int stop_after_trap;
6992
6993 /* ID of the selected frame when the inferior function call was made. */
6994 struct frame_id selected_frame_id;
6995 };
6996
6997 /* Save all of the information associated with the inferior<==>gdb
6998 connection. */
6999
7000 struct infcall_control_state *
7001 save_infcall_control_state (void)
7002 {
7003 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7004 struct thread_info *tp = inferior_thread ();
7005 struct inferior *inf = current_inferior ();
7006
7007 inf_status->thread_control = tp->control;
7008 inf_status->inferior_control = inf->control;
7009
7010 tp->control.step_resume_breakpoint = NULL;
7011 tp->control.exception_resume_breakpoint = NULL;
7012
7013 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7014 chain. If caller's caller is walking the chain, they'll be happier if we
7015 hand them back the original chain when restore_infcall_control_state is
7016 called. */
7017 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7018
7019 /* Other fields: */
7020 inf_status->stop_stack_dummy = stop_stack_dummy;
7021 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7022 inf_status->stop_after_trap = stop_after_trap;
7023
7024 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7025
7026 return inf_status;
7027 }
7028
7029 static int
7030 restore_selected_frame (void *args)
7031 {
7032 struct frame_id *fid = (struct frame_id *) args;
7033 struct frame_info *frame;
7034
7035 frame = frame_find_by_id (*fid);
7036
7037 /* If the frame cannot be found, the previously selected frame no
7038 longer exists (e.g. the inferior call clobbered the stack). */
7039 if (frame == NULL)
7040 {
7041 warning (_("Unable to restore previously selected frame."));
7042 return 0;
7043 }
7044
7045 select_frame (frame);
7046
7047 return (1);
7048 }
7049
7050 /* Restore inferior session state to INF_STATUS. */
7051
7052 void
7053 restore_infcall_control_state (struct infcall_control_state *inf_status)
7054 {
7055 struct thread_info *tp = inferior_thread ();
7056 struct inferior *inf = current_inferior ();
7057
7058 if (tp->control.step_resume_breakpoint)
7059 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7060
7061 if (tp->control.exception_resume_breakpoint)
7062 tp->control.exception_resume_breakpoint->disposition
7063 = disp_del_at_next_stop;
7064
7065 /* Handle the bpstat_copy of the chain. */
7066 bpstat_clear (&tp->control.stop_bpstat);
7067
7068 tp->control = inf_status->thread_control;
7069 inf->control = inf_status->inferior_control;
7070
7071 /* Other fields: */
7072 stop_stack_dummy = inf_status->stop_stack_dummy;
7073 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7074 stop_after_trap = inf_status->stop_after_trap;
7075
7076 if (target_has_stack)
7077 {
7078 /* The point of catch_errors is that if the stack is clobbered,
7079 walking the stack might encounter a garbage pointer and
7080 error() trying to dereference it. */
7081 if (catch_errors
7082 (restore_selected_frame, &inf_status->selected_frame_id,
7083 "Unable to restore previously selected frame:\n",
7084 RETURN_MASK_ERROR) == 0)
7085 /* Error in restoring the selected frame. Select the innermost
7086 frame. */
7087 select_frame (get_current_frame ());
7088 }
7089
7090 xfree (inf_status);
7091 }
7092
7093 static void
7094 do_restore_infcall_control_state_cleanup (void *sts)
7095 {
7096 restore_infcall_control_state (sts);
7097 }
7098
7099 struct cleanup *
7100 make_cleanup_restore_infcall_control_state
7101 (struct infcall_control_state *inf_status)
7102 {
7103 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7104 }
7105
7106 void
7107 discard_infcall_control_state (struct infcall_control_state *inf_status)
7108 {
7109 if (inf_status->thread_control.step_resume_breakpoint)
7110 inf_status->thread_control.step_resume_breakpoint->disposition
7111 = disp_del_at_next_stop;
7112
7113 if (inf_status->thread_control.exception_resume_breakpoint)
7114 inf_status->thread_control.exception_resume_breakpoint->disposition
7115 = disp_del_at_next_stop;
7116
7117 /* See save_infcall_control_state for info on stop_bpstat. */
7118 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7119
7120 xfree (inf_status);
7121 }
7122 \f
7123 /* restore_inferior_ptid() will be used by the cleanup machinery
7124 to restore the inferior_ptid value saved in a call to
7125 save_inferior_ptid(). */
7126
7127 static void
7128 restore_inferior_ptid (void *arg)
7129 {
7130 ptid_t *saved_ptid_ptr = arg;
7131
7132 inferior_ptid = *saved_ptid_ptr;
7133 xfree (arg);
7134 }
7135
7136 /* Save the value of inferior_ptid so that it may be restored by a
7137 later call to do_cleanups(). Returns the struct cleanup pointer
7138 needed for later doing the cleanup. */
7139
7140 struct cleanup *
7141 save_inferior_ptid (void)
7142 {
7143 ptid_t *saved_ptid_ptr;
7144
7145 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7146 *saved_ptid_ptr = inferior_ptid;
7147 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7148 }
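/* Typical usage of the pair above (a sketch, not copied from any one
   caller; `another_ptid' stands for whatever thread the caller needs
   to switch to):

       struct cleanup *old_chain = save_inferior_ptid ();
       inferior_ptid = another_ptid;
       ... temporarily operate on that thread ...
       do_cleanups (old_chain);   -- inferior_ptid restored here.  */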
7149
7150 /* See inferior.h. */
7151
7152 void
7153 clear_exit_convenience_vars (void)
7154 {
7155 clear_internalvar (lookup_internalvar ("_exitsignal"));
7156 clear_internalvar (lookup_internalvar ("_exitcode"));
7157 }
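/* The variables cleared above are the ones users query after the
   inferior exits, e.g. (an illustrative session; the process id and
   values are invented):

       (gdb) run
       [Inferior 1 (process 1234) exited with code 01]
       (gdb) print $_exitcode
       $1 = 1                                                        */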
7158 \f
7159
7160 /* User interface for reverse debugging:
7161 Set exec-direction / show exec-direction commands
7162 (returns an error unless the target can execute in reverse). */
7163
7164 int execution_direction = EXEC_FORWARD;
7165 static const char exec_forward[] = "forward";
7166 static const char exec_reverse[] = "reverse";
7167 static const char *exec_direction = exec_forward;
7168 static const char *const exec_direction_names[] = {
7169 exec_forward,
7170 exec_reverse,
7171 NULL
7172 };
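/* From the user's point of view, on a target that can execute in
   reverse (e.g. while replaying under "record full"):

       (gdb) set exec-direction reverse
       (gdb) continue            -- runs backward to the previous event
       (gdb) set exec-direction forward

   Illustrative only.  */
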
7173
7174 static void
7175 set_exec_direction_func (char *args, int from_tty,
7176 struct cmd_list_element *cmd)
7177 {
7178 if (target_can_execute_reverse)
7179 {
7180 if (!strcmp (exec_direction, exec_forward))
7181 execution_direction = EXEC_FORWARD;
7182 else if (!strcmp (exec_direction, exec_reverse))
7183 execution_direction = EXEC_REVERSE;
7184 }
7185 else
7186 {
7187 exec_direction = exec_forward;
7188 error (_("Target does not support this operation."));
7189 }
7190 }
7191
7192 static void
7193 show_exec_direction_func (struct ui_file *out, int from_tty,
7194 struct cmd_list_element *cmd, const char *value)
7195 {
7196 switch (execution_direction) {
7197 case EXEC_FORWARD:
7198 fprintf_filtered (out, _("Forward.\n"));
7199 break;
7200 case EXEC_REVERSE:
7201 fprintf_filtered (out, _("Reverse.\n"));
7202 break;
7203 default:
7204 internal_error (__FILE__, __LINE__,
7205 _("bogus execution_direction value: %d"),
7206 (int) execution_direction);
7207 }
7208 }
7209
7210 static void
7211 show_schedule_multiple (struct ui_file *file, int from_tty,
7212 struct cmd_list_element *c, const char *value)
7213 {
7214 fprintf_filtered (file, _("Resuming the execution of threads "
7215 "of all processes is %s.\n"), value);
7216 }
7217
7218 /* Implementation of `siginfo' variable. */
7219
7220 static const struct internalvar_funcs siginfo_funcs =
7221 {
7222 siginfo_make_value,
7223 NULL,
7224 NULL
7225 };
7226
7227 void
7228 _initialize_infrun (void)
7229 {
7230 int i;
7231 int numsigs;
7232 struct cmd_list_element *c;
7233
7234 add_info ("signals", signals_info, _("\
7235 What debugger does when program gets various signals.\n\
7236 Specify a signal as argument to print info on that signal only."));
7237 add_info_alias ("handle", "signals", 0);
7238
7239 c = add_com ("handle", class_run, handle_command, _("\
7240 Specify how to handle signals.\n\
7241 Usage: handle SIGNAL [ACTIONS]\n\
7242 Args are signals and actions to apply to those signals.\n\
7243 If no actions are specified, the current settings for the specified signals\n\
7244 will be displayed instead.\n\
7245 \n\
7246 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7247 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7248 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7249 The special arg \"all\" is recognized to mean all signals except those\n\
7250 used by the debugger, typically SIGTRAP and SIGINT.\n\
7251 \n\
7252 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7253 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7254 Stop means reenter debugger if this signal happens (implies print).\n\
7255 Print means print a message if this signal happens.\n\
7256 Pass means let program see this signal; otherwise program doesn't know.\n\
7257 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7258 Pass and Stop may be combined.\n\
7259 \n\
7260 Multiple signals may be specified. Signal numbers and signal names\n\
7261 may be interspersed with actions, with the actions being performed for\n\
7262 all signals cumulatively specified."));
7263 set_cmd_completer (c, handle_completer);
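/* A few examples of the syntax accepted by the command registered
   above (illustrative, not exhaustive):

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle SIGSEGV stop print
       (gdb) handle 1-5 print
       (gdb) handle all nopass                                        */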
7264
7265 if (xdb_commands)
7266 {
7267 add_com ("lz", class_info, signals_info, _("\
7268 What debugger does when program gets various signals.\n\
7269 Specify a signal as argument to print info on that signal only."));
7270 add_com ("z", class_run, xdb_handle_command, _("\
7271 Specify how to handle a signal.\n\
7272 Args are signals and actions to apply to those signals.\n\
7273 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7274 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7275 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7276 The special arg \"all\" is recognized to mean all signals except those\n\
7277 used by the debugger, typically SIGTRAP and SIGINT.\n\
7278 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7279 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7280 nopass), \"Q\" (noprint)\n\
7281 Stop means reenter debugger if this signal happens (implies print).\n\
7282 Print means print a message if this signal happens.\n\
7283 Pass means let program see this signal; otherwise program doesn't know.\n\
7284 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7285 Pass and Stop may be combined."));
7286 }
7287
7288 if (!dbx_commands)
7289 stop_command = add_cmd ("stop", class_obscure,
7290 not_just_help_class_command, _("\
7291 There is no `stop' command, but you can set a hook on `stop'.\n\
7292 This allows you to set a list of commands to be run each time execution\n\
7293 of the program stops."), &cmdlist);
7294
7295 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7296 Set inferior debugging."), _("\
7297 Show inferior debugging."), _("\
7298 When non-zero, inferior specific debugging is enabled."),
7299 NULL,
7300 show_debug_infrun,
7301 &setdebuglist, &showdebuglist);
7302
7303 add_setshow_boolean_cmd ("displaced", class_maintenance,
7304 &debug_displaced, _("\
7305 Set displaced stepping debugging."), _("\
7306 Show displaced stepping debugging."), _("\
7307 When non-zero, displaced stepping specific debugging is enabled."),
7308 NULL,
7309 show_debug_displaced,
7310 &setdebuglist, &showdebuglist);
7311
7312 add_setshow_boolean_cmd ("non-stop", no_class,
7313 &non_stop_1, _("\
7314 Set whether gdb controls the inferior in non-stop mode."), _("\
7315 Show whether gdb controls the inferior in non-stop mode."), _("\
7316 When debugging a multi-threaded program and this setting is\n\
7317 off (the default, also called all-stop mode), when one thread stops\n\
7318 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7319 all other threads in the program while you interact with the thread of\n\
7320 interest. When you continue or step a thread, you can allow the other\n\
7321 threads to run, or have them remain stopped, but while you inspect any\n\
7322 thread's state, all threads stop.\n\
7323 \n\
7324 In non-stop mode, when one thread stops, other threads can continue\n\
7325 to run freely. You'll be able to step each thread independently,\n\
7326 leave it stopped or free to run as needed."),
7327 set_non_stop,
7328 show_non_stop,
7329 &setlist,
7330 &showlist);
7331
7332 numsigs = (int) GDB_SIGNAL_LAST;
7333 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7334 signal_print = (unsigned char *)
7335 xmalloc (sizeof (signal_print[0]) * numsigs);
7336 signal_program = (unsigned char *)
7337 xmalloc (sizeof (signal_program[0]) * numsigs);
7338 signal_catch = (unsigned char *)
7339 xmalloc (sizeof (signal_catch[0]) * numsigs);
7340 signal_pass = (unsigned char *)
7341 xmalloc (sizeof (signal_pass[0]) * numsigs);
7342 for (i = 0; i < numsigs; i++)
7343 {
7344 signal_stop[i] = 1;
7345 signal_print[i] = 1;
7346 signal_program[i] = 1;
7347 signal_catch[i] = 0;
7348 }
7349
7350 /* Signals caused by debugger's own actions
7351 should not be given to the program afterwards. */
7352 signal_program[GDB_SIGNAL_TRAP] = 0;
7353 signal_program[GDB_SIGNAL_INT] = 0;
7354
7355 /* Signals that are not errors should not normally enter the debugger. */
7356 signal_stop[GDB_SIGNAL_ALRM] = 0;
7357 signal_print[GDB_SIGNAL_ALRM] = 0;
7358 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7359 signal_print[GDB_SIGNAL_VTALRM] = 0;
7360 signal_stop[GDB_SIGNAL_PROF] = 0;
7361 signal_print[GDB_SIGNAL_PROF] = 0;
7362 signal_stop[GDB_SIGNAL_CHLD] = 0;
7363 signal_print[GDB_SIGNAL_CHLD] = 0;
7364 signal_stop[GDB_SIGNAL_IO] = 0;
7365 signal_print[GDB_SIGNAL_IO] = 0;
7366 signal_stop[GDB_SIGNAL_POLL] = 0;
7367 signal_print[GDB_SIGNAL_POLL] = 0;
7368 signal_stop[GDB_SIGNAL_URG] = 0;
7369 signal_print[GDB_SIGNAL_URG] = 0;
7370 signal_stop[GDB_SIGNAL_WINCH] = 0;
7371 signal_print[GDB_SIGNAL_WINCH] = 0;
7372 signal_stop[GDB_SIGNAL_PRIO] = 0;
7373 signal_print[GDB_SIGNAL_PRIO] = 0;
7374
7375 /* These signals are used internally by user-level thread
7376 implementations. (See signal(5) on Solaris.) Like the above
7377 signals, a healthy program receives and handles them as part of
7378 its normal operation. */
7379 signal_stop[GDB_SIGNAL_LWP] = 0;
7380 signal_print[GDB_SIGNAL_LWP] = 0;
7381 signal_stop[GDB_SIGNAL_WAITING] = 0;
7382 signal_print[GDB_SIGNAL_WAITING] = 0;
7383 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7384 signal_print[GDB_SIGNAL_CANCEL] = 0;
7385
7386 /* Update cached state. */
7387 signal_cache_update (-1);
7388
7389 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7390 &stop_on_solib_events, _("\
7391 Set stopping for shared library events."), _("\
7392 Show stopping for shared library events."), _("\
7393 If nonzero, gdb will give control to the user when the dynamic linker\n\
7394 notifies gdb of shared library events. The most common event of interest\n\
7395 to the user would be loading/unloading of a new library."),
7396 set_stop_on_solib_events,
7397 show_stop_on_solib_events,
7398 &setlist, &showlist);
7399
7400 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7401 follow_fork_mode_kind_names,
7402 &follow_fork_mode_string, _("\
7403 Set debugger response to a program call of fork or vfork."), _("\
7404 Show debugger response to a program call of fork or vfork."), _("\
7405 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7406 parent - the original process is debugged after a fork\n\
7407 child - the new process is debugged after a fork\n\
7408 The unfollowed process will continue to run.\n\
7409 By default, the debugger will follow the parent process."),
7410 NULL,
7411 show_follow_fork_mode_string,
7412 &setlist, &showlist);
7413
7414 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7415 follow_exec_mode_names,
7416 &follow_exec_mode_string, _("\
7417 Set debugger response to a program call of exec."), _("\
7418 Show debugger response to a program call of exec."), _("\
7419 An exec call replaces the program image of a process.\n\
7420 \n\
7421 follow-exec-mode can be:\n\
7422 \n\
7423 new - the debugger creates a new inferior and rebinds the process\n\
7424 to this new inferior. The program the process was running before\n\
7425 the exec call can be restarted afterwards by restarting the original\n\
7426 inferior.\n\
7427 \n\
7428 same - the debugger keeps the process bound to the same inferior.\n\
7429 The new executable image replaces the previous executable loaded in\n\
7430 the inferior. Restarting the inferior after the exec call restarts\n\
7431 the executable the process was running after the exec call.\n\
7432 \n\
7433 By default, the debugger will use the same inferior."),
7434 NULL,
7435 show_follow_exec_mode_string,
7436 &setlist, &showlist);
7437
7438 add_setshow_enum_cmd ("scheduler-locking", class_run,
7439 scheduler_enums, &scheduler_mode, _("\
7440 Set mode for locking scheduler during execution."), _("\
7441 Show mode for locking scheduler during execution."), _("\
7442 off == no locking (threads may preempt at any time)\n\
7443 on == full locking (no thread except the current thread may run)\n\
7444 step == scheduler locked during every single-step operation.\n\
7445 In this mode, no other thread may run during a step command.\n\
7446 Other threads may run while stepping over a function call ('next')."),
7447 set_schedlock_func, /* traps on target vector */
7448 show_scheduler_mode,
7449 &setlist, &showlist);
7450
7451 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7452 Set mode for resuming threads of all processes."), _("\
7453 Show mode for resuming threads of all processes."), _("\
7454 When on, execution commands (such as 'continue' or 'next') resume all\n\
7455 threads of all processes. When off (which is the default), execution\n\
7456 commands only resume the threads of the current process. The set of\n\
7457 threads that are resumed is further refined by the scheduler-locking\n\
7458 mode (see help set scheduler-locking)."),
7459 NULL,
7460 show_schedule_multiple,
7461 &setlist, &showlist);
7462
7463 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7464 Set mode of the step operation."), _("\
7465 Show mode of the step operation."), _("\
7466 When set, doing a step over a function without debug line information\n\
7467 will stop at the first instruction of that function. Otherwise, the\n\
7468 function is skipped and the step command stops at a different source line."),
7469 NULL,
7470 show_step_stop_if_no_debug,
7471 &setlist, &showlist);
7472
7473 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7474 &can_use_displaced_stepping, _("\
7475 Set debugger's willingness to use displaced stepping."), _("\
7476 Show debugger's willingness to use displaced stepping."), _("\
7477 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7478 supported by the target architecture. If off, gdb will not use displaced\n\
7479 stepping to step over breakpoints, even if such is supported by the target\n\
7480 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7481 if the target architecture supports it and non-stop mode is active, but will not\n\
7482 use it in all-stop mode (see help set non-stop)."),
7483 NULL,
7484 show_can_use_displaced_stepping,
7485 &setlist, &showlist);
7486
7487 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7488 &exec_direction, _("Set direction of execution.\n\
7489 Options are 'forward' or 'reverse'."),
7490 _("Show direction of execution (forward/reverse)."),
7491 _("Tells gdb whether to execute forward or backward."),
7492 set_exec_direction_func, show_exec_direction_func,
7493 &setlist, &showlist);
7494
7495 /* Set/show detach-on-fork: user-settable mode. */
7496
7497 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7498 Set whether gdb will detach the child of a fork."), _("\
7499 Show whether gdb will detach the child of a fork."), _("\
7500 Tells gdb whether to detach the child of a fork."),
7501 NULL, NULL, &setlist, &showlist);
7502
7503 /* Set/show disable address space randomization mode. */
7504
7505 add_setshow_boolean_cmd ("disable-randomization", class_support,
7506 &disable_randomization, _("\
7507 Set disabling of debuggee's virtual address space randomization."), _("\
7508 Show disabling of debuggee's virtual address space randomization."), _("\
7509 When this mode is on (which is the default), randomization of the virtual\n\
7510 address space is disabled. Standalone programs run with the randomization\n\
7511 enabled by default on some platforms."),
7512 &set_disable_randomization,
7513 &show_disable_randomization,
7514 &setlist, &showlist);
7515
7516 /* ptid initializations */
7517 inferior_ptid = null_ptid;
7518 target_last_wait_ptid = minus_one_ptid;
7519
7520 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7521 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7522 observer_attach_thread_exit (infrun_thread_thread_exit);
7523 observer_attach_inferior_exit (infrun_inferior_exit);
7524
7525 /* Explicitly create without lookup, since a lookup would try to
7526 create a void-typed value before gdbarch is initialized. At this
7527 point, we're quite sure there
7528 isn't another convenience variable of the same name. */
7529 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7530
7531 add_setshow_boolean_cmd ("observer", no_class,
7532 &observer_mode_1, _("\
7533 Set whether gdb controls the inferior in observer mode."), _("\
7534 Show whether gdb controls the inferior in observer mode."), _("\
7535 In observer mode, GDB can get data from the inferior, but not\n\
7536 affect its execution. Registers and memory may not be changed,\n\
7537 breakpoints may not be set, and the program cannot be interrupted\n\
7538 or signalled."),
7539 set_observer_mode,
7540 show_observer_mode,
7541 &setlist,
7542 &showlist);
7543 }