binutils-gdb.git: gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <string.h>
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "exceptions.h"
29 #include "breakpoint.h"
30 #include "gdb_wait.h"
31 #include "gdbcore.h"
32 #include "gdbcmd.h"
33 #include "cli/cli-script.h"
34 #include "target.h"
35 #include "gdbthread.h"
36 #include "annotate.h"
37 #include "symfile.h"
38 #include "top.h"
39 #include <signal.h>
40 #include "inf-loop.h"
41 #include "regcache.h"
42 #include "value.h"
43 #include "observer.h"
44 #include "language.h"
45 #include "solib.h"
46 #include "main.h"
47 #include "dictionary.h"
48 #include "block.h"
49 #include "gdb_assert.h"
50 #include "mi/mi-common.h"
51 #include "event-top.h"
52 #include "record.h"
53 #include "record-full.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59 #include "skip.h"
60 #include "probe.h"
61 #include "objfiles.h"
62 #include "completer.h"
63 #include "target-descriptions.h"
64 #include "target-dcache.h"
65
66 /* Prototypes for local functions */
67
68 static void signals_info (char *, int);
69
70 static void handle_command (char *, int);
71
72 static void sig_print_info (enum gdb_signal);
73
74 static void sig_print_header (void);
75
76 static void resume_cleanups (void *);
77
78 static int hook_stop_stub (void *);
79
80 static int restore_selected_frame (void *);
81
82 static int follow_fork (void);
83
84 static void set_schedlock_func (char *args, int from_tty,
85 struct cmd_list_element *c);
86
87 static int currently_stepping (struct thread_info *tp);
88
89 static void xdb_handle_command (char *args, int from_tty);
90
91 void _initialize_infrun (void);
92
93 void nullify_last_target_wait_ptid (void);
94
95 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
96
97 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
98
99 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
100
101 /* When set, stop the 'step' command if we enter a function which has
102 no line number information. The normal behavior is that we step
 103    over such functions.  */
104 int step_stop_if_no_debug = 0;
105 static void
106 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
107 struct cmd_list_element *c, const char *value)
108 {
109 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
110 }
111
112 /* In asynchronous mode, but simulating synchronous execution. */
113
114 int sync_execution = 0;
115
116 /* proceed and normal_stop use this to notify the user when the
117 inferior stopped in a different thread than it had been running
118 in. */
119
120 static ptid_t previous_inferior_ptid;
121
122 /* If set (default for legacy reasons), when following a fork, GDB
123 will detach from one of the fork branches, child or parent.
 124    Exactly which branch is detached depends on the 'set follow-fork-mode'
 125    setting.  */
126
127 static int detach_fork = 1;
128
129 int debug_displaced = 0;
130 static void
131 show_debug_displaced (struct ui_file *file, int from_tty,
132 struct cmd_list_element *c, const char *value)
133 {
134 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
135 }
136
137 unsigned int debug_infrun = 0;
138 static void
139 show_debug_infrun (struct ui_file *file, int from_tty,
140 struct cmd_list_element *c, const char *value)
141 {
142 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
143 }
144
145
146 /* Support for disabling address space randomization. */
147
148 int disable_randomization = 1;
149
150 static void
151 show_disable_randomization (struct ui_file *file, int from_tty,
152 struct cmd_list_element *c, const char *value)
153 {
154 if (target_supports_disable_randomization ())
155 fprintf_filtered (file,
156 _("Disabling randomization of debuggee's "
157 "virtual address space is %s.\n"),
158 value);
159 else
160 fputs_filtered (_("Disabling randomization of debuggee's "
161 "virtual address space is unsupported on\n"
162 "this platform.\n"), file);
163 }
164
165 static void
166 set_disable_randomization (char *args, int from_tty,
167 struct cmd_list_element *c)
168 {
169 if (!target_supports_disable_randomization ())
170 error (_("Disabling randomization of debuggee's "
171 "virtual address space is unsupported on\n"
172 "this platform."));
173 }
174
175 /* User interface for non-stop mode. */
176
177 int non_stop = 0;
178 static int non_stop_1 = 0;
179
180 static void
181 set_non_stop (char *args, int from_tty,
182 struct cmd_list_element *c)
183 {
184 if (target_has_execution)
185 {
186 non_stop_1 = non_stop;
187 error (_("Cannot change this setting while the inferior is running."));
188 }
189
190 non_stop = non_stop_1;
191 }
192
193 static void
194 show_non_stop (struct ui_file *file, int from_tty,
195 struct cmd_list_element *c, const char *value)
196 {
197 fprintf_filtered (file,
198 _("Controlling the inferior in non-stop mode is %s.\n"),
199 value);
200 }
201
202 /* "Observer mode" is somewhat like a more extreme version of
203 non-stop, in which all GDB operations that might affect the
204 target's execution have been disabled. */
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 if (target_has_execution)
214 {
215 observer_mode_1 = observer_mode;
216 error (_("Cannot change this setting while the inferior is running."));
217 }
218
219 observer_mode = observer_mode_1;
220
221 may_write_registers = !observer_mode;
222 may_write_memory = !observer_mode;
223 may_insert_breakpoints = !observer_mode;
224 may_insert_tracepoints = !observer_mode;
225 /* We can insert fast tracepoints in or out of observer mode,
226 but enable them if we're going into this mode. */
227 if (observer_mode)
228 may_insert_fast_tracepoints = 1;
229 may_stop = !observer_mode;
230 update_target_permissions ();
231
232 /* Going *into* observer mode we must force non-stop, then
233 going out we leave it that way. */
234 if (observer_mode)
235 {
236 pagination_enabled = 0;
237 non_stop = non_stop_1 = 1;
238 }
239
240 if (from_tty)
241 printf_filtered (_("Observer mode is now %s.\n"),
242 (observer_mode ? "on" : "off"));
243 }
244
245 static void
246 show_observer_mode (struct ui_file *file, int from_tty,
247 struct cmd_list_element *c, const char *value)
248 {
249 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
250 }
251
252 /* This updates the value of observer mode based on changes in
253 permissions. Note that we are deliberately ignoring the values of
254 may-write-registers and may-write-memory, since the user may have
255 reason to enable these during a session, for instance to turn on a
256 debugging-related global. */
257
258 void
259 update_observer_mode (void)
260 {
261 int newval;
262
263 newval = (!may_insert_breakpoints
264 && !may_insert_tracepoints
265 && may_insert_fast_tracepoints
266 && !may_stop
267 && non_stop);
268
269 /* Let the user know if things change. */
270 if (newval != observer_mode)
271 printf_filtered (_("Observer mode is now %s.\n"),
272 (newval ? "on" : "off"));
273
274 observer_mode = observer_mode_1 = newval;
275 }
276
277 /* Tables of how to react to signals; the user sets them. */
278
279 static unsigned char *signal_stop;
280 static unsigned char *signal_print;
281 static unsigned char *signal_program;
282
283 /* Table of signals that are registered with "catch signal". A
284 non-zero entry indicates that the signal is caught by some "catch
285 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
286 signals. */
287 static unsigned char *signal_catch;
288
289 /* Table of signals that the target may silently handle.
290 This is automatically determined from the flags above,
291 and simply cached here. */
292 static unsigned char *signal_pass;
293
294 #define SET_SIGS(nsigs,sigs,flags) \
295 do { \
296 int signum = (nsigs); \
297 while (signum-- > 0) \
298 if ((sigs)[signum]) \
299 (flags)[signum] = 1; \
300 } while (0)
301
302 #define UNSET_SIGS(nsigs,sigs,flags) \
303 do { \
304 int signum = (nsigs); \
305 while (signum-- > 0) \
306 if ((sigs)[signum]) \
307 (flags)[signum] = 0; \
308 } while (0)
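/* Illustrative sketch (editorial addition, not part of the original
   source): SET_SIGS and UNSET_SIGS walk two parallel arrays of NSIGS
   entries and turn on (or off) every FLAGS entry whose corresponding
   SIGS entry is selected.  With hypothetical three-element arrays:

     unsigned char sigs[3]  = { 0, 1, 1 };
     unsigned char flags[3] = { 0, 0, 0 };

     SET_SIGS (3, sigs, flags);     // flags is now { 0, 1, 1 }
     UNSET_SIGS (3, sigs, flags);   // flags is back to { 0, 0, 0 }
*/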
309
310 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
311 this function is to avoid exporting `signal_program'. */
312
313 void
314 update_signals_program_target (void)
315 {
316 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
317 }
318
319 /* Value to pass to target_resume() to cause all threads to resume. */
320
321 #define RESUME_ALL minus_one_ptid
322
323 /* Command list pointer for the "stop" placeholder. */
324
325 static struct cmd_list_element *stop_command;
326
 327 /* Function the inferior was in as of the last step command.  */
328
329 static struct symbol *step_start_function;
330
331 /* Nonzero if we want to give control to the user when we're notified
332 of shared library events by the dynamic linker. */
333 int stop_on_solib_events;
334
335 /* Enable or disable optional shared library event breakpoints
336 as appropriate when the above flag is changed. */
337
338 static void
339 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
340 {
341 update_solib_breakpoints ();
342 }
343
344 static void
345 show_stop_on_solib_events (struct ui_file *file, int from_tty,
346 struct cmd_list_element *c, const char *value)
347 {
348 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
349 value);
350 }
351
 352 /* Nonzero means we are expecting a trace trap
353 and should stop the inferior and return silently when it happens. */
354
355 int stop_after_trap;
356
 357 /* Save register contents here when executing a "finish" command or when
 358    about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
359 Thus this contains the return value from the called function (assuming
360 values are returned in a register). */
361
362 struct regcache *stop_registers;
363
364 /* Nonzero after stop if current stack frame should be printed. */
365
366 static int stop_print_frame;
367
368 /* This is a cached copy of the pid/waitstatus of the last event
369 returned by target_wait()/deprecated_target_wait_hook(). This
370 information is returned by get_last_target_status(). */
371 static ptid_t target_last_wait_ptid;
372 static struct target_waitstatus target_last_waitstatus;
373
374 static void context_switch (ptid_t ptid);
375
376 void init_thread_stepping_state (struct thread_info *tss);
377
378 static void init_infwait_state (void);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
390 static void
391 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
393 {
394 fprintf_filtered (file,
395 _("Debugger response to a program "
396 "call of fork or vfork is \"%s\".\n"),
397 value);
398 }
399 \f
400
401 /* Tell the target to follow the fork we're stopped at. Returns true
402 if the inferior should be resumed; false, if the target for some
403 reason decided it's best not to resume. */
404
405 static int
406 follow_fork (void)
407 {
408 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
409 int should_resume = 1;
410 struct thread_info *tp;
411
412 /* Copy user stepping state to the new inferior thread. FIXME: the
413 followed fork child thread should have a copy of most of the
414 parent thread structure's run control related fields, not just these.
415 Initialized to avoid "may be used uninitialized" warnings from gcc. */
416 struct breakpoint *step_resume_breakpoint = NULL;
417 struct breakpoint *exception_resume_breakpoint = NULL;
418 CORE_ADDR step_range_start = 0;
419 CORE_ADDR step_range_end = 0;
420 struct frame_id step_frame_id = { 0 };
421 struct interp *command_interp = NULL;
422
423 if (!non_stop)
424 {
425 ptid_t wait_ptid;
426 struct target_waitstatus wait_status;
427
428 /* Get the last target status returned by target_wait(). */
429 get_last_target_status (&wait_ptid, &wait_status);
430
431 /* If not stopped at a fork event, then there's nothing else to
432 do. */
433 if (wait_status.kind != TARGET_WAITKIND_FORKED
434 && wait_status.kind != TARGET_WAITKIND_VFORKED)
435 return 1;
436
437 /* Check if we switched over from WAIT_PTID, since the event was
438 reported. */
439 if (!ptid_equal (wait_ptid, minus_one_ptid)
440 && !ptid_equal (inferior_ptid, wait_ptid))
441 {
442 /* We did. Switch back to WAIT_PTID thread, to tell the
443 target to follow it (in either direction). We'll
444 afterwards refuse to resume, and inform the user what
445 happened. */
446 switch_to_thread (wait_ptid);
447 should_resume = 0;
448 }
449 }
450
451 tp = inferior_thread ();
452
453 /* If there were any forks/vforks that were caught and are now to be
454 followed, then do so now. */
455 switch (tp->pending_follow.kind)
456 {
457 case TARGET_WAITKIND_FORKED:
458 case TARGET_WAITKIND_VFORKED:
459 {
460 ptid_t parent, child;
461
462 /* If the user did a next/step, etc, over a fork call,
463 preserve the stepping state in the fork child. */
464 if (follow_child && should_resume)
465 {
466 step_resume_breakpoint = clone_momentary_breakpoint
467 (tp->control.step_resume_breakpoint);
468 step_range_start = tp->control.step_range_start;
469 step_range_end = tp->control.step_range_end;
470 step_frame_id = tp->control.step_frame_id;
471 exception_resume_breakpoint
472 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
473 command_interp = tp->control.command_interp;
474
475 /* For now, delete the parent's sr breakpoint, otherwise,
476 parent/child sr breakpoints are considered duplicates,
477 and the child version will not be installed. Remove
478 this when the breakpoints module becomes aware of
479 inferiors and address spaces. */
480 delete_step_resume_breakpoint (tp);
481 tp->control.step_range_start = 0;
482 tp->control.step_range_end = 0;
483 tp->control.step_frame_id = null_frame_id;
484 delete_exception_resume_breakpoint (tp);
485 tp->control.command_interp = NULL;
486 }
487
488 parent = inferior_ptid;
489 child = tp->pending_follow.value.related_pid;
490
491 /* Tell the target to do whatever is necessary to follow
492 either parent or child. */
493 if (target_follow_fork (follow_child, detach_fork))
494 {
495 /* Target refused to follow, or there's some other reason
496 we shouldn't resume. */
497 should_resume = 0;
498 }
499 else
500 {
501 /* This pending follow fork event is now handled, one way
 502          or another.  The previously selected thread may be gone
 503          from the lists by now, but if it is still around, we need
 504          to clear the pending follow request.  */
505 tp = find_thread_ptid (parent);
506 if (tp)
507 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
508
509 /* This makes sure we don't try to apply the "Switched
 510          over from WAIT_PTID" logic above.  */
511 nullify_last_target_wait_ptid ();
512
513 /* If we followed the child, switch to it... */
514 if (follow_child)
515 {
516 switch_to_thread (child);
517
518 /* ... and preserve the stepping state, in case the
519 user was stepping over the fork call. */
520 if (should_resume)
521 {
522 tp = inferior_thread ();
523 tp->control.step_resume_breakpoint
524 = step_resume_breakpoint;
525 tp->control.step_range_start = step_range_start;
526 tp->control.step_range_end = step_range_end;
527 tp->control.step_frame_id = step_frame_id;
528 tp->control.exception_resume_breakpoint
529 = exception_resume_breakpoint;
530 tp->control.command_interp = command_interp;
531 }
532 else
533 {
534 /* If we get here, it was because we're trying to
 535                  resume from a fork catchpoint, but the user
536 has switched threads away from the thread that
537 forked. In that case, the resume command
538 issued is most likely not applicable to the
539 child, so just warn, and refuse to resume. */
540 warning (_("Not resuming: switched threads "
541 "before following fork child.\n"));
542 }
543
544 /* Reset breakpoints in the child as appropriate. */
545 follow_inferior_reset_breakpoints ();
546 }
547 else
548 switch_to_thread (parent);
549 }
550 }
551 break;
552 case TARGET_WAITKIND_SPURIOUS:
553 /* Nothing to follow. */
554 break;
555 default:
556 internal_error (__FILE__, __LINE__,
557 "Unexpected pending_follow.kind %d\n",
558 tp->pending_follow.kind);
559 break;
560 }
561
562 return should_resume;
563 }
564
565 void
566 follow_inferior_reset_breakpoints (void)
567 {
568 struct thread_info *tp = inferior_thread ();
569
570 /* Was there a step_resume breakpoint? (There was if the user
571 did a "next" at the fork() call.) If so, explicitly reset its
572 thread number.
573
574 step_resumes are a form of bp that are made to be per-thread.
575 Since we created the step_resume bp when the parent process
576 was being debugged, and now are switching to the child process,
577 from the breakpoint package's viewpoint, that's a switch of
578 "threads". We must update the bp's notion of which thread
579 it is for, or it'll be ignored when it triggers. */
580
581 if (tp->control.step_resume_breakpoint)
582 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
583
584 if (tp->control.exception_resume_breakpoint)
585 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
586
587 /* Reinsert all breakpoints in the child. The user may have set
588 breakpoints after catching the fork, in which case those
589 were never set in the child, but only in the parent. This makes
590 sure the inserted breakpoints match the breakpoint list. */
591
592 breakpoint_re_set ();
593 insert_breakpoints ();
594 }
595
596 /* The child has exited or execed: resume threads of the parent the
597 user wanted to be executing. */
598
599 static int
600 proceed_after_vfork_done (struct thread_info *thread,
601 void *arg)
602 {
603 int pid = * (int *) arg;
604
605 if (ptid_get_pid (thread->ptid) == pid
606 && is_running (thread->ptid)
607 && !is_executing (thread->ptid)
608 && !thread->stop_requested
609 && thread->suspend.stop_signal == GDB_SIGNAL_0)
610 {
611 if (debug_infrun)
612 fprintf_unfiltered (gdb_stdlog,
613 "infrun: resuming vfork parent thread %s\n",
614 target_pid_to_str (thread->ptid));
615
616 switch_to_thread (thread->ptid);
617 clear_proceed_status ();
618 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
619 }
620
621 return 0;
622 }
623
624 /* Called whenever we notice an exec or exit event, to handle
625 detaching or resuming a vfork parent. */
626
627 static void
628 handle_vfork_child_exec_or_exit (int exec)
629 {
630 struct inferior *inf = current_inferior ();
631
632 if (inf->vfork_parent)
633 {
634 int resume_parent = -1;
635
636 /* This exec or exit marks the end of the shared memory region
637 between the parent and the child. If the user wanted to
638 detach from the parent, now is the time. */
639
640 if (inf->vfork_parent->pending_detach)
641 {
642 struct thread_info *tp;
643 struct cleanup *old_chain;
644 struct program_space *pspace;
645 struct address_space *aspace;
646
647 /* follow-fork child, detach-on-fork on. */
648
649 inf->vfork_parent->pending_detach = 0;
650
651 if (!exec)
652 {
653 /* If we're handling a child exit, then inferior_ptid
654 points at the inferior's pid, not to a thread. */
655 old_chain = save_inferior_ptid ();
656 save_current_program_space ();
657 save_current_inferior ();
658 }
659 else
660 old_chain = save_current_space_and_thread ();
661
 662          /* We're letting go of the parent.  */
663 tp = any_live_thread_of_process (inf->vfork_parent->pid);
664 switch_to_thread (tp->ptid);
665
666 /* We're about to detach from the parent, which implicitly
667 removes breakpoints from its address space. There's a
668 catch here: we want to reuse the spaces for the child,
669 but, parent/child are still sharing the pspace at this
670 point, although the exec in reality makes the kernel give
671 the child a fresh set of new pages. The problem here is
 672             that the breakpoints module, being unaware of this, would
 673             likely choose the child process to write to the parent
674 address space. Swapping the child temporarily away from
675 the spaces has the desired effect. Yes, this is "sort
676 of" a hack. */
677
678 pspace = inf->pspace;
679 aspace = inf->aspace;
680 inf->aspace = NULL;
681 inf->pspace = NULL;
682
683 if (debug_infrun || info_verbose)
684 {
685 target_terminal_ours ();
686
687 if (exec)
688 fprintf_filtered (gdb_stdlog,
689 "Detaching vfork parent process "
690 "%d after child exec.\n",
691 inf->vfork_parent->pid);
692 else
693 fprintf_filtered (gdb_stdlog,
694 "Detaching vfork parent process "
695 "%d after child exit.\n",
696 inf->vfork_parent->pid);
697 }
698
699 target_detach (NULL, 0);
700
701 /* Put it back. */
702 inf->pspace = pspace;
703 inf->aspace = aspace;
704
705 do_cleanups (old_chain);
706 }
707 else if (exec)
708 {
709 /* We're staying attached to the parent, so, really give the
710 child a new address space. */
711 inf->pspace = add_program_space (maybe_new_address_space ());
712 inf->aspace = inf->pspace->aspace;
713 inf->removable = 1;
714 set_current_program_space (inf->pspace);
715
716 resume_parent = inf->vfork_parent->pid;
717
718 /* Break the bonds. */
719 inf->vfork_parent->vfork_child = NULL;
720 }
721 else
722 {
723 struct cleanup *old_chain;
724 struct program_space *pspace;
725
726 /* If this is a vfork child exiting, then the pspace and
727 aspaces were shared with the parent. Since we're
728 reporting the process exit, we'll be mourning all that is
729 found in the address space, and switching to null_ptid,
730 preparing to start a new inferior. But, since we don't
731 want to clobber the parent's address/program spaces, we
732 go ahead and create a new one for this exiting
733 inferior. */
734
735 /* Switch to null_ptid, so that clone_program_space doesn't want
736 to read the selected frame of a dead process. */
737 old_chain = save_inferior_ptid ();
738 inferior_ptid = null_ptid;
739
740 /* This inferior is dead, so avoid giving the breakpoints
741 module the option to write through to it (cloning a
742 program space resets breakpoints). */
743 inf->aspace = NULL;
744 inf->pspace = NULL;
745 pspace = add_program_space (maybe_new_address_space ());
746 set_current_program_space (pspace);
747 inf->removable = 1;
748 inf->symfile_flags = SYMFILE_NO_READ;
749 clone_program_space (pspace, inf->vfork_parent->pspace);
750 inf->pspace = pspace;
751 inf->aspace = pspace->aspace;
752
753 /* Put back inferior_ptid. We'll continue mourning this
754 inferior. */
755 do_cleanups (old_chain);
756
757 resume_parent = inf->vfork_parent->pid;
758 /* Break the bonds. */
759 inf->vfork_parent->vfork_child = NULL;
760 }
761
762 inf->vfork_parent = NULL;
763
764 gdb_assert (current_program_space == inf->pspace);
765
766 if (non_stop && resume_parent != -1)
767 {
768 /* If the user wanted the parent to be running, let it go
769 free now. */
770 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
771
772 if (debug_infrun)
773 fprintf_unfiltered (gdb_stdlog,
774 "infrun: resuming vfork parent process %d\n",
775 resume_parent);
776
777 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
778
779 do_cleanups (old_chain);
780 }
781 }
782 }
783
784 /* Enum strings for "set|show follow-exec-mode". */
785
786 static const char follow_exec_mode_new[] = "new";
787 static const char follow_exec_mode_same[] = "same";
788 static const char *const follow_exec_mode_names[] =
789 {
790 follow_exec_mode_new,
791 follow_exec_mode_same,
792 NULL,
793 };
794
795 static const char *follow_exec_mode_string = follow_exec_mode_same;
796 static void
797 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
798 struct cmd_list_element *c, const char *value)
799 {
800 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
801 }
802
803 /* EXECD_PATHNAME is assumed to be non-NULL. */
804
805 static void
806 follow_exec (ptid_t pid, char *execd_pathname)
807 {
808 struct thread_info *th = inferior_thread ();
809 struct inferior *inf = current_inferior ();
810
811 /* This is an exec event that we actually wish to pay attention to.
812 Refresh our symbol table to the newly exec'd program, remove any
813 momentary bp's, etc.
814
815 If there are breakpoints, they aren't really inserted now,
816 since the exec() transformed our inferior into a fresh set
817 of instructions.
818
819 We want to preserve symbolic breakpoints on the list, since
820 we have hopes that they can be reset after the new a.out's
821 symbol table is read.
822
823 However, any "raw" breakpoints must be removed from the list
824 (e.g., the solib bp's), since their address is probably invalid
825 now.
826
827 And, we DON'T want to call delete_breakpoints() here, since
828 that may write the bp's "shadow contents" (the instruction
 829      value that was overwritten with a TRAP instruction).  Since
830 we now have a new a.out, those shadow contents aren't valid. */
831
832 mark_breakpoints_out ();
833
834 update_breakpoints_after_exec ();
835
836 /* If there was one, it's gone now. We cannot truly step-to-next
837 statement through an exec(). */
838 th->control.step_resume_breakpoint = NULL;
839 th->control.exception_resume_breakpoint = NULL;
840 th->control.step_range_start = 0;
841 th->control.step_range_end = 0;
842
843 /* The target reports the exec event to the main thread, even if
844 some other thread does the exec, and even if the main thread was
845 already stopped --- if debugging in non-stop mode, it's possible
846 the user had the main thread held stopped in the previous image
847 --- release it now. This is the same behavior as step-over-exec
848 with scheduler-locking on in all-stop mode. */
849 th->stop_requested = 0;
850
851 /* What is this a.out's name? */
852 printf_unfiltered (_("%s is executing new program: %s\n"),
853 target_pid_to_str (inferior_ptid),
854 execd_pathname);
855
856 /* We've followed the inferior through an exec. Therefore, the
857 inferior has essentially been killed & reborn. */
858
859 gdb_flush (gdb_stdout);
860
861 breakpoint_init_inferior (inf_execd);
862
863 if (gdb_sysroot && *gdb_sysroot)
864 {
865 char *name = alloca (strlen (gdb_sysroot)
866 + strlen (execd_pathname)
867 + 1);
868
869 strcpy (name, gdb_sysroot);
870 strcat (name, execd_pathname);
871 execd_pathname = name;
872 }
873
874 /* Reset the shared library package. This ensures that we get a
875 shlib event when the child reaches "_start", at which point the
876 dld will have had a chance to initialize the child. */
877 /* Also, loading a symbol file below may trigger symbol lookups, and
878 we don't want those to be satisfied by the libraries of the
879 previous incarnation of this process. */
880 no_shared_libraries (NULL, 0);
881
882 if (follow_exec_mode_string == follow_exec_mode_new)
883 {
884 struct program_space *pspace;
885
886 /* The user wants to keep the old inferior and program spaces
887 around. Create a new fresh one, and switch to it. */
888
889 inf = add_inferior (current_inferior ()->pid);
890 pspace = add_program_space (maybe_new_address_space ());
891 inf->pspace = pspace;
892 inf->aspace = pspace->aspace;
893
894 exit_inferior_num_silent (current_inferior ()->num);
895
896 set_current_inferior (inf);
897 set_current_program_space (pspace);
898 }
899 else
900 {
901 /* The old description may no longer be fit for the new image.
 902          E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
903 old description; we'll read a new one below. No need to do
904 this on "follow-exec-mode new", as the old inferior stays
905 around (its description is later cleared/refetched on
906 restart). */
907 target_clear_description ();
908 }
909
910 gdb_assert (current_program_space == inf->pspace);
911
912 /* That a.out is now the one to use. */
913 exec_file_attach (execd_pathname, 0);
914
 915   /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
 916      (Position Independent Executable) main symbol file will get applied by
917 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
918 the breakpoints with the zero displacement. */
919
920 symbol_file_add (execd_pathname,
921 (inf->symfile_flags
922 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
923 NULL, 0);
924
925 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
926 set_initial_language ();
927
928 /* If the target can specify a description, read it. Must do this
929 after flipping to the new executable (because the target supplied
930 description must be compatible with the executable's
931 architecture, and the old executable may e.g., be 32-bit, while
932 the new one 64-bit), and before anything involving memory or
933 registers. */
934 target_find_description ();
935
936 solib_create_inferior_hook (0);
937
938 jit_inferior_created_hook ();
939
940 breakpoint_re_set ();
941
942 /* Reinsert all breakpoints. (Those which were symbolic have
943 been reset to the proper address in the new a.out, thanks
944 to symbol_file_command...). */
945 insert_breakpoints ();
946
947 /* The next resume of this inferior should bring it to the shlib
948 startup breakpoints. (If the user had also set bp's on
949 "main" from the old (parent) process, then they'll auto-
950 matically get reset there in the new process.). */
951 }
952
 953 /* Non-zero if we are just simulating a single-step.  This is needed
954 because we cannot remove the breakpoints in the inferior process
955 until after the `wait' in `wait_for_inferior'. */
956 static int singlestep_breakpoints_inserted_p = 0;
957
958 /* The thread we inserted single-step breakpoints for. */
959 static ptid_t singlestep_ptid;
960
961 /* PC when we started this single-step. */
962 static CORE_ADDR singlestep_pc;
963
964 /* Info about an instruction that is being stepped over. Invalid if
965 ASPACE is NULL. */
966
967 struct step_over_info
968 {
969 /* The instruction's address space. */
970 struct address_space *aspace;
971
972 /* The instruction's address. */
973 CORE_ADDR address;
974 };
975
976 /* The step-over info of the location that is being stepped over.
977
978 Note that with async/breakpoint always-inserted mode, a user might
979 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
980 being stepped over. As setting a new breakpoint inserts all
981 breakpoints, we need to make sure the breakpoint being stepped over
982 isn't inserted then. We do that by only clearing the step-over
983 info when the step-over is actually finished (or aborted).
984
985 Presently GDB can only step over one breakpoint at any given time.
 986    Given that threads that can't run code in the same address space as the
987 breakpoint's can't really miss the breakpoint, GDB could be taught
988 to step-over at most one breakpoint per address space (so this info
989 could move to the address space object if/when GDB is extended).
990 The set of breakpoints being stepped over will normally be much
991 smaller than the set of all breakpoints, so a flag in the
992 breakpoint location structure would be wasteful. A separate list
993 also saves complexity and run-time, as otherwise we'd have to go
994 through all breakpoint locations clearing their flag whenever we
995 start a new sequence. Similar considerations weigh against storing
996 this info in the thread object. Plus, not all step overs actually
997 have breakpoint locations -- e.g., stepping past a single-step
998 breakpoint, or stepping to complete a non-continuable
999 watchpoint. */
1000 static struct step_over_info step_over_info;
1001
1002 /* Record the address of the breakpoint/instruction we're currently
1003 stepping over. */
1004
1005 static void
1006 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1007 {
1008 step_over_info.aspace = aspace;
1009 step_over_info.address = address;
1010 }
1011
1012 /* Called when we're no longer stepping over a breakpoint / an
1013 instruction, so all breakpoints are free to be (re)inserted. */
1014
1015 static void
1016 clear_step_over_info (void)
1017 {
1018 step_over_info.aspace = NULL;
1019 step_over_info.address = 0;
1020 }
1021
1022 /* See inferior.h. */
1023
1024 int
1025 stepping_past_instruction_at (struct address_space *aspace,
1026 CORE_ADDR address)
1027 {
1028 return (step_over_info.aspace != NULL
1029 && breakpoint_address_match (aspace, address,
1030 step_over_info.aspace,
1031 step_over_info.address));
1032 }
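/* Usage sketch (editorial addition, not part of the original source):
   per the comment above struct step_over_info, a step-over is
   bracketed roughly like

     set_step_over_info (aspace, bp_addr);
     ... single-step the thread; while the info is set, breakpoint
     insertion can consult stepping_past_instruction_at (aspace,
     bp_addr) and leave that one location uninserted ...
     clear_step_over_info ();

   so that a "break" command issued mid-step does not re-insert the
   very breakpoint being stepped over.  */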
1033
1034 \f
1035 /* Displaced stepping. */
1036
1037 /* In non-stop debugging mode, we must take special care to manage
1038 breakpoints properly; in particular, the traditional strategy for
1039 stepping a thread past a breakpoint it has hit is unsuitable.
1040 'Displaced stepping' is a tactic for stepping one thread past a
1041 breakpoint it has hit while ensuring that other threads running
1042 concurrently will hit the breakpoint as they should.
1043
1044 The traditional way to step a thread T off a breakpoint in a
1045 multi-threaded program in all-stop mode is as follows:
1046
1047 a0) Initially, all threads are stopped, and breakpoints are not
1048 inserted.
1049 a1) We single-step T, leaving breakpoints uninserted.
1050 a2) We insert breakpoints, and resume all threads.
1051
1052 In non-stop debugging, however, this strategy is unsuitable: we
1053 don't want to have to stop all threads in the system in order to
1054 continue or step T past a breakpoint. Instead, we use displaced
1055 stepping:
1056
1057 n0) Initially, T is stopped, other threads are running, and
1058 breakpoints are inserted.
1059 n1) We copy the instruction "under" the breakpoint to a separate
1060 location, outside the main code stream, making any adjustments
1061 to the instruction, register, and memory state as directed by
1062 T's architecture.
1063 n2) We single-step T over the instruction at its new location.
1064 n3) We adjust the resulting register and memory state as directed
1065 by T's architecture. This includes resetting T's PC to point
1066 back into the main instruction stream.
1067 n4) We resume T.
1068
1069 This approach depends on the following gdbarch methods:
1070
1071 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1072 indicate where to copy the instruction, and how much space must
1073 be reserved there. We use these in step n1.
1074
1075    - gdbarch_displaced_step_copy_insn copies an instruction to a new
1076 address, and makes any necessary adjustments to the instruction,
1077 register contents, and memory. We use this in step n1.
1078
1079 - gdbarch_displaced_step_fixup adjusts registers and memory after
1080      we have successfully single-stepped the instruction, to yield the
1081 same effect the instruction would have had if we had executed it
1082 at its original address. We use this in step n3.
1083
1084 - gdbarch_displaced_step_free_closure provides cleanup.
1085
1086 The gdbarch_displaced_step_copy_insn and
1087 gdbarch_displaced_step_fixup functions must be written so that
1088 copying an instruction with gdbarch_displaced_step_copy_insn,
1089 single-stepping across the copied instruction, and then applying
1090    gdbarch_displaced_step_fixup should have the same effects on the
1091 thread's memory and registers as stepping the instruction in place
1092 would have. Exactly which responsibilities fall to the copy and
1093 which fall to the fixup is up to the author of those functions.
1094
1095 See the comments in gdbarch.sh for details.
1096
1097 Note that displaced stepping and software single-step cannot
1098 currently be used in combination, although with some care I think
1099 they could be made to. Software single-step works by placing
1100 breakpoints on all possible subsequent instructions; if the
1101 displaced instruction is a PC-relative jump, those breakpoints
1102 could fall in very strange places --- on pages that aren't
1103 executable, or at addresses that are not proper instruction
1104 boundaries. (We do generally let other threads run while we wait
1105 to hit the software single-step breakpoint, and they might
1106 encounter such a corrupted instruction.) One way to work around
1107 this would be to have gdbarch_displaced_step_copy_insn fully
1108 simulate the effect of PC-relative instructions (and return NULL)
1109 on architectures that use software single-stepping.
1110
1111 In non-stop mode, we can have independent and simultaneous step
1112 requests, so more than one thread may need to simultaneously step
1113 over a breakpoint. The current implementation assumes there is
1114 only one scratch space per process. In this case, we have to
1115 serialize access to the scratch space. If thread A wants to step
1116 over a breakpoint, but we are currently waiting for some other
1117 thread to complete a displaced step, we leave thread A stopped and
1118 place it in the displaced_step_request_queue. Whenever a displaced
1119 step finishes, we pick the next thread in the queue and start a new
1120 displaced step operation on it. See displaced_step_prepare and
1121 displaced_step_fixup for details. */
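/* Sketch of the flow (editorial addition, not part of the original
   source), tying steps n1-n4 above to the gdbarch hooks named there;
   queueing and error handling are omitted:

     CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
     ULONGEST len = gdbarch_max_insn_length (gdbarch);

     // n1: save the scratch area and copy the instruction into it.
     target_read_memory (copy, saved, len);
     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);

     // n2: single-step the copied instruction.
     regcache_write_pc (regcache, copy);
     target_resume (ptid, 1, GDB_SIGNAL_0);

     // n3: once the step reports back, fix up state and restore the
     // scratch area.
     gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                   regcache);
     write_memory (copy, saved, len);
     gdbarch_displaced_step_free_closure (gdbarch, closure);

     // n4: resume the thread at its corrected PC.

   See displaced_step_prepare and displaced_step_fixup below for the
   real implementation.  */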
1122
1123 struct displaced_step_request
1124 {
1125 ptid_t ptid;
1126 struct displaced_step_request *next;
1127 };
1128
1129 /* Per-inferior displaced stepping state. */
1130 struct displaced_step_inferior_state
1131 {
1132 /* Pointer to next in linked list. */
1133 struct displaced_step_inferior_state *next;
1134
1135 /* The process this displaced step state refers to. */
1136 int pid;
1137
1138 /* A queue of pending displaced stepping requests. One entry per
1139 thread that needs to do a displaced step. */
1140 struct displaced_step_request *step_request_queue;
1141
1142 /* If this is not null_ptid, this is the thread carrying out a
1143 displaced single-step in process PID. This thread's state will
1144 require fixing up once it has completed its step. */
1145 ptid_t step_ptid;
1146
1147 /* The architecture the thread had when we stepped it. */
1148 struct gdbarch *step_gdbarch;
1149
1150   /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1151 for post-step cleanup. */
1152 struct displaced_step_closure *step_closure;
1153
1154 /* The address of the original instruction, and the copy we
1155 made. */
1156 CORE_ADDR step_original, step_copy;
1157
1158 /* Saved contents of copy area. */
1159 gdb_byte *step_saved_copy;
1160 };
1161
1162 /* The list of states of processes involved in displaced stepping
1163 presently. */
1164 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1165
1166 /* Get the displaced stepping state of process PID. */
1167
1168 static struct displaced_step_inferior_state *
1169 get_displaced_stepping_state (int pid)
1170 {
1171 struct displaced_step_inferior_state *state;
1172
1173 for (state = displaced_step_inferior_states;
1174 state != NULL;
1175 state = state->next)
1176 if (state->pid == pid)
1177 return state;
1178
1179 return NULL;
1180 }
1181
1182 /* Add a new displaced stepping state for process PID to the displaced
1183    stepping state list, or return a pointer to the existing entry
1184    if one already exists.  Never returns NULL.  */
1185
1186 static struct displaced_step_inferior_state *
1187 add_displaced_stepping_state (int pid)
1188 {
1189 struct displaced_step_inferior_state *state;
1190
1191 for (state = displaced_step_inferior_states;
1192 state != NULL;
1193 state = state->next)
1194 if (state->pid == pid)
1195 return state;
1196
1197 state = xcalloc (1, sizeof (*state));
1198 state->pid = pid;
1199 state->next = displaced_step_inferior_states;
1200 displaced_step_inferior_states = state;
1201
1202 return state;
1203 }
1204
1205 /* If the inferior is in displaced stepping, and ADDR equals the starting address
1206    of the copy area, return the corresponding displaced_step_closure.  Otherwise,
1207 return NULL. */
1208
1209 struct displaced_step_closure*
1210 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1211 {
1212 struct displaced_step_inferior_state *displaced
1213 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1214
1215 /* If checking the mode of displaced instruction in copy area. */
1216 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1217 && (displaced->step_copy == addr))
1218 return displaced->step_closure;
1219
1220 return NULL;
1221 }
1222
1223 /* Remove the displaced stepping state of process PID. */
1224
1225 static void
1226 remove_displaced_stepping_state (int pid)
1227 {
1228 struct displaced_step_inferior_state *it, **prev_next_p;
1229
1230 gdb_assert (pid != 0);
1231
1232 it = displaced_step_inferior_states;
1233 prev_next_p = &displaced_step_inferior_states;
1234 while (it)
1235 {
1236 if (it->pid == pid)
1237 {
1238 *prev_next_p = it->next;
1239 xfree (it);
1240 return;
1241 }
1242
1243 prev_next_p = &it->next;
1244 it = *prev_next_p;
1245 }
1246 }
1247
1248 static void
1249 infrun_inferior_exit (struct inferior *inf)
1250 {
1251 remove_displaced_stepping_state (inf->pid);
1252 }
1253
1254 /* If ON, and the architecture supports it, GDB will use displaced
1255 stepping to step over breakpoints. If OFF, or if the architecture
1256 doesn't support it, GDB will instead use the traditional
1257 hold-and-step approach. If AUTO (which is the default), GDB will
1258 decide which technique to use to step over breakpoints depending on
1259 which of all-stop or non-stop mode is active --- displaced stepping
1260 in non-stop mode; hold-and-step in all-stop mode. */
1261
1262 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1263
1264 static void
1265 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1266 struct cmd_list_element *c,
1267 const char *value)
1268 {
1269 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1270 fprintf_filtered (file,
1271 _("Debugger's willingness to use displaced stepping "
1272 "to step over breakpoints is %s (currently %s).\n"),
1273 value, non_stop ? "on" : "off");
1274 else
1275 fprintf_filtered (file,
1276 _("Debugger's willingness to use displaced stepping "
1277 "to step over breakpoints is %s.\n"), value);
1278 }
1279
1280 /* Return non-zero if displaced stepping can/should be used to step
1281 over breakpoints. */
1282
1283 static int
1284 use_displaced_stepping (struct gdbarch *gdbarch)
1285 {
1286 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1287 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1288 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1289 && find_record_target () == NULL);
1290 }
1291
1292 /* Clean out any stray displaced stepping state. */
1293 static void
1294 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1295 {
1296 /* Indicate that there is no cleanup pending. */
1297 displaced->step_ptid = null_ptid;
1298
1299 if (displaced->step_closure)
1300 {
1301 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1302 displaced->step_closure);
1303 displaced->step_closure = NULL;
1304 }
1305 }
1306
1307 static void
1308 displaced_step_clear_cleanup (void *arg)
1309 {
1310 struct displaced_step_inferior_state *state = arg;
1311
1312 displaced_step_clear (state);
1313 }
1314
1315 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1316 void
1317 displaced_step_dump_bytes (struct ui_file *file,
1318 const gdb_byte *buf,
1319 size_t len)
1320 {
1321 int i;
1322
1323 for (i = 0; i < len; i++)
1324 fprintf_unfiltered (file, "%02x ", buf[i]);
1325 fputs_unfiltered ("\n", file);
1326 }
1327
1328 /* Prepare to single-step, using displaced stepping.
1329
1330 Note that we cannot use displaced stepping when we have a signal to
1331 deliver. If we have a signal to deliver and an instruction to step
1332 over, then after the step, there will be no indication from the
1333 target whether the thread entered a signal handler or ignored the
1334 signal and stepped over the instruction successfully --- both cases
1335 result in a simple SIGTRAP. In the first case we mustn't do a
1336 fixup, and in the second case we must --- but we can't tell which.
1337 Comments in the code for 'random signals' in handle_inferior_event
1338 explain how we handle this case instead.
1339
1340 Returns 1 if preparing was successful -- this thread is going to be
1341 stepped now; or 0 if displaced stepping this thread got queued. */
1342 static int
1343 displaced_step_prepare (ptid_t ptid)
1344 {
1345 struct cleanup *old_cleanups, *ignore_cleanups;
1346 struct thread_info *tp = find_thread_ptid (ptid);
1347 struct regcache *regcache = get_thread_regcache (ptid);
1348 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1349 CORE_ADDR original, copy;
1350 ULONGEST len;
1351 struct displaced_step_closure *closure;
1352 struct displaced_step_inferior_state *displaced;
1353 int status;
1354
1355 /* We should never reach this function if the architecture does not
1356 support displaced stepping. */
1357 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1358
1359 /* Disable range stepping while executing in the scratch pad. We
1360 want a single-step even if executing the displaced instruction in
1361 the scratch buffer lands within the stepping range (e.g., a
1362 jump/branch). */
1363 tp->control.may_range_step = 0;
1364
1365 /* We have to displaced step one thread at a time, as we only have
1366 access to a single scratch space per inferior. */
1367
1368 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1369
1370 if (!ptid_equal (displaced->step_ptid, null_ptid))
1371 {
1372 /* Already waiting for a displaced step to finish. Defer this
1373          request and place it in the queue.  */
1374 struct displaced_step_request *req, *new_req;
1375
1376 if (debug_displaced)
1377 fprintf_unfiltered (gdb_stdlog,
1378 "displaced: defering step of %s\n",
1379 target_pid_to_str (ptid));
1380
1381 new_req = xmalloc (sizeof (*new_req));
1382 new_req->ptid = ptid;
1383 new_req->next = NULL;
1384
1385 if (displaced->step_request_queue)
1386 {
1387 for (req = displaced->step_request_queue;
1388 req && req->next;
1389 req = req->next)
1390 ;
1391 req->next = new_req;
1392 }
1393 else
1394 displaced->step_request_queue = new_req;
1395
1396 return 0;
1397 }
1398 else
1399 {
1400 if (debug_displaced)
1401 fprintf_unfiltered (gdb_stdlog,
1402 "displaced: stepping %s now\n",
1403 target_pid_to_str (ptid));
1404 }
1405
1406 displaced_step_clear (displaced);
1407
1408 old_cleanups = save_inferior_ptid ();
1409 inferior_ptid = ptid;
1410
1411 original = regcache_read_pc (regcache);
1412
1413 copy = gdbarch_displaced_step_location (gdbarch);
1414 len = gdbarch_max_insn_length (gdbarch);
1415
1416 /* Save the original contents of the copy area. */
1417 displaced->step_saved_copy = xmalloc (len);
1418 ignore_cleanups = make_cleanup (free_current_contents,
1419 &displaced->step_saved_copy);
1420 status = target_read_memory (copy, displaced->step_saved_copy, len);
1421 if (status != 0)
1422 throw_error (MEMORY_ERROR,
1423 _("Error accessing memory address %s (%s) for "
1424 "displaced-stepping scratch space."),
1425 paddress (gdbarch, copy), safe_strerror (status));
1426 if (debug_displaced)
1427 {
1428 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1429 paddress (gdbarch, copy));
1430 displaced_step_dump_bytes (gdb_stdlog,
1431 displaced->step_saved_copy,
1432 len);
1433     }
1434
1435 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1436 original, copy, regcache);
1437
1438 /* We don't support the fully-simulated case at present. */
1439 gdb_assert (closure);
1440
1441 /* Save the information we need to fix things up if the step
1442 succeeds. */
1443 displaced->step_ptid = ptid;
1444 displaced->step_gdbarch = gdbarch;
1445 displaced->step_closure = closure;
1446 displaced->step_original = original;
1447 displaced->step_copy = copy;
1448
1449 make_cleanup (displaced_step_clear_cleanup, displaced);
1450
1451 /* Resume execution at the copy. */
1452 regcache_write_pc (regcache, copy);
1453
1454 discard_cleanups (ignore_cleanups);
1455
1456 do_cleanups (old_cleanups);
1457
1458 if (debug_displaced)
1459 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1460 paddress (gdbarch, copy));
1461
1462 return 1;
1463 }
1464
1465 static void
1466 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1467 const gdb_byte *myaddr, int len)
1468 {
1469 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1470
1471 inferior_ptid = ptid;
1472 write_memory (memaddr, myaddr, len);
1473 do_cleanups (ptid_cleanup);
1474 }
1475
1476 /* Restore the contents of the copy area for thread PTID. */
1477
1478 static void
1479 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1480 ptid_t ptid)
1481 {
1482 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1483
1484 write_memory_ptid (ptid, displaced->step_copy,
1485 displaced->step_saved_copy, len);
1486 if (debug_displaced)
1487 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1488 target_pid_to_str (ptid),
1489 paddress (displaced->step_gdbarch,
1490 displaced->step_copy));
1491 }
1492
1493 static void
1494 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1495 {
1496 struct cleanup *old_cleanups;
1497 struct displaced_step_inferior_state *displaced
1498 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1499
1500 /* Was any thread of this process doing a displaced step? */
1501 if (displaced == NULL)
1502 return;
1503
1504 /* Was this event for the pid we displaced? */
1505 if (ptid_equal (displaced->step_ptid, null_ptid)
1506 || ! ptid_equal (displaced->step_ptid, event_ptid))
1507 return;
1508
1509 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1510
1511 displaced_step_restore (displaced, displaced->step_ptid);
1512
1513 /* Did the instruction complete successfully? */
1514 if (signal == GDB_SIGNAL_TRAP)
1515 {
1516 /* Fix up the resulting state. */
1517 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1518 displaced->step_closure,
1519 displaced->step_original,
1520 displaced->step_copy,
1521 get_thread_regcache (displaced->step_ptid));
1522 }
1523 else
1524 {
1525 /* Since the instruction didn't complete, all we can do is
1526 relocate the PC. */
1527 struct regcache *regcache = get_thread_regcache (event_ptid);
1528 CORE_ADDR pc = regcache_read_pc (regcache);
1529
1530 pc = displaced->step_original + (pc - displaced->step_copy);
1531 regcache_write_pc (regcache, pc);
1532 }
1533
1534 do_cleanups (old_cleanups);
1535
1536 displaced->step_ptid = null_ptid;
1537
1538 /* Are there any pending displaced stepping requests? If so, run
1539 one now. Leave the state object around, since we're likely to
1540 need it again soon. */
1541 while (displaced->step_request_queue)
1542 {
1543 struct displaced_step_request *head;
1544 ptid_t ptid;
1545 struct regcache *regcache;
1546 struct gdbarch *gdbarch;
1547 CORE_ADDR actual_pc;
1548 struct address_space *aspace;
1549
1550 head = displaced->step_request_queue;
1551 ptid = head->ptid;
1552 displaced->step_request_queue = head->next;
1553 xfree (head);
1554
1555 context_switch (ptid);
1556
1557 regcache = get_thread_regcache (ptid);
1558 actual_pc = regcache_read_pc (regcache);
1559 aspace = get_regcache_aspace (regcache);
1560
1561 if (breakpoint_here_p (aspace, actual_pc))
1562 {
1563 if (debug_displaced)
1564 fprintf_unfiltered (gdb_stdlog,
1565 "displaced: stepping queued %s now\n",
1566 target_pid_to_str (ptid));
1567
1568 displaced_step_prepare (ptid);
1569
1570 gdbarch = get_regcache_arch (regcache);
1571
1572 if (debug_displaced)
1573 {
1574 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1575 gdb_byte buf[4];
1576
1577 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1578 paddress (gdbarch, actual_pc));
1579 read_memory (actual_pc, buf, sizeof (buf));
1580 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1581 }
1582
1583 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1584 displaced->step_closure))
1585 target_resume (ptid, 1, GDB_SIGNAL_0);
1586 else
1587 target_resume (ptid, 0, GDB_SIGNAL_0);
1588
1589 /* Done, we're stepping a thread. */
1590 break;
1591 }
1592 else
1593 {
1594 int step;
1595 struct thread_info *tp = inferior_thread ();
1596
1597 /* The breakpoint we were sitting under has since been
1598 removed. */
1599 tp->control.trap_expected = 0;
1600
1601 /* Go back to what we were trying to do. */
1602 step = currently_stepping (tp);
1603
1604 if (debug_displaced)
1605 fprintf_unfiltered (gdb_stdlog,
1606 "displaced: breakpoint is gone: %s, step(%d)\n",
1607 target_pid_to_str (tp->ptid), step);
1608
1609 target_resume (ptid, step, GDB_SIGNAL_0);
1610 tp->suspend.stop_signal = GDB_SIGNAL_0;
1611
1612 /* This request was discarded. See if there's any other
1613 thread waiting for its turn. */
1614 }
1615 }
1616 }
1617
1618 /* Update global variables holding ptids to hold NEW_PTID if they were
1619 holding OLD_PTID. */
1620 static void
1621 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1622 {
1623 struct displaced_step_request *it;
1624 struct displaced_step_inferior_state *displaced;
1625
1626 if (ptid_equal (inferior_ptid, old_ptid))
1627 inferior_ptid = new_ptid;
1628
1629 if (ptid_equal (singlestep_ptid, old_ptid))
1630 singlestep_ptid = new_ptid;
1631
1632 for (displaced = displaced_step_inferior_states;
1633 displaced;
1634 displaced = displaced->next)
1635 {
1636 if (ptid_equal (displaced->step_ptid, old_ptid))
1637 displaced->step_ptid = new_ptid;
1638
1639 for (it = displaced->step_request_queue; it; it = it->next)
1640 if (ptid_equal (it->ptid, old_ptid))
1641 it->ptid = new_ptid;
1642 }
1643 }
1644
1645 \f
1646 /* Resuming. */
1647
1648 /* Things to clean up if we QUIT out of resume (). */
1649 static void
1650 resume_cleanups (void *ignore)
1651 {
1652 normal_stop ();
1653 }
1654
1655 static const char schedlock_off[] = "off";
1656 static const char schedlock_on[] = "on";
1657 static const char schedlock_step[] = "step";
1658 static const char *const scheduler_enums[] = {
1659 schedlock_off,
1660 schedlock_on,
1661 schedlock_step,
1662 NULL
1663 };
1664 static const char *scheduler_mode = schedlock_off;
1665 static void
1666 show_scheduler_mode (struct ui_file *file, int from_tty,
1667 struct cmd_list_element *c, const char *value)
1668 {
1669 fprintf_filtered (file,
1670 _("Mode for locking scheduler "
1671 "during execution is \"%s\".\n"),
1672 value);
1673 }
1674
1675 static void
1676 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1677 {
1678 if (!target_can_lock_scheduler)
1679 {
1680 scheduler_mode = schedlock_off;
1681 error (_("Target '%s' cannot support this command."), target_shortname);
1682 }
1683 }
1684
1685 /* True if execution commands resume all threads of all processes by
1686 default; otherwise, resume only threads of the current inferior
1687 process. */
1688 int sched_multi = 0;
1689
1690 /* Try to set up for software single stepping over the specified location.
1691 Return 1 if target_resume() should use hardware single step.
1692
1693 GDBARCH the current gdbarch.
1694 PC the location to step over. */
1695
1696 static int
1697 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1698 {
1699 int hw_step = 1;
1700
1701 if (execution_direction == EXEC_FORWARD
1702 && gdbarch_software_single_step_p (gdbarch)
1703 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1704 {
1705 hw_step = 0;
1706 /* Do not pull these breakpoints until after a `wait' in
1707 `wait_for_inferior'. */
1708 singlestep_breakpoints_inserted_p = 1;
1709 singlestep_ptid = inferior_ptid;
1710 singlestep_pc = pc;
1711 }
1712 return hw_step;
1713 }
1714
1715 /* Return a ptid representing the set of threads that we will proceed,
1716    from the perspective of the user/frontend.  We may actually resume
1717 fewer threads at first, e.g., if a thread is stopped at a
1718 breakpoint that needs stepping-off, but that should not be visible
1719 to the user/frontend, and neither should the frontend/user be
1720 allowed to proceed any of the threads that happen to be stopped for
1721 internal run control handling, if a previous command wanted them
1722 resumed. */
1723
1724 ptid_t
1725 user_visible_resume_ptid (int step)
1726 {
1727 /* By default, resume all threads of all processes. */
1728 ptid_t resume_ptid = RESUME_ALL;
1729
1730 /* Maybe resume only all threads of the current process. */
1731 if (!sched_multi && target_supports_multi_process ())
1732 {
1733 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1734 }
1735
1736 /* Maybe resume a single thread after all. */
1737 if (non_stop)
1738 {
1739 /* With non-stop mode on, threads are always handled
1740 individually. */
1741 resume_ptid = inferior_ptid;
1742 }
1743 else if ((scheduler_mode == schedlock_on)
1744 || (scheduler_mode == schedlock_step
1745 && (step || singlestep_breakpoints_inserted_p)))
1746 {
1747 /* User-settable 'scheduler' mode requires solo thread resume. */
1748 resume_ptid = inferior_ptid;
1749 }
1750
1751 return resume_ptid;
1752 }
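
/* For illustration, the three possible results above, from widest to
   narrowest: RESUME_ALL (the minus_one_ptid wildcard: every thread of
   every process), pid_to_ptid (ptid_get_pid (inferior_ptid)) (every
   thread of the current process), and inferior_ptid (just the current
   thread).  A caller typically feeds the result straight to
   set_running, as resume does below:

     if (!tp->control.in_infcall)
       set_running (user_visible_resume_ptid (entry_step), 1);
*/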
1753
1754 /* Resume the inferior, but allow a QUIT. This is useful if the user
1755 wants to interrupt some lengthy single-stepping operation
1756 (for child processes, the SIGINT goes to the inferior, and so
1757 we get a SIGINT random_signal, but for remote debugging and perhaps
1758 other targets, that's not true).
1759
1760 STEP nonzero if we should step (zero to continue instead).
1761 SIG is the signal to give the inferior (zero for none). */
1762 void
1763 resume (int step, enum gdb_signal sig)
1764 {
1765 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1766 struct regcache *regcache = get_current_regcache ();
1767 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1768 struct thread_info *tp = inferior_thread ();
1769 CORE_ADDR pc = regcache_read_pc (regcache);
1770 struct address_space *aspace = get_regcache_aspace (regcache);
1771 ptid_t resume_ptid;
1772 /* From here on, this represents the caller's step vs continue
1773 request, while STEP represents what we'll actually request the
1774 target to do. STEP can decay from a step to a continue, if e.g.,
1775 we need to implement single-stepping with breakpoints (software
1776 single-step). When deciding whether "set scheduler-locking step"
1777 applies, it's the caller's intention that counts. */
1778 const int entry_step = step;
1779
1780 QUIT;
1781
1782 if (current_inferior ()->waiting_for_vfork_done)
1783 {
1784 /* Don't try to single-step a vfork parent that is waiting for
1785 the child to get out of the shared memory region (by exec'ing
1786 or exiting). This is particularly important on software
1787 single-step archs, as the child process would trip on the
1788 software single step breakpoint inserted for the parent
1789 process. Since the parent will not actually execute any
1790 instruction until the child is out of the shared region (such
1791 are vfork's semantics), it is safe to simply continue it.
1792 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1793 the parent, and tell it to `keep_going', which automatically
1794 sets it stepping again. */
1795 if (debug_infrun)
1796 fprintf_unfiltered (gdb_stdlog,
1797 "infrun: resume : clear step\n");
1798 step = 0;
1799 }
1800
1801 if (debug_infrun)
1802 fprintf_unfiltered (gdb_stdlog,
1803 "infrun: resume (step=%d, signal=%s), "
1804 "trap_expected=%d, current thread [%s] at %s\n",
1805 step, gdb_signal_to_symbol_string (sig),
1806 tp->control.trap_expected,
1807 target_pid_to_str (inferior_ptid),
1808 paddress (gdbarch, pc));
1809
1810 /* Normally, by the time we reach `resume', the breakpoints are either
1811 removed or inserted, as appropriate. The exception is if we're sitting
1812 at a permanent breakpoint; we need to step over it, but permanent
1813 breakpoints can't be removed. So we have to test for it here. */
1814 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1815 {
1816 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1817 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1818 else
1819 error (_("\
1820 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1821 how to step past a permanent breakpoint on this architecture. Try using\n\
1822 a command like `return' or `jump' to continue execution."));
1823 }
1824
1825 /* If we have a breakpoint to step over, make sure to do a single
1826 step only. Same if we have software watchpoints. */
1827 if (tp->control.trap_expected || bpstat_should_step ())
1828 tp->control.may_range_step = 0;
1829
1830 /* If enabled, step over breakpoints by executing a copy of the
1831 instruction at a different address.
1832
1833 We can't use displaced stepping when we have a signal to deliver;
1834 the comments for displaced_step_prepare explain why. The
1835 comments in handle_inferior_event for dealing with 'random
1836 signals' explain what we do instead.
1837
1838 We can't use displaced stepping when we are waiting for a vfork_done
1839 event; displaced stepping would break the vfork child in the same way
1840 a software single-step breakpoint would. */
1841 if (use_displaced_stepping (gdbarch)
1842 && (tp->control.trap_expected
1843 || (step && gdbarch_software_single_step_p (gdbarch)))
1844 && sig == GDB_SIGNAL_0
1845 && !current_inferior ()->waiting_for_vfork_done)
1846 {
1847 struct displaced_step_inferior_state *displaced;
1848
1849 if (!displaced_step_prepare (inferior_ptid))
1850 {
1851 /* Got placed in displaced stepping queue. Will be resumed
1852 later when all the currently queued displaced stepping
1853 requests finish. The thread is not executing at this
1854 point, and the call to set_executing will be made later.
1855 But we need to call set_running here, since from the
1856 user/frontend's point of view, threads were set running.
1857 Unless we're calling an inferior function, as in that
1858 case we pretend the inferior doesn't run at all. */
1859 if (!tp->control.in_infcall)
1860 set_running (user_visible_resume_ptid (entry_step), 1);
1861 discard_cleanups (old_cleanups);
1862 return;
1863 }
1864
1865 /* Update pc to reflect the new address from which we will execute
1866 instructions due to displaced stepping. */
1867 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1868
1869 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1870 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1871 displaced->step_closure);
1872 }
1873
1874 /* Do we need to do it the hard way, w/temp breakpoints? */
1875 else if (step)
1876 step = maybe_software_singlestep (gdbarch, pc);
1877
1878 /* Currently, our software single-step implementation leads to different
1879 results than hardware single-stepping in one situation: when stepping
1880 into delivering a signal which has an associated signal handler,
1881 hardware single-step will stop at the first instruction of the handler,
1882 while software single-step will simply skip execution of the handler.
1883
1884 For now, this difference in behavior is accepted since there is no
1885 easy way to actually implement single-stepping into a signal handler
1886 without kernel support.
1887
1888 However, there is one scenario where this difference leads to follow-on
1889 problems: if we're stepping off a breakpoint by removing all breakpoints
1890 and then single-stepping. In this case, the software single-step
1891 behavior means that even if there is a *breakpoint* in the signal
1892 handler, GDB still would not stop.
1893
1894 Fortunately, we can at least fix this particular issue. We detect
1895 here the case where we are about to deliver a signal while software
1896 single-stepping with breakpoints removed. In this situation, we
1897 revert the decisions to remove all breakpoints and insert single-
1898 step breakpoints, and instead we install a step-resume breakpoint
1899 at the current address, deliver the signal without stepping, and
1900 once we arrive back at the step-resume breakpoint, actually step
1901 over the breakpoint we originally wanted to step over. */
1902 if (singlestep_breakpoints_inserted_p
1903 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1904 {
1905 /* If we have nested signals or a pending signal is delivered
1906 immediately after a handler returns, we might already have
1907 a step-resume breakpoint set on the earlier handler. We cannot
1908 set another step-resume breakpoint; just continue on until the
1909 original breakpoint is hit. */
1910 if (tp->control.step_resume_breakpoint == NULL)
1911 {
1912 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1913 tp->step_after_step_resume_breakpoint = 1;
1914 }
1915
1916 remove_single_step_breakpoints ();
1917 singlestep_breakpoints_inserted_p = 0;
1918
1919 clear_step_over_info ();
1920 tp->control.trap_expected = 0;
1921
1922 insert_breakpoints ();
1923 }
1924
1925 /* If STEP is set, it's a request to use hardware stepping
1926 facilities. But in that case, we should never
1927 use single-step breakpoints. */
1928 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1929
1930 /* Decide the set of threads to ask the target to resume. Start
1931 by assuming everything will be resumed, then narrow the set
1932 by applying increasingly restrictive conditions. */
1933 resume_ptid = user_visible_resume_ptid (entry_step);
1934
1935 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
1936 (e.g., we might need to step over a breakpoint), from the
1937 user/frontend's point of view, all threads in RESUME_PTID are now
1938 running. Unless we're calling an inferior function, as in that
1939 case we pretend the inferior doesn't run at all. */
1940 if (!tp->control.in_infcall)
1941 set_running (resume_ptid, 1);
1942
1943 /* Maybe resume a single thread after all. */
1944 if ((step || singlestep_breakpoints_inserted_p)
1945 && tp->control.trap_expected)
1946 {
1947 /* We're allowing a thread to run past a breakpoint it has
1948 hit, by single-stepping the thread with the breakpoint
1949 removed. In which case, we need to single-step only this
1950 thread, and keep others stopped, as they can miss this
1951 breakpoint if allowed to run. */
1952 resume_ptid = inferior_ptid;
1953 }
1954
1955 if (gdbarch_cannot_step_breakpoint (gdbarch))
1956 {
1957 /* Most targets can step a breakpoint instruction, thus
1958 executing it normally. But if this one cannot, just
1959 continue and we will hit it anyway. */
1960 if (step && breakpoint_inserted_here_p (aspace, pc))
1961 step = 0;
1962 }
1963
1964 if (debug_displaced
1965 && use_displaced_stepping (gdbarch)
1966 && tp->control.trap_expected)
1967 {
1968 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1969 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1970 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1971 gdb_byte buf[4];
1972
1973 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1974 paddress (resume_gdbarch, actual_pc));
1975 read_memory (actual_pc, buf, sizeof (buf));
1976 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1977 }
1978
1979 if (tp->control.may_range_step)
1980 {
1981 /* If we're resuming a thread with the PC out of the step
1982 range, then we're doing some nested/finer run control
1983 operation, like stepping the thread out of the dynamic
1984 linker or the displaced stepping scratch pad. We
1985 shouldn't have allowed a range step then. */
1986 gdb_assert (pc_in_thread_step_range (pc, tp));
1987 }
1988
1989 /* Install inferior's terminal modes. */
1990 target_terminal_inferior ();
1991
1992 /* Avoid confusing the next resume, if the next stop/resume
1993 happens to apply to another thread. */
1994 tp->suspend.stop_signal = GDB_SIGNAL_0;
1995
1996 /* Advise target which signals may be handled silently. If we have
1997 removed breakpoints because we are stepping over one (which can
1998 happen only if we are not using displaced stepping), we need to
1999 receive all signals to avoid accidentally skipping a breakpoint
2000 during execution of a signal handler. */
2001 if ((step || singlestep_breakpoints_inserted_p)
2002 && tp->control.trap_expected
2003 && !use_displaced_stepping (gdbarch))
2004 target_pass_signals (0, NULL);
2005 else
2006 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2007
2008 target_resume (resume_ptid, step, sig);
2009
2010 discard_cleanups (old_cleanups);
2011 }
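
/* To summarize the step-mode decision above (an informal sketch of the
   code in resume, not a separate code path): ENTRY_STEP preserves the
   caller's request for scheduler locking and set_running, while the
   STEP actually handed to the target can change along the way:

     if (displaced stepping is used)
       step = gdbarch_displaced_step_hw_singlestep (gdbarch,
                                                    displaced->step_closure);
     else if (step)
       step = maybe_software_singlestep (gdbarch, pc);
     ...
     target_resume (resume_ptid, step, sig);
*/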
2012 \f
2013 /* Proceeding. */
2014
2015 /* Clear out all variables saying what to do when inferior is continued.
2016 First do this, then set the ones you want, then call `proceed'. */
2017
2018 static void
2019 clear_proceed_status_thread (struct thread_info *tp)
2020 {
2021 if (debug_infrun)
2022 fprintf_unfiltered (gdb_stdlog,
2023 "infrun: clear_proceed_status_thread (%s)\n",
2024 target_pid_to_str (tp->ptid));
2025
2026 tp->control.trap_expected = 0;
2027 tp->control.step_range_start = 0;
2028 tp->control.step_range_end = 0;
2029 tp->control.may_range_step = 0;
2030 tp->control.step_frame_id = null_frame_id;
2031 tp->control.step_stack_frame_id = null_frame_id;
2032 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2033 tp->stop_requested = 0;
2034
2035 tp->control.stop_step = 0;
2036
2037 tp->control.proceed_to_finish = 0;
2038
2039 tp->control.command_interp = NULL;
2040
2041 /* Discard any remaining commands or status from previous stop. */
2042 bpstat_clear (&tp->control.stop_bpstat);
2043 }
2044
2045 static int
2046 clear_proceed_status_callback (struct thread_info *tp, void *data)
2047 {
2048 if (is_exited (tp->ptid))
2049 return 0;
2050
2051 clear_proceed_status_thread (tp);
2052 return 0;
2053 }
2054
2055 void
2056 clear_proceed_status (void)
2057 {
2058 if (!non_stop)
2059 {
2060 /* In all-stop mode, delete the per-thread status of all
2061 threads; even if inferior_ptid is null_ptid, there may be
2062 threads on the list. E.g., we may be launching a new
2063 process while selecting the executable. */
2064 iterate_over_threads (clear_proceed_status_callback, NULL);
2065 }
2066
2067 if (!ptid_equal (inferior_ptid, null_ptid))
2068 {
2069 struct inferior *inferior;
2070
2071 if (non_stop)
2072 {
2073 /* If in non-stop mode, only delete the per-thread status of
2074 the current thread. */
2075 clear_proceed_status_thread (inferior_thread ());
2076 }
2077
2078 inferior = current_inferior ();
2079 inferior->control.stop_soon = NO_STOP_QUIETLY;
2080 }
2081
2082 stop_after_trap = 0;
2083
2084 clear_step_over_info ();
2085
2086 observer_notify_about_to_proceed ();
2087
2088 if (stop_registers)
2089 {
2090 regcache_xfree (stop_registers);
2091 stop_registers = NULL;
2092 }
2093 }
2094
2095 /* Returns true if TP is still stopped at a breakpoint that needs
2096 stepping-over in order to make progress. If the breakpoint is gone
2097 meanwhile, we can skip the whole step-over dance. */
2098
2099 static int
2100 thread_still_needs_step_over (struct thread_info *tp)
2101 {
2102 if (tp->stepping_over_breakpoint)
2103 {
2104 struct regcache *regcache = get_thread_regcache (tp->ptid);
2105
2106 if (breakpoint_here_p (get_regcache_aspace (regcache),
2107 regcache_read_pc (regcache)))
2108 return 1;
2109
2110 tp->stepping_over_breakpoint = 0;
2111 }
2112
2113 return 0;
2114 }
2115
2116 /* Returns true if scheduler locking applies. STEP indicates whether
2117 we're about to do a step/next-like command to a thread. */
2118
2119 static int
2120 schedlock_applies (int step)
2121 {
2122 return (scheduler_mode == schedlock_on
2123 || (scheduler_mode == schedlock_step
2124 && step));
2125 }
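
/* Worked example of the predicate above:

     scheduler_mode == schedlock_on:   schedlock_applies (STEP) is 1 for any STEP
     scheduler_mode == schedlock_step: schedlock_applies (1) is 1,
                                       schedlock_applies (0) is 0
     scheduler_mode == schedlock_off:  schedlock_applies (STEP) is 0 for any STEP
*/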
2126
2127 /* Look for a thread other than EXCEPT that has previously reported a
2128 breakpoint event, and thus needs a step-over in order to make
2129 progress. Returns NULL if none is found. STEP indicates whether
2130 we're about to step the current thread, in order to decide whether
2131 "set scheduler-locking step" applies. */
2132
2133 static struct thread_info *
2134 find_thread_needs_step_over (int step, struct thread_info *except)
2135 {
2136 struct thread_info *tp, *current;
2137
2138 /* With non-stop mode on, threads are always handled individually. */
2139 gdb_assert (! non_stop);
2140
2141 current = inferior_thread ();
2142
2143 /* If scheduler locking applies, we can avoid iterating over all
2144 threads. */
2145 if (schedlock_applies (step))
2146 {
2147 if (except != current
2148 && thread_still_needs_step_over (current))
2149 return current;
2150
2151 return NULL;
2152 }
2153
2154 ALL_THREADS (tp)
2155 {
2156 /* Ignore the EXCEPT thread. */
2157 if (tp == except)
2158 continue;
2159 /* Ignore threads of processes we're not resuming. */
2160 if (!sched_multi
2161 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2162 continue;
2163
2164 if (thread_still_needs_step_over (tp))
2165 return tp;
2166 }
2167
2168 return NULL;
2169 }
2170
2171 /* Basic routine for continuing the program in various fashions.
2172
2173 ADDR is the address to resume at, or -1 for resume where stopped.
2174 SIGGNAL is the signal to give it, or 0 for none,
2175 or -1 for act according to how it stopped.
2176 STEP is nonzero if we should trap after one instruction;
2177 -1 means return after that and print nothing.
2178 You should probably set various step_... variables
2179 before calling here, if you are stepping.
2180
2181 You should call clear_proceed_status before calling proceed; an illustrative call sketch follows this function's body. */
2182
2183 void
2184 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2185 {
2186 struct regcache *regcache;
2187 struct gdbarch *gdbarch;
2188 struct thread_info *tp;
2189 CORE_ADDR pc;
2190 struct address_space *aspace;
2191
2192 /* If we're stopped at a fork/vfork, follow the branch set by the
2193 "set follow-fork-mode" command; otherwise, we'll just proceed
2194 resuming the current thread. */
2195 if (!follow_fork ())
2196 {
2197 /* The target for some reason decided not to resume. */
2198 normal_stop ();
2199 if (target_can_async_p ())
2200 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2201 return;
2202 }
2203
2204 /* We'll update this if & when we switch to a new thread. */
2205 previous_inferior_ptid = inferior_ptid;
2206
2207 regcache = get_current_regcache ();
2208 gdbarch = get_regcache_arch (regcache);
2209 aspace = get_regcache_aspace (regcache);
2210 pc = regcache_read_pc (regcache);
2211 tp = inferior_thread ();
2212
2213 if (step > 0)
2214 step_start_function = find_pc_function (pc);
2215 if (step < 0)
2216 stop_after_trap = 1;
2217
2218 /* Fill in with reasonable starting values. */
2219 init_thread_stepping_state (tp);
2220
2221 if (addr == (CORE_ADDR) -1)
2222 {
2223 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2224 && execution_direction != EXEC_REVERSE)
2225 /* There is a breakpoint at the address we will resume at,
2226 step one instruction before inserting breakpoints so that
2227 we do not stop right away (and report a second hit at this
2228 breakpoint).
2229
2230 Note, we don't do this in reverse, because we won't
2231 actually be executing the breakpoint insn anyway.
2232 We'll be (un-)executing the previous instruction. */
2233 tp->stepping_over_breakpoint = 1;
2234 else if (gdbarch_single_step_through_delay_p (gdbarch)
2235 && gdbarch_single_step_through_delay (gdbarch,
2236 get_current_frame ()))
2237 /* We stepped onto an instruction that needs to be stepped
2238 again before re-inserting the breakpoint, do so. */
2239 tp->stepping_over_breakpoint = 1;
2240 }
2241 else
2242 {
2243 regcache_write_pc (regcache, addr);
2244 }
2245
2246 /* Record the interpreter that issued the execution command that
2247 caused this thread to resume. If the top level interpreter is
2248 MI/async, and the execution command was a CLI command
2249 (next/step/etc.), we'll want to print stop event output to the MI
2250 console channel (the stepped-to line, etc.), as if the user
2251 entered the execution command on a real GDB console. */
2252 inferior_thread ()->control.command_interp = command_interp ();
2253
2254 if (debug_infrun)
2255 fprintf_unfiltered (gdb_stdlog,
2256 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2257 paddress (gdbarch, addr),
2258 gdb_signal_to_symbol_string (siggnal), step);
2259
2260 if (non_stop)
2261 /* In non-stop, each thread is handled individually. The context
2262 must already be set to the right thread here. */
2263 ;
2264 else
2265 {
2266 struct thread_info *step_over;
2267
2268 /* In a multi-threaded task we may select another thread and
2269 then continue or step.
2270
2271 But if the old thread was stopped at a breakpoint, it will
2272 immediately cause another breakpoint stop without any
2273 execution (i.e. it will report a breakpoint hit incorrectly).
2274 So we must step over it first.
2275
2276 Look for a thread other than the current (TP) that reported a
2277 breakpoint hit and hasn't been resumed yet since. */
2278 step_over = find_thread_needs_step_over (step, tp);
2279 if (step_over != NULL)
2280 {
2281 if (debug_infrun)
2282 fprintf_unfiltered (gdb_stdlog,
2283 "infrun: need to step-over [%s] first\n",
2284 target_pid_to_str (step_over->ptid));
2285
2286 /* Store the prev_pc for the stepping thread too, needed by
2287 switch_back_to_stepped_thread. */
2288 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2289 switch_to_thread (step_over->ptid);
2290 tp = step_over;
2291 }
2292 }
2293
2294 /* If we need to step over a breakpoint, and we're not using
2295 displaced stepping to do so, insert all breakpoints (watchpoints,
2296 etc.) but the one we're stepping over, step one instruction, and
2297 then re-insert the breakpoint when that step is finished. */
2298 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2299 {
2300 struct regcache *regcache = get_current_regcache ();
2301
2302 set_step_over_info (get_regcache_aspace (regcache),
2303 regcache_read_pc (regcache));
2304 }
2305 else
2306 clear_step_over_info ();
2307
2308 insert_breakpoints ();
2309
2310 tp->control.trap_expected = tp->stepping_over_breakpoint;
2311
2312 if (!non_stop)
2313 {
2314 /* Pass the last stop signal to the thread we're resuming,
2315 irrespective of whether the current thread is the thread that
2316 got the last event or not. This was historically GDB's
2317 behaviour before keeping a stop_signal per thread. */
2318
2319 struct thread_info *last_thread;
2320 ptid_t last_ptid;
2321 struct target_waitstatus last_status;
2322
2323 get_last_target_status (&last_ptid, &last_status);
2324 if (!ptid_equal (inferior_ptid, last_ptid)
2325 && !ptid_equal (last_ptid, null_ptid)
2326 && !ptid_equal (last_ptid, minus_one_ptid))
2327 {
2328 last_thread = find_thread_ptid (last_ptid);
2329 if (last_thread)
2330 {
2331 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2332 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2333 }
2334 }
2335 }
2336
2337 if (siggnal != GDB_SIGNAL_DEFAULT)
2338 tp->suspend.stop_signal = siggnal;
2339 /* If this signal should not be seen by the program,
2340 give it zero. Used for debugging signals. */
2341 else if (!signal_program[tp->suspend.stop_signal])
2342 tp->suspend.stop_signal = GDB_SIGNAL_0;
2343
2344 annotate_starting ();
2345
2346 /* Make sure that output from GDB appears before output from the
2347 inferior. */
2348 gdb_flush (gdb_stdout);
2349
2350 /* Refresh prev_pc value just prior to resuming. This used to be
2351 done in stop_waiting, however, setting prev_pc there did not handle
2352 scenarios such as inferior function calls or returning from
2353 a function via the return command. In those cases, the prev_pc
2354 value was not set properly for subsequent commands. The prev_pc value
2355 is used to initialize the starting line number in the ecs. With an
2356 invalid value, the gdb next command ends up stopping at the position
2357 represented by the next line table entry past our start position.
2358 On platforms that generate one line table entry per line, this
2359 is not a problem. However, on the ia64, the compiler generates
2360 extraneous line table entries that do not increase the line number.
2361 When we issue the gdb next command on the ia64 after an inferior call
2362 or a return command, we often end up a few instructions forward, still
2363 within the original line we started.
2364
2365 An attempt was made to refresh the prev_pc at the same time the
2366 execution_control_state is initialized (for instance, just before
2367 waiting for an inferior event). But this approach did not work
2368 because of platforms that use ptrace, where the pc register cannot
2369 be read unless the inferior is stopped. At that point, we are not
2370 guaranteed the inferior is stopped and so the regcache_read_pc() call
2371 can fail. Setting the prev_pc value here ensures the value is updated
2372 correctly when the inferior is stopped. */
2373 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2374
2375 /* Reset to normal state. */
2376 init_infwait_state ();
2377
2378 /* Resume inferior. */
2379 resume (tp->control.trap_expected || step || bpstat_should_step (),
2380 tp->suspend.stop_signal);
2381
2382 /* Wait for it to stop (if not standalone)
2383 and in any case decode why it stopped, and act accordingly. */
2384 /* Do this only if we are not using the event loop, or if the target
2385 does not support asynchronous execution. */
2386 if (!target_can_async_p ())
2387 {
2388 wait_for_inferior ();
2389 normal_stop ();
2390 }
2391 }
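
/* Illustrative call sketch for proceed.  This mirrors what typical
   callers in infcmd.c do (shown here as an informal example, not a
   copy of that code): a plain "continue" resumes from wherever the
   inferior stopped, letting the recorded stop signal be re-delivered
   or suppressed according to "handle":

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);

   while a single-instruction step passes STEP as 1 instead, again
   preceded by clear_proceed_status.  */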
2392 \f
2393
2394 /* Start remote-debugging of a machine over a serial link. */
2395
2396 void
2397 start_remote (int from_tty)
2398 {
2399 struct inferior *inferior;
2400
2401 inferior = current_inferior ();
2402 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2403
2404 /* Always go on waiting for the target, regardless of the mode. */
2405 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2406 indicate to wait_for_inferior that a target should timeout if
2407 nothing is returned (instead of just blocking). Because of this,
2408 targets expecting an immediate response need to, internally, set
2409 things up so that the target_wait() is forced to eventually
2410 timeout. */
2411 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2412 differentiate to its caller what the state of the target is after
2413 the initial open has been performed. Here we're assuming that
2414 the target has stopped. It should be possible to eventually have
2415 target_open() return to the caller an indication that the target
2416 is currently running and GDB state should be set to the same as
2417 for an async run. */
2418 wait_for_inferior ();
2419
2420 /* Now that the inferior has stopped, do any bookkeeping like
2421 loading shared libraries. We want to do this before normal_stop,
2422 so that the displayed frame is up to date. */
2423 post_create_inferior (&current_target, from_tty);
2424
2425 normal_stop ();
2426 }
2427
2428 /* Initialize static vars when a new inferior begins. */
2429
2430 void
2431 init_wait_for_inferior (void)
2432 {
2433 /* These are meaningless until the first time through wait_for_inferior. */
2434
2435 breakpoint_init_inferior (inf_starting);
2436
2437 clear_proceed_status ();
2438
2439 target_last_wait_ptid = minus_one_ptid;
2440
2441 previous_inferior_ptid = inferior_ptid;
2442 init_infwait_state ();
2443
2444 /* Discard any skipped inlined frames. */
2445 clear_inline_frame_state (minus_one_ptid);
2446
2447 singlestep_ptid = null_ptid;
2448 singlestep_pc = 0;
2449 }
2450
2451 \f
2452 /* This enum encodes possible reasons for doing a target_wait, so that
2453 wfi can call target_wait in one place. (Ultimately the call will be
2454 moved out of the infinite loop entirely.) */
2455
2456 enum infwait_states
2457 {
2458 infwait_normal_state,
2459 infwait_step_watch_state,
2460 infwait_nonstep_watch_state
2461 };
2462
2463 /* The PTID we'll do a target_wait on. */
2464 ptid_t waiton_ptid;
2465
2466 /* Current inferior wait state. */
2467 static enum infwait_states infwait_state;
2468
2469 /* Data to be passed around while handling an event. This data is
2470 discarded between events. */
2471 struct execution_control_state
2472 {
2473 ptid_t ptid;
2474 /* The thread that got the event, if this was a thread event; NULL
2475 otherwise. */
2476 struct thread_info *event_thread;
2477
2478 struct target_waitstatus ws;
2479 int stop_func_filled_in;
2480 CORE_ADDR stop_func_start;
2481 CORE_ADDR stop_func_end;
2482 const char *stop_func_name;
2483 int wait_some_more;
2484
2485 /* We were in infwait_step_watch_state or
2486 infwait_nonstep_watch_state state, and the thread reported an
2487 event. */
2488 int stepped_after_stopped_by_watchpoint;
2489
2490 /* True if the event thread hit the single-step breakpoint of
2491 another thread. Thus the event doesn't cause a stop, the thread
2492 needs to be single-stepped past the single-step breakpoint before
2493 we can switch back to the original stepping thread. */
2494 int hit_singlestep_breakpoint;
2495 };
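
/* The lifetime of one of these is a single event.  The driver loops
   in this file (wait_for_inferior, fetch_inferior_event,
   prepare_for_detach) all follow roughly the same shape, sketched
   here:

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       ...  the command is complete; stop and report  ...
*/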
2496
2497 static void handle_inferior_event (struct execution_control_state *ecs);
2498
2499 static void handle_step_into_function (struct gdbarch *gdbarch,
2500 struct execution_control_state *ecs);
2501 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2502 struct execution_control_state *ecs);
2503 static void handle_signal_stop (struct execution_control_state *ecs);
2504 static void check_exception_resume (struct execution_control_state *,
2505 struct frame_info *);
2506
2507 static void end_stepping_range (struct execution_control_state *ecs);
2508 static void stop_waiting (struct execution_control_state *ecs);
2509 static void prepare_to_wait (struct execution_control_state *ecs);
2510 static void keep_going (struct execution_control_state *ecs);
2511 static void process_event_stop_test (struct execution_control_state *ecs);
2512 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2513
2514 /* Callback for iterate_over_threads. If the thread is stopped, but
2515 the user/frontend doesn't know about that yet, go through
2516 normal_stop, as if the thread had just stopped now. ARG points at
2517 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2518 ptid_is_pid(PTID) is true, applies to all threads of the process
2519 pointed at by PTID. Otherwise, apply only to the thread pointed at by
2520 PTID. */
2521
2522 static int
2523 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2524 {
2525 ptid_t ptid = * (ptid_t *) arg;
2526
2527 if ((ptid_equal (info->ptid, ptid)
2528 || ptid_equal (minus_one_ptid, ptid)
2529 || (ptid_is_pid (ptid)
2530 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2531 && is_running (info->ptid)
2532 && !is_executing (info->ptid))
2533 {
2534 struct cleanup *old_chain;
2535 struct execution_control_state ecss;
2536 struct execution_control_state *ecs = &ecss;
2537
2538 memset (ecs, 0, sizeof (*ecs));
2539
2540 old_chain = make_cleanup_restore_current_thread ();
2541
2542 overlay_cache_invalid = 1;
2543 /* Flush target cache before starting to handle each event.
2544 Target was running and cache could be stale. This is just a
2545 heuristic. Running threads may modify target memory, but we
2546 don't get any event. */
2547 target_dcache_invalidate ();
2548
2549 /* Go through handle_inferior_event/normal_stop, so we always
2550 have consistent output as if the stop event had been
2551 reported. */
2552 ecs->ptid = info->ptid;
2553 ecs->event_thread = find_thread_ptid (info->ptid);
2554 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2555 ecs->ws.value.sig = GDB_SIGNAL_0;
2556
2557 handle_inferior_event (ecs);
2558
2559 if (!ecs->wait_some_more)
2560 {
2561 struct thread_info *tp;
2562
2563 normal_stop ();
2564
2565 /* Finish off the continuations. */
2566 tp = inferior_thread ();
2567 do_all_intermediate_continuations_thread (tp, 1);
2568 do_all_continuations_thread (tp, 1);
2569 }
2570
2571 do_cleanups (old_chain);
2572 }
2573
2574 return 0;
2575 }
2576
2577 /* This function is attached as a "thread_stop_requested" observer.
2578 Clean up local state that assumed the PTID was to be resumed, and
2579 report the stop to the frontend. */
2580
2581 static void
2582 infrun_thread_stop_requested (ptid_t ptid)
2583 {
2584 struct displaced_step_inferior_state *displaced;
2585
2586 /* PTID was requested to stop. Remove it from the displaced
2587 stepping queue, so we don't try to resume it automatically. */
2588
2589 for (displaced = displaced_step_inferior_states;
2590 displaced;
2591 displaced = displaced->next)
2592 {
2593 struct displaced_step_request *it, **prev_next_p;
2594
2595 it = displaced->step_request_queue;
2596 prev_next_p = &displaced->step_request_queue;
2597 while (it)
2598 {
2599 if (ptid_match (it->ptid, ptid))
2600 {
2601 *prev_next_p = it->next;
2602 it->next = NULL;
2603 xfree (it);
2604 }
2605 else
2606 {
2607 prev_next_p = &it->next;
2608 }
2609
2610 it = *prev_next_p;
2611 }
2612 }
2613
2614 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2615 }
2616
2617 static void
2618 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2619 {
2620 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2621 nullify_last_target_wait_ptid ();
2622 }
2623
2624 /* Callback for iterate_over_threads. */
2625
2626 static int
2627 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2628 {
2629 if (is_exited (info->ptid))
2630 return 0;
2631
2632 delete_step_resume_breakpoint (info);
2633 delete_exception_resume_breakpoint (info);
2634 return 0;
2635 }
2636
2637 /* In all-stop, delete the step resume breakpoint of any thread that
2638 had one. In non-stop, delete the step resume breakpoint of the
2639 thread that just stopped. */
2640
2641 static void
2642 delete_step_thread_step_resume_breakpoint (void)
2643 {
2644 if (!target_has_execution
2645 || ptid_equal (inferior_ptid, null_ptid))
2646 /* If the inferior has exited, we have already deleted the step
2647 resume breakpoints out of GDB's lists. */
2648 return;
2649
2650 if (non_stop)
2651 {
2652 /* If in non-stop mode, only delete the step-resume or
2653 longjmp-resume breakpoint of the thread that just stopped
2654 stepping. */
2655 struct thread_info *tp = inferior_thread ();
2656
2657 delete_step_resume_breakpoint (tp);
2658 delete_exception_resume_breakpoint (tp);
2659 }
2660 else
2661 /* In all-stop mode, delete all step-resume and longjmp-resume
2662 breakpoints of any thread that had them. */
2663 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2664 }
2665
2666 /* A cleanup wrapper. */
2667
2668 static void
2669 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2670 {
2671 delete_step_thread_step_resume_breakpoint ();
2672 }
2673
2674 /* Pretty print the results of target_wait, for debugging purposes. */
2675
2676 static void
2677 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2678 const struct target_waitstatus *ws)
2679 {
2680 char *status_string = target_waitstatus_to_string (ws);
2681 struct ui_file *tmp_stream = mem_fileopen ();
2682 char *text;
2683
2684 /* The text is split over several lines because it was getting too long.
2685 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2686 output as a unit; we want only one timestamp printed if debug_timestamp
2687 is set. */
2688
2689 fprintf_unfiltered (tmp_stream,
2690 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2691 if (ptid_get_pid (waiton_ptid) != -1)
2692 fprintf_unfiltered (tmp_stream,
2693 " [%s]", target_pid_to_str (waiton_ptid));
2694 fprintf_unfiltered (tmp_stream, ", status) =\n");
2695 fprintf_unfiltered (tmp_stream,
2696 "infrun: %d [%s],\n",
2697 ptid_get_pid (result_ptid),
2698 target_pid_to_str (result_ptid));
2699 fprintf_unfiltered (tmp_stream,
2700 "infrun: %s\n",
2701 status_string);
2702
2703 text = ui_file_xstrdup (tmp_stream, NULL);
2704
2705 /* This uses %s in part to handle %'s in the text, but also to avoid
2706 a gcc error: the format attribute requires a string literal. */
2707 fprintf_unfiltered (gdb_stdlog, "%s", text);
2708
2709 xfree (status_string);
2710 xfree (text);
2711 ui_file_delete (tmp_stream);
2712 }
2713
2714 /* Prepare and stabilize the inferior for detaching it. E.g.,
2715 detaching while a thread is displaced stepping is a recipe for
2716 crashing it, as nothing would readjust the PC out of the scratch
2717 pad. */
2718
2719 void
2720 prepare_for_detach (void)
2721 {
2722 struct inferior *inf = current_inferior ();
2723 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2724 struct cleanup *old_chain_1;
2725 struct displaced_step_inferior_state *displaced;
2726
2727 displaced = get_displaced_stepping_state (inf->pid);
2728
2729 /* Is any thread of this process displaced stepping? If not,
2730 there's nothing else to do. */
2731 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2732 return;
2733
2734 if (debug_infrun)
2735 fprintf_unfiltered (gdb_stdlog,
2736 "displaced-stepping in-process while detaching");
2737
2738 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2739 inf->detaching = 1;
2740
2741 while (!ptid_equal (displaced->step_ptid, null_ptid))
2742 {
2743 struct cleanup *old_chain_2;
2744 struct execution_control_state ecss;
2745 struct execution_control_state *ecs;
2746
2747 ecs = &ecss;
2748 memset (ecs, 0, sizeof (*ecs));
2749
2750 overlay_cache_invalid = 1;
2751 /* Flush target cache before starting to handle each event.
2752 Target was running and cache could be stale. This is just a
2753 heuristic. Running threads may modify target memory, but we
2754 don't get any event. */
2755 target_dcache_invalidate ();
2756
2757 if (deprecated_target_wait_hook)
2758 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2759 else
2760 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2761
2762 if (debug_infrun)
2763 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2764
2765 /* If an error happens while handling the event, propagate GDB's
2766 knowledge of the executing state to the frontend/user running
2767 state. */
2768 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2769 &minus_one_ptid);
2770
2771 /* Now figure out what to do with the result. */
2772 handle_inferior_event (ecs);
2773
2774 /* No error, don't finish the state yet. */
2775 discard_cleanups (old_chain_2);
2776
2777 /* Breakpoints and watchpoints are not installed on the target
2778 at this point, and signals are passed directly to the
2779 inferior, so this must mean the process is gone. */
2780 if (!ecs->wait_some_more)
2781 {
2782 discard_cleanups (old_chain_1);
2783 error (_("Program exited while detaching"));
2784 }
2785 }
2786
2787 discard_cleanups (old_chain_1);
2788 }
2789
2790 /* Wait for control to return from inferior to debugger.
2791
2792 If inferior gets a signal, we may decide to start it up again
2793 instead of returning. That is why there is a loop in this function.
2794 When this function actually returns it means the inferior
2795 should be left stopped and GDB should read more commands. */
2796
2797 void
2798 wait_for_inferior (void)
2799 {
2800 struct cleanup *old_cleanups;
2801
2802 if (debug_infrun)
2803 fprintf_unfiltered
2804 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2805
2806 old_cleanups =
2807 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2808
2809 while (1)
2810 {
2811 struct execution_control_state ecss;
2812 struct execution_control_state *ecs = &ecss;
2813 struct cleanup *old_chain;
2814
2815 memset (ecs, 0, sizeof (*ecs));
2816
2817 overlay_cache_invalid = 1;
2818
2819 /* Flush target cache before starting to handle each event.
2820 Target was running and cache could be stale. This is just a
2821 heuristic. Running threads may modify target memory, but we
2822 don't get any event. */
2823 target_dcache_invalidate ();
2824
2825 if (deprecated_target_wait_hook)
2826 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2827 else
2828 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2829
2830 if (debug_infrun)
2831 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2832
2833 /* If an error happens while handling the event, propagate GDB's
2834 knowledge of the executing state to the frontend/user running
2835 state. */
2836 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2837
2838 /* Now figure out what to do with the result. */
2839 handle_inferior_event (ecs);
2840
2841 /* No error, don't finish the state yet. */
2842 discard_cleanups (old_chain);
2843
2844 if (!ecs->wait_some_more)
2845 break;
2846 }
2847
2848 do_cleanups (old_cleanups);
2849 }
2850
2851 /* Asynchronous version of wait_for_inferior. It is called by the
2852 event loop whenever a change of state is detected on the file
2853 descriptor corresponding to the target. It can be called more than
2854 once to complete a single execution command. In such cases we need to
2855 keep state between calls (the local ECSS is reinitialized each time).
2856 If this is the last call for a single execution command, then
2857 report to the user that the inferior has stopped, and do the
2858 necessary cleanups. */
2859
2860 void
2861 fetch_inferior_event (void *client_data)
2862 {
2863 struct execution_control_state ecss;
2864 struct execution_control_state *ecs = &ecss;
2865 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2866 struct cleanup *ts_old_chain;
2867 int was_sync = sync_execution;
2868 int cmd_done = 0;
2869
2870 memset (ecs, 0, sizeof (*ecs));
2871
2872 /* We're handling a live event, so make sure we're doing live
2873 debugging. If we're looking at traceframes while the target is
2874 running, we're going to need to get back to that mode after
2875 handling the event. */
2876 if (non_stop)
2877 {
2878 make_cleanup_restore_current_traceframe ();
2879 set_current_traceframe (-1);
2880 }
2881
2882 if (non_stop)
2883 /* In non-stop mode, the user/frontend should not notice a thread
2884 switch due to internal events. Make sure we revert to the
2885 user selected thread and frame after handling the event and
2886 running any breakpoint commands. */
2887 make_cleanup_restore_current_thread ();
2888
2889 overlay_cache_invalid = 1;
2890 /* Flush target cache before starting to handle each event. Target
2891 was running and cache could be stale. This is just a heuristic.
2892 Running threads may modify target memory, but we don't get any
2893 event. */
2894 target_dcache_invalidate ();
2895
2896 make_cleanup_restore_integer (&execution_direction);
2897 execution_direction = target_execution_direction ();
2898
2899 if (deprecated_target_wait_hook)
2900 ecs->ptid =
2901 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2902 else
2903 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2904
2905 if (debug_infrun)
2906 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2907
2908 /* If an error happens while handling the event, propagate GDB's
2909 knowledge of the executing state to the frontend/user running
2910 state. */
2911 if (!non_stop)
2912 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2913 else
2914 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2915
2916 /* This cleanup runs before the make_cleanup_restore_current_thread above,
2917 so that it still applies to the thread which has thrown the exception. */
2918 make_bpstat_clear_actions_cleanup ();
2919
2920 /* Now figure out what to do with the result. */
2921 handle_inferior_event (ecs);
2922
2923 if (!ecs->wait_some_more)
2924 {
2925 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2926
2927 delete_step_thread_step_resume_breakpoint ();
2928
2929 /* We may not find an inferior if this was a process exit. */
2930 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2931 normal_stop ();
2932
2933 if (target_has_execution
2934 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2935 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2936 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2937 && ecs->event_thread->step_multi
2938 && ecs->event_thread->control.stop_step)
2939 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2940 else
2941 {
2942 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2943 cmd_done = 1;
2944 }
2945 }
2946
2947 /* No error, don't finish the thread states yet. */
2948 discard_cleanups (ts_old_chain);
2949
2950 /* Revert thread and frame. */
2951 do_cleanups (old_chain);
2952
2953 /* If the inferior was in sync execution mode, and now isn't,
2954 restore the prompt (a synchronous execution command has finished,
2955 and we're ready for input). */
2956 if (interpreter_async && was_sync && !sync_execution)
2957 observer_notify_sync_execution_done ();
2958
2959 if (cmd_done
2960 && !was_sync
2961 && exec_done_display_p
2962 && (ptid_equal (inferior_ptid, null_ptid)
2963 || !is_running (inferior_ptid)))
2964 printf_unfiltered (_("completed.\n"));
2965 }
2966
2967 /* Record the frame and location we're currently stepping through. */
2968 void
2969 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2970 {
2971 struct thread_info *tp = inferior_thread ();
2972
2973 tp->control.step_frame_id = get_frame_id (frame);
2974 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2975
2976 tp->current_symtab = sal.symtab;
2977 tp->current_line = sal.line;
2978 }
2979
2980 /* Clear context switchable stepping state. */
2981
2982 void
2983 init_thread_stepping_state (struct thread_info *tss)
2984 {
2985 tss->stepping_over_breakpoint = 0;
2986 tss->step_after_step_resume_breakpoint = 0;
2987 }
2988
2989 /* Set the cached copy of the last ptid/waitstatus. */
2990
2991 static void
2992 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2993 {
2994 target_last_wait_ptid = ptid;
2995 target_last_waitstatus = status;
2996 }
2997
2998 /* Return the cached copy of the last pid/waitstatus returned by
2999 target_wait()/deprecated_target_wait_hook(). The data is actually
3000 cached by handle_inferior_event(), which gets called immediately
3001 after target_wait()/deprecated_target_wait_hook(). */
3002
3003 void
3004 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3005 {
3006 *ptidp = target_last_wait_ptid;
3007 *status = target_last_waitstatus;
3008 }
3009
3010 void
3011 nullify_last_target_wait_ptid (void)
3012 {
3013 target_last_wait_ptid = minus_one_ptid;
3014 }
3015
3016 /* Switch thread contexts. */
3017
3018 static void
3019 context_switch (ptid_t ptid)
3020 {
3021 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3022 {
3023 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3024 target_pid_to_str (inferior_ptid));
3025 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3026 target_pid_to_str (ptid));
3027 }
3028
3029 switch_to_thread (ptid);
3030 }
3031
3032 static void
3033 adjust_pc_after_break (struct execution_control_state *ecs)
3034 {
3035 struct regcache *regcache;
3036 struct gdbarch *gdbarch;
3037 struct address_space *aspace;
3038 CORE_ADDR breakpoint_pc, decr_pc;
3039
3040 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3041 we aren't, just return.
3042
3043 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3044 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3045 implemented by software breakpoints should be handled through the normal
3046 breakpoint layer.
3047
3048 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3049 different signals (SIGILL or SIGEMT for instance), but it is less
3050 clear where the PC is pointing afterwards. It may not match
3051 gdbarch_decr_pc_after_break. I don't know any specific target that
3052 generates these signals at breakpoints (the code has been in GDB since at
3053 least 1992) so I can not guess how to handle them here.
3054
3055 In earlier versions of GDB, a target with
3056 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3057 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3058 target with both of these set in GDB history, and it seems unlikely to be
3059 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3060
3061 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3062 return;
3063
3064 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3065 return;
3066
3067 /* In reverse execution, when a breakpoint is hit, the instruction
3068 under it has already been de-executed. The reported PC always
3069 points at the breakpoint address, so adjusting it further would
3070 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3071 architecture:
3072
3073 B1 0x08000000 : INSN1
3074 B2 0x08000001 : INSN2
3075 0x08000002 : INSN3
3076 PC -> 0x08000003 : INSN4
3077
3078 Say you're stopped at 0x08000003 as above. Reverse continuing
3079 from that point should hit B2 as below. Reading the PC when the
3080 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3081 been de-executed already.
3082
3083 B1 0x08000000 : INSN1
3084 B2 PC -> 0x08000001 : INSN2
3085 0x08000002 : INSN3
3086 0x08000003 : INSN4
3087
3088 We can't apply the same logic as for forward execution, because
3089 we would wrongly adjust the PC to 0x08000000, since there's a
3090 breakpoint at PC - 1. We'd then report a hit on B1, although
3091 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3092 behaviour. */
3093 if (execution_direction == EXEC_REVERSE)
3094 return;
3095
3096 /* If this target does not decrement the PC after breakpoints, then
3097 we have nothing to do. */
3098 regcache = get_thread_regcache (ecs->ptid);
3099 gdbarch = get_regcache_arch (regcache);
3100
3101 decr_pc = target_decr_pc_after_break (gdbarch);
3102 if (decr_pc == 0)
3103 return;
3104
3105 aspace = get_regcache_aspace (regcache);
3106
3107 /* Find the location where (if we've hit a breakpoint) the
3108 breakpoint would be. */
3109 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3110
3111 /* Check whether there actually is a software breakpoint inserted at
3112 that location.
3113
3114 If in non-stop mode, a race condition is possible where we've
3115 removed a breakpoint, but stop events for that breakpoint were
3116 already queued and arrive later. To suppress those spurious
3117 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3118 and retire them after a number of stop events are reported. */
3119 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3120 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3121 {
3122 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3123
3124 if (record_full_is_used ())
3125 record_full_gdb_operation_disable_set ();
3126
3127 /* When using hardware single-step, a SIGTRAP is reported for both
3128 a completed single-step and a software breakpoint. Need to
3129 differentiate between the two, as the latter needs adjusting
3130 but the former does not.
3131
3132 The SIGTRAP can be due to a completed hardware single-step only if
3133 - we didn't insert software single-step breakpoints
3134 - the thread to be examined is still the current thread
3135 - this thread is currently being stepped
3136
3137 If any of these events did not occur, we must have stopped due
3138 to hitting a software breakpoint, and have to back up to the
3139 breakpoint address.
3140
3141 As a special case, we could have hardware single-stepped a
3142 software breakpoint. In this case (prev_pc == breakpoint_pc),
3143 we also need to back up to the breakpoint address. */
3144
3145 if (singlestep_breakpoints_inserted_p
3146 || !ptid_equal (ecs->ptid, inferior_ptid)
3147 || !currently_stepping (ecs->event_thread)
3148 || ecs->event_thread->prev_pc == breakpoint_pc)
3149 regcache_write_pc (regcache, breakpoint_pc);
3150
3151 do_cleanups (old_cleanups);
3152 }
3153 }
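
/* Forward-execution example of the adjustment above, assuming an
   architecture where target_decr_pc_after_break returns 1 (as on
   x86, where the int3 trap reports the address just past the
   breakpoint instruction):

     breakpoint inserted at    0x08000000
     SIGTRAP reported with PC  0x08000001
     breakpoint_pc = 0x08000001 - 1 = 0x08000000

   Since a software breakpoint is indeed inserted at breakpoint_pc,
   the PC is rewound to 0x08000000, and the breakpoint layer sees the
   stop at the breakpoint's own address.  */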
3154
3155 static void
3156 init_infwait_state (void)
3157 {
3158 waiton_ptid = pid_to_ptid (-1);
3159 infwait_state = infwait_normal_state;
3160 }
3161
3162 static int
3163 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3164 {
3165 for (frame = get_prev_frame (frame);
3166 frame != NULL;
3167 frame = get_prev_frame (frame))
3168 {
3169 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3170 return 1;
3171 if (get_frame_type (frame) != INLINE_FRAME)
3172 break;
3173 }
3174
3175 return 0;
3176 }
3177
3178 /* Auxiliary function that handles syscall entry/return events.
3179 It returns 1 if the inferior should keep going (and GDB
3180 should ignore the event), or 0 if the event deserves to be
3181 processed. */
3182
3183 static int
3184 handle_syscall_event (struct execution_control_state *ecs)
3185 {
3186 struct regcache *regcache;
3187 int syscall_number;
3188
3189 if (!ptid_equal (ecs->ptid, inferior_ptid))
3190 context_switch (ecs->ptid);
3191
3192 regcache = get_thread_regcache (ecs->ptid);
3193 syscall_number = ecs->ws.value.syscall_number;
3194 stop_pc = regcache_read_pc (regcache);
3195
3196 if (catch_syscall_enabled () > 0
3197 && catching_syscall_number (syscall_number) > 0)
3198 {
3199 if (debug_infrun)
3200 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3201 syscall_number);
3202
3203 ecs->event_thread->control.stop_bpstat
3204 = bpstat_stop_status (get_regcache_aspace (regcache),
3205 stop_pc, ecs->ptid, &ecs->ws);
3206
3207 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3208 {
3209 /* Catchpoint hit. */
3210 return 0;
3211 }
3212 }
3213
3214 /* If no catchpoint triggered for this, then keep going. */
3215 keep_going (ecs);
3216 return 1;
3217 }
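
/* A sketch of how an event dispatcher consumes the return value above
   (handle_inferior_event does essentially this for the syscall
   waitkinds; this is an illustration, not a copy of that code):

     if (handle_syscall_event (ecs))
       return;

   i.e. a nonzero return means the event was uninteresting and the
   inferior was already resumed via keep_going, while a zero return
   means a syscall catchpoint triggered and the stop must be
   processed.  */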
3218
3219 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3220
3221 static void
3222 fill_in_stop_func (struct gdbarch *gdbarch,
3223 struct execution_control_state *ecs)
3224 {
3225 if (!ecs->stop_func_filled_in)
3226 {
3227 /* Don't care about return value; stop_func_start and stop_func_name
3228 will both be 0 if it doesn't work. */
3229 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3230 &ecs->stop_func_start, &ecs->stop_func_end);
3231 ecs->stop_func_start
3232 += gdbarch_deprecated_function_start_offset (gdbarch);
3233
3234 if (gdbarch_skip_entrypoint_p (gdbarch))
3235 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3236 ecs->stop_func_start);
3237
3238 ecs->stop_func_filled_in = 1;
3239 }
3240 }
3241
3242
3243 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3244
3245 static enum stop_kind
3246 get_inferior_stop_soon (ptid_t ptid)
3247 {
3248 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3249
3250 gdb_assert (inf != NULL);
3251 return inf->control.stop_soon;
3252 }
3253
3254 /* Given an execution control state that has been freshly filled in by
3255 an event from the inferior, figure out what it means and take
3256 appropriate action.
3257
3258 The alternatives are:
3259
3260 1) stop_waiting and return; to really stop and return to the
3261 debugger.
3262
3263 2) keep_going and return; to wait for the next event (set
3264 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3265 once). */
3266
3267 static void
3268 handle_inferior_event (struct execution_control_state *ecs)
3269 {
3270 enum stop_kind stop_soon;
3271
3272 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3273 {
3274 /* We had an event in the inferior, but we are not interested in
3275 handling it at this level. The lower layers have already
3276 done what needs to be done, if anything.
3277
3278 One of the possible circumstances for this is when the
3279 inferior produces output for the console. The inferior has
3280 not stopped, and we are ignoring the event. Another possible
3281 circumstance is any event which the lower level knows will be
3282 reported multiple times without an intervening resume. */
3283 if (debug_infrun)
3284 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3285 prepare_to_wait (ecs);
3286 return;
3287 }
3288
3289 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3290 && target_can_async_p () && !sync_execution)
3291 {
3292 /* There were no unwaited-for children left in the target, but
3293 we're not synchronously waiting for events either. Just
3294 ignore. Otherwise, if we were running a synchronous
3295 execution command, we need to cancel it and give the user
3296 back the terminal. */
3297 if (debug_infrun)
3298 fprintf_unfiltered (gdb_stdlog,
3299 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3300 prepare_to_wait (ecs);
3301 return;
3302 }
3303
3304 /* Cache the last pid/waitstatus. */
3305 set_last_target_status (ecs->ptid, ecs->ws);
3306
3307 /* Always clear state belonging to the previous time we stopped. */
3308 stop_stack_dummy = STOP_NONE;
3309
3310 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3311 {
3312 /* No unwaited-for children left. IOW, all resumed children
3313 have exited. */
3314 if (debug_infrun)
3315 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3316
3317 stop_print_frame = 0;
3318 stop_waiting (ecs);
3319 return;
3320 }
3321
3322 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3323 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3324 {
3325 ecs->event_thread = find_thread_ptid (ecs->ptid);
3326 /* If it's a new thread, add it to the thread database. */
3327 if (ecs->event_thread == NULL)
3328 ecs->event_thread = add_thread (ecs->ptid);
3329
3330 /* Disable range stepping. If the next step request could use a
3331 range, this will be re-enabled then. */
3332 ecs->event_thread->control.may_range_step = 0;
3333 }
3334
3335 /* Dependent on valid ECS->EVENT_THREAD. */
3336 adjust_pc_after_break (ecs);
3337
3338 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3339 reinit_frame_cache ();
3340
3341 breakpoint_retire_moribund ();
3342
3343 /* First, distinguish signals caused by the debugger from signals
3344 that have to do with the program's own actions. Note that
3345 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3346 on the operating system version. Here we detect when a SIGILL or
3347 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3348 something similar for SIGSEGV, since a SIGSEGV will be generated
3349 when we're trying to execute a breakpoint instruction on a
3350 non-executable stack. This happens for call dummy breakpoints
3351 for architectures like SPARC that place call dummies on the
3352 stack. */
3353 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3354 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3355 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3356 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3357 {
3358 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3359
3360 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3361 regcache_read_pc (regcache)))
3362 {
3363 if (debug_infrun)
3364 fprintf_unfiltered (gdb_stdlog,
3365 "infrun: Treating signal as SIGTRAP\n");
3366 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3367 }
3368 }
3369
3370 /* Mark the non-executing threads accordingly. In all-stop, all
3371 threads of all processes are stopped when we get any event
3372 reported. In non-stop mode, only the event thread stops. If
3373 we're handling a process exit in non-stop mode, there's nothing
3374 to do, as threads of the dead process are gone, and threads of
3375 any other process were left running. */
3376 if (!non_stop)
3377 set_executing (minus_one_ptid, 0);
3378 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3379 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3380 set_executing (ecs->ptid, 0);
3381
3382 switch (infwait_state)
3383 {
3384 case infwait_normal_state:
3385 if (debug_infrun)
3386 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3387 break;
3388
3389 case infwait_step_watch_state:
3390 if (debug_infrun)
3391 fprintf_unfiltered (gdb_stdlog,
3392 "infrun: infwait_step_watch_state\n");
3393
3394 ecs->stepped_after_stopped_by_watchpoint = 1;
3395 break;
3396
3397 case infwait_nonstep_watch_state:
3398 if (debug_infrun)
3399 fprintf_unfiltered (gdb_stdlog,
3400 "infrun: infwait_nonstep_watch_state\n");
3401 insert_breakpoints ();
3402
3403 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3404 handle things like signals arriving and other things happening
3405 in combination correctly? */
3406 ecs->stepped_after_stopped_by_watchpoint = 1;
3407 break;
3408
3409 default:
3410 internal_error (__FILE__, __LINE__, _("bad switch"));
3411 }
3412
3413 infwait_state = infwait_normal_state;
3414 waiton_ptid = pid_to_ptid (-1);
3415
3416 switch (ecs->ws.kind)
3417 {
3418 case TARGET_WAITKIND_LOADED:
3419 if (debug_infrun)
3420 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3421 if (!ptid_equal (ecs->ptid, inferior_ptid))
3422 context_switch (ecs->ptid);
3423 /* Ignore gracefully during startup of the inferior, as it might
3424 be the shell which has just loaded some objects; otherwise
3425 add the symbols for the newly loaded objects. Also ignore at
3426 the beginning of an attach or remote session; we will query
3427 the full list of libraries once the connection is
3428 established. */
3429
3430 stop_soon = get_inferior_stop_soon (ecs->ptid);
3431 if (stop_soon == NO_STOP_QUIETLY)
3432 {
3433 struct regcache *regcache;
3434
3435 regcache = get_thread_regcache (ecs->ptid);
3436
3437 handle_solib_event ();
3438
3439 ecs->event_thread->control.stop_bpstat
3440 = bpstat_stop_status (get_regcache_aspace (regcache),
3441 stop_pc, ecs->ptid, &ecs->ws);
3442
3443 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3444 {
3445 /* A catchpoint triggered. */
3446 process_event_stop_test (ecs);
3447 return;
3448 }
3449
3450 /* If requested, stop when the dynamic linker notifies
3451 gdb of events. This allows the user to get control
3452 and place breakpoints in initializer routines for
3453 dynamically loaded objects (among other things). */
3454 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3455 if (stop_on_solib_events)
3456 {
3457 /* Make sure we print "Stopped due to solib-event" in
3458 normal_stop. */
3459 stop_print_frame = 1;
3460
3461 stop_waiting (ecs);
3462 return;
3463 }
3464 }
3465
3466 /* If we are skipping through a shell, or through shared library
3467 loading that we aren't interested in, resume the program. If
3468 we're running the program normally, also resume. */
3469 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3470 {
3471 /* Loading of shared libraries might have changed breakpoint
3472 addresses. Make sure new breakpoints are inserted. */
3473 if (stop_soon == NO_STOP_QUIETLY
3474 && !breakpoints_always_inserted_mode ())
3475 insert_breakpoints ();
3476 resume (0, GDB_SIGNAL_0);
3477 prepare_to_wait (ecs);
3478 return;
3479 }
3480
3481 /* But stop if we're attaching or setting up a remote
3482 connection. */
3483 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3484 || stop_soon == STOP_QUIETLY_REMOTE)
3485 {
3486 if (debug_infrun)
3487 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3488 stop_waiting (ecs);
3489 return;
3490 }
3491
3492 internal_error (__FILE__, __LINE__,
3493 _("unhandled stop_soon: %d"), (int) stop_soon);
3494
3495 case TARGET_WAITKIND_SPURIOUS:
3496 if (debug_infrun)
3497 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3498 if (!ptid_equal (ecs->ptid, inferior_ptid))
3499 context_switch (ecs->ptid);
3500 resume (0, GDB_SIGNAL_0);
3501 prepare_to_wait (ecs);
3502 return;
3503
3504 case TARGET_WAITKIND_EXITED:
3505 case TARGET_WAITKIND_SIGNALLED:
3506 if (debug_infrun)
3507 {
3508 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3509 fprintf_unfiltered (gdb_stdlog,
3510 "infrun: TARGET_WAITKIND_EXITED\n");
3511 else
3512 fprintf_unfiltered (gdb_stdlog,
3513 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3514 }
3515
3516 inferior_ptid = ecs->ptid;
3517 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3518 set_current_program_space (current_inferior ()->pspace);
3519 handle_vfork_child_exec_or_exit (0);
3520 target_terminal_ours (); /* Must do this before mourn anyway. */
3521
3522 /* Clear any previous state of convenience variables. */
3523 clear_exit_convenience_vars ();
3524
3525 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3526 {
3527 /* Record the exit code in the convenience variable $_exitcode, so
3528 that the user can inspect this again later. */
3529 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3530 (LONGEST) ecs->ws.value.integer);
3531
3532 /* Also record this in the inferior itself. */
3533 current_inferior ()->has_exit_code = 1;
3534 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3535
3536 /* Support the --return-child-result option. */
3537 return_child_result_value = ecs->ws.value.integer;
3538
3539 observer_notify_exited (ecs->ws.value.integer);
3540 }
3541 else
3542 {
3543 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3544 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3545
3546 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3547 {
3548 /* Set the value of the internal variable $_exitsignal,
3549 which holds the signal uncaught by the inferior. */
3550 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3551 gdbarch_gdb_signal_to_target (gdbarch,
3552 ecs->ws.value.sig));
3553 }
3554 else
3555 {
3556 /* We don't have access to the target's method used for
3557 converting between signal numbers (GDB's internal
3558 representation <-> target's representation).
3559 Therefore, we cannot do a good job at displaying this
3560 information to the user. It's better to just warn
3561 her about it (if infrun debugging is enabled), and
3562 give up. */
3563 if (debug_infrun)
3564 fprintf_filtered (gdb_stdlog, _("\
3565 Cannot fill $_exitsignal with the correct signal number.\n"));
3566 }
3567
3568 observer_notify_signal_exited (ecs->ws.value.sig);
3569 }
3570
3571 gdb_flush (gdb_stdout);
3572 target_mourn_inferior ();
3573 singlestep_breakpoints_inserted_p = 0;
3574 cancel_single_step_breakpoints ();
3575 stop_print_frame = 0;
3576 stop_waiting (ecs);
3577 return;
3578
3579 /* The following are the only cases in which we keep going;
3580 the above cases end in a continue or goto. */
3581 case TARGET_WAITKIND_FORKED:
3582 case TARGET_WAITKIND_VFORKED:
3583 if (debug_infrun)
3584 {
3585 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3586 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3587 else
3588 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3589 }
3590
3591 /* Check whether the inferior is displaced stepping. */
3592 {
3593 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3594 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3595 struct displaced_step_inferior_state *displaced
3596 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3597
3598 /* If displaced stepping is in use, and thread ecs->ptid is the one
3599 that was displaced stepping the fork/vfork syscall, fix it up now. */
3600 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3601 {
3602 struct inferior *parent_inf
3603 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3604 struct regcache *child_regcache;
3605 CORE_ADDR parent_pc;
3606
3607 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3608 indicating that the displaced stepping of the syscall instruction
3609 has been done. Perform cleanup for the parent process here. Note
3610 that this operation also cleans up the child process for vfork,
3611 because their pages are shared. */
3612 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3613
3614 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3615 {
3616 /* Restore scratch pad for child process. */
3617 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3618 }
3619
3620 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3621 the child's PC is also within the scratchpad. Set the child's PC
3622 to the parent's PC value, which has already been fixed up.
3623 FIXME: we use the parent's aspace here, although we're touching
3624 the child, because the child hasn't been added to the inferior
3625 list yet at this point. */
3626
3627 child_regcache
3628 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3629 gdbarch,
3630 parent_inf->aspace);
3631 /* Read PC value of parent process. */
3632 parent_pc = regcache_read_pc (regcache);
3633
3634 if (debug_displaced)
3635 fprintf_unfiltered (gdb_stdlog,
3636 "displaced: write child pc from %s to %s\n",
3637 paddress (gdbarch,
3638 regcache_read_pc (child_regcache)),
3639 paddress (gdbarch, parent_pc));
3640
3641 regcache_write_pc (child_regcache, parent_pc);
3642 }
3643 }
3644
3645 if (!ptid_equal (ecs->ptid, inferior_ptid))
3646 context_switch (ecs->ptid);
3647
3648 /* Immediately detach breakpoints from the child before there's
3649 any chance of letting the user delete breakpoints from the
3650 breakpoint lists. If we don't do this early, it's easy to
3651 leave left over traps in the child, viz: "break foo; catch
3652 fork; c; <fork>; del; c; <child calls foo>". We only follow
3653 the fork on the last `continue', and by that time the
3654 breakpoint at "foo" is long gone from the breakpoint table.
3655 If we vforked, then we don't need to unpatch here, since both
3656 parent and child are sharing the same memory pages; we'll
3657 need to unpatch at follow/detach time instead to be certain
3658 that new breakpoints added between catchpoint hit time and
3659 vfork follow are detached. */
3660 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3661 {
3662 /* This won't actually modify the breakpoint list, but will
3663 physically remove the breakpoints from the child. */
3664 detach_breakpoints (ecs->ws.value.related_pid);
3665 }
3666
3667 if (singlestep_breakpoints_inserted_p)
3668 {
3669 /* Pull the single step breakpoints out of the target. */
3670 remove_single_step_breakpoints ();
3671 singlestep_breakpoints_inserted_p = 0;
3672 }
3673
3674 /* In case the event is caught by a catchpoint, remember that
3675 the event is to be followed at the next resume of the thread,
3676 and not immediately. */
3677 ecs->event_thread->pending_follow = ecs->ws;
3678
3679 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3680
3681 ecs->event_thread->control.stop_bpstat
3682 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3683 stop_pc, ecs->ptid, &ecs->ws);
3684
3685 /* If no catchpoint triggered for this, then keep going. Note
3686 that we're interested in knowing the bpstat actually causes a
3687 stop, not just if it may explain the signal. Software
3688 watchpoints, for example, always appear in the bpstat. */
3689 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3690 {
3691 ptid_t parent;
3692 ptid_t child;
3693 int should_resume;
3694 int follow_child
3695 = (follow_fork_mode_string == follow_fork_mode_child);
3696
3697 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3698
3699 should_resume = follow_fork ();
3700
3701 parent = ecs->ptid;
3702 child = ecs->ws.value.related_pid;
3703
3704 /* In non-stop mode, also resume the other branch. */
3705 if (non_stop && !detach_fork)
3706 {
3707 if (follow_child)
3708 switch_to_thread (parent);
3709 else
3710 switch_to_thread (child);
3711
3712 ecs->event_thread = inferior_thread ();
3713 ecs->ptid = inferior_ptid;
3714 keep_going (ecs);
3715 }
3716
3717 if (follow_child)
3718 switch_to_thread (child);
3719 else
3720 switch_to_thread (parent);
3721
3722 ecs->event_thread = inferior_thread ();
3723 ecs->ptid = inferior_ptid;
3724
3725 if (should_resume)
3726 keep_going (ecs);
3727 else
3728 stop_waiting (ecs);
3729 return;
3730 }
3731 process_event_stop_test (ecs);
3732 return;
3733
3734 case TARGET_WAITKIND_VFORK_DONE:
3735 /* Done with the shared memory region. Re-insert breakpoints in
3736 the parent, and keep going. */
3737
3738 if (debug_infrun)
3739 fprintf_unfiltered (gdb_stdlog,
3740 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3741
3742 if (!ptid_equal (ecs->ptid, inferior_ptid))
3743 context_switch (ecs->ptid);
3744
3745 current_inferior ()->waiting_for_vfork_done = 0;
3746 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3747 /* This also takes care of reinserting breakpoints in the
3748 previously locked inferior. */
3749 keep_going (ecs);
3750 return;
3751
3752 case TARGET_WAITKIND_EXECD:
3753 if (debug_infrun)
3754 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3755
3756 if (!ptid_equal (ecs->ptid, inferior_ptid))
3757 context_switch (ecs->ptid);
3758
3759 singlestep_breakpoints_inserted_p = 0;
3760 cancel_single_step_breakpoints ();
3761
3762 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3763
3764 /* Do whatever is necessary to the parent branch of the vfork. */
3765 handle_vfork_child_exec_or_exit (1);
3766
3767 /* This causes the eventpoints and symbol table to be reset.
3768 Must do this now, before trying to determine whether to
3769 stop. */
3770 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3771
3772 ecs->event_thread->control.stop_bpstat
3773 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3774 stop_pc, ecs->ptid, &ecs->ws);
3775
3776 /* Note that this may be referenced from inside
3777 bpstat_stop_status above, through inferior_has_execd. */
3778 xfree (ecs->ws.value.execd_pathname);
3779 ecs->ws.value.execd_pathname = NULL;
3780
3781 /* If no catchpoint triggered for this, then keep going. */
3782 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3783 {
3784 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3785 keep_going (ecs);
3786 return;
3787 }
3788 process_event_stop_test (ecs);
3789 return;
3790
3791 /* Be careful not to try to gather much state about a thread
3792 that's in a syscall. It's frequently a losing proposition. */
3793 case TARGET_WAITKIND_SYSCALL_ENTRY:
3794 if (debug_infrun)
3795 fprintf_unfiltered (gdb_stdlog,
3796 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3797 /* Getting the current syscall number. */
3798 if (handle_syscall_event (ecs) == 0)
3799 process_event_stop_test (ecs);
3800 return;
3801
3802 /* Before examining the threads further, step this thread to
3803 get it entirely out of the syscall. (We get notice of the
3804 event when the thread is just on the verge of exiting a
3805 syscall. Stepping one instruction seems to get it back
3806 into user code.) */
3807 case TARGET_WAITKIND_SYSCALL_RETURN:
3808 if (debug_infrun)
3809 fprintf_unfiltered (gdb_stdlog,
3810 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3811 if (handle_syscall_event (ecs) == 0)
3812 process_event_stop_test (ecs);
3813 return;
3814
3815 case TARGET_WAITKIND_STOPPED:
3816 if (debug_infrun)
3817 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3818 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3819 handle_signal_stop (ecs);
3820 return;
3821
3822 case TARGET_WAITKIND_NO_HISTORY:
3823 if (debug_infrun)
3824 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3825 /* Reverse execution: target ran out of history info. */
3826
3827 /* Pull the single step breakpoints out of the target. */
3828 if (singlestep_breakpoints_inserted_p)
3829 {
3830 if (!ptid_equal (ecs->ptid, inferior_ptid))
3831 context_switch (ecs->ptid);
3832 remove_single_step_breakpoints ();
3833 singlestep_breakpoints_inserted_p = 0;
3834 }
3835 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3836 observer_notify_no_history ();
3837 stop_waiting (ecs);
3838 return;
3839 }
3840 }
3841
3842 /* Come here when the program has stopped with a signal. */
3843
3844 static void
3845 handle_signal_stop (struct execution_control_state *ecs)
3846 {
3847 struct frame_info *frame;
3848 struct gdbarch *gdbarch;
3849 int stopped_by_watchpoint;
3850 enum stop_kind stop_soon;
3851 int random_signal;
3852
3853 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3854
3855 /* Do we need to clean up the state of a thread that has
3856 completed a displaced single-step? (Doing so usually affects
3857 the PC, so do it here, before we set stop_pc.) */
3858 displaced_step_fixup (ecs->ptid,
3859 ecs->event_thread->suspend.stop_signal);
3860
3861 /* If we either finished a single-step or hit a breakpoint, but
3862 the user wanted this thread to be stopped, pretend we got a
3863 SIG0 (generic unsignaled stop). */
3864 if (ecs->event_thread->stop_requested
3865 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3866 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3867
3868 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3869
3870 if (debug_infrun)
3871 {
3872 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3873 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3874 struct cleanup *old_chain = save_inferior_ptid ();
3875
3876 inferior_ptid = ecs->ptid;
3877
3878 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3879 paddress (gdbarch, stop_pc));
3880 if (target_stopped_by_watchpoint ())
3881 {
3882 CORE_ADDR addr;
3883
3884 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3885
3886 if (target_stopped_data_address (&current_target, &addr))
3887 fprintf_unfiltered (gdb_stdlog,
3888 "infrun: stopped data address = %s\n",
3889 paddress (gdbarch, addr));
3890 else
3891 fprintf_unfiltered (gdb_stdlog,
3892 "infrun: (no data address available)\n");
3893 }
3894
3895 do_cleanups (old_chain);
3896 }
3897
3898 /* This originates from start_remote(), start_inferior() and
3899 shared library hook functions. */
3900 stop_soon = get_inferior_stop_soon (ecs->ptid);
3901 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3902 {
3903 if (!ptid_equal (ecs->ptid, inferior_ptid))
3904 context_switch (ecs->ptid);
3905 if (debug_infrun)
3906 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3907 stop_print_frame = 1;
3908 stop_waiting (ecs);
3909 return;
3910 }
3911
3912 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3913 && stop_after_trap)
3914 {
3915 if (!ptid_equal (ecs->ptid, inferior_ptid))
3916 context_switch (ecs->ptid);
3917 if (debug_infrun)
3918 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3919 stop_print_frame = 0;
3920 stop_waiting (ecs);
3921 return;
3922 }
3923
3924 /* This originates from attach_command(). We need to overwrite
3925 the stop_signal here, because some kernels don't ignore a
3926 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3927 See more comments in inferior.h. On the other hand, if we
3928 get a non-SIGSTOP, report it to the user - assume the backend
3929 will handle the SIGSTOP if it should show up later.
3930
3931 Also consider that the attach is complete when we see a
3932 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3933 target extended-remote, report it instead of a SIGSTOP
3934 (e.g. gdbserver). We already rely on SIGTRAP being our
3935 signal, so this is no exception.
3936
3937 Also consider that the attach is complete when we see a
3938 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3939 the target to stop all threads of the inferior, in case the
3940 low level attach operation doesn't stop them implicitly. If
3941 they weren't stopped implicitly, then the stub will report a
3942 GDB_SIGNAL_0, meaning: stopped for no particular reason
3943 other than GDB's request. */
3944 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3945 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3946 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3947 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3948 {
3949 stop_print_frame = 1;
3950 stop_waiting (ecs);
3951 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3952 return;
3953 }
3954
3955 /* See if something interesting happened to the non-current thread. If
3956 so, then switch to that thread. */
3957 if (!ptid_equal (ecs->ptid, inferior_ptid))
3958 {
3959 if (debug_infrun)
3960 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3961
3962 context_switch (ecs->ptid);
3963
3964 if (deprecated_context_hook)
3965 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3966 }
3967
3968 /* At this point, get hold of the now-current thread's frame. */
3969 frame = get_current_frame ();
3970 gdbarch = get_frame_arch (frame);
3971
3972 /* Pull the single step breakpoints out of the target. */
3973 if (singlestep_breakpoints_inserted_p)
3974 {
3975 /* However, before doing so, if this single-step breakpoint was
3976 actually for another thread, set this thread up for moving
3977 past it. */
3978 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3979 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3980 {
3981 struct regcache *regcache;
3982 struct address_space *aspace;
3983 CORE_ADDR pc;
3984
3985 regcache = get_thread_regcache (ecs->ptid);
3986 aspace = get_regcache_aspace (regcache);
3987 pc = regcache_read_pc (regcache);
3988 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3989 {
3990 if (debug_infrun)
3991 {
3992 fprintf_unfiltered (gdb_stdlog,
3993 "infrun: [%s] hit step over single-step"
3994 " breakpoint of [%s]\n",
3995 target_pid_to_str (ecs->ptid),
3996 target_pid_to_str (singlestep_ptid));
3997 }
3998 ecs->hit_singlestep_breakpoint = 1;
3999 }
4000 }
4001
4002 remove_single_step_breakpoints ();
4003 singlestep_breakpoints_inserted_p = 0;
4004 }
4005
4006 if (ecs->stepped_after_stopped_by_watchpoint)
4007 stopped_by_watchpoint = 0;
4008 else
4009 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4010
4011 /* If necessary, step over this watchpoint. We'll be back to display
4012 it in a moment. */
4013 if (stopped_by_watchpoint
4014 && (target_have_steppable_watchpoint
4015 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4016 {
4017 /* At this point, we are stopped at an instruction which has
4018 attempted to write to a piece of memory under control of
4019 a watchpoint. The instruction hasn't actually executed
4020 yet. If we were to evaluate the watchpoint expression
4021 now, we would get the old value, and therefore no change
4022 would seem to have occurred.
4023
4024 In order to make watchpoints work `right', we really need
4025 to complete the memory write, and then evaluate the
4026 watchpoint expression. We do this by single-stepping the
4027 target.
4028
4029 It may not be necessary to disable the watchpoint to step over
4030 it. For example, the PA can (with some kernel cooperation)
4031 single step over a watchpoint without disabling the watchpoint.
4032
4033 It is far more common to need to disable a watchpoint to step
4034 the inferior over it. If we have non-steppable watchpoints,
4035 we must disable the current watchpoint; it's simplest to
4036 disable all watchpoints and breakpoints. */
4037 int hw_step = 1;
4038
4039 if (!target_have_steppable_watchpoint)
4040 {
4041 remove_breakpoints ();
4042 /* See comment in resume why we need to stop bypassing signals
4043 while breakpoints have been removed. */
4044 target_pass_signals (0, NULL);
4045 }
4046 /* Single step */
4047 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4048 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4049 waiton_ptid = ecs->ptid;
4050 if (target_have_steppable_watchpoint)
4051 infwait_state = infwait_step_watch_state;
4052 else
4053 infwait_state = infwait_nonstep_watch_state;
4054 prepare_to_wait (ecs);
4055 return;
4056 }
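/* Outline of the step-over above on a target without steppable
   watchpoints (illustrative):

     remove_breakpoints ();
     target_pass_signals (0, NULL);
     target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
     infwait_state = infwait_nonstep_watch_state;

   When the resulting event arrives, the infwait switch earlier in
   handle_inferior_event sets stepped_after_stopped_by_watchpoint, so
   the watchpoint expression is only evaluated once the memory write
   has actually committed.  */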
4057
4058 ecs->event_thread->stepping_over_breakpoint = 0;
4059 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4060 ecs->event_thread->control.stop_step = 0;
4061 stop_print_frame = 1;
4062 stopped_by_random_signal = 0;
4063
4064 /* Hide inlined functions starting here, unless we just performed stepi or
4065 nexti. After stepi and nexti, always show the innermost frame (not any
4066 inline function call sites). */
4067 if (ecs->event_thread->control.step_range_end != 1)
4068 {
4069 struct address_space *aspace =
4070 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4071
4072 /* skip_inline_frames is expensive, so we avoid it if we can
4073 determine that the address is one where functions cannot have
4074 been inlined. This improves performance with inferiors that
4075 load a lot of shared libraries, because the solib event
4076 breakpoint is defined as the address of a function (i.e. not
4077 inline). Note that we have to check the previous PC as well
4078 as the current one to catch cases when we have just
4079 single-stepped off a breakpoint prior to reinstating it.
4080 Note that we're assuming that the code we single-step to is
4081 not inline, but that's not definitive: there's nothing
4082 preventing the event breakpoint function from containing
4083 inlined code, and the single-step ending up there. If the
4084 user had set a breakpoint on that inlined code, the missing
4085 skip_inline_frames call would break things. Fortunately
4086 that's an extremely unlikely scenario. */
4087 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4088 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4089 && ecs->event_thread->control.trap_expected
4090 && pc_at_non_inline_function (aspace,
4091 ecs->event_thread->prev_pc,
4092 &ecs->ws)))
4093 {
4094 skip_inline_frames (ecs->ptid);
4095
4096 /* Re-fetch current thread's frame in case that invalidated
4097 the frame cache. */
4098 frame = get_current_frame ();
4099 gdbarch = get_frame_arch (frame);
4100 }
4101 }
4102
4103 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4104 && ecs->event_thread->control.trap_expected
4105 && gdbarch_single_step_through_delay_p (gdbarch)
4106 && currently_stepping (ecs->event_thread))
4107 {
4108 /* We're trying to step off a breakpoint. Turns out that we're
4109 also on an instruction that needs to be stepped multiple
4110 times before it has fully executed. E.g., architectures
4111 with a delay slot. It needs to be stepped twice, once for
4112 the instruction and once for the delay slot. */
4113 int step_through_delay
4114 = gdbarch_single_step_through_delay (gdbarch, frame);
4115
4116 if (debug_infrun && step_through_delay)
4117 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4118 if (ecs->event_thread->control.step_range_end == 0
4119 && step_through_delay)
4120 {
4121 /* The user issued a continue when stopped at a breakpoint.
4122 Set up for another trap and get out of here. */
4123 ecs->event_thread->stepping_over_breakpoint = 1;
4124 keep_going (ecs);
4125 return;
4126 }
4127 else if (step_through_delay)
4128 {
4129 /* The user issued a step when stopped at a breakpoint.
4130 Maybe we should stop, maybe we should not - the delay
4131 slot *might* correspond to a line of source. In any
4132 case, don't decide that here, just set
4133 ecs->stepping_over_breakpoint, making sure we
4134 single-step again before breakpoints are re-inserted. */
4135 ecs->event_thread->stepping_over_breakpoint = 1;
4136 }
4137 }
4138
4139 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4140 handles this event. */
4141 ecs->event_thread->control.stop_bpstat
4142 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4143 stop_pc, ecs->ptid, &ecs->ws);
4144
4145 /* The following is needed in case a breakpoint condition called
4146 a function. */
4147 stop_print_frame = 1;
4148
4149 /* This is where we handle "moribund" watchpoints. Unlike
4150 software breakpoints traps, hardware watchpoint traps are
4151 always distinguishable from random traps. If no high-level
4152 watchpoint is associated with the reported stop data address
4153 anymore, then the bpstat does not explain the signal ---
4154 simply make sure to ignore it if `stopped_by_watchpoint' is
4155 set. */
4156
4157 if (debug_infrun
4158 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4159 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4160 GDB_SIGNAL_TRAP)
4161 && stopped_by_watchpoint)
4162 fprintf_unfiltered (gdb_stdlog,
4163 "infrun: no user watchpoint explains "
4164 "watchpoint SIGTRAP, ignoring\n");
4165
4166 /* NOTE: cagney/2003-03-29: These checks for a random signal
4167 at one stage in the past included checks for an inferior
4168 function call's call dummy's return breakpoint. The original
4169 comment, that went with the test, read:
4170
4171 ``End of a stack dummy. Some systems (e.g. Sony news) give
4172 another signal besides SIGTRAP, so check here as well as
4173 above.''
4174
4175 If someone ever tries to get call dummies on a
4176 non-executable stack to work (where the target would stop
4177 with something like a SIGSEGV), then those tests might need
4178 to be re-instated. Given, however, that the tests were only
4179 enabled when momentary breakpoints were not being used, I
4180 suspect that it won't be the case.
4181
4182 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4183 be necessary for call dummies on a non-executable stack on
4184 SPARC. */
4185
4186 /* See if the breakpoints module can explain the signal. */
4187 random_signal
4188 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4189 ecs->event_thread->suspend.stop_signal);
4190
4191 /* If not, perhaps stepping/nexting can. */
4192 if (random_signal)
4193 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4194 && currently_stepping (ecs->event_thread));
4195
4196 /* Perhaps the thread hit a single-step breakpoint of _another_
4197 thread. Single-step breakpoints are transparent to the
4198 breakpoints module. */
4199 if (random_signal)
4200 random_signal = !ecs->hit_singlestep_breakpoint;
4201
4202 /* No? Perhaps we got a moribund watchpoint. */
4203 if (random_signal)
4204 random_signal = !stopped_by_watchpoint;
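/* In effect, the checks above amount to (illustrative condensation):

     random_signal
       = !bpstat_explains_signal (stop_bpstat, stop_signal)
	 && !(stop_signal == GDB_SIGNAL_TRAP
	      && currently_stepping (ecs->event_thread))
	 && !ecs->hit_singlestep_breakpoint
	 && !stopped_by_watchpoint;  */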
4205
4206 /* For the program's own signals, act according to
4207 the signal handling tables. */
4208
4209 if (random_signal)
4210 {
4211 /* Signal not for debugging purposes. */
4212 int printed = 0;
4213 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4214 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4215
4216 if (debug_infrun)
4217 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4218 gdb_signal_to_symbol_string (stop_signal));
4219
4220 stopped_by_random_signal = 1;
4221
4222 if (signal_print[ecs->event_thread->suspend.stop_signal])
4223 {
4224 /* The signal table tells us to print about this signal. */
4225 printed = 1;
4226 target_terminal_ours_for_output ();
4227 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4228 }
4229 /* Always stop on signals if we're either just gaining control
4230 of the program, or the user explicitly requested this thread
4231 to remain stopped. */
4232 if (stop_soon != NO_STOP_QUIETLY
4233 || ecs->event_thread->stop_requested
4234 || (!inf->detaching
4235 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4236 {
4237 stop_waiting (ecs);
4238 return;
4239 }
4240 /* If not going to stop, give terminal back
4241 if we took it away. */
4242 else if (printed)
4243 target_terminal_inferior ();
4244
4245 /* Clear the signal if it should not be passed. */
4246 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4247 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4248
4249 if (ecs->event_thread->prev_pc == stop_pc
4250 && ecs->event_thread->control.trap_expected
4251 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4252 {
4253 /* We were just starting a new sequence, attempting to
4254 single-step off of a breakpoint and expecting a SIGTRAP.
4255 Instead this signal arrives. This signal will take us out
4256 of the stepping range so GDB needs to remember to, when
4257 the signal handler returns, resume stepping off that
4258 breakpoint. */
4259 /* To simplify things, "continue" is forced to use the same
4260 code paths as single-step - set a breakpoint at the
4261 signal return address and then, once hit, step off that
4262 breakpoint. */
4263 if (debug_infrun)
4264 fprintf_unfiltered (gdb_stdlog,
4265 "infrun: signal arrived while stepping over "
4266 "breakpoint\n");
4267
4268 insert_hp_step_resume_breakpoint_at_frame (frame);
4269 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4270 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4271 ecs->event_thread->control.trap_expected = 0;
4272
4273 /* If we were nexting/stepping some other thread, switch to
4274 it, so that we don't continue it, losing control. */
4275 if (!switch_back_to_stepped_thread (ecs))
4276 keep_going (ecs);
4277 return;
4278 }
4279
4280 if (ecs->event_thread->control.step_range_end != 0
4281 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4282 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4283 && frame_id_eq (get_stack_frame_id (frame),
4284 ecs->event_thread->control.step_stack_frame_id)
4285 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4286 {
4287 /* The inferior is about to take a signal that will take it
4288 out of the single step range. Set a breakpoint at the
4289 current PC (which is presumably where the signal handler
4290 will eventually return) and then allow the inferior to
4291 run free.
4292
4293 Note that this is only needed for a signal delivered
4294 while in the single-step range. Nested signals aren't a
4295 problem as they eventually all return. */
4296 if (debug_infrun)
4297 fprintf_unfiltered (gdb_stdlog,
4298 "infrun: signal may take us out of "
4299 "single-step range\n");
4300
4301 insert_hp_step_resume_breakpoint_at_frame (frame);
4302 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4303 ecs->event_thread->control.trap_expected = 0;
4304 keep_going (ecs);
4305 return;
4306 }
4307
4308 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4309 when either there's a nested signal, or when there's a
4310 pending signal enabled just as the signal handler returns
4311 (leaving the inferior at the step-resume-breakpoint without
4312 actually executing it). Either way continue until the
4313 breakpoint is really hit. */
4314
4315 if (!switch_back_to_stepped_thread (ecs))
4316 {
4317 if (debug_infrun)
4318 fprintf_unfiltered (gdb_stdlog,
4319 "infrun: random signal, keep going\n");
4320
4321 keep_going (ecs);
4322 }
4323 return;
4324 }
4325
4326 process_event_stop_test (ecs);
4327 }
4328
4329 /* Come here when we've got some debug event / signal we can explain
4330 (IOW, not a random signal), and test whether it should cause a
4331 stop, or whether we should resume the inferior (transparently).
4332 E.g., could be a breakpoint whose condition evaluates false; we
4333 could be still stepping within the line; etc. */
4334
4335 static void
4336 process_event_stop_test (struct execution_control_state *ecs)
4337 {
4338 struct symtab_and_line stop_pc_sal;
4339 struct frame_info *frame;
4340 struct gdbarch *gdbarch;
4341 CORE_ADDR jmp_buf_pc;
4342 struct bpstat_what what;
4343
4344 /* Handle cases caused by hitting a breakpoint. */
4345
4346 frame = get_current_frame ();
4347 gdbarch = get_frame_arch (frame);
4348
4349 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4350
4351 if (what.call_dummy)
4352 {
4353 stop_stack_dummy = what.call_dummy;
4354 }
4355
4356 /* If we hit an internal event that triggers symbol changes, the
4357 current frame will be invalidated within bpstat_what (e.g., if we
4358 hit an internal solib event). Re-fetch it. */
4359 frame = get_current_frame ();
4360 gdbarch = get_frame_arch (frame);
4361
4362 switch (what.main_action)
4363 {
4364 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4365 /* If we hit the breakpoint at longjmp while stepping, we
4366 install a momentary breakpoint at the target of the
4367 jmp_buf. */
4368
4369 if (debug_infrun)
4370 fprintf_unfiltered (gdb_stdlog,
4371 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4372
4373 ecs->event_thread->stepping_over_breakpoint = 1;
4374
4375 if (what.is_longjmp)
4376 {
4377 struct value *arg_value;
4378
4379 /* If we set the longjmp breakpoint via a SystemTap probe,
4380 then use it to extract the arguments. The destination PC
4381 is the third argument to the probe. */
4382 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4383 if (arg_value)
4384 jmp_buf_pc = value_as_address (arg_value);
4385 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4386 || !gdbarch_get_longjmp_target (gdbarch,
4387 frame, &jmp_buf_pc))
4388 {
4389 if (debug_infrun)
4390 fprintf_unfiltered (gdb_stdlog,
4391 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4392 "(!gdbarch_get_longjmp_target)\n");
4393 keep_going (ecs);
4394 return;
4395 }
4396
4397 /* Insert a breakpoint at resume address. */
4398 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4399 }
4400 else
4401 check_exception_resume (ecs, frame);
4402 keep_going (ecs);
4403 return;
4404
4405 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4406 {
4407 struct frame_info *init_frame;
4408
4409 /* There are several cases to consider.
4410
4411 1. The initiating frame no longer exists. In this case we
4412 must stop, because the exception or longjmp has gone too
4413 far.
4414
4415 2. The initiating frame exists, and is the same as the
4416 current frame. We stop, because the exception or longjmp
4417 has been caught.
4418
4419 3. The initiating frame exists and is different from the
4420 current frame. This means the exception or longjmp has
4421 been caught beneath the initiating frame, so keep going.
4422
4423 4. longjmp breakpoint has been placed just to protect
4424 against stale dummy frames and user is not interested in
4425 stopping around longjmps. */
4426
4427 if (debug_infrun)
4428 fprintf_unfiltered (gdb_stdlog,
4429 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4430
4431 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4432 != NULL);
4433 delete_exception_resume_breakpoint (ecs->event_thread);
4434
4435 if (what.is_longjmp)
4436 {
4437 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4438
4439 if (!frame_id_p (ecs->event_thread->initiating_frame))
4440 {
4441 /* Case 4. */
4442 keep_going (ecs);
4443 return;
4444 }
4445 }
4446
4447 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4448
4449 if (init_frame)
4450 {
4451 struct frame_id current_id
4452 = get_frame_id (get_current_frame ());
4453 if (frame_id_eq (current_id,
4454 ecs->event_thread->initiating_frame))
4455 {
4456 /* Case 2. Fall through. */
4457 }
4458 else
4459 {
4460 /* Case 3. */
4461 keep_going (ecs);
4462 return;
4463 }
4464 }
4465
4466 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4467 exists. */
4468 delete_step_resume_breakpoint (ecs->event_thread);
4469
4470 end_stepping_range (ecs);
4471 }
4472 return;
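/* Worked example tying the two longjmp cases together: while
   "next"-ing over a call that longjmps, the longjmp breakpoint
   reports SET_LONGJMP_RESUME and a momentary breakpoint is planted
   at the jmp_buf PC extracted above; when that breakpoint is hit,
   CLEAR_LONGJMP_RESUME compares the initiating frame with the
   current frame and either stops (cases 1 and 2) or keeps going
   (cases 3 and 4).  */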
4473
4474 case BPSTAT_WHAT_SINGLE:
4475 if (debug_infrun)
4476 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4477 ecs->event_thread->stepping_over_breakpoint = 1;
4478 /* Still need to check other stuff, at least the case where we
4479 are stepping and step out of the right range. */
4480 break;
4481
4482 case BPSTAT_WHAT_STEP_RESUME:
4483 if (debug_infrun)
4484 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4485
4486 delete_step_resume_breakpoint (ecs->event_thread);
4487 if (ecs->event_thread->control.proceed_to_finish
4488 && execution_direction == EXEC_REVERSE)
4489 {
4490 struct thread_info *tp = ecs->event_thread;
4491
4492 /* We are finishing a function in reverse, and just hit the
4493 step-resume breakpoint at the start address of the
4494 function, and we're almost there -- just need to back up
4495 by one more single-step, which should take us back to the
4496 function call. */
4497 tp->control.step_range_start = tp->control.step_range_end = 1;
4498 keep_going (ecs);
4499 return;
4500 }
4501 fill_in_stop_func (gdbarch, ecs);
4502 if (stop_pc == ecs->stop_func_start
4503 && execution_direction == EXEC_REVERSE)
4504 {
4505 /* We are stepping over a function call in reverse, and just
4506 hit the step-resume breakpoint at the start address of
4507 the function. Go back to single-stepping, which should
4508 take us back to the function call. */
4509 ecs->event_thread->stepping_over_breakpoint = 1;
4510 keep_going (ecs);
4511 return;
4512 }
4513 break;
4514
4515 case BPSTAT_WHAT_STOP_NOISY:
4516 if (debug_infrun)
4517 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4518 stop_print_frame = 1;
4519
4520 /* Assume the thread stopped for a breakpoint. We'll still check
4521 whether a/the breakpoint is there when the thread is next
4522 resumed. */
4523 ecs->event_thread->stepping_over_breakpoint = 1;
4524
4525 stop_waiting (ecs);
4526 return;
4527
4528 case BPSTAT_WHAT_STOP_SILENT:
4529 if (debug_infrun)
4530 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4531 stop_print_frame = 0;
4532
4533 /* Assume the thread stopped for a breakpoint. We'll still check
4534 whether a/the breakpoint is there when the thread is next
4535 resumed. */
4536 ecs->event_thread->stepping_over_breakpoint = 1;
4537 stop_waiting (ecs);
4538 return;
4539
4540 case BPSTAT_WHAT_HP_STEP_RESUME:
4541 if (debug_infrun)
4542 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4543
4544 delete_step_resume_breakpoint (ecs->event_thread);
4545 if (ecs->event_thread->step_after_step_resume_breakpoint)
4546 {
4547 /* Back when the step-resume breakpoint was inserted, we
4548 were trying to single-step off a breakpoint. Go back to
4549 doing that. */
4550 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4551 ecs->event_thread->stepping_over_breakpoint = 1;
4552 keep_going (ecs);
4553 return;
4554 }
4555 break;
4556
4557 case BPSTAT_WHAT_KEEP_CHECKING:
4558 break;
4559 }
4560
4561 /* We come here if we hit a breakpoint but should not stop for it.
4562 Possibly we also were stepping and should stop for that. So fall
4563 through and test for stepping. But, if not stepping, do not
4564 stop. */
4565
4566 /* In all-stop mode, if we're currently stepping but have stopped in
4567 some other thread, we need to switch back to the stepped thread. */
4568 if (switch_back_to_stepped_thread (ecs))
4569 return;
4570
4571 if (ecs->event_thread->control.step_resume_breakpoint)
4572 {
4573 if (debug_infrun)
4574 fprintf_unfiltered (gdb_stdlog,
4575 "infrun: step-resume breakpoint is inserted\n");
4576
4577 /* Having a step-resume breakpoint overrides anything
4578 else having to do with stepping commands until
4579 that breakpoint is reached. */
4580 keep_going (ecs);
4581 return;
4582 }
4583
4584 if (ecs->event_thread->control.step_range_end == 0)
4585 {
4586 if (debug_infrun)
4587 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4588 /* Likewise if we aren't even stepping. */
4589 keep_going (ecs);
4590 return;
4591 }
4592
4593 /* Re-fetch current thread's frame in case the code above caused
4594 the frame cache to be re-initialized, making our FRAME variable
4595 a dangling pointer. */
4596 frame = get_current_frame ();
4597 gdbarch = get_frame_arch (frame);
4598 fill_in_stop_func (gdbarch, ecs);
4599
4600 /* If stepping through a line, keep going if still within it.
4601
4602 Note that step_range_end is the address of the first instruction
4603 beyond the step range, and NOT the address of the last instruction
4604 within it!
4605
4606 Note also that during reverse execution, we may be stepping
4607 through a function epilogue and therefore must detect when
4608 the current-frame changes in the middle of a line. */
4609
4610 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4611 && (execution_direction != EXEC_REVERSE
4612 || frame_id_eq (get_frame_id (frame),
4613 ecs->event_thread->control.step_frame_id)))
4614 {
4615 if (debug_infrun)
4616 fprintf_unfiltered
4617 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4618 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4619 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4620
4621 /* Tentatively re-enable range stepping; `resume' disables it if
4622 necessary (e.g., if we're stepping over a breakpoint or we
4623 have software watchpoints). */
4624 ecs->event_thread->control.may_range_step = 1;
4625
4626 /* When stepping backward, stop at beginning of line range
4627 (unless it's the function entry point, in which case
4628 keep going back to the call point). */
4629 if (stop_pc == ecs->event_thread->control.step_range_start
4630 && stop_pc != ecs->stop_func_start
4631 && execution_direction == EXEC_REVERSE)
4632 end_stepping_range (ecs);
4633 else
4634 keep_going (ecs);
4635
4636 return;
4637 }
4638
4639 /* We stepped out of the stepping range. */
4640
4641 /* If we are stepping at the source level and entered the runtime
4642 loader dynamic symbol resolution code...
4643
4644 EXEC_FORWARD: we keep on single stepping until we exit the run
4645 time loader code and reach the callee's address.
4646
4647 EXEC_REVERSE: we've already executed the callee (backward), and
4648 the runtime loader code is handled just like any other
4649 undebuggable function call. Now we need only keep stepping
4650 backward through the trampoline code, and that's handled further
4651 down, so there is nothing for us to do here. */
4652
4653 if (execution_direction != EXEC_REVERSE
4654 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4655 && in_solib_dynsym_resolve_code (stop_pc))
4656 {
4657 CORE_ADDR pc_after_resolver =
4658 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4659
4660 if (debug_infrun)
4661 fprintf_unfiltered (gdb_stdlog,
4662 "infrun: stepped into dynsym resolve code\n");
4663
4664 if (pc_after_resolver)
4665 {
4666 /* Set up a step-resume breakpoint at the address
4667 indicated by SKIP_SOLIB_RESOLVER. */
4668 struct symtab_and_line sr_sal;
4669
4670 init_sal (&sr_sal);
4671 sr_sal.pc = pc_after_resolver;
4672 sr_sal.pspace = get_frame_program_space (frame);
4673
4674 insert_step_resume_breakpoint_at_sal (gdbarch,
4675 sr_sal, null_frame_id);
4676 }
4677
4678 keep_going (ecs);
4679 return;
4680 }
4681
4682 if (ecs->event_thread->control.step_range_end != 1
4683 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4684 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4685 && get_frame_type (frame) == SIGTRAMP_FRAME)
4686 {
4687 if (debug_infrun)
4688 fprintf_unfiltered (gdb_stdlog,
4689 "infrun: stepped into signal trampoline\n");
4690 /* The inferior, while doing a "step" or "next", has ended up in
4691 a signal trampoline (either by a signal being delivered or by
4692 the signal handler returning). Just single-step until the
4693 inferior leaves the trampoline (either by calling the handler
4694 or returning). */
4695 keep_going (ecs);
4696 return;
4697 }
4698
4699 /* If we're in the return path from a shared library trampoline,
4700 we want to proceed through the trampoline when stepping. */
4701 /* macro/2012-04-25: This needs to come before the subroutine
4702 call check below as on some targets return trampolines look
4703 like subroutine calls (MIPS16 return thunks). */
4704 if (gdbarch_in_solib_return_trampoline (gdbarch,
4705 stop_pc, ecs->stop_func_name)
4706 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4707 {
4708 /* Determine where this trampoline returns. */
4709 CORE_ADDR real_stop_pc;
4710
4711 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4712
4713 if (debug_infrun)
4714 fprintf_unfiltered (gdb_stdlog,
4715 "infrun: stepped into solib return tramp\n");
4716
4717 /* Only proceed through if we know where it's going. */
4718 if (real_stop_pc)
4719 {
4720 /* And put the step-breakpoint there and go until there. */
4721 struct symtab_and_line sr_sal;
4722
4723 init_sal (&sr_sal); /* initialize to zeroes */
4724 sr_sal.pc = real_stop_pc;
4725 sr_sal.section = find_pc_overlay (sr_sal.pc);
4726 sr_sal.pspace = get_frame_program_space (frame);
4727
4728 /* Do not specify what the fp should be when we stop since
4729 on some machines the prologue is where the new fp value
4730 is established. */
4731 insert_step_resume_breakpoint_at_sal (gdbarch,
4732 sr_sal, null_frame_id);
4733
4734 /* Restart without fiddling with the step ranges or
4735 other state. */
4736 keep_going (ecs);
4737 return;
4738 }
4739 }
4740
4741 /* Check for subroutine calls. The check for the current frame
4742 equalling the step ID is not necessary - the check of the
4743 previous frame's ID is sufficient - but it is a common case and
4744 cheaper than checking the previous frame's ID.
4745
4746 NOTE: frame_id_eq will never report two invalid frame IDs as
4747 being equal, so to get into this block, both the current and
4748 previous frame must have valid frame IDs. */
4749 /* The outer_frame_id check is a heuristic to detect stepping
4750 through startup code. If we step over an instruction which
4751 sets the stack pointer from an invalid value to a valid value,
4752 we may detect that as a subroutine call from the mythical
4753 "outermost" function. This could be fixed by marking
4754 outermost frames as !stack_p,code_p,special_p. Then the
4755 initial outermost frame, before sp was valid, would
4756 have code_addr == &_start. See the comment in frame_id_eq
4757 for more. */
4758 if (!frame_id_eq (get_stack_frame_id (frame),
4759 ecs->event_thread->control.step_stack_frame_id)
4760 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4761 ecs->event_thread->control.step_stack_frame_id)
4762 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4763 outer_frame_id)
4764 || step_start_function != find_pc_function (stop_pc))))
4765 {
4766 CORE_ADDR real_stop_pc;
4767
4768 if (debug_infrun)
4769 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4770
4771 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4772 || ((ecs->event_thread->control.step_range_end == 1)
4773 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4774 ecs->stop_func_start)))
4775 {
4776 /* I presume that step_over_calls is only 0 when we're
4777 supposed to be stepping at the assembly language level
4778 ("stepi"). Just stop. */
4779 /* Also, maybe we just did a "nexti" inside a prologue, so we
4780 thought it was a subroutine call but it was not. Stop as
4781 well. FENN */
4782 /* And this works the same backward as frontward. MVS */
4783 end_stepping_range (ecs);
4784 return;
4785 }
4786
4787 /* Reverse stepping through solib trampolines. */
4788
4789 if (execution_direction == EXEC_REVERSE
4790 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4791 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4792 || (ecs->stop_func_start == 0
4793 && in_solib_dynsym_resolve_code (stop_pc))))
4794 {
4795 /* Any solib trampoline code can be handled in reverse
4796 by simply continuing to single-step. We have already
4797 executed the solib function (backwards), and a few
4798 steps will take us back through the trampoline to the
4799 caller. */
4800 keep_going (ecs);
4801 return;
4802 }
4803
4804 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4805 {
4806 /* We're doing a "next".
4807
4808 Normal (forward) execution: set a breakpoint at the
4809 callee's return address (the address at which the caller
4810 will resume).
4811
4812 Reverse (backward) execution. set the step-resume
4813 breakpoint at the start of the function that we just
4814 stepped into (backwards), and continue to there. When we
4815 get there, we'll need to single-step back to the caller. */
4816
4817 if (execution_direction == EXEC_REVERSE)
4818 {
4819 /* If we're already at the start of the function, we've either
4820 just stepped backward into a single instruction function,
4821 or stepped back out of a signal handler to the first instruction
4822 of the function. Just keep going, which will single-step back
4823 to the caller. */
4824 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4825 {
4826 struct symtab_and_line sr_sal;
4827
4828 /* Normal function call return (static or dynamic). */
4829 init_sal (&sr_sal);
4830 sr_sal.pc = ecs->stop_func_start;
4831 sr_sal.pspace = get_frame_program_space (frame);
4832 insert_step_resume_breakpoint_at_sal (gdbarch,
4833 sr_sal, null_frame_id);
4834 }
4835 }
4836 else
4837 insert_step_resume_breakpoint_at_caller (frame);
4838
4839 keep_going (ecs);
4840 return;
4841 }
4842
4843 /* If we are in a function call trampoline (a stub between the
4844 calling routine and the real function), locate the real
4845 function. That's what tells us (a) whether we want to step
4846 into it at all, and (b) what prologue we want to run to the
4847 end of, if we do step into it. */
4848 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4849 if (real_stop_pc == 0)
4850 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4851 if (real_stop_pc != 0)
4852 ecs->stop_func_start = real_stop_pc;
4853
4854 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4855 {
4856 struct symtab_and_line sr_sal;
4857
4858 init_sal (&sr_sal);
4859 sr_sal.pc = ecs->stop_func_start;
4860 sr_sal.pspace = get_frame_program_space (frame);
4861
4862 insert_step_resume_breakpoint_at_sal (gdbarch,
4863 sr_sal, null_frame_id);
4864 keep_going (ecs);
4865 return;
4866 }
4867
4868 /* If we have line number information for the function we are
4869 thinking of stepping into and the function isn't on the skip
4870 list, step into it.
4871
4872 If there are several symtabs at that PC (e.g. with include
4873 files), just want to know whether *any* of them have line
4874 numbers. find_pc_line handles this. */
4875 {
4876 struct symtab_and_line tmp_sal;
4877
4878 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4879 if (tmp_sal.line != 0
4880 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4881 &tmp_sal))
4882 {
4883 if (execution_direction == EXEC_REVERSE)
4884 handle_step_into_function_backward (gdbarch, ecs);
4885 else
4886 handle_step_into_function (gdbarch, ecs);
4887 return;
4888 }
4889 }
4890
4891 /* If we have no line number information and step-stop-if-no-debug is
4892 set, we stop the step so that the user has a chance to switch
4893 to assembly mode. */
4894 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4895 && step_stop_if_no_debug)
4896 {
4897 end_stepping_range (ecs);
4898 return;
4899 }
4900
4901 if (execution_direction == EXEC_REVERSE)
4902 {
4903 /* If we're already at the start of the function, we've either just
4904 stepped backward into a single instruction function without line
4905 number info, or stepped back out of a signal handler to the first
4906 instruction of the function without line number info. Just keep
4907 going, which will single-step back to the caller. */
4908 if (ecs->stop_func_start != stop_pc)
4909 {
4910 /* Set a breakpoint at callee's start address.
4911 From there we can step once and be back in the caller. */
4912 struct symtab_and_line sr_sal;
4913
4914 init_sal (&sr_sal);
4915 sr_sal.pc = ecs->stop_func_start;
4916 sr_sal.pspace = get_frame_program_space (frame);
4917 insert_step_resume_breakpoint_at_sal (gdbarch,
4918 sr_sal, null_frame_id);
4919 }
4920 }
4921 else
4922 /* Set a breakpoint at callee's return address (the address
4923 at which the caller will resume). */
4924 insert_step_resume_breakpoint_at_caller (frame);
4925
4926 keep_going (ecs);
4927 return;
4928 }
4929
4930 /* Reverse stepping through solib trampolines. */
4931
4932 if (execution_direction == EXEC_REVERSE
4933 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4934 {
4935 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4936 || (ecs->stop_func_start == 0
4937 && in_solib_dynsym_resolve_code (stop_pc)))
4938 {
4939 /* Any solib trampoline code can be handled in reverse
4940 by simply continuing to single-step. We have already
4941 executed the solib function (backwards), and a few
4942 steps will take us back through the trampoline to the
4943 caller. */
4944 keep_going (ecs);
4945 return;
4946 }
4947 else if (in_solib_dynsym_resolve_code (stop_pc))
4948 {
4949 /* Stepped backward into the solib dynsym resolver.
4950 Set a breakpoint at its start and continue, then
4951 one more step will take us out. */
4952 struct symtab_and_line sr_sal;
4953
4954 init_sal (&sr_sal);
4955 sr_sal.pc = ecs->stop_func_start;
4956 sr_sal.pspace = get_frame_program_space (frame);
4957 insert_step_resume_breakpoint_at_sal (gdbarch,
4958 sr_sal, null_frame_id);
4959 keep_going (ecs);
4960 return;
4961 }
4962 }
4963
4964 stop_pc_sal = find_pc_line (stop_pc, 0);
4965
4966 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4967 the trampoline processing logic; however, there are some trampolines
4968 that have no names, so we should do trampoline handling first. */
4969 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4970 && ecs->stop_func_name == NULL
4971 && stop_pc_sal.line == 0)
4972 {
4973 if (debug_infrun)
4974 fprintf_unfiltered (gdb_stdlog,
4975 "infrun: stepped into undebuggable function\n");
4976
4977 /* The inferior just stepped into, or returned to, an
4978 undebuggable function (where there is no debugging information
4979 and no line number corresponding to the address where the
4980 inferior stopped). Since we want to skip this kind of code,
4981 we keep going until the inferior returns from this
4982 function - unless the user has asked us not to (via
4983 set step-mode) or we no longer know how to get back
4984 to the call site. */
4985 if (step_stop_if_no_debug
4986 || !frame_id_p (frame_unwind_caller_id (frame)))
4987 {
4988 /* If we have no line number information and step-stop-if-no-debug
4989 is set, we stop the step so that the user has a chance to
4990 switch to assembly mode. */
4991 end_stepping_range (ecs);
4992 return;
4993 }
4994 else
4995 {
4996 /* Set a breakpoint at callee's return address (the address
4997 at which the caller will resume). */
4998 insert_step_resume_breakpoint_at_caller (frame);
4999 keep_going (ecs);
5000 return;
5001 }
5002 }
5003
5004 if (ecs->event_thread->control.step_range_end == 1)
5005 {
5006 /* It is stepi or nexti. We always want to stop stepping after
5007 one instruction. */
5008 if (debug_infrun)
5009 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5010 end_stepping_range (ecs);
5011 return;
5012 }
5013
5014 if (stop_pc_sal.line == 0)
5015 {
5016 /* We have no line number information. That means to stop
5017 stepping (does this always happen right after one instruction,
5018 when we do "s" in a function with no line numbers,
5019 or can this happen as a result of a return or longjmp?). */
5020 if (debug_infrun)
5021 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5022 end_stepping_range (ecs);
5023 return;
5024 }
5025
5026 /* Look for "calls" to inlined functions, part one. If the inline
5027 frame machinery detected some skipped call sites, we have entered
5028 a new inline function. */
5029
5030 if (frame_id_eq (get_frame_id (get_current_frame ()),
5031 ecs->event_thread->control.step_frame_id)
5032 && inline_skipped_frames (ecs->ptid))
5033 {
5034 struct symtab_and_line call_sal;
5035
5036 if (debug_infrun)
5037 fprintf_unfiltered (gdb_stdlog,
5038 "infrun: stepped into inlined function\n");
5039
5040 find_frame_sal (get_current_frame (), &call_sal);
5041
5042 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5043 {
5044 /* For "step", we're going to stop. But if the call site
5045 for this inlined function is on the same source line as
5046 we were previously stepping, go down into the function
5047 first. Otherwise stop at the call site. */
5048
5049 if (call_sal.line == ecs->event_thread->current_line
5050 && call_sal.symtab == ecs->event_thread->current_symtab)
5051 step_into_inline_frame (ecs->ptid);
5052
5053 end_stepping_range (ecs);
5054 return;
5055 }
5056 else
5057 {
5058 /* For "next", we should stop at the call site if it is on a
5059 different source line. Otherwise continue through the
5060 inlined function. */
5061 if (call_sal.line == ecs->event_thread->current_line
5062 && call_sal.symtab == ecs->event_thread->current_symtab)
5063 keep_going (ecs);
5064 else
5065 end_stepping_range (ecs);
5066 return;
5067 }
5068 }
5069
5070 /* Look for "calls" to inlined functions, part two. If we are still
5071 in the same real function we were stepping through, but we have
5072 to go further up to find the exact frame ID, we are stepping
5073 through a more inlined call beyond its call site. */
5074
5075 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5076 && !frame_id_eq (get_frame_id (get_current_frame ()),
5077 ecs->event_thread->control.step_frame_id)
5078 && stepped_in_from (get_current_frame (),
5079 ecs->event_thread->control.step_frame_id))
5080 {
5081 if (debug_infrun)
5082 fprintf_unfiltered (gdb_stdlog,
5083 "infrun: stepping through inlined function\n");
5084
5085 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5086 keep_going (ecs);
5087 else
5088 end_stepping_range (ecs);
5089 return;
5090 }
5091
5092 if ((stop_pc == stop_pc_sal.pc)
5093 && (ecs->event_thread->current_line != stop_pc_sal.line
5094 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5095 {
5096 /* We are at the start of a different line. So stop. Note that
5097 we don't stop if we step into the middle of a different line.
5098 That is said to make things like for (;;) statements work
5099 better. */
5100 if (debug_infrun)
5101 fprintf_unfiltered (gdb_stdlog,
5102 "infrun: stepped to a different line\n");
5103 end_stepping_range (ecs);
5104 return;
5105 }
5106
5107 /* We aren't done stepping.
5108
5109 Optimize by setting the stepping range to the line.
5110 (We might not be in the original line, but if we entered a
5111 new line in mid-statement, we continue stepping. This makes
5112 things like for(;;) statements work better.) */
5113
5114 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5115 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5116 ecs->event_thread->control.may_range_step = 1;
5117 set_step_info (frame, stop_pc_sal);
5118
5119 if (debug_infrun)
5120 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5121 keep_going (ecs);
5122 }
5123
5124 /* In all-stop mode, if we're currently stepping but have stopped in
5125 some other thread, we may need to switch back to the stepped
5126 thread. Returns true if we set the inferior running, false if we left
5127 it stopped (and the event needs further processing). */
5128
5129 static int
5130 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5131 {
5132 if (!non_stop)
5133 {
5134 struct thread_info *tp;
5135 struct thread_info *stepping_thread;
5136 struct thread_info *step_over;
5137
5138 /* If any thread is blocked on some internal breakpoint, and we
5139 simply need to step over that breakpoint to get it going
5140 again, do that first. */
5141
5142 /* However, if we see an event for the stepping thread, then we
5143 know all other threads have been moved past their breakpoints
5144 already. Let the caller check whether the step is finished,
5145 etc., before deciding to move it past a breakpoint. */
5146 if (ecs->event_thread->control.step_range_end != 0)
5147 return 0;
5148
5149 /* Check if the current thread is blocked on an incomplete
5150 step-over, interrupted by a random signal. */
5151 if (ecs->event_thread->control.trap_expected
5152 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5153 {
5154 if (debug_infrun)
5155 {
5156 fprintf_unfiltered (gdb_stdlog,
5157 "infrun: need to finish step-over of [%s]\n",
5158 target_pid_to_str (ecs->event_thread->ptid));
5159 }
5160 keep_going (ecs);
5161 return 1;
5162 }
5163
5164 /* Check if the current thread is blocked by a single-step
5165 breakpoint of another thread. */
5166 if (ecs->hit_singlestep_breakpoint)
5167 {
5168 if (debug_infrun)
5169 {
5170 fprintf_unfiltered (gdb_stdlog,
5171 "infrun: need to step [%s] over single-step "
5172 "breakpoint\n",
5173 target_pid_to_str (ecs->ptid));
5174 }
5175 keep_going (ecs);
5176 return 1;
5177 }
5178
5179 /* Otherwise, we no longer expect a trap in the current thread.
5180 Clear the trap_expected flag before switching back -- this is
5181 what keep_going does as well, if we call it. */
5182 ecs->event_thread->control.trap_expected = 0;
5183
5184 /* If scheduler locking applies even if not stepping, there's no
5185 need to walk over threads. Above we've checked whether the
5186 current thread is stepping. If some thread other than the
5187 event thread is stepping, then it must be that scheduler
5188 locking is not in effect. */
5189 if (schedlock_applies (0))
5190 return 0;
5191
5192 /* Look for the stepping/nexting thread, and check if any
5193 thread other than the stepping thread needs to start a
5194 step-over. Do all step-overs before actually proceeding with
5195 step/next/etc. */
5196 stepping_thread = NULL;
5197 step_over = NULL;
5198 ALL_THREADS (tp)
5199 {
5200 /* Ignore threads of processes we're not resuming. */
5201 if (!sched_multi
5202 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5203 continue;
5204
5205 /* When stepping over a breakpoint, we lock all threads
5206 except the one that needs to move past the breakpoint.
5207 If a non-event thread has this set, the "incomplete
5208 step-over" check above should have caught it earlier. */
5209 gdb_assert (!tp->control.trap_expected);
5210
5211 /* Did we find the stepping thread? */
5212 if (tp->control.step_range_end)
5213 {
5214 /* Yep. There should only be one, though. */
5215 gdb_assert (stepping_thread == NULL);
5216
5217 /* The event thread is handled at the top, before we
5218 enter this loop. */
5219 gdb_assert (tp != ecs->event_thread);
5220
5221 /* If some thread other than the event thread is
5222 stepping, then scheduler locking can't be in effect,
5223 otherwise we wouldn't have resumed the current event
5224 thread in the first place. */
5225 gdb_assert (!schedlock_applies (1));
5226
5227 stepping_thread = tp;
5228 }
5229 else if (thread_still_needs_step_over (tp))
5230 {
5231 step_over = tp;
5232
5233 /* At the top we've returned early if the event thread
5234 is stepping. If some thread other than the event
5235 thread is stepping, then scheduler locking can't be
5236 in effect, and we can resume this thread. No need to
5237 keep looking for the stepping thread then. */
5238 break;
5239 }
5240 }
5241
5242 if (step_over != NULL)
5243 {
5244 tp = step_over;
5245 if (debug_infrun)
5246 {
5247 fprintf_unfiltered (gdb_stdlog,
5248 "infrun: need to step-over [%s]\n",
5249 target_pid_to_str (tp->ptid));
5250 }
5251
5252 /* Only the stepping thread should have this set. */
5253 gdb_assert (tp->control.step_range_end == 0);
5254
5255 ecs->ptid = tp->ptid;
5256 ecs->event_thread = tp;
5257 switch_to_thread (ecs->ptid);
5258 keep_going (ecs);
5259 return 1;
5260 }
5261
5262 if (stepping_thread != NULL)
5263 {
5264 struct frame_info *frame;
5265 struct gdbarch *gdbarch;
5266
5267 tp = stepping_thread;
5268
5269 /* If the stepping thread exited, then don't try to switch
5270 back and resume it, which could fail in several different
5271 ways depending on the target. Instead, just keep going.
5272
5273 We can find a stepping dead thread in the thread list in
5274 two cases:
5275
5276 - The target supports thread exit events, and when the
5277 target tries to delete the thread from the thread list,
5278 inferior_ptid pointed at the exiting thread. In such a
5279 case, calling delete_thread does not really remove the
5280 thread from the list; instead, the thread is left listed,
5281 with 'exited' state.
5282
5283 - The target's debug interface does not support thread
5284 exit events, and so we have no idea whatsoever if the
5285 previously stepping thread is still alive. For that
5286 reason, we need to synchronously query the target
5287 now. */
5288 if (is_exited (tp->ptid)
5289 || !target_thread_alive (tp->ptid))
5290 {
5291 if (debug_infrun)
5292 fprintf_unfiltered (gdb_stdlog,
5293 "infrun: not switching back to "
5294 "stepped thread, it has vanished\n");
5295
5296 delete_thread (tp->ptid);
5297 keep_going (ecs);
5298 return 1;
5299 }
5300
5301 if (debug_infrun)
5302 fprintf_unfiltered (gdb_stdlog,
5303 "infrun: switching back to stepped thread\n");
5304
5305 ecs->event_thread = tp;
5306 ecs->ptid = tp->ptid;
5307 context_switch (ecs->ptid);
5308
5309 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5310 frame = get_current_frame ();
5311 gdbarch = get_frame_arch (frame);
5312
5313 /* If the PC of the thread we were trying to single-step has
5314 changed, then that thread has trapped or been signaled,
5315 but the event has not been reported to GDB yet. Re-poll
5316 the target looking for this particular thread's event
5317 (i.e. temporarily enable schedlock) by:
5318
5319 - setting a break at the current PC
5320 - resuming that particular thread, only (by setting
5321 trap expected)
5322
5323 This prevents us from continuously moving the single-step
5324 breakpoint forward, one instruction at a time,
5325 overstepping. */
5326
5327 if (gdbarch_software_single_step_p (gdbarch)
5328 && stop_pc != tp->prev_pc)
5329 {
5330 if (debug_infrun)
5331 fprintf_unfiltered (gdb_stdlog,
5332 "infrun: expected thread advanced also\n");
5333
5334 insert_single_step_breakpoint (get_frame_arch (frame),
5335 get_frame_address_space (frame),
5336 stop_pc);
5337 singlestep_breakpoints_inserted_p = 1;
5338 ecs->event_thread->control.trap_expected = 1;
5339 singlestep_ptid = inferior_ptid;
5340 singlestep_pc = stop_pc;
5341
5342 resume (0, GDB_SIGNAL_0);
5343 prepare_to_wait (ecs);
5344 }
5345 else
5346 {
5347 if (debug_infrun)
5348 fprintf_unfiltered (gdb_stdlog,
5349 "infrun: expected thread still "
5350 "hasn't advanced\n");
5351 keep_going (ecs);
5352 }
5353
5354 return 1;
5355 }
5356 }
5357 return 0;
5358 }
5359
5360 /* Is thread TP in the middle of single-stepping? */
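/* A thread counts as stepping if it has an active step range with no
step-resume breakpoint outstanding, if it is expected to trap while
being stepped over a breakpoint (trap_expected), or if the bpstat
machinery asks for single-stepping (e.g., for software watchpoints).  */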
5361
5362 static int
5363 currently_stepping (struct thread_info *tp)
5364 {
5365 return ((tp->control.step_range_end
5366 && tp->control.step_resume_breakpoint == NULL)
5367 || tp->control.trap_expected
5368 || bpstat_should_step ());
5369 }
5370
5371 /* Inferior has stepped into a subroutine call with source code that
5372 we should not step over. Step to the first line of code in
5373 it. */
5374
5375 static void
5376 handle_step_into_function (struct gdbarch *gdbarch,
5377 struct execution_control_state *ecs)
5378 {
5379 struct symtab *s;
5380 struct symtab_and_line stop_func_sal, sr_sal;
5381
5382 fill_in_stop_func (gdbarch, ecs);
5383
5384 s = find_pc_symtab (stop_pc);
5385 if (s && s->language != language_asm)
5386 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5387 ecs->stop_func_start);
5388
5389 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5390 /* Use the step_resume_break to step until the end of the prologue,
5391 even if that involves jumps (as it seems to on the vax under
5392 4.2). */
5393 /* If the prologue ends in the middle of a source line, continue to
5394 the end of that source line (if it is still within the function).
5395 Otherwise, just go to end of prologue. */
5396 if (stop_func_sal.end
5397 && stop_func_sal.pc != ecs->stop_func_start
5398 && stop_func_sal.end < ecs->stop_func_end)
5399 ecs->stop_func_start = stop_func_sal.end;
5400
5401 /* Architectures which require breakpoint adjustment might not be able
5402 to place a breakpoint at the computed address. If so, the test
5403 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5404 ecs->stop_func_start to an address at which a breakpoint may be
5405 legitimately placed.
5406
5407 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5408 made, GDB will enter an infinite loop when stepping through
5409 optimized code consisting of VLIW instructions which contain
5410 subinstructions corresponding to different source lines. On
5411 FR-V, it's not permitted to place a breakpoint on any but the
5412 first subinstruction of a VLIW instruction. When a breakpoint is
5413 set, GDB will adjust the breakpoint address to the beginning of
5414 the VLIW instruction. Thus, we need to make the corresponding
5415 adjustment here when computing the stop address. */
5416
5417 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5418 {
5419 ecs->stop_func_start
5420 = gdbarch_adjust_breakpoint_address (gdbarch,
5421 ecs->stop_func_start);
5422 }
5423
5424 if (ecs->stop_func_start == stop_pc)
5425 {
5426 /* We are already there: stop now. */
5427 end_stepping_range (ecs);
5428 return;
5429 }
5430 else
5431 {
5432 /* Put the step-breakpoint there and go until there. */
5433 init_sal (&sr_sal); /* initialize to zeroes */
5434 sr_sal.pc = ecs->stop_func_start;
5435 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5436 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5437
5438 /* Do not specify what the fp should be when we stop since on
5439 some machines the prologue is where the new fp value is
5440 established. */
5441 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5442
5443 /* And make sure stepping stops right away then. */
5444 ecs->event_thread->control.step_range_end
5445 = ecs->event_thread->control.step_range_start;
5446 }
5447 keep_going (ecs);
5448 }
5449
5450 /* Inferior has stepped backward into a subroutine call with source
5451 code that we should not step over. Step to the beginning of the
5452 last line of code in it. */
5453
5454 static void
5455 handle_step_into_function_backward (struct gdbarch *gdbarch,
5456 struct execution_control_state *ecs)
5457 {
5458 struct symtab *s;
5459 struct symtab_and_line stop_func_sal;
5460
5461 fill_in_stop_func (gdbarch, ecs);
5462
5463 s = find_pc_symtab (stop_pc);
5464 if (s && s->language != language_asm)
5465 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5466 ecs->stop_func_start);
5467
5468 stop_func_sal = find_pc_line (stop_pc, 0);
5469
5470 /* OK, we're just going to keep stepping here. */
5471 if (stop_func_sal.pc == stop_pc)
5472 {
5473 /* We're there already. Just stop stepping now. */
5474 end_stepping_range (ecs);
5475 }
5476 else
5477 {
5478 /* Else just reset the step range and keep going.
5479 No step-resume breakpoint; they don't work for
5480 epilogues, which can have multiple entry paths. */
5481 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5482 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5483 keep_going (ecs);
5484 }
5485 return;
5486 }
5487
5488 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5489 This is used both to skip functions and to skip over code. */
5490
5491 static void
5492 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5493 struct symtab_and_line sr_sal,
5494 struct frame_id sr_id,
5495 enum bptype sr_type)
5496 {
5497 /* There should never be more than one step-resume or longjmp-resume
5498 breakpoint per thread, so we should never be setting a new
5499 step_resume_breakpoint when one is already active. */
5500 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5501 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5502
5503 if (debug_infrun)
5504 fprintf_unfiltered (gdb_stdlog,
5505 "infrun: inserting step-resume breakpoint at %s\n",
5506 paddress (gdbarch, sr_sal.pc));
5507
5508 inferior_thread ()->control.step_resume_breakpoint
5509 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5510 }
5511
5512 void
5513 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5514 struct symtab_and_line sr_sal,
5515 struct frame_id sr_id)
5516 {
5517 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5518 sr_sal, sr_id,
5519 bp_step_resume);
5520 }
5521
5522 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5523 This is used to skip a potential signal handler.
5524
5525 This is called with the interrupted function's frame. The signal
5526 handler, when it returns, will resume the interrupted function at
5527 RETURN_FRAME.pc. */
5528
5529 static void
5530 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5531 {
5532 struct symtab_and_line sr_sal;
5533 struct gdbarch *gdbarch;
5534
5535 gdb_assert (return_frame != NULL);
5536 init_sal (&sr_sal); /* initialize to zeros */
5537
5538 gdbarch = get_frame_arch (return_frame);
5539 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5540 sr_sal.section = find_pc_overlay (sr_sal.pc);
5541 sr_sal.pspace = get_frame_program_space (return_frame);
5542
5543 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5544 get_stack_frame_id (return_frame),
5545 bp_hp_step_resume);
5546 }
5547
5548 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5549 is used to skip a function after stepping into it (for "next" or if
5550 the called function has no debugging information).
5551
5552 The current function has almost always been reached by single
5553 stepping a call or return instruction. NEXT_FRAME belongs to the
5554 current function, and the breakpoint will be set at the caller's
5555 resume address.
5556
5557 This is a separate function rather than reusing
5558 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5559 get_prev_frame, which may stop prematurely (see the implementation
5560 of frame_unwind_caller_id for an example). */
5561
5562 static void
5563 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5564 {
5565 struct symtab_and_line sr_sal;
5566 struct gdbarch *gdbarch;
5567
5568 /* We shouldn't have gotten here if we don't know where the call site
5569 is. */
5570 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5571
5572 init_sal (&sr_sal); /* initialize to zeros */
5573
5574 gdbarch = frame_unwind_caller_arch (next_frame);
5575 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5576 frame_unwind_caller_pc (next_frame));
5577 sr_sal.section = find_pc_overlay (sr_sal.pc);
5578 sr_sal.pspace = frame_unwind_program_space (next_frame);
5579
5580 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5581 frame_unwind_caller_id (next_frame));
5582 }
5583
5584 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5585 new breakpoint at the target of a jmp_buf. The handling of
5586 longjmp-resume uses the same mechanisms used for handling
5587 "step-resume" breakpoints. */
5588
5589 static void
5590 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5591 {
5592 /* There should never be more than one longjmp-resume breakpoint per
5593 thread, so we should never be setting a new
5594 longjmp_resume_breakpoint when one is already active. */
5595 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5596
5597 if (debug_infrun)
5598 fprintf_unfiltered (gdb_stdlog,
5599 "infrun: inserting longjmp-resume breakpoint at %s\n",
5600 paddress (gdbarch, pc));
5601
5602 inferior_thread ()->control.exception_resume_breakpoint =
5603 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5604 }
5605
5606 /* Insert an exception resume breakpoint. TP is the thread throwing
5607 the exception. The block B is the block of the unwinder debug hook
5608 function. FRAME is the frame corresponding to the call to this
5609 function. SYM is the symbol of the function argument holding the
5610 target PC of the exception. */
5611
5612 static void
5613 insert_exception_resume_breakpoint (struct thread_info *tp,
5614 struct block *b,
5615 struct frame_info *frame,
5616 struct symbol *sym)
5617 {
5618 volatile struct gdb_exception e;
5619
5620 /* We want to ignore errors here. */
5621 TRY_CATCH (e, RETURN_MASK_ERROR)
5622 {
5623 struct symbol *vsym;
5624 struct value *value;
5625 CORE_ADDR handler;
5626 struct breakpoint *bp;
5627
5628 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5629 value = read_var_value (vsym, frame);
5630 /* If the value was optimized out, revert to the old behavior. */
5631 if (! value_optimized_out (value))
5632 {
5633 handler = value_as_address (value);
5634
5635 if (debug_infrun)
5636 fprintf_unfiltered (gdb_stdlog,
5637 "infrun: exception resume at %lx\n",
5638 (unsigned long) handler);
5639
5640 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5641 handler, bp_exception_resume);
5642
5643 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5644 frame = NULL;
5645
5646 bp->thread = tp->num;
5647 inferior_thread ()->control.exception_resume_breakpoint = bp;
5648 }
5649 }
5650 }
5651
5652 /* A helper for check_exception_resume that sets an
5653 exception-breakpoint based on a SystemTap probe. */
5654
5655 static void
5656 insert_exception_resume_from_probe (struct thread_info *tp,
5657 const struct bound_probe *probe,
5658 struct frame_info *frame)
5659 {
5660 struct value *arg_value;
5661 CORE_ADDR handler;
5662 struct breakpoint *bp;
5663
5664 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5665 if (!arg_value)
5666 return;
5667
5668 handler = value_as_address (arg_value);
5669
5670 if (debug_infrun)
5671 fprintf_unfiltered (gdb_stdlog,
5672 "infrun: exception resume at %s\n",
5673 paddress (get_objfile_arch (probe->objfile),
5674 handler));
5675
5676 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5677 handler, bp_exception_resume);
5678 bp->thread = tp->num;
5679 inferior_thread ()->control.exception_resume_breakpoint = bp;
5680 }
5681
5682 /* This is called when an exception has been intercepted. Check to
5683 see whether the exception's destination is of interest, and if so,
5684 set an exception resume breakpoint there. */
5685
5686 static void
5687 check_exception_resume (struct execution_control_state *ecs,
5688 struct frame_info *frame)
5689 {
5690 volatile struct gdb_exception e;
5691 struct bound_probe probe;
5692 struct symbol *func;
5693
5694 /* First see if this exception unwinding breakpoint was set via a
5695 SystemTap probe point. If so, the probe has two arguments: the
5696 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5697 set a breakpoint there. */
5698 probe = find_probe_by_pc (get_frame_pc (frame));
5699 if (probe.probe)
5700 {
5701 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5702 return;
5703 }
5704
5705 func = get_frame_function (frame);
5706 if (!func)
5707 return;
5708
5709 TRY_CATCH (e, RETURN_MASK_ERROR)
5710 {
5711 struct block *b;
5712 struct block_iterator iter;
5713 struct symbol *sym;
5714 int argno = 0;
5715
5716 /* The exception breakpoint is a thread-specific breakpoint on
5717 the unwinder's debug hook, declared as:
5718
5719 void _Unwind_DebugHook (void *cfa, void *handler);
5720
5721 The CFA argument indicates the frame to which control is
5722 about to be transferred. HANDLER is the destination PC.
5723
5724 We ignore the CFA and set a temporary breakpoint at HANDLER.
5725 This is not extremely efficient but it avoids issues in gdb
5726 with computing the DWARF CFA, and it also works even in weird
5727 cases such as throwing an exception from inside a signal
5728 handler. */
5729
5730 b = SYMBOL_BLOCK_VALUE (func);
5731 ALL_BLOCK_SYMBOLS (b, iter, sym)
5732 {
5733 if (!SYMBOL_IS_ARGUMENT (sym))
5734 continue;
5735
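/* The first argument is the CFA, which we skip; the second is
HANDLER, and that is where the resume breakpoint goes.  */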
5736 if (argno == 0)
5737 ++argno;
5738 else
5739 {
5740 insert_exception_resume_breakpoint (ecs->event_thread,
5741 b, frame, sym);
5742 break;
5743 }
5744 }
5745 }
5746 }
5747
5748 static void
5749 stop_waiting (struct execution_control_state *ecs)
5750 {
5751 if (debug_infrun)
5752 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
5753
5754 clear_step_over_info ();
5755
5756 /* Let callers know we don't want to wait for the inferior anymore. */
5757 ecs->wait_some_more = 0;
5758 }
5759
5760 /* Called when we should continue running the inferior, because the
5761 current event doesn't cause a user visible stop. This does the
5762 resuming part; waiting for the next event is done elsewhere. */
5763
5764 static void
5765 keep_going (struct execution_control_state *ecs)
5766 {
5767 /* Make sure normal_stop is called if we get a QUIT handled before
5768 reaching resume. */
5769 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5770
5771 /* Save the pc before execution, to compare with pc after stop. */
5772 ecs->event_thread->prev_pc
5773 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5774
5775 if (ecs->event_thread->control.trap_expected
5776 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5777 {
5778 /* We haven't yet gotten our trap, and either: intercepted a
5779 non-signal event (e.g., a fork); or took a signal which we
5780 are supposed to pass through to the inferior. Simply
5781 continue. */
5782 discard_cleanups (old_cleanups);
5783 resume (currently_stepping (ecs->event_thread),
5784 ecs->event_thread->suspend.stop_signal);
5785 }
5786 else
5787 {
5788 volatile struct gdb_exception e;
5789 struct regcache *regcache = get_current_regcache ();
5790
5791 /* Either the trap was not expected, but we are continuing
5792 anyway (if we got a signal, the user asked it be passed to
5793 the child)
5794 -- or --
5795 We got our expected trap, but decided we should resume from
5796 it.
5797
5798 We're going to run this baby now!
5799
5800 Note that insert_breakpoints won't try to re-insert
5801 already inserted breakpoints. Therefore, we don't
5802 care if breakpoints were already inserted, or not. */
5803
5804 /* If we need to step over a breakpoint, and we're not using
5805 displaced stepping to do so, insert all breakpoints
5806 (watchpoints, etc.) but the one we're stepping over, step one
5807 instruction, and then re-insert the breakpoint when that step
5808 is finished. */
5809 if ((ecs->hit_singlestep_breakpoint
5810 || thread_still_needs_step_over (ecs->event_thread))
5811 && !use_displaced_stepping (get_regcache_arch (regcache)))
5812 {
5813 set_step_over_info (get_regcache_aspace (regcache),
5814 regcache_read_pc (regcache));
5815 }
5816 else
5817 clear_step_over_info ();
5818
5819 /* Stop stepping if inserting breakpoints fails. */
5820 TRY_CATCH (e, RETURN_MASK_ERROR)
5821 {
5822 insert_breakpoints ();
5823 }
5824 if (e.reason < 0)
5825 {
5826 exception_print (gdb_stderr, e);
5827 stop_waiting (ecs);
5828 return;
5829 }
5830
5831 ecs->event_thread->control.trap_expected
5832 = (ecs->event_thread->stepping_over_breakpoint
5833 || ecs->hit_singlestep_breakpoint);
5834
5835 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5836 explicitly specifies that such a signal should be delivered
5837 to the target program). Typically, that would occur when a
5838 user is debugging a target monitor on a simulator: the target
5839 monitor sets a breakpoint; the simulator encounters this
5840 breakpoint and halts the simulation handing control to GDB;
5841 GDB, noting that the stop address doesn't map to any known
5842 breakpoint, returns control back to the simulator; the
5843 simulator then delivers the hardware equivalent of a
5844 GDB_SIGNAL_TRAP to the program being debugged. */
5845 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5846 && !signal_program[ecs->event_thread->suspend.stop_signal])
5847 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5848
5849 discard_cleanups (old_cleanups);
5850 resume (currently_stepping (ecs->event_thread),
5851 ecs->event_thread->suspend.stop_signal);
5852 }
5853
5854 prepare_to_wait (ecs);
5855 }
5856
5857 /* This function normally comes after a resume, before
5858 handle_inferior_event exits. It takes care of any last bits of
5859 housekeeping, and sets the all-important wait_some_more flag. */
5860
5861 static void
5862 prepare_to_wait (struct execution_control_state *ecs)
5863 {
5864 if (debug_infrun)
5865 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5866
5867 /* This is the old end of the while loop. Let everybody know we
5868 want to wait for the inferior some more and get called again
5869 soon. */
5870 ecs->wait_some_more = 1;
5871 }
5872
5873 /* We are done with the step range of a step/next/si/ni command.
5874 Called once for each N of a "step N" operation. Notify observers
5875 if we are not in the middle of doing a "step N" operation for N > 1. */
5876
5877 static void
5878 end_stepping_range (struct execution_control_state *ecs)
5879 {
5880 ecs->event_thread->control.stop_step = 1;
5881 if (!ecs->event_thread->step_multi)
5882 observer_notify_end_stepping_range ();
5883 stop_waiting (ecs);
5884 }
5885
5886 /* Several print_*_reason functions to print why the inferior has stopped.
5887 We always print something when the inferior exits, or receives a signal.
5888 The rest of the cases are dealt with later on in normal_stop and
5889 print_it_typical. Ideally there should be a call to one of these
5890 print_*_reason functions from handle_inferior_event each time
5891 stop_waiting is called.
5892
5893 Note that we don't call these directly, instead we delegate that to
5894 the interpreters, through observers. Interpreters then call these
5895 with whatever uiout is right. */
5896
5897 void
5898 print_end_stepping_range_reason (struct ui_out *uiout)
5899 {
5900 /* For CLI-like interpreters, print nothing. */
5901
5902 if (ui_out_is_mi_like_p (uiout))
5903 {
5904 ui_out_field_string (uiout, "reason",
5905 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5906 }
5907 }
5908
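/* For example (illustrative only), on the CLI a SIGSEGV termination is
reported roughly as "Program terminated with signal SIGSEGV,
Segmentation fault." followed by "The program no longer exists.";
MI consumers additionally get "reason", "signal-name" and
"signal-meaning" fields.  */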
5909 void
5910 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5911 {
5912 annotate_signalled ();
5913 if (ui_out_is_mi_like_p (uiout))
5914 ui_out_field_string
5915 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5916 ui_out_text (uiout, "\nProgram terminated with signal ");
5917 annotate_signal_name ();
5918 ui_out_field_string (uiout, "signal-name",
5919 gdb_signal_to_name (siggnal));
5920 annotate_signal_name_end ();
5921 ui_out_text (uiout, ", ");
5922 annotate_signal_string ();
5923 ui_out_field_string (uiout, "signal-meaning",
5924 gdb_signal_to_string (siggnal));
5925 annotate_signal_string_end ();
5926 ui_out_text (uiout, ".\n");
5927 ui_out_text (uiout, "The program no longer exists.\n");
5928 }
5929
5930 void
5931 print_exited_reason (struct ui_out *uiout, int exitstatus)
5932 {
5933 struct inferior *inf = current_inferior ();
5934 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5935
5936 annotate_exited (exitstatus);
5937 if (exitstatus)
5938 {
5939 if (ui_out_is_mi_like_p (uiout))
5940 ui_out_field_string (uiout, "reason",
5941 async_reason_lookup (EXEC_ASYNC_EXITED));
5942 ui_out_text (uiout, "[Inferior ");
5943 ui_out_text (uiout, plongest (inf->num));
5944 ui_out_text (uiout, " (");
5945 ui_out_text (uiout, pidstr);
5946 ui_out_text (uiout, ") exited with code ");
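/* Note that the exit code is printed in octal ("0%o").  */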
5947 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5948 ui_out_text (uiout, "]\n");
5949 }
5950 else
5951 {
5952 if (ui_out_is_mi_like_p (uiout))
5953 ui_out_field_string
5954 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5955 ui_out_text (uiout, "[Inferior ");
5956 ui_out_text (uiout, plongest (inf->num));
5957 ui_out_text (uiout, " (");
5958 ui_out_text (uiout, pidstr);
5959 ui_out_text (uiout, ") exited normally]\n");
5960 }
5961 }
5962
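/* Typical CLI output here looks something like "Program received signal
SIGINT, Interrupt." (illustrative); for GDB_SIGNAL_0 on a non-MI uiout
the "<thread> stopped" form below is printed instead.  */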
5963 void
5964 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5965 {
5966 annotate_signal ();
5967
5968 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5969 {
5970 struct thread_info *t = inferior_thread ();
5971
5972 ui_out_text (uiout, "\n[");
5973 ui_out_field_string (uiout, "thread-name",
5974 target_pid_to_str (t->ptid));
5975 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5976 ui_out_text (uiout, " stopped");
5977 }
5978 else
5979 {
5980 ui_out_text (uiout, "\nProgram received signal ");
5981 annotate_signal_name ();
5982 if (ui_out_is_mi_like_p (uiout))
5983 ui_out_field_string
5984 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5985 ui_out_field_string (uiout, "signal-name",
5986 gdb_signal_to_name (siggnal));
5987 annotate_signal_name_end ();
5988 ui_out_text (uiout, ", ");
5989 annotate_signal_string ();
5990 ui_out_field_string (uiout, "signal-meaning",
5991 gdb_signal_to_string (siggnal));
5992 annotate_signal_string_end ();
5993 }
5994 ui_out_text (uiout, ".\n");
5995 }
5996
5997 void
5998 print_no_history_reason (struct ui_out *uiout)
5999 {
6000 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6001 }
6002
6003 /* Print current location without a level number, if we have changed
6004 functions or hit a breakpoint. Print source line if we have one.
6005 bpstat_print contains the logic deciding in detail what to print,
6006 based on the event(s) that just occurred. */
6007
6008 void
6009 print_stop_event (struct target_waitstatus *ws)
6010 {
6011 int bpstat_ret;
6012 int source_flag;
6013 int do_frame_printing = 1;
6014 struct thread_info *tp = inferior_thread ();
6015
6016 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6017 switch (bpstat_ret)
6018 {
6019 case PRINT_UNKNOWN:
6020 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6021 should) carry around the function and does (or should) use
6022 that when doing a frame comparison. */
6023 if (tp->control.stop_step
6024 && frame_id_eq (tp->control.step_frame_id,
6025 get_frame_id (get_current_frame ()))
6026 && step_start_function == find_pc_function (stop_pc))
6027 {
6028 /* Finished step, just print source line. */
6029 source_flag = SRC_LINE;
6030 }
6031 else
6032 {
6033 /* Print location and source line. */
6034 source_flag = SRC_AND_LOC;
6035 }
6036 break;
6037 case PRINT_SRC_AND_LOC:
6038 /* Print location and source line. */
6039 source_flag = SRC_AND_LOC;
6040 break;
6041 case PRINT_SRC_ONLY:
6042 source_flag = SRC_LINE;
6043 break;
6044 case PRINT_NOTHING:
6045 /* Something bogus. */
6046 source_flag = SRC_LINE;
6047 do_frame_printing = 0;
6048 break;
6049 default:
6050 internal_error (__FILE__, __LINE__, _("Unknown value."));
6051 }
6052
6053 /* The behavior of this routine with respect to the source
6054 flag is:
6055 SRC_LINE: Print only source line
6056 LOCATION: Print only location
6057 SRC_AND_LOC: Print location and source line. */
6058 if (do_frame_printing)
6059 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6060
6061 /* Display the auto-display expressions. */
6062 do_displays ();
6063 }
6064
6065 /* Here to return control to GDB when the inferior stops for real.
6066 Print appropriate messages, remove breakpoints, give terminal our modes.
6067
6068 STOP_PRINT_FRAME nonzero means print the executing frame
6069 (pc, function, args, file, line number and line text).
6070 BREAKPOINTS_FAILED nonzero means stop was due to error
6071 attempting to insert breakpoints. */
6072
6073 void
6074 normal_stop (void)
6075 {
6076 struct target_waitstatus last;
6077 ptid_t last_ptid;
6078 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6079
6080 get_last_target_status (&last_ptid, &last);
6081
6082 /* If an exception is thrown from this point on, make sure to
6083 propagate GDB's knowledge of the executing state to the
6084 frontend/user running state. A QUIT is an easy exception to see
6085 here, so do this before any filtered output. */
6086 if (!non_stop)
6087 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6088 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6089 && last.kind != TARGET_WAITKIND_EXITED
6090 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6091 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6092
6093 /* As with the notification of thread events, we want to delay
6094 notifying the user that we've switched thread context until
6095 the inferior actually stops.
6096
6097 There's no point in saying anything if the inferior has exited.
6098 Note that SIGNALLED here means "exited with a signal", not
6099 "received a signal".
6100
6101 Also skip saying anything in non-stop mode. In that mode, as we
6102 don't want GDB to switch threads behind the user's back, to avoid
6103 races where the user is typing a command to apply to thread x,
6104 but GDB switches to thread y before the user finishes entering
6105 the command, fetch_inferior_event installs a cleanup to restore
6106 the current thread back to the thread the user had selected right
6107 after this event is handled, so we're not really switching, only
6108 informing of a stop. */
6109 if (!non_stop
6110 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6111 && target_has_execution
6112 && last.kind != TARGET_WAITKIND_SIGNALLED
6113 && last.kind != TARGET_WAITKIND_EXITED
6114 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6115 {
6116 target_terminal_ours_for_output ();
6117 printf_filtered (_("[Switching to %s]\n"),
6118 target_pid_to_str (inferior_ptid));
6119 annotate_thread_changed ();
6120 previous_inferior_ptid = inferior_ptid;
6121 }
6122
6123 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6124 {
6125 gdb_assert (sync_execution || !target_can_async_p ());
6126
6127 target_terminal_ours_for_output ();
6128 printf_filtered (_("No unwaited-for children left.\n"));
6129 }
6130
6131 if (!breakpoints_always_inserted_mode () && target_has_execution)
6132 {
6133 if (remove_breakpoints ())
6134 {
6135 target_terminal_ours_for_output ();
6136 printf_filtered (_("Cannot remove breakpoints because "
6137 "program is no longer writable.\nFurther "
6138 "execution is probably impossible.\n"));
6139 }
6140 }
6141
6142 /* If an auto-display called a function and that got a signal,
6143 delete that auto-display to avoid an infinite recursion. */
6144
6145 if (stopped_by_random_signal)
6146 disable_current_display ();
6147
6148 /* Don't print a message if in the middle of doing a "step n"
6149 operation for n > 1 */
6150 if (target_has_execution
6151 && last.kind != TARGET_WAITKIND_SIGNALLED
6152 && last.kind != TARGET_WAITKIND_EXITED
6153 && inferior_thread ()->step_multi
6154 && inferior_thread ()->control.stop_step)
6155 goto done;
6156
6157 target_terminal_ours ();
6158 async_enable_stdin ();
6159
6160 /* Set the current source location. This will also happen if we
6161 display the frame below, but the current SAL will be incorrect
6162 during a user hook-stop function. */
6163 if (has_stack_frames () && !stop_stack_dummy)
6164 set_current_sal_from_frame (get_current_frame ());
6165
6166 /* Let the user/frontend see the threads as stopped, but do nothing
6167 if the thread was running an infcall. We may be e.g., evaluating
6168 a breakpoint condition. In that case, the thread had state
6169 THREAD_RUNNING before the infcall, and shall remain set to
6170 running, all without informing the user/frontend about state
6171 transition changes. If this is actually a call command, then the
6172 thread was originally already stopped, so there's no state to
6173 finish either. */
6174 if (target_has_execution && inferior_thread ()->control.in_infcall)
6175 discard_cleanups (old_chain);
6176 else
6177 do_cleanups (old_chain);
6178
6179 /* Look up the hook_stop and run it (CLI internally handles problem
6180 of stop_command's pre-hook not existing). */
6181 if (stop_command)
6182 catch_errors (hook_stop_stub, stop_command,
6183 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6184
6185 if (!has_stack_frames ())
6186 goto done;
6187
6188 if (last.kind == TARGET_WAITKIND_SIGNALLED
6189 || last.kind == TARGET_WAITKIND_EXITED)
6190 goto done;
6191
6192 /* Select innermost stack frame - i.e., current frame is frame 0,
6193 and current location is based on that.
6194 Don't do this on return from a stack dummy routine,
6195 or if the program has exited. */
6196
6197 if (!stop_stack_dummy)
6198 {
6199 select_frame (get_current_frame ());
6200
6201 /* If --batch-silent is enabled then there's no need to print the current
6202 source location, and trying to do so risks causing an error message about
6203 missing source files. */
6204 if (stop_print_frame && !batch_silent)
6205 print_stop_event (&last);
6206 }
6207
6208 /* Save the function value return registers, if we care.
6209 We might be about to restore their previous contents. */
6210 if (inferior_thread ()->control.proceed_to_finish
6211 && execution_direction != EXEC_REVERSE)
6212 {
6213 /* This should not be necessary. */
6214 if (stop_registers)
6215 regcache_xfree (stop_registers);
6216
6217 /* NB: The copy goes through to the target picking up the value of
6218 all the registers. */
6219 stop_registers = regcache_dup (get_current_regcache ());
6220 }
6221
6222 if (stop_stack_dummy == STOP_STACK_DUMMY)
6223 {
6224 /* Pop the empty frame that contains the stack dummy.
6225 This also restores inferior state prior to the call
6226 (struct infcall_suspend_state). */
6227 struct frame_info *frame = get_current_frame ();
6228
6229 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6230 frame_pop (frame);
6231 /* frame_pop() calls reinit_frame_cache as the last thing it
6232 does which means there's currently no selected frame. We
6233 don't need to re-establish a selected frame if the dummy call
6234 returns normally, that will be done by
6235 restore_infcall_control_state. However, we do have to handle
6236 the case where the dummy call is returning after being
6237 stopped (e.g. the dummy call previously hit a breakpoint).
6238 We can't know which case we have so just always re-establish
6239 a selected frame here. */
6240 select_frame (get_current_frame ());
6241 }
6242
6243 done:
6244 annotate_stopped ();
6245
6246 /* Suppress the stop observer if we're in the middle of:
6247
6248 - a step n (n > 1), as there are still more steps to be done.
6249
6250 - a "finish" command, as the observer will be called in
6251 finish_command_continuation, so it can include the inferior
6252 function's return value.
6253
6254 - calling an inferior function, as we pretend the inferior didn't
6255 run at all. The return value of the call is handled by the
6256 expression evaluator, through call_function_by_hand. */
6257
6258 if (!target_has_execution
6259 || last.kind == TARGET_WAITKIND_SIGNALLED
6260 || last.kind == TARGET_WAITKIND_EXITED
6261 || last.kind == TARGET_WAITKIND_NO_RESUMED
6262 || (!(inferior_thread ()->step_multi
6263 && inferior_thread ()->control.stop_step)
6264 && !(inferior_thread ()->control.stop_bpstat
6265 && inferior_thread ()->control.proceed_to_finish)
6266 && !inferior_thread ()->control.in_infcall))
6267 {
6268 if (!ptid_equal (inferior_ptid, null_ptid))
6269 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6270 stop_print_frame);
6271 else
6272 observer_notify_normal_stop (NULL, stop_print_frame);
6273 }
6274
6275 if (target_has_execution)
6276 {
6277 if (last.kind != TARGET_WAITKIND_SIGNALLED
6278 && last.kind != TARGET_WAITKIND_EXITED)
6279 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6280 Delete any breakpoint that is to be deleted at the next stop. */
6281 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6282 }
6283
6284 /* Try to get rid of automatically added inferiors that are no
6285 longer needed. Keeping those around slows down things linearly.
6286 Note that this never removes the current inferior. */
6287 prune_inferiors ();
6288 }
6289
6290 static int
6291 hook_stop_stub (void *cmd)
6292 {
6293 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6294 return (0);
6295 }
6296 \f
6297 int
6298 signal_stop_state (int signo)
6299 {
6300 return signal_stop[signo];
6301 }
6302
6303 int
6304 signal_print_state (int signo)
6305 {
6306 return signal_print[signo];
6307 }
6308
6309 int
6310 signal_pass_state (int signo)
6311 {
6312 return signal_program[signo];
6313 }
6314
6315 static void
6316 signal_cache_update (int signo)
6317 {
6318 if (signo == -1)
6319 {
6320 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6321 signal_cache_update (signo);
6322
6323 return;
6324 }
6325
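/* A signal is added to the "pass without GDB intervention" set only
if GDB neither stops for it nor prints it, the program is to receive
it, and no signal catchpoint is set on it.  */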
6326 signal_pass[signo] = (signal_stop[signo] == 0
6327 && signal_print[signo] == 0
6328 && signal_program[signo] == 1
6329 && signal_catch[signo] == 0);
6330 }
6331
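/* Record in signal_stop whether GDB should stop for signal SIGNO,
refresh the cached pass set, and return the previous setting. The
*_print_update and *_pass_update functions below do the same for
the print and pass-to-program tables.  */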
6332 int
6333 signal_stop_update (int signo, int state)
6334 {
6335 int ret = signal_stop[signo];
6336
6337 signal_stop[signo] = state;
6338 signal_cache_update (signo);
6339 return ret;
6340 }
6341
6342 int
6343 signal_print_update (int signo, int state)
6344 {
6345 int ret = signal_print[signo];
6346
6347 signal_print[signo] = state;
6348 signal_cache_update (signo);
6349 return ret;
6350 }
6351
6352 int
6353 signal_pass_update (int signo, int state)
6354 {
6355 int ret = signal_program[signo];
6356
6357 signal_program[signo] = state;
6358 signal_cache_update (signo);
6359 return ret;
6360 }
6361
6362 /* Update the global 'signal_catch' from INFO and notify the
6363 target. */
6364
6365 void
6366 signal_catch_update (const unsigned int *info)
6367 {
6368 int i;
6369
6370 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6371 signal_catch[i] = info[i] > 0;
6372 signal_cache_update (-1);
6373 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6374 }
6375
6376 static void
6377 sig_print_header (void)
6378 {
6379 printf_filtered (_("Signal Stop\tPrint\tPass "
6380 "to program\tDescription\n"));
6381 }
6382
6383 static void
6384 sig_print_info (enum gdb_signal oursig)
6385 {
6386 const char *name = gdb_signal_to_name (oursig);
6387 int name_padding = 13 - strlen (name);
6388
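/* Pad the signal name out to a 13-column field so that the Stop,
Print and Pass columns line up under the header.  */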
6389 if (name_padding <= 0)
6390 name_padding = 0;
6391
6392 printf_filtered ("%s", name);
6393 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6394 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6395 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6396 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6397 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6398 }
6399
6400 /* Specify how various signals in the inferior should be handled. */
6401
6402 static void
6403 handle_command (char *args, int from_tty)
6404 {
6405 char **argv;
6406 int digits, wordlen;
6407 int sigfirst, signum, siglast;
6408 enum gdb_signal oursig;
6409 int allsigs;
6410 int nsigs;
6411 unsigned char *sigs;
6412 struct cleanup *old_chain;
6413
6414 if (args == NULL)
6415 {
6416 error_no_arg (_("signal to handle"));
6417 }
6418
6419 /* Allocate and zero an array of flags for which signals to handle. */
6420
6421 nsigs = (int) GDB_SIGNAL_LAST;
6422 sigs = (unsigned char *) alloca (nsigs);
6423 memset (sigs, 0, nsigs);
6424
6425 /* Break the command line up into args. */
6426
6427 argv = gdb_buildargv (args);
6428 old_chain = make_cleanup_freeargv (argv);
6429
6430 /* Walk through the args, looking for signal oursigs, signal names, and
6431 actions. Signal numbers and signal names may be interspersed with
6432 actions, with the actions being performed for all signals cumulatively
6433 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
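/* For example (illustrative), ARGS of "SIGUSR1 nostop noprint pass"
marks SIGUSR1 to be silently delivered to the inferior, while
"5-10 print" applies the "print" action to signals 5 through 10
of GDB's internal numbering.  */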
6434
6435 while (*argv != NULL)
6436 {
6437 wordlen = strlen (*argv);
6438 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6439 {;
6440 }
6441 allsigs = 0;
6442 sigfirst = siglast = -1;
6443
6444 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6445 {
6446 /* Apply action to all signals except those used by the
6447 debugger. Silently skip those. */
6448 allsigs = 1;
6449 sigfirst = 0;
6450 siglast = nsigs - 1;
6451 }
6452 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6453 {
6454 SET_SIGS (nsigs, sigs, signal_stop);
6455 SET_SIGS (nsigs, sigs, signal_print);
6456 }
6457 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6458 {
6459 UNSET_SIGS (nsigs, sigs, signal_program);
6460 }
6461 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6462 {
6463 SET_SIGS (nsigs, sigs, signal_print);
6464 }
6465 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6466 {
6467 SET_SIGS (nsigs, sigs, signal_program);
6468 }
6469 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6470 {
6471 UNSET_SIGS (nsigs, sigs, signal_stop);
6472 }
6473 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6474 {
6475 SET_SIGS (nsigs, sigs, signal_program);
6476 }
6477 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6478 {
6479 UNSET_SIGS (nsigs, sigs, signal_print);
6480 UNSET_SIGS (nsigs, sigs, signal_stop);
6481 }
6482 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6483 {
6484 UNSET_SIGS (nsigs, sigs, signal_program);
6485 }
6486 else if (digits > 0)
6487 {
6488 /* It is numeric. The numeric signal refers to our own
6489 internal signal numbering from target.h, not to host/target
6490 signal number. This is a feature; users really should be
6491 using symbolic names anyway, and the common ones like
6492 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6493
6494 sigfirst = siglast = (int)
6495 gdb_signal_from_command (atoi (*argv));
6496 if ((*argv)[digits] == '-')
6497 {
6498 siglast = (int)
6499 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6500 }
6501 if (sigfirst > siglast)
6502 {
6503 /* Bet he didn't figure we'd think of this case... */
6504 signum = sigfirst;
6505 sigfirst = siglast;
6506 siglast = signum;
6507 }
6508 }
6509 else
6510 {
6511 oursig = gdb_signal_from_name (*argv);
6512 if (oursig != GDB_SIGNAL_UNKNOWN)
6513 {
6514 sigfirst = siglast = (int) oursig;
6515 }
6516 else
6517 {
6518 /* Not a number and not a recognized flag word => complain. */
6519 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6520 }
6521 }
6522
6523 /* If any signal numbers or symbol names were found, set flags for
6524 which signals to apply actions to. */
6525
6526 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6527 {
6528 switch ((enum gdb_signal) signum)
6529 {
6530 case GDB_SIGNAL_TRAP:
6531 case GDB_SIGNAL_INT:
6532 if (!allsigs && !sigs[signum])
6533 {
6534 if (query (_("%s is used by the debugger.\n\
6535 Are you sure you want to change it? "),
6536 gdb_signal_to_name ((enum gdb_signal) signum)))
6537 {
6538 sigs[signum] = 1;
6539 }
6540 else
6541 {
6542 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6543 gdb_flush (gdb_stdout);
6544 }
6545 }
6546 break;
6547 case GDB_SIGNAL_0:
6548 case GDB_SIGNAL_DEFAULT:
6549 case GDB_SIGNAL_UNKNOWN:
6550 /* Make sure that "all" doesn't print these. */
6551 break;
6552 default:
6553 sigs[signum] = 1;
6554 break;
6555 }
6556 }
6557
6558 argv++;
6559 }
6560
6561 for (signum = 0; signum < nsigs; signum++)
6562 if (sigs[signum])
6563 {
6564 signal_cache_update (-1);
6565 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6566 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6567
6568 if (from_tty)
6569 {
6570 /* Show the results. */
6571 sig_print_header ();
6572 for (; signum < nsigs; signum++)
6573 if (sigs[signum])
6574 sig_print_info (signum);
6575 }
6576
6577 break;
6578 }
6579
6580 do_cleanups (old_chain);
6581 }
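/* Illustrative examples of the argument parsing above (a sketch; the
   SET_SIGS / UNSET_SIGS macros, defined earlier in this file, are
   assumed to apply a flag to every signal selected so far):

     handle SIGALRM stop      sets signal_stop[ALRM] and signal_print[ALRM]
     handle SIGCHLD nostop    clears signal_stop[CHLD]
     handle SIGQUIT nopass    clears signal_program[QUIT]

   "ignore" is a synonym for "nopass" and "noignore" for "pass".
   Signal names and actions may be interspersed; each action applies to
   all signals named so far on the command line.  */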
6582
6583 /* Complete the "handle" command. */
6584
6585 static VEC (char_ptr) *
6586 handle_completer (struct cmd_list_element *ignore,
6587 const char *text, const char *word)
6588 {
6589 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6590 static const char * const keywords[] =
6591 {
6592 "all",
6593 "stop",
6594 "ignore",
6595 "print",
6596 "pass",
6597 "nostop",
6598 "noignore",
6599 "noprint",
6600 "nopass",
6601 NULL,
6602 };
6603
6604 vec_signals = signal_completer (ignore, text, word);
6605 vec_keywords = complete_on_enum (keywords, word, word);
6606
6607 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6608 VEC_free (char_ptr, vec_signals);
6609 VEC_free (char_ptr, vec_keywords);
6610 return return_val;
6611 }
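/* For example, completing "handle no" offers the keywords "nostop",
   "noignore", "noprint" and "nopass", while completing "handle SIGI"
   offers the matching signal names (e.g. SIGINT, SIGILL), since the
   result merges signal_completer and complete_on_enum above.  */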
6612
6613 static void
6614 xdb_handle_command (char *args, int from_tty)
6615 {
6616 char **argv;
6617 struct cleanup *old_chain;
6618
6619 if (args == NULL)
6620 error_no_arg (_("xdb command"));
6621
6622 /* Break the command line up into args. */
6623
6624 argv = gdb_buildargv (args);
6625 old_chain = make_cleanup_freeargv (argv);
6626 if (argv[1] != (char *) NULL)
6627 {
6628 char *argBuf;
6629 int bufLen;
6630
6631 bufLen = strlen (argv[0]) + 20;
6632 argBuf = (char *) xmalloc (bufLen);
6633 if (argBuf)
6634 {
6635 int validFlag = 1;
6636 enum gdb_signal oursig;
6637
6638 oursig = gdb_signal_from_name (argv[0]);
6639 memset (argBuf, 0, bufLen);
6640 if (strcmp (argv[1], "Q") == 0)
6641 sprintf (argBuf, "%s %s", argv[0], "noprint");
6642 else
6643 {
6644 if (strcmp (argv[1], "s") == 0)
6645 {
6646 if (!signal_stop[oursig])
6647 sprintf (argBuf, "%s %s", argv[0], "stop");
6648 else
6649 sprintf (argBuf, "%s %s", argv[0], "nostop");
6650 }
6651 else if (strcmp (argv[1], "i") == 0)
6652 {
6653 if (!signal_program[oursig])
6654 sprintf (argBuf, "%s %s", argv[0], "pass");
6655 else
6656 sprintf (argBuf, "%s %s", argv[0], "nopass");
6657 }
6658 else if (strcmp (argv[1], "r") == 0)
6659 {
6660 if (!signal_print[oursig])
6661 sprintf (argBuf, "%s %s", argv[0], "print");
6662 else
6663 sprintf (argBuf, "%s %s", argv[0], "noprint");
6664 }
6665 else
6666 validFlag = 0;
6667 }
6668 if (validFlag)
6669 handle_command (argBuf, from_tty);
6670 else
6671 printf_filtered (_("Invalid signal handling flag.\n"));
6672 if (argBuf)
6673 xfree (argBuf);
6674 }
6675 }
6676 do_cleanups (old_chain);
6677 }
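/* The xdb "z" command above is translated into an equivalent "handle"
   invocation: "s" toggles stop/nostop, "i" toggles pass/nopass, "r"
   toggles print/noprint, and "Q" forces noprint, based on the current
   signal_stop / signal_program / signal_print settings.  */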
6678
6679 enum gdb_signal
6680 gdb_signal_from_command (int num)
6681 {
6682 if (num >= 1 && num <= 15)
6683 return (enum gdb_signal) num;
6684 error (_("Only signals 1-15 are valid as numeric signals.\n\
6685 Use \"info signals\" for a list of symbolic signals."));
6686 }
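/* The direct cast above relies on the first fifteen values of the
   internal gdb_signal enumeration lining up with the traditional Unix
   signal numbers (e.g. 1 = SIGHUP, 2 = SIGINT, 15 = SIGTERM); numbers
   above 15 differ between hosts, so they are rejected here.  */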
6687
6688 /* Print current contents of the tables set by the handle command.
6689 It is possible we should just be printing signals actually used
6690 by the current target (but for things to work right when switching
6691 targets, all signals should be in the signal tables). */
6692
6693 static void
6694 signals_info (char *signum_exp, int from_tty)
6695 {
6696 enum gdb_signal oursig;
6697
6698 sig_print_header ();
6699
6700 if (signum_exp)
6701 {
6702 /* First see if this is a symbol name. */
6703 oursig = gdb_signal_from_name (signum_exp);
6704 if (oursig == GDB_SIGNAL_UNKNOWN)
6705 {
6706 /* No, try numeric. */
6707 oursig =
6708 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6709 }
6710 sig_print_info (oursig);
6711 return;
6712 }
6713
6714 printf_filtered ("\n");
6715 /* These ugly casts brought to you by the native VAX compiler. */
6716 for (oursig = GDB_SIGNAL_FIRST;
6717 (int) oursig < (int) GDB_SIGNAL_LAST;
6718 oursig = (enum gdb_signal) ((int) oursig + 1))
6719 {
6720 QUIT;
6721
6722 if (oursig != GDB_SIGNAL_UNKNOWN
6723 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6724 sig_print_info (oursig);
6725 }
6726
6727 printf_filtered (_("\nUse the \"handle\" command "
6728 "to change these tables.\n"));
6729 }
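/* For example, "info signals SIGSEGV" prints just the SIGSEGV row,
   while a bare "info signals" walks the whole table above, skipping
   the pseudo-signals GDB_SIGNAL_0, GDB_SIGNAL_DEFAULT and
   GDB_SIGNAL_UNKNOWN.  */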
6730
6731 /* Check if it makes sense to read $_siginfo from the current thread
6732 at this point. If not, throw an error. */
6733
6734 static void
6735 validate_siginfo_access (void)
6736 {
6737 /* No current inferior, no siginfo. */
6738 if (ptid_equal (inferior_ptid, null_ptid))
6739 error (_("No thread selected."));
6740
6741 /* Don't try to read from a dead thread. */
6742 if (is_exited (inferior_ptid))
6743     error (_("The current thread has terminated."));
6744
6745 /* ... or from a spinning thread. */
6746 if (is_running (inferior_ptid))
6747 error (_("Selected thread is running."));
6748 }
6749
6750 /* The $_siginfo convenience variable is a bit special. We don't know
6751 for sure the type of the value until we actually have a chance to
6752 fetch the data. The type can change depending on gdbarch, so it is
6753    also dependent on which thread you have selected.  We handle this by:
6754
6755 1. making $_siginfo be an internalvar that creates a new value on
6756 access.
6757
6758 2. making the value of $_siginfo be an lval_computed value. */
6759
6760 /* This function implements the lval_computed support for reading a
6761 $_siginfo value. */
6762
6763 static void
6764 siginfo_value_read (struct value *v)
6765 {
6766 LONGEST transferred;
6767
6768 validate_siginfo_access ();
6769
6770 transferred =
6771 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6772 NULL,
6773 value_contents_all_raw (v),
6774 value_offset (v),
6775 TYPE_LENGTH (value_type (v)));
6776
6777 if (transferred != TYPE_LENGTH (value_type (v)))
6778 error (_("Unable to read siginfo"));
6779 }
6780
6781 /* This function implements the lval_computed support for writing a
6782 $_siginfo value. */
6783
6784 static void
6785 siginfo_value_write (struct value *v, struct value *fromval)
6786 {
6787 LONGEST transferred;
6788
6789 validate_siginfo_access ();
6790
6791 transferred = target_write (&current_target,
6792 TARGET_OBJECT_SIGNAL_INFO,
6793 NULL,
6794 value_contents_all_raw (fromval),
6795 value_offset (v),
6796 TYPE_LENGTH (value_type (fromval)));
6797
6798 if (transferred != TYPE_LENGTH (value_type (fromval)))
6799 error (_("Unable to write siginfo"));
6800 }
6801
6802 static const struct lval_funcs siginfo_value_funcs =
6803 {
6804 siginfo_value_read,
6805 siginfo_value_write
6806 };
6807
6808 /* Return a new value with the correct type for the siginfo object of
6809 the current thread using architecture GDBARCH. Return a void value
6810 if there's no object available. */
6811
6812 static struct value *
6813 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6814 void *ignore)
6815 {
6816 if (target_has_stack
6817 && !ptid_equal (inferior_ptid, null_ptid)
6818 && gdbarch_get_siginfo_type_p (gdbarch))
6819 {
6820 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6821
6822 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6823 }
6824
6825 return allocate_value (builtin_type (gdbarch)->builtin_void);
6826 }
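/* From the CLI this surfaces as the $_siginfo convenience variable,
   e.g. "print $_siginfo.si_signo" on a target whose gdbarch provides a
   siginfo type; on other targets $_siginfo simply evaluates to void.  */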
6827
6828 \f
6829 /* infcall_suspend_state contains state about the program itself like its
6830 registers and any signal it received when it last stopped.
6831 This state must be restored regardless of how the inferior function call
6832 ends (either successfully, or after it hits a breakpoint or signal)
6833 if the program is to properly continue where it left off. */
6834
6835 struct infcall_suspend_state
6836 {
6837 struct thread_suspend_state thread_suspend;
6838 #if 0 /* Currently unused and empty structures are not valid C. */
6839 struct inferior_suspend_state inferior_suspend;
6840 #endif
6841
6842 /* Other fields: */
6843 CORE_ADDR stop_pc;
6844 struct regcache *registers;
6845
6846 /* Format of SIGINFO_DATA or NULL if it is not present. */
6847 struct gdbarch *siginfo_gdbarch;
6848
6849   /* The siginfo data, in the layout defined by SIGINFO_GDBARCH, with a
6850      length of TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For a different
6851      gdbarch the content would be invalid.  */
6852 gdb_byte *siginfo_data;
6853 };
6854
6855 struct infcall_suspend_state *
6856 save_infcall_suspend_state (void)
6857 {
6858 struct infcall_suspend_state *inf_state;
6859 struct thread_info *tp = inferior_thread ();
6860 #if 0
6861 struct inferior *inf = current_inferior ();
6862 #endif
6863 struct regcache *regcache = get_current_regcache ();
6864 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6865 gdb_byte *siginfo_data = NULL;
6866
6867 if (gdbarch_get_siginfo_type_p (gdbarch))
6868 {
6869 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6870 size_t len = TYPE_LENGTH (type);
6871 struct cleanup *back_to;
6872
6873 siginfo_data = xmalloc (len);
6874 back_to = make_cleanup (xfree, siginfo_data);
6875
6876 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6877 siginfo_data, 0, len) == len)
6878 discard_cleanups (back_to);
6879 else
6880 {
6881 /* Errors ignored. */
6882 do_cleanups (back_to);
6883 siginfo_data = NULL;
6884 }
6885 }
6886
6887 inf_state = XCNEW (struct infcall_suspend_state);
6888
6889 if (siginfo_data)
6890 {
6891 inf_state->siginfo_gdbarch = gdbarch;
6892 inf_state->siginfo_data = siginfo_data;
6893 }
6894
6895 inf_state->thread_suspend = tp->suspend;
6896 #if 0 /* Currently unused and empty structures are not valid C. */
6897 inf_state->inferior_suspend = inf->suspend;
6898 #endif
6899
6900 /* run_inferior_call will not use the signal due to its `proceed' call with
6901 GDB_SIGNAL_0 anyway. */
6902 tp->suspend.stop_signal = GDB_SIGNAL_0;
6903
6904 inf_state->stop_pc = stop_pc;
6905
6906 inf_state->registers = regcache_dup (regcache);
6907
6908 return inf_state;
6909 }
6910
6911 /* Restore inferior session state to INF_STATE. */
6912
6913 void
6914 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6915 {
6916 struct thread_info *tp = inferior_thread ();
6917 #if 0
6918 struct inferior *inf = current_inferior ();
6919 #endif
6920 struct regcache *regcache = get_current_regcache ();
6921 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6922
6923 tp->suspend = inf_state->thread_suspend;
6924 #if 0 /* Currently unused and empty structures are not valid C. */
6925 inf->suspend = inf_state->inferior_suspend;
6926 #endif
6927
6928 stop_pc = inf_state->stop_pc;
6929
6930 if (inf_state->siginfo_gdbarch == gdbarch)
6931 {
6932 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6933
6934 /* Errors ignored. */
6935 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6936 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6937 }
6938
6939 /* The inferior can be gone if the user types "print exit(0)"
6940 (and perhaps other times). */
6941 if (target_has_execution)
6942 /* NB: The register write goes through to the target. */
6943 regcache_cpy (regcache, inf_state->registers);
6944
6945 discard_infcall_suspend_state (inf_state);
6946 }
6947
6948 static void
6949 do_restore_infcall_suspend_state_cleanup (void *state)
6950 {
6951 restore_infcall_suspend_state (state);
6952 }
6953
6954 struct cleanup *
6955 make_cleanup_restore_infcall_suspend_state
6956 (struct infcall_suspend_state *inf_state)
6957 {
6958 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6959 }
6960
6961 void
6962 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6963 {
6964 regcache_xfree (inf_state->registers);
6965 xfree (inf_state->siginfo_data);
6966 xfree (inf_state);
6967 }
6968
6969 struct regcache *
6970 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6971 {
6972 return inf_state->registers;
6973 }
6974
6975 /* infcall_control_state contains state regarding gdb's control of the
6976 inferior itself like stepping control. It also contains session state like
6977 the user's currently selected frame. */
6978
6979 struct infcall_control_state
6980 {
6981 struct thread_control_state thread_control;
6982 struct inferior_control_state inferior_control;
6983
6984 /* Other fields: */
6985 enum stop_stack_kind stop_stack_dummy;
6986 int stopped_by_random_signal;
6987 int stop_after_trap;
6988
6989   /* ID of the selected frame when the inferior function call was made.  */
6990 struct frame_id selected_frame_id;
6991 };
6992
6993 /* Save all of the information associated with the inferior<==>gdb
6994 connection. */
6995
6996 struct infcall_control_state *
6997 save_infcall_control_state (void)
6998 {
6999 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7000 struct thread_info *tp = inferior_thread ();
7001 struct inferior *inf = current_inferior ();
7002
7003 inf_status->thread_control = tp->control;
7004 inf_status->inferior_control = inf->control;
7005
7006 tp->control.step_resume_breakpoint = NULL;
7007 tp->control.exception_resume_breakpoint = NULL;
7008
7009   /* Save the original bpstat chain in INF_STATUS; replace it in TP with a
7010      copy of the chain.  If the caller's caller is walking the chain, it
7011      expects to get the original chain back when
7012      restore_infcall_control_state is called.  */
7013 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7014
7015 /* Other fields: */
7016 inf_status->stop_stack_dummy = stop_stack_dummy;
7017 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7018 inf_status->stop_after_trap = stop_after_trap;
7019
7020 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7021
7022 return inf_status;
7023 }
7024
7025 static int
7026 restore_selected_frame (void *args)
7027 {
7028 struct frame_id *fid = (struct frame_id *) args;
7029 struct frame_info *frame;
7030
7031 frame = frame_find_by_id (*fid);
7032
7033   /* If frame_find_by_id () fails, the previously selected frame no longer
7034      exists (e.g. the frame's stack was clobbered).  */
7035 if (frame == NULL)
7036 {
7037 warning (_("Unable to restore previously selected frame."));
7038 return 0;
7039 }
7040
7041 select_frame (frame);
7042
7043 return (1);
7044 }
7045
7046 /* Restore inferior session state to INF_STATUS. */
7047
7048 void
7049 restore_infcall_control_state (struct infcall_control_state *inf_status)
7050 {
7051 struct thread_info *tp = inferior_thread ();
7052 struct inferior *inf = current_inferior ();
7053
7054 if (tp->control.step_resume_breakpoint)
7055 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7056
7057 if (tp->control.exception_resume_breakpoint)
7058 tp->control.exception_resume_breakpoint->disposition
7059 = disp_del_at_next_stop;
7060
7061 /* Handle the bpstat_copy of the chain. */
7062 bpstat_clear (&tp->control.stop_bpstat);
7063
7064 tp->control = inf_status->thread_control;
7065 inf->control = inf_status->inferior_control;
7066
7067 /* Other fields: */
7068 stop_stack_dummy = inf_status->stop_stack_dummy;
7069 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7070 stop_after_trap = inf_status->stop_after_trap;
7071
7072 if (target_has_stack)
7073 {
7074 /* The point of catch_errors is that if the stack is clobbered,
7075 walking the stack might encounter a garbage pointer and
7076 error() trying to dereference it. */
7077 if (catch_errors
7078 (restore_selected_frame, &inf_status->selected_frame_id,
7079 "Unable to restore previously selected frame:\n",
7080 RETURN_MASK_ERROR) == 0)
7081 /* Error in restoring the selected frame. Select the innermost
7082 frame. */
7083 select_frame (get_current_frame ());
7084 }
7085
7086 xfree (inf_status);
7087 }
7088
7089 static void
7090 do_restore_infcall_control_state_cleanup (void *sts)
7091 {
7092 restore_infcall_control_state (sts);
7093 }
7094
7095 struct cleanup *
7096 make_cleanup_restore_infcall_control_state
7097 (struct infcall_control_state *inf_status)
7098 {
7099 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7100 }
7101
7102 void
7103 discard_infcall_control_state (struct infcall_control_state *inf_status)
7104 {
7105 if (inf_status->thread_control.step_resume_breakpoint)
7106 inf_status->thread_control.step_resume_breakpoint->disposition
7107 = disp_del_at_next_stop;
7108
7109 if (inf_status->thread_control.exception_resume_breakpoint)
7110 inf_status->thread_control.exception_resume_breakpoint->disposition
7111 = disp_del_at_next_stop;
7112
7113 /* See save_infcall_control_state for info on stop_bpstat. */
7114 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7115
7116 xfree (inf_status);
7117 }
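/* A minimal sketch of how the control-state routines above are
   typically driven by an inferior-function-call site; the surrounding
   call logic is assumed, not taken from this file:

     struct infcall_control_state *status = save_infcall_control_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_control_state (status);

     ... set up and run the call ...

   If the call's effects should be kept, the caller runs
   discard_cleanups (chain) followed by discard_infcall_control_state
   (status); otherwise do_cleanups (chain) restores the saved state and
   frees STATUS through restore_infcall_control_state.  */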
7118 \f
7119 /* restore_inferior_ptid() will be used by the cleanup machinery
7120 to restore the inferior_ptid value saved in a call to
7121 save_inferior_ptid(). */
7122
7123 static void
7124 restore_inferior_ptid (void *arg)
7125 {
7126 ptid_t *saved_ptid_ptr = arg;
7127
7128 inferior_ptid = *saved_ptid_ptr;
7129 xfree (arg);
7130 }
7131
7132 /* Save the value of inferior_ptid so that it may be restored by a
7133 later call to do_cleanups(). Returns the struct cleanup pointer
7134 needed for later doing the cleanup. */
7135
7136 struct cleanup *
7137 save_inferior_ptid (void)
7138 {
7139 ptid_t *saved_ptid_ptr;
7140
7141 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7142 *saved_ptid_ptr = inferior_ptid;
7143 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7144 }
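/* Typical usage (a sketch; SOME_OTHER_PTID stands for whatever ptid
   the caller needs and is not defined in this file):

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ... operate with the temporarily selected ptid ...

     do_cleanups (old_chain);

   The final do_cleanups restores the original inferior_ptid.  */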
7145
7146 /* See inferior.h. */
7147
7148 void
7149 clear_exit_convenience_vars (void)
7150 {
7151 clear_internalvar (lookup_internalvar ("_exitsignal"));
7152 clear_internalvar (lookup_internalvar ("_exitcode"));
7153 }
7154 \f
7155
7156 /* User interface for reverse debugging:
7157 Set exec-direction / show exec-direction commands
7158    (returns an error unless the target can execute in reverse).  */
7159
7160 int execution_direction = EXEC_FORWARD;
7161 static const char exec_forward[] = "forward";
7162 static const char exec_reverse[] = "reverse";
7163 static const char *exec_direction = exec_forward;
7164 static const char *const exec_direction_names[] = {
7165 exec_forward,
7166 exec_reverse,
7167 NULL
7168 };
7169
7170 static void
7171 set_exec_direction_func (char *args, int from_tty,
7172 struct cmd_list_element *cmd)
7173 {
7174 if (target_can_execute_reverse)
7175 {
7176 if (!strcmp (exec_direction, exec_forward))
7177 execution_direction = EXEC_FORWARD;
7178 else if (!strcmp (exec_direction, exec_reverse))
7179 execution_direction = EXEC_REVERSE;
7180 }
7181 else
7182 {
7183 exec_direction = exec_forward;
7184 error (_("Target does not support this operation."));
7185 }
7186 }
7187
7188 static void
7189 show_exec_direction_func (struct ui_file *out, int from_tty,
7190 struct cmd_list_element *cmd, const char *value)
7191 {
7192 switch (execution_direction) {
7193 case EXEC_FORWARD:
7194 fprintf_filtered (out, _("Forward.\n"));
7195 break;
7196 case EXEC_REVERSE:
7197 fprintf_filtered (out, _("Reverse.\n"));
7198 break;
7199 default:
7200 internal_error (__FILE__, __LINE__,
7201 _("bogus execution_direction value: %d"),
7202 (int) execution_direction);
7203 }
7204 }
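/* From the CLI, "set exec-direction reverse" only succeeds when the
   current target reports target_can_execute_reverse (for instance
   during a record/replay session); otherwise set_exec_direction_func
   above resets the setting to "forward" and raises an error.  */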
7205
7206 static void
7207 show_schedule_multiple (struct ui_file *file, int from_tty,
7208 struct cmd_list_element *c, const char *value)
7209 {
7210 fprintf_filtered (file, _("Resuming the execution of threads "
7211 "of all processes is %s.\n"), value);
7212 }
7213
7214 /* Implementation of `siginfo' variable. */
7215
7216 static const struct internalvar_funcs siginfo_funcs =
7217 {
7218 siginfo_make_value,
7219 NULL,
7220 NULL
7221 };
7222
7223 void
7224 _initialize_infrun (void)
7225 {
7226 int i;
7227 int numsigs;
7228 struct cmd_list_element *c;
7229
7230 add_info ("signals", signals_info, _("\
7231 What debugger does when program gets various signals.\n\
7232 Specify a signal as argument to print info on that signal only."));
7233 add_info_alias ("handle", "signals", 0);
7234
7235 c = add_com ("handle", class_run, handle_command, _("\
7236 Specify how to handle signals.\n\
7237 Usage: handle SIGNAL [ACTIONS]\n\
7238 Args are signals and actions to apply to those signals.\n\
7239 If no actions are specified, the current settings for the specified signals\n\
7240 will be displayed instead.\n\
7241 \n\
7242 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7243 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7244 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7245 The special arg \"all\" is recognized to mean all signals except those\n\
7246 used by the debugger, typically SIGTRAP and SIGINT.\n\
7247 \n\
7248 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7249 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7250 Stop means reenter debugger if this signal happens (implies print).\n\
7251 Print means print a message if this signal happens.\n\
7252 Pass means let program see this signal; otherwise program doesn't know.\n\
7253 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7254 Pass and Stop may be combined.\n\
7255 \n\
7256 Multiple signals may be specified. Signal numbers and signal names\n\
7257 may be interspersed with actions, with the actions being performed for\n\
7258 all signals cumulatively specified."));
7259 set_cmd_completer (c, handle_completer);
7260
7261 if (xdb_commands)
7262 {
7263 add_com ("lz", class_info, signals_info, _("\
7264 What debugger does when program gets various signals.\n\
7265 Specify a signal as argument to print info on that signal only."));
7266 add_com ("z", class_run, xdb_handle_command, _("\
7267 Specify how to handle a signal.\n\
7268 Args are signals and actions to apply to those signals.\n\
7269 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7270 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7271 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7272 The special arg \"all\" is recognized to mean all signals except those\n\
7273 used by the debugger, typically SIGTRAP and SIGINT.\n\
7274 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7275 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7276 nopass), \"Q\" (noprint)\n\
7277 Stop means reenter debugger if this signal happens (implies print).\n\
7278 Print means print a message if this signal happens.\n\
7279 Pass means let program see this signal; otherwise program doesn't know.\n\
7280 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7281 Pass and Stop may be combined."));
7282 }
7283
7284 if (!dbx_commands)
7285 stop_command = add_cmd ("stop", class_obscure,
7286 not_just_help_class_command, _("\
7287 There is no `stop' command, but you can set a hook on `stop'.\n\
7288 This allows you to set a list of commands to be run each time execution\n\
7289 of the program stops."), &cmdlist);
7290
7291 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7292 Set inferior debugging."), _("\
7293 Show inferior debugging."), _("\
7294 When non-zero, inferior specific debugging is enabled."),
7295 NULL,
7296 show_debug_infrun,
7297 &setdebuglist, &showdebuglist);
7298
7299 add_setshow_boolean_cmd ("displaced", class_maintenance,
7300 &debug_displaced, _("\
7301 Set displaced stepping debugging."), _("\
7302 Show displaced stepping debugging."), _("\
7303 When non-zero, displaced stepping specific debugging is enabled."),
7304 NULL,
7305 show_debug_displaced,
7306 &setdebuglist, &showdebuglist);
7307
7308 add_setshow_boolean_cmd ("non-stop", no_class,
7309 &non_stop_1, _("\
7310 Set whether gdb controls the inferior in non-stop mode."), _("\
7311 Show whether gdb controls the inferior in non-stop mode."), _("\
7312 When debugging a multi-threaded program and this setting is\n\
7313 off (the default, also called all-stop mode), when one thread stops\n\
7314 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7315 all other threads in the program while you interact with the thread of\n\
7316 interest. When you continue or step a thread, you can allow the other\n\
7317 threads to run, or have them remain stopped, but while you inspect any\n\
7318 thread's state, all threads stop.\n\
7319 \n\
7320 In non-stop mode, when one thread stops, other threads can continue\n\
7321 to run freely. You'll be able to step each thread independently,\n\
7322 leave it stopped or free to run as needed."),
7323 set_non_stop,
7324 show_non_stop,
7325 &setlist,
7326 &showlist);
7327
7328 numsigs = (int) GDB_SIGNAL_LAST;
7329 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7330 signal_print = (unsigned char *)
7331 xmalloc (sizeof (signal_print[0]) * numsigs);
7332 signal_program = (unsigned char *)
7333 xmalloc (sizeof (signal_program[0]) * numsigs);
7334 signal_catch = (unsigned char *)
7335 xmalloc (sizeof (signal_catch[0]) * numsigs);
7336 signal_pass = (unsigned char *)
7337     xmalloc (sizeof (signal_pass[0]) * numsigs);
7338 for (i = 0; i < numsigs; i++)
7339 {
7340 signal_stop[i] = 1;
7341 signal_print[i] = 1;
7342 signal_program[i] = 1;
7343 signal_catch[i] = 0;
7344 }
7345
7346 /* Signals caused by debugger's own actions
7347 should not be given to the program afterwards. */
7348 signal_program[GDB_SIGNAL_TRAP] = 0;
7349 signal_program[GDB_SIGNAL_INT] = 0;
7350
7351 /* Signals that are not errors should not normally enter the debugger. */
7352 signal_stop[GDB_SIGNAL_ALRM] = 0;
7353 signal_print[GDB_SIGNAL_ALRM] = 0;
7354 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7355 signal_print[GDB_SIGNAL_VTALRM] = 0;
7356 signal_stop[GDB_SIGNAL_PROF] = 0;
7357 signal_print[GDB_SIGNAL_PROF] = 0;
7358 signal_stop[GDB_SIGNAL_CHLD] = 0;
7359 signal_print[GDB_SIGNAL_CHLD] = 0;
7360 signal_stop[GDB_SIGNAL_IO] = 0;
7361 signal_print[GDB_SIGNAL_IO] = 0;
7362 signal_stop[GDB_SIGNAL_POLL] = 0;
7363 signal_print[GDB_SIGNAL_POLL] = 0;
7364 signal_stop[GDB_SIGNAL_URG] = 0;
7365 signal_print[GDB_SIGNAL_URG] = 0;
7366 signal_stop[GDB_SIGNAL_WINCH] = 0;
7367 signal_print[GDB_SIGNAL_WINCH] = 0;
7368 signal_stop[GDB_SIGNAL_PRIO] = 0;
7369 signal_print[GDB_SIGNAL_PRIO] = 0;
7370
7371 /* These signals are used internally by user-level thread
7372 implementations. (See signal(5) on Solaris.) Like the above
7373 signals, a healthy program receives and handles them as part of
7374 its normal operation. */
7375 signal_stop[GDB_SIGNAL_LWP] = 0;
7376 signal_print[GDB_SIGNAL_LWP] = 0;
7377 signal_stop[GDB_SIGNAL_WAITING] = 0;
7378 signal_print[GDB_SIGNAL_WAITING] = 0;
7379 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7380 signal_print[GDB_SIGNAL_CANCEL] = 0;
7381
7382 /* Update cached state. */
7383 signal_cache_update (-1);
7384
7385 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7386 &stop_on_solib_events, _("\
7387 Set stopping for shared library events."), _("\
7388 Show stopping for shared library events."), _("\
7389 If nonzero, gdb will give control to the user when the dynamic linker\n\
7390 notifies gdb of shared library events. The most common event of interest\n\
7391 to the user would be loading/unloading of a new library."),
7392 set_stop_on_solib_events,
7393 show_stop_on_solib_events,
7394 &setlist, &showlist);
7395
7396 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7397 follow_fork_mode_kind_names,
7398 &follow_fork_mode_string, _("\
7399 Set debugger response to a program call of fork or vfork."), _("\
7400 Show debugger response to a program call of fork or vfork."), _("\
7401 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7402 parent - the original process is debugged after a fork\n\
7403 child - the new process is debugged after a fork\n\
7404 The unfollowed process will continue to run.\n\
7405 By default, the debugger will follow the parent process."),
7406 NULL,
7407 show_follow_fork_mode_string,
7408 &setlist, &showlist);
7409
7410 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7411 follow_exec_mode_names,
7412 &follow_exec_mode_string, _("\
7413 Set debugger response to a program call of exec."), _("\
7414 Show debugger response to a program call of exec."), _("\
7415 An exec call replaces the program image of a process.\n\
7416 \n\
7417 follow-exec-mode can be:\n\
7418 \n\
7419 new - the debugger creates a new inferior and rebinds the process\n\
7420 to this new inferior. The program the process was running before\n\
7421 the exec call can be restarted afterwards by restarting the original\n\
7422 inferior.\n\
7423 \n\
7424 same - the debugger keeps the process bound to the same inferior.\n\
7425 The new executable image replaces the previous executable loaded in\n\
7426 the inferior. Restarting the inferior after the exec call restarts\n\
7427 the executable the process was running after the exec call.\n\
7428 \n\
7429 By default, the debugger will use the same inferior."),
7430 NULL,
7431 show_follow_exec_mode_string,
7432 &setlist, &showlist);
7433
7434 add_setshow_enum_cmd ("scheduler-locking", class_run,
7435 scheduler_enums, &scheduler_mode, _("\
7436 Set mode for locking scheduler during execution."), _("\
7437 Show mode for locking scheduler during execution."), _("\
7438 off == no locking (threads may preempt at any time)\n\
7439 on == full locking (no thread except the current thread may run)\n\
7440 step == scheduler locked during every single-step operation.\n\
7441 In this mode, no other thread may run during a step command.\n\
7442 Other threads may run while stepping over a function call ('next')."),
7443 set_schedlock_func, /* traps on target vector */
7444 show_scheduler_mode,
7445 &setlist, &showlist);
7446
7447 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7448 Set mode for resuming threads of all processes."), _("\
7449 Show mode for resuming threads of all processes."), _("\
7450 When on, execution commands (such as 'continue' or 'next') resume all\n\
7451 threads of all processes. When off (which is the default), execution\n\
7452 commands only resume the threads of the current process. The set of\n\
7453 threads that are resumed is further refined by the scheduler-locking\n\
7454 mode (see help set scheduler-locking)."),
7455 NULL,
7456 show_schedule_multiple,
7457 &setlist, &showlist);
7458
7459 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7460 Set mode of the step operation."), _("\
7461 Show mode of the step operation."), _("\
7462 When set, doing a step over a function without debug line information\n\
7463 will stop at the first instruction of that function. Otherwise, the\n\
7464 function is skipped and the step command stops at a different source line."),
7465 NULL,
7466 show_step_stop_if_no_debug,
7467 &setlist, &showlist);
7468
7469 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7470 &can_use_displaced_stepping, _("\
7471 Set debugger's willingness to use displaced stepping."), _("\
7472 Show debugger's willingness to use displaced stepping."), _("\
7473 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7474 supported by the target architecture. If off, gdb will not use displaced\n\
7475 stepping to step over breakpoints, even if such is supported by the target\n\
7476 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7477 if the target architecture supports it and non-stop mode is active, but will not\n\
7478 use it in all-stop mode (see help set non-stop)."),
7479 NULL,
7480 show_can_use_displaced_stepping,
7481 &setlist, &showlist);
7482
7483 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7484 &exec_direction, _("Set direction of execution.\n\
7485 Options are 'forward' or 'reverse'."),
7486 _("Show direction of execution (forward/reverse)."),
7487 _("Tells gdb whether to execute forward or backward."),
7488 set_exec_direction_func, show_exec_direction_func,
7489 &setlist, &showlist);
7490
7491 /* Set/show detach-on-fork: user-settable mode. */
7492
7493 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7494 Set whether gdb will detach the child of a fork."), _("\
7495 Show whether gdb will detach the child of a fork."), _("\
7496 Tells gdb whether to detach the child of a fork."),
7497 NULL, NULL, &setlist, &showlist);
7498
7499 /* Set/show disable address space randomization mode. */
7500
7501 add_setshow_boolean_cmd ("disable-randomization", class_support,
7502 &disable_randomization, _("\
7503 Set disabling of debuggee's virtual address space randomization."), _("\
7504 Show disabling of debuggee's virtual address space randomization."), _("\
7505 When this mode is on (which is the default), randomization of the virtual\n\
7506 address space is disabled. Standalone programs run with the randomization\n\
7507 enabled by default on some platforms."),
7508 &set_disable_randomization,
7509 &show_disable_randomization,
7510 &setlist, &showlist);
7511
7512 /* ptid initializations */
7513 inferior_ptid = null_ptid;
7514 target_last_wait_ptid = minus_one_ptid;
7515
7516 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7517 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7518 observer_attach_thread_exit (infrun_thread_thread_exit);
7519 observer_attach_inferior_exit (infrun_inferior_exit);
7520
7521 /* Explicitly create without lookup, since that tries to create a
7522 value with a void typed value, and when we get here, gdbarch
7523 isn't initialized yet. At this point, we're quite sure there
7524 isn't another convenience variable of the same name. */
7525 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7526
7527 add_setshow_boolean_cmd ("observer", no_class,
7528 &observer_mode_1, _("\
7529 Set whether gdb controls the inferior in observer mode."), _("\
7530 Show whether gdb controls the inferior in observer mode."), _("\
7531 In observer mode, GDB can get data from the inferior, but not\n\
7532 affect its execution. Registers and memory may not be changed,\n\
7533 breakpoints may not be set, and the program cannot be interrupted\n\
7534 or signalled."),
7535 set_observer_mode,
7536 show_observer_mode,
7537 &setlist,
7538 &showlist);
7539 }