gdb/infrun.c (binutils-gdb.git; commit "Add new infrun.h header")
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <string.h>
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "exceptions.h"
29 #include "breakpoint.h"
30 #include "gdb_wait.h"
31 #include "gdbcore.h"
32 #include "gdbcmd.h"
33 #include "cli/cli-script.h"
34 #include "target.h"
35 #include "gdbthread.h"
36 #include "annotate.h"
37 #include "symfile.h"
38 #include "top.h"
39 #include <signal.h>
40 #include "inf-loop.h"
41 #include "regcache.h"
42 #include "value.h"
43 #include "observer.h"
44 #include "language.h"
45 #include "solib.h"
46 #include "main.h"
47 #include "dictionary.h"
48 #include "block.h"
49 #include "gdb_assert.h"
50 #include "mi/mi-common.h"
51 #include "event-top.h"
52 #include "record.h"
53 #include "record-full.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59 #include "skip.h"
60 #include "probe.h"
61 #include "objfiles.h"
62 #include "completer.h"
63 #include "target-descriptions.h"
64 #include "target-dcache.h"
65
66 /* Prototypes for local functions */
67
68 static void signals_info (char *, int);
69
70 static void handle_command (char *, int);
71
72 static void sig_print_info (enum gdb_signal);
73
74 static void sig_print_header (void);
75
76 static void resume_cleanups (void *);
77
78 static int hook_stop_stub (void *);
79
80 static int restore_selected_frame (void *);
81
82 static int follow_fork (void);
83
84 static void set_schedlock_func (char *args, int from_tty,
85 struct cmd_list_element *c);
86
87 static int currently_stepping (struct thread_info *tp);
88
89 static void xdb_handle_command (char *args, int from_tty);
90
91 static void print_exited_reason (int exitstatus);
92
93 static void print_signal_exited_reason (enum gdb_signal siggnal);
94
95 static void print_no_history_reason (void);
96
97 static void print_signal_received_reason (enum gdb_signal siggnal);
98
99 static void print_end_stepping_range_reason (void);
100
101 void _initialize_infrun (void);
102
103 void nullify_last_target_wait_ptid (void);
104
105 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
106
107 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
108
109 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
110
111 /* When set, stop the 'step' command if we enter a function which has
112 no line number information. The normal behavior is that we step
113 over such functions.  */
114 int step_stop_if_no_debug = 0;
115 static void
116 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
120 }
121
122 /* In asynchronous mode, but simulating synchronous execution. */
123
124 int sync_execution = 0;
125
126 /* proceed and normal_stop use this to notify the user when the
127 inferior stopped in a different thread than it had been running
128 in. */
129
130 static ptid_t previous_inferior_ptid;
131
132 /* If set (default for legacy reasons), when following a fork, GDB
133 will detach from one of the fork branches, child or parent.
134 Exactly which branch is detached depends on the 'set follow-fork-mode'
135 setting. */
136
137 static int detach_fork = 1;
138
139 int debug_displaced = 0;
140 static void
141 show_debug_displaced (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
145 }
146
147 unsigned int debug_infrun = 0;
148 static void
149 show_debug_infrun (struct ui_file *file, int from_tty,
150 struct cmd_list_element *c, const char *value)
151 {
152 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
153 }
154
155
156 /* Support for disabling address space randomization. */
157
158 int disable_randomization = 1;
159
160 static void
161 show_disable_randomization (struct ui_file *file, int from_tty,
162 struct cmd_list_element *c, const char *value)
163 {
164 if (target_supports_disable_randomization ())
165 fprintf_filtered (file,
166 _("Disabling randomization of debuggee's "
167 "virtual address space is %s.\n"),
168 value);
169 else
170 fputs_filtered (_("Disabling randomization of debuggee's "
171 "virtual address space is unsupported on\n"
172 "this platform.\n"), file);
173 }
174
175 static void
176 set_disable_randomization (char *args, int from_tty,
177 struct cmd_list_element *c)
178 {
179 if (!target_supports_disable_randomization ())
180 error (_("Disabling randomization of debuggee's "
181 "virtual address space is unsupported on\n"
182 "this platform."));
183 }
184
185 /* User interface for non-stop mode. */
186
187 int non_stop = 0;
188 static int non_stop_1 = 0;
189
190 static void
191 set_non_stop (char *args, int from_tty,
192 struct cmd_list_element *c)
193 {
194 if (target_has_execution)
195 {
196 non_stop_1 = non_stop;
197 error (_("Cannot change this setting while the inferior is running."));
198 }
199
200 non_stop = non_stop_1;
201 }
202
203 static void
204 show_non_stop (struct ui_file *file, int from_tty,
205 struct cmd_list_element *c, const char *value)
206 {
207 fprintf_filtered (file,
208 _("Controlling the inferior in non-stop mode is %s.\n"),
209 value);
210 }
211
212 /* "Observer mode" is somewhat like a more extreme version of
213 non-stop, in which all GDB operations that might affect the
214 target's execution have been disabled. */
215
216 int observer_mode = 0;
217 static int observer_mode_1 = 0;
218
219 static void
220 set_observer_mode (char *args, int from_tty,
221 struct cmd_list_element *c)
222 {
223 if (target_has_execution)
224 {
225 observer_mode_1 = observer_mode;
226 error (_("Cannot change this setting while the inferior is running."));
227 }
228
229 observer_mode = observer_mode_1;
230
231 may_write_registers = !observer_mode;
232 may_write_memory = !observer_mode;
233 may_insert_breakpoints = !observer_mode;
234 may_insert_tracepoints = !observer_mode;
235 /* We can insert fast tracepoints in or out of observer mode,
236 but enable them if we're going into this mode. */
237 if (observer_mode)
238 may_insert_fast_tracepoints = 1;
239 may_stop = !observer_mode;
240 update_target_permissions ();
241
242 /* Going *into* observer mode we must force non-stop, then
243 going out we leave it that way. */
244 if (observer_mode)
245 {
246 target_async_permitted = 1;
247 pagination_enabled = 0;
248 non_stop = non_stop_1 = 1;
249 }
250
251 if (from_tty)
252 printf_filtered (_("Observer mode is now %s.\n"),
253 (observer_mode ? "on" : "off"));
254 }
255
256 static void
257 show_observer_mode (struct ui_file *file, int from_tty,
258 struct cmd_list_element *c, const char *value)
259 {
260 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
261 }
262
263 /* This updates the value of observer mode based on changes in
264 permissions. Note that we are deliberately ignoring the values of
265 may-write-registers and may-write-memory, since the user may have
266 reason to enable these during a session, for instance to turn on a
267 debugging-related global. */
268
269 void
270 update_observer_mode (void)
271 {
272 int newval;
273
274 newval = (!may_insert_breakpoints
275 && !may_insert_tracepoints
276 && may_insert_fast_tracepoints
277 && !may_stop
278 && non_stop);
279
280 /* Let the user know if things change. */
281 if (newval != observer_mode)
282 printf_filtered (_("Observer mode is now %s.\n"),
283 (newval ? "on" : "off"));
284
285 observer_mode = observer_mode_1 = newval;
286 }
287
288 /* Tables of how to react to signals; the user sets them. */
289
290 static unsigned char *signal_stop;
291 static unsigned char *signal_print;
292 static unsigned char *signal_program;
293
294 /* Table of signals that are registered with "catch signal". A
295 non-zero entry indicates that the signal is caught by some "catch
296 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
297 signals. */
298 static unsigned char *signal_catch;
299
300 /* Table of signals that the target may silently handle.
301 This is automatically determined from the flags above,
302 and simply cached here. */
303 static unsigned char *signal_pass;
304
305 #define SET_SIGS(nsigs,sigs,flags) \
306 do { \
307 int signum = (nsigs); \
308 while (signum-- > 0) \
309 if ((sigs)[signum]) \
310 (flags)[signum] = 1; \
311 } while (0)
312
313 #define UNSET_SIGS(nsigs,sigs,flags) \
314 do { \
315 int signum = (nsigs); \
316 while (signum-- > 0) \
317 if ((sigs)[signum]) \
318 (flags)[signum] = 0; \
319 } while (0)
320
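/* Illustration only (editor's sketch, not part of infrun.c): a minimal
   example of how the SET_SIGS/UNSET_SIGS helpers above are meant to be
   used.  SIGS is a table with one entry per GDB signal, marking which
   signals a command named; here we make GDB stop on each of them.  The
   function name example_mark_stop is hypothetical; the real user of
   these macros is handle_command, further down in this file.  Guarded
   with #if 0 so it is never compiled.  */
#if 0
static void
example_mark_stop (const unsigned char *sigs)
{
  /* signal_stop is assumed to have been allocated with
     GDB_SIGNAL_LAST entries (see its declaration above).  */
  SET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_stop);
}
#endif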
321 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
322 this function is to avoid exporting `signal_program'. */
323
324 void
325 update_signals_program_target (void)
326 {
327 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
328 }
329
330 /* Value to pass to target_resume() to cause all threads to resume. */
331
332 #define RESUME_ALL minus_one_ptid
333
334 /* Command list pointer for the "stop" placeholder. */
335
336 static struct cmd_list_element *stop_command;
337
338 /* Function inferior was in as of last step command. */
339
340 static struct symbol *step_start_function;
341
342 /* Nonzero if we want to give control to the user when we're notified
343 of shared library events by the dynamic linker. */
344 int stop_on_solib_events;
345
346 /* Enable or disable optional shared library event breakpoints
347 as appropriate when the above flag is changed. */
348
349 static void
350 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
351 {
352 update_solib_breakpoints ();
353 }
354
355 static void
356 show_stop_on_solib_events (struct ui_file *file, int from_tty,
357 struct cmd_list_element *c, const char *value)
358 {
359 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
360 value);
361 }
362
363 /* Nonzero means we are expecting a trace trap
364 and should stop the inferior and return silently when it happens. */
365
366 int stop_after_trap;
367
368 /* Save register contents here when executing a "finish" command or when
369 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
370 Thus this contains the return value from the called function (assuming
371 values are returned in a register). */
372
373 struct regcache *stop_registers;
374
375 /* Nonzero after stop if current stack frame should be printed. */
376
377 static int stop_print_frame;
378
379 /* This is a cached copy of the pid/waitstatus of the last event
380 returned by target_wait()/deprecated_target_wait_hook(). This
381 information is returned by get_last_target_status(). */
382 static ptid_t target_last_wait_ptid;
383 static struct target_waitstatus target_last_waitstatus;
384
385 static void context_switch (ptid_t ptid);
386
387 void init_thread_stepping_state (struct thread_info *tss);
388
389 static void init_infwait_state (void);
390
391 static const char follow_fork_mode_child[] = "child";
392 static const char follow_fork_mode_parent[] = "parent";
393
394 static const char *const follow_fork_mode_kind_names[] = {
395 follow_fork_mode_child,
396 follow_fork_mode_parent,
397 NULL
398 };
399
400 static const char *follow_fork_mode_string = follow_fork_mode_parent;
401 static void
402 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
403 struct cmd_list_element *c, const char *value)
404 {
405 fprintf_filtered (file,
406 _("Debugger response to a program "
407 "call of fork or vfork is \"%s\".\n"),
408 value);
409 }
410 \f
411
412 /* Tell the target to follow the fork we're stopped at. Returns true
413 if the inferior should be resumed; false, if the target for some
414 reason decided it's best not to resume. */
415
416 static int
417 follow_fork (void)
418 {
419 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
420 int should_resume = 1;
421 struct thread_info *tp;
422
423 /* Copy user stepping state to the new inferior thread. FIXME: the
424 followed fork child thread should have a copy of most of the
425 parent thread structure's run control related fields, not just these.
426 Initialized to avoid "may be used uninitialized" warnings from gcc. */
427 struct breakpoint *step_resume_breakpoint = NULL;
428 struct breakpoint *exception_resume_breakpoint = NULL;
429 CORE_ADDR step_range_start = 0;
430 CORE_ADDR step_range_end = 0;
431 struct frame_id step_frame_id = { 0 };
432 struct interp *command_interp = NULL;
433
434 if (!non_stop)
435 {
436 ptid_t wait_ptid;
437 struct target_waitstatus wait_status;
438
439 /* Get the last target status returned by target_wait(). */
440 get_last_target_status (&wait_ptid, &wait_status);
441
442 /* If not stopped at a fork event, then there's nothing else to
443 do. */
444 if (wait_status.kind != TARGET_WAITKIND_FORKED
445 && wait_status.kind != TARGET_WAITKIND_VFORKED)
446 return 1;
447
448 /* Check if we switched over from WAIT_PTID, since the event was
449 reported. */
450 if (!ptid_equal (wait_ptid, minus_one_ptid)
451 && !ptid_equal (inferior_ptid, wait_ptid))
452 {
453 /* We did. Switch back to WAIT_PTID thread, to tell the
454 target to follow it (in either direction). We'll
455 afterwards refuse to resume, and inform the user what
456 happened. */
457 switch_to_thread (wait_ptid);
458 should_resume = 0;
459 }
460 }
461
462 tp = inferior_thread ();
463
464 /* If there were any forks/vforks that were caught and are now to be
465 followed, then do so now. */
466 switch (tp->pending_follow.kind)
467 {
468 case TARGET_WAITKIND_FORKED:
469 case TARGET_WAITKIND_VFORKED:
470 {
471 ptid_t parent, child;
472
473 /* If the user did a next/step, etc, over a fork call,
474 preserve the stepping state in the fork child. */
475 if (follow_child && should_resume)
476 {
477 step_resume_breakpoint = clone_momentary_breakpoint
478 (tp->control.step_resume_breakpoint);
479 step_range_start = tp->control.step_range_start;
480 step_range_end = tp->control.step_range_end;
481 step_frame_id = tp->control.step_frame_id;
482 exception_resume_breakpoint
483 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
484 command_interp = tp->control.command_interp;
485
486 /* For now, delete the parent's sr breakpoint, otherwise,
487 parent/child sr breakpoints are considered duplicates,
488 and the child version will not be installed. Remove
489 this when the breakpoints module becomes aware of
490 inferiors and address spaces. */
491 delete_step_resume_breakpoint (tp);
492 tp->control.step_range_start = 0;
493 tp->control.step_range_end = 0;
494 tp->control.step_frame_id = null_frame_id;
495 delete_exception_resume_breakpoint (tp);
496 tp->control.command_interp = NULL;
497 }
498
499 parent = inferior_ptid;
500 child = tp->pending_follow.value.related_pid;
501
502 /* Tell the target to do whatever is necessary to follow
503 either parent or child. */
504 if (target_follow_fork (follow_child, detach_fork))
505 {
506 /* Target refused to follow, or there's some other reason
507 we shouldn't resume. */
508 should_resume = 0;
509 }
510 else
511 {
512 /* This pending follow fork event is now handled, one way
513 or another. The previously selected thread may be gone
514 from the lists by now, but if it is still around, we need
515 to clear the pending follow request. */
516 tp = find_thread_ptid (parent);
517 if (tp)
518 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
519
520 /* This makes sure we don't try to apply the "Switched
521 over from WAIT_PTID" logic above. */
522 nullify_last_target_wait_ptid ();
523
524 /* If we followed the child, switch to it... */
525 if (follow_child)
526 {
527 switch_to_thread (child);
528
529 /* ... and preserve the stepping state, in case the
530 user was stepping over the fork call. */
531 if (should_resume)
532 {
533 tp = inferior_thread ();
534 tp->control.step_resume_breakpoint
535 = step_resume_breakpoint;
536 tp->control.step_range_start = step_range_start;
537 tp->control.step_range_end = step_range_end;
538 tp->control.step_frame_id = step_frame_id;
539 tp->control.exception_resume_breakpoint
540 = exception_resume_breakpoint;
541 tp->control.command_interp = command_interp;
542 }
543 else
544 {
545 /* If we get here, it was because we're trying to
546 resume from a fork catchpoint, but, the user
547 has switched threads away from the thread that
548 forked. In that case, the resume command
549 issued is most likely not applicable to the
550 child, so just warn, and refuse to resume. */
551 warning (_("Not resuming: switched threads "
552 "before following fork child."));
553 }
554
555 /* Reset breakpoints in the child as appropriate. */
556 follow_inferior_reset_breakpoints ();
557 }
558 else
559 switch_to_thread (parent);
560 }
561 }
562 break;
563 case TARGET_WAITKIND_SPURIOUS:
564 /* Nothing to follow. */
565 break;
566 default:
567 internal_error (__FILE__, __LINE__,
568 "Unexpected pending_follow.kind %d\n",
569 tp->pending_follow.kind);
570 break;
571 }
572
573 return should_resume;
574 }
575
576 void
577 follow_inferior_reset_breakpoints (void)
578 {
579 struct thread_info *tp = inferior_thread ();
580
581 /* Was there a step_resume breakpoint? (There was if the user
582 did a "next" at the fork() call.) If so, explicitly reset its
583 thread number.
584
585 step_resumes are a form of bp that are made to be per-thread.
586 Since we created the step_resume bp when the parent process
587 was being debugged, and now are switching to the child process,
588 from the breakpoint package's viewpoint, that's a switch of
589 "threads". We must update the bp's notion of which thread
590 it is for, or it'll be ignored when it triggers. */
591
592 if (tp->control.step_resume_breakpoint)
593 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
594
595 if (tp->control.exception_resume_breakpoint)
596 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
597
598 /* Reinsert all breakpoints in the child. The user may have set
599 breakpoints after catching the fork, in which case those
600 were never set in the child, but only in the parent. This makes
601 sure the inserted breakpoints match the breakpoint list. */
602
603 breakpoint_re_set ();
604 insert_breakpoints ();
605 }
606
607 /* The child has exited or execed: resume threads of the parent the
608 user wanted to be executing. */
609
610 static int
611 proceed_after_vfork_done (struct thread_info *thread,
612 void *arg)
613 {
614 int pid = * (int *) arg;
615
616 if (ptid_get_pid (thread->ptid) == pid
617 && is_running (thread->ptid)
618 && !is_executing (thread->ptid)
619 && !thread->stop_requested
620 && thread->suspend.stop_signal == GDB_SIGNAL_0)
621 {
622 if (debug_infrun)
623 fprintf_unfiltered (gdb_stdlog,
624 "infrun: resuming vfork parent thread %s\n",
625 target_pid_to_str (thread->ptid));
626
627 switch_to_thread (thread->ptid);
628 clear_proceed_status ();
629 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
630 }
631
632 return 0;
633 }
634
635 /* Called whenever we notice an exec or exit event, to handle
636 detaching or resuming a vfork parent. */
637
638 static void
639 handle_vfork_child_exec_or_exit (int exec)
640 {
641 struct inferior *inf = current_inferior ();
642
643 if (inf->vfork_parent)
644 {
645 int resume_parent = -1;
646
647 /* This exec or exit marks the end of the shared memory region
648 between the parent and the child. If the user wanted to
649 detach from the parent, now is the time. */
650
651 if (inf->vfork_parent->pending_detach)
652 {
653 struct thread_info *tp;
654 struct cleanup *old_chain;
655 struct program_space *pspace;
656 struct address_space *aspace;
657
658 /* follow-fork child, detach-on-fork on. */
659
660 inf->vfork_parent->pending_detach = 0;
661
662 if (!exec)
663 {
664 /* If we're handling a child exit, then inferior_ptid
665 points at the inferior's pid, not to a thread. */
666 old_chain = save_inferior_ptid ();
667 save_current_program_space ();
668 save_current_inferior ();
669 }
670 else
671 old_chain = save_current_space_and_thread ();
672
673 /* We're letting go of the parent. */
674 tp = any_live_thread_of_process (inf->vfork_parent->pid);
675 switch_to_thread (tp->ptid);
676
677 /* We're about to detach from the parent, which implicitly
678 removes breakpoints from its address space. There's a
679 catch here: we want to reuse the spaces for the child,
680 but, parent/child are still sharing the pspace at this
681 point, although the exec in reality makes the kernel give
682 the child a fresh set of new pages. The problem here is
683 that the breakpoints module, being unaware of this, would
684 likely choose the child process to write to the parent
685 address space. Swapping the child temporarily away from
686 the spaces has the desired effect. Yes, this is "sort
687 of" a hack. */
688
689 pspace = inf->pspace;
690 aspace = inf->aspace;
691 inf->aspace = NULL;
692 inf->pspace = NULL;
693
694 if (debug_infrun || info_verbose)
695 {
696 target_terminal_ours ();
697
698 if (exec)
699 fprintf_filtered (gdb_stdlog,
700 "Detaching vfork parent process "
701 "%d after child exec.\n",
702 inf->vfork_parent->pid);
703 else
704 fprintf_filtered (gdb_stdlog,
705 "Detaching vfork parent process "
706 "%d after child exit.\n",
707 inf->vfork_parent->pid);
708 }
709
710 target_detach (NULL, 0);
711
712 /* Put it back. */
713 inf->pspace = pspace;
714 inf->aspace = aspace;
715
716 do_cleanups (old_chain);
717 }
718 else if (exec)
719 {
720 /* We're staying attached to the parent, so, really give the
721 child a new address space. */
722 inf->pspace = add_program_space (maybe_new_address_space ());
723 inf->aspace = inf->pspace->aspace;
724 inf->removable = 1;
725 set_current_program_space (inf->pspace);
726
727 resume_parent = inf->vfork_parent->pid;
728
729 /* Break the bonds. */
730 inf->vfork_parent->vfork_child = NULL;
731 }
732 else
733 {
734 struct cleanup *old_chain;
735 struct program_space *pspace;
736
737 /* If this is a vfork child exiting, then the pspace and
738 aspaces were shared with the parent. Since we're
739 reporting the process exit, we'll be mourning all that is
740 found in the address space, and switching to null_ptid,
741 preparing to start a new inferior. But, since we don't
742 want to clobber the parent's address/program spaces, we
743 go ahead and create a new one for this exiting
744 inferior. */
745
746 /* Switch to null_ptid, so that clone_program_space doesn't want
747 to read the selected frame of a dead process. */
748 old_chain = save_inferior_ptid ();
749 inferior_ptid = null_ptid;
750
751 /* This inferior is dead, so avoid giving the breakpoints
752 module the option to write through to it (cloning a
753 program space resets breakpoints). */
754 inf->aspace = NULL;
755 inf->pspace = NULL;
756 pspace = add_program_space (maybe_new_address_space ());
757 set_current_program_space (pspace);
758 inf->removable = 1;
759 inf->symfile_flags = SYMFILE_NO_READ;
760 clone_program_space (pspace, inf->vfork_parent->pspace);
761 inf->pspace = pspace;
762 inf->aspace = pspace->aspace;
763
764 /* Put back inferior_ptid. We'll continue mourning this
765 inferior. */
766 do_cleanups (old_chain);
767
768 resume_parent = inf->vfork_parent->pid;
769 /* Break the bonds. */
770 inf->vfork_parent->vfork_child = NULL;
771 }
772
773 inf->vfork_parent = NULL;
774
775 gdb_assert (current_program_space == inf->pspace);
776
777 if (non_stop && resume_parent != -1)
778 {
779 /* If the user wanted the parent to be running, let it go
780 free now. */
781 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
782
783 if (debug_infrun)
784 fprintf_unfiltered (gdb_stdlog,
785 "infrun: resuming vfork parent process %d\n",
786 resume_parent);
787
788 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
789
790 do_cleanups (old_chain);
791 }
792 }
793 }
794
795 /* Enum strings for "set|show follow-exec-mode". */
796
797 static const char follow_exec_mode_new[] = "new";
798 static const char follow_exec_mode_same[] = "same";
799 static const char *const follow_exec_mode_names[] =
800 {
801 follow_exec_mode_new,
802 follow_exec_mode_same,
803 NULL,
804 };
805
806 static const char *follow_exec_mode_string = follow_exec_mode_same;
807 static void
808 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
809 struct cmd_list_element *c, const char *value)
810 {
811 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
812 }
813
814 /* EXECD_PATHNAME is assumed to be non-NULL. */
815
816 static void
817 follow_exec (ptid_t pid, char *execd_pathname)
818 {
819 struct thread_info *th = inferior_thread ();
820 struct inferior *inf = current_inferior ();
821
822 /* This is an exec event that we actually wish to pay attention to.
823 Refresh our symbol table to the newly exec'd program, remove any
824 momentary bp's, etc.
825
826 If there are breakpoints, they aren't really inserted now,
827 since the exec() transformed our inferior into a fresh set
828 of instructions.
829
830 We want to preserve symbolic breakpoints on the list, since
831 we have hopes that they can be reset after the new a.out's
832 symbol table is read.
833
834 However, any "raw" breakpoints must be removed from the list
835 (e.g., the solib bp's), since their address is probably invalid
836 now.
837
838 And, we DON'T want to call delete_breakpoints() here, since
839 that may write the bp's "shadow contents" (the instruction
840 value that was overwritten with a TRAP instruction). Since
841 we now have a new a.out, those shadow contents aren't valid. */
842
843 mark_breakpoints_out ();
844
845 update_breakpoints_after_exec ();
846
847 /* If there was one, it's gone now. We cannot truly step-to-next
848 statement through an exec(). */
849 th->control.step_resume_breakpoint = NULL;
850 th->control.exception_resume_breakpoint = NULL;
851 th->control.step_range_start = 0;
852 th->control.step_range_end = 0;
853
854 /* The target reports the exec event to the main thread, even if
855 some other thread does the exec, and even if the main thread was
856 already stopped --- if debugging in non-stop mode, it's possible
857 the user had the main thread held stopped in the previous image
858 --- release it now. This is the same behavior as step-over-exec
859 with scheduler-locking on in all-stop mode. */
860 th->stop_requested = 0;
861
862 /* What is this a.out's name? */
863 printf_unfiltered (_("%s is executing new program: %s\n"),
864 target_pid_to_str (inferior_ptid),
865 execd_pathname);
866
867 /* We've followed the inferior through an exec. Therefore, the
868 inferior has essentially been killed & reborn. */
869
870 gdb_flush (gdb_stdout);
871
872 breakpoint_init_inferior (inf_execd);
873
874 if (gdb_sysroot && *gdb_sysroot)
875 {
876 char *name = alloca (strlen (gdb_sysroot)
877 + strlen (execd_pathname)
878 + 1);
879
880 strcpy (name, gdb_sysroot);
881 strcat (name, execd_pathname);
882 execd_pathname = name;
883 }
884
885 /* Reset the shared library package. This ensures that we get a
886 shlib event when the child reaches "_start", at which point the
887 dld will have had a chance to initialize the child. */
888 /* Also, loading a symbol file below may trigger symbol lookups, and
889 we don't want those to be satisfied by the libraries of the
890 previous incarnation of this process. */
891 no_shared_libraries (NULL, 0);
892
893 if (follow_exec_mode_string == follow_exec_mode_new)
894 {
895 struct program_space *pspace;
896
897 /* The user wants to keep the old inferior and program spaces
898 around. Create a new fresh one, and switch to it. */
899
900 inf = add_inferior (current_inferior ()->pid);
901 pspace = add_program_space (maybe_new_address_space ());
902 inf->pspace = pspace;
903 inf->aspace = pspace->aspace;
904
905 exit_inferior_num_silent (current_inferior ()->num);
906
907 set_current_inferior (inf);
908 set_current_program_space (pspace);
909 }
910 else
911 {
912 /* The old description may no longer be fit for the new image.
913 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
914 old description; we'll read a new one below. No need to do
915 this on "follow-exec-mode new", as the old inferior stays
916 around (its description is later cleared/refetched on
917 restart). */
918 target_clear_description ();
919 }
920
921 gdb_assert (current_program_space == inf->pspace);
922
923 /* That a.out is now the one to use. */
924 exec_file_attach (execd_pathname, 0);
925
926 /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a
927 PIE (Position Independent Executable) main symbol file will only get
928 applied by solib_create_inferior_hook below; breakpoint_re_set would
929 fail to insert the breakpoints with the zero displacement. */
930
931 symbol_file_add (execd_pathname,
932 (inf->symfile_flags
933 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
934 NULL, 0);
935
936 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
937 set_initial_language ();
938
939 /* If the target can specify a description, read it. Must do this
940 after flipping to the new executable (because the target supplied
941 description must be compatible with the executable's
942 architecture, and the old executable may e.g., be 32-bit, while
943 the new one 64-bit), and before anything involving memory or
944 registers. */
945 target_find_description ();
946
947 solib_create_inferior_hook (0);
948
949 jit_inferior_created_hook ();
950
951 breakpoint_re_set ();
952
953 /* Reinsert all breakpoints. (Those which were symbolic have
954 been reset to the proper address in the new a.out, thanks
955 to symbol_file_command...). */
956 insert_breakpoints ();
957
958 /* The next resume of this inferior should bring it to the shlib
959 startup breakpoints. (If the user had also set bp's on
960 "main" from the old (parent) process, then they'll auto-
961 matically get reset there in the new process.). */
962 }
963
964 /* Non-zero if we are just simulating a single-step. This is needed
965 because we cannot remove the breakpoints in the inferior process
966 until after the `wait' in `wait_for_inferior'. */
967 static int singlestep_breakpoints_inserted_p = 0;
968
969 /* The thread we inserted single-step breakpoints for. */
970 static ptid_t singlestep_ptid;
971
972 /* PC when we started this single-step. */
973 static CORE_ADDR singlestep_pc;
974
975 /* Info about an instruction that is being stepped over. Invalid if
976 ASPACE is NULL. */
977
978 struct step_over_info
979 {
980 /* The instruction's address space. */
981 struct address_space *aspace;
982
983 /* The instruction's address. */
984 CORE_ADDR address;
985 };
986
987 /* The step-over info of the location that is being stepped over.
988
989 Note that with async/breakpoint always-inserted mode, a user might
990 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
991 being stepped over. As setting a new breakpoint inserts all
992 breakpoints, we need to make sure the breakpoint being stepped over
993 isn't inserted then. We do that by only clearing the step-over
994 info when the step-over is actually finished (or aborted).
995
996 Presently GDB can only step over one breakpoint at any given time.
997 Since threads that can't run code in the breakpoint's address space
998 can't really miss the breakpoint anyway, GDB could be taught
999 to step-over at most one breakpoint per address space (so this info
1000 could move to the address space object if/when GDB is extended).
1001 The set of breakpoints being stepped over will normally be much
1002 smaller than the set of all breakpoints, so a flag in the
1003 breakpoint location structure would be wasteful. A separate list
1004 also saves complexity and run-time, as otherwise we'd have to go
1005 through all breakpoint locations clearing their flag whenever we
1006 start a new sequence. Similar considerations weigh against storing
1007 this info in the thread object. Plus, not all step overs actually
1008 have breakpoint locations -- e.g., stepping past a single-step
1009 breakpoint, or stepping to complete a non-continuable
1010 watchpoint. */
1011 static struct step_over_info step_over_info;
1012
1013 /* Record the address of the breakpoint/instruction we're currently
1014 stepping over. */
1015
1016 static void
1017 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1018 {
1019 step_over_info.aspace = aspace;
1020 step_over_info.address = address;
1021 }
1022
1023 /* Called when we're no longer stepping over a breakpoint / an
1024 instruction, so all breakpoints are free to be (re)inserted. */
1025
1026 static void
1027 clear_step_over_info (void)
1028 {
1029 step_over_info.aspace = NULL;
1030 step_over_info.address = 0;
1031 }
1032
1033 /* See inferior.h. */
1034
1035 int
1036 stepping_past_instruction_at (struct address_space *aspace,
1037 CORE_ADDR address)
1038 {
1039 return (step_over_info.aspace != NULL
1040 && breakpoint_address_match (aspace, address,
1041 step_over_info.aspace,
1042 step_over_info.address));
1043 }
1044
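/* Illustration only (editor's sketch, not part of infrun.c): how a
   breakpoint-insertion path could consult the step-over info above so
   that the location currently being stepped over is not re-inserted
   behind our back.  should_insert_this_location and the bp_location
   fields used here are hypothetical stand-ins for the real checks that
   live in breakpoint.c.  Guarded with #if 0 so it is never compiled.  */
#if 0
static int
should_insert_this_location (struct bp_location *bl)
{
  /* Skip the address we are in the middle of stepping over; every
     other location may be (re)inserted as usual.  */
  if (stepping_past_instruction_at (bl->pspace->aspace, bl->address))
    return 0;
  return 1;
}
#endif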
1045 \f
1046 /* Displaced stepping. */
1047
1048 /* In non-stop debugging mode, we must take special care to manage
1049 breakpoints properly; in particular, the traditional strategy for
1050 stepping a thread past a breakpoint it has hit is unsuitable.
1051 'Displaced stepping' is a tactic for stepping one thread past a
1052 breakpoint it has hit while ensuring that other threads running
1053 concurrently will hit the breakpoint as they should.
1054
1055 The traditional way to step a thread T off a breakpoint in a
1056 multi-threaded program in all-stop mode is as follows:
1057
1058 a0) Initially, all threads are stopped, and breakpoints are not
1059 inserted.
1060 a1) We single-step T, leaving breakpoints uninserted.
1061 a2) We insert breakpoints, and resume all threads.
1062
1063 In non-stop debugging, however, this strategy is unsuitable: we
1064 don't want to have to stop all threads in the system in order to
1065 continue or step T past a breakpoint. Instead, we use displaced
1066 stepping:
1067
1068 n0) Initially, T is stopped, other threads are running, and
1069 breakpoints are inserted.
1070 n1) We copy the instruction "under" the breakpoint to a separate
1071 location, outside the main code stream, making any adjustments
1072 to the instruction, register, and memory state as directed by
1073 T's architecture.
1074 n2) We single-step T over the instruction at its new location.
1075 n3) We adjust the resulting register and memory state as directed
1076 by T's architecture. This includes resetting T's PC to point
1077 back into the main instruction stream.
1078 n4) We resume T.
1079
1080 This approach depends on the following gdbarch methods:
1081
1082 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1083 indicate where to copy the instruction, and how much space must
1084 be reserved there. We use these in step n1.
1085
1086 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1087 address, and makes any necessary adjustments to the instruction,
1088 register contents, and memory. We use this in step n1.
1089
1090 - gdbarch_displaced_step_fixup adjusts registers and memory after
1091 we have successfully single-stepped the instruction, to yield the
1092 same effect the instruction would have had if we had executed it
1093 at its original address. We use this in step n3.
1094
1095 - gdbarch_displaced_step_free_closure provides cleanup.
1096
1097 The gdbarch_displaced_step_copy_insn and
1098 gdbarch_displaced_step_fixup functions must be written so that
1099 copying an instruction with gdbarch_displaced_step_copy_insn,
1100 single-stepping across the copied instruction, and then applying
1101 gdbarch_displaced_step_fixup should have the same effects on the
1102 thread's memory and registers as stepping the instruction in place
1103 would have. Exactly which responsibilities fall to the copy and
1104 which fall to the fixup is up to the author of those functions.
1105
1106 See the comments in gdbarch.sh for details.
1107
1108 Note that displaced stepping and software single-step cannot
1109 currently be used in combination, although with some care I think
1110 they could be made to. Software single-step works by placing
1111 breakpoints on all possible subsequent instructions; if the
1112 displaced instruction is a PC-relative jump, those breakpoints
1113 could fall in very strange places --- on pages that aren't
1114 executable, or at addresses that are not proper instruction
1115 boundaries. (We do generally let other threads run while we wait
1116 to hit the software single-step breakpoint, and they might
1117 encounter such a corrupted instruction.) One way to work around
1118 this would be to have gdbarch_displaced_step_copy_insn fully
1119 simulate the effect of PC-relative instructions (and return NULL)
1120 on architectures that use software single-stepping.
1121
1122 In non-stop mode, we can have independent and simultaneous step
1123 requests, so more than one thread may need to simultaneously step
1124 over a breakpoint. The current implementation assumes there is
1125 only one scratch space per process. In this case, we have to
1126 serialize access to the scratch space. If thread A wants to step
1127 over a breakpoint, but we are currently waiting for some other
1128 thread to complete a displaced step, we leave thread A stopped and
1129 place it in the displaced_step_request_queue. Whenever a displaced
1130 step finishes, we pick the next thread in the queue and start a new
1131 displaced step operation on it. See displaced_step_prepare and
1132 displaced_step_fixup for details. */
1133
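/* Illustration only (editor's sketch, not part of infrun.c): the shape
   of the per-architecture wiring that the comment above depends on.  A
   *-tdep.c file advertises displaced-stepping support by installing
   the gdbarch hooks below.  The example_* function names and the
   insn-length value are hypothetical placeholders; the set_gdbarch_*
   setters and simple_displaced_step_free_closure are the real entry
   points from gdbarch.h/arch-utils.h.  Guarded with #if 0 so it is
   never compiled.  */
#if 0
static void
example_install_displaced_step_hooks (struct gdbarch *gdbarch)
{
  /* Step n1: how much scratch space is needed, and where it lives.  */
  set_gdbarch_max_insn_length (gdbarch, 16);
  set_gdbarch_displaced_step_location (gdbarch, example_step_location);

  /* Step n1: copy the instruction to the scratch pad, adjusting it as
     needed, and return a closure describing what was done.  */
  set_gdbarch_displaced_step_copy_insn (gdbarch, example_copy_insn);

  /* Step n3: fix up registers/memory after the single-step so the
     effect matches executing the instruction in place.  */
  set_gdbarch_displaced_step_fixup (gdbarch, example_fixup);

  /* Release the closure once the step is finished or aborted.  */
  set_gdbarch_displaced_step_free_closure (gdbarch,
					   simple_displaced_step_free_closure);
}
#endif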
1134 struct displaced_step_request
1135 {
1136 ptid_t ptid;
1137 struct displaced_step_request *next;
1138 };
1139
1140 /* Per-inferior displaced stepping state. */
1141 struct displaced_step_inferior_state
1142 {
1143 /* Pointer to next in linked list. */
1144 struct displaced_step_inferior_state *next;
1145
1146 /* The process this displaced step state refers to. */
1147 int pid;
1148
1149 /* A queue of pending displaced stepping requests. One entry per
1150 thread that needs to do a displaced step. */
1151 struct displaced_step_request *step_request_queue;
1152
1153 /* If this is not null_ptid, this is the thread carrying out a
1154 displaced single-step in process PID. This thread's state will
1155 require fixing up once it has completed its step. */
1156 ptid_t step_ptid;
1157
1158 /* The architecture the thread had when we stepped it. */
1159 struct gdbarch *step_gdbarch;
1160
1161 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1162 for post-step cleanup. */
1163 struct displaced_step_closure *step_closure;
1164
1165 /* The address of the original instruction, and the copy we
1166 made. */
1167 CORE_ADDR step_original, step_copy;
1168
1169 /* Saved contents of copy area. */
1170 gdb_byte *step_saved_copy;
1171 };
1172
1173 /* The list of states of processes involved in displaced stepping
1174 presently. */
1175 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1176
1177 /* Get the displaced stepping state of process PID. */
1178
1179 static struct displaced_step_inferior_state *
1180 get_displaced_stepping_state (int pid)
1181 {
1182 struct displaced_step_inferior_state *state;
1183
1184 for (state = displaced_step_inferior_states;
1185 state != NULL;
1186 state = state->next)
1187 if (state->pid == pid)
1188 return state;
1189
1190 return NULL;
1191 }
1192
1193 /* Add a new displaced stepping state for process PID to the displaced
1194 stepping state list, or return a pointer to an already existing
1195 entry, if it already exists. Never returns NULL. */
1196
1197 static struct displaced_step_inferior_state *
1198 add_displaced_stepping_state (int pid)
1199 {
1200 struct displaced_step_inferior_state *state;
1201
1202 for (state = displaced_step_inferior_states;
1203 state != NULL;
1204 state = state->next)
1205 if (state->pid == pid)
1206 return state;
1207
1208 state = xcalloc (1, sizeof (*state));
1209 state->pid = pid;
1210 state->next = displaced_step_inferior_states;
1211 displaced_step_inferior_states = state;
1212
1213 return state;
1214 }
1215
1216 /* If the inferior is displaced stepping, and ADDR equals the starting
1217 address of the copy area, return the corresponding
1218 displaced_step_closure. Otherwise, return NULL. */
1219
1220 struct displaced_step_closure*
1221 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1222 {
1223 struct displaced_step_inferior_state *displaced
1224 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1225
1226 /* If checking the mode of displaced instruction in copy area. */
1227 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1228 && (displaced->step_copy == addr))
1229 return displaced->step_closure;
1230
1231 return NULL;
1232 }
1233
1234 /* Remove the displaced stepping state of process PID. */
1235
1236 static void
1237 remove_displaced_stepping_state (int pid)
1238 {
1239 struct displaced_step_inferior_state *it, **prev_next_p;
1240
1241 gdb_assert (pid != 0);
1242
1243 it = displaced_step_inferior_states;
1244 prev_next_p = &displaced_step_inferior_states;
1245 while (it)
1246 {
1247 if (it->pid == pid)
1248 {
1249 *prev_next_p = it->next;
1250 xfree (it);
1251 return;
1252 }
1253
1254 prev_next_p = &it->next;
1255 it = *prev_next_p;
1256 }
1257 }
1258
1259 static void
1260 infrun_inferior_exit (struct inferior *inf)
1261 {
1262 remove_displaced_stepping_state (inf->pid);
1263 }
1264
1265 /* If ON, and the architecture supports it, GDB will use displaced
1266 stepping to step over breakpoints. If OFF, or if the architecture
1267 doesn't support it, GDB will instead use the traditional
1268 hold-and-step approach. If AUTO (which is the default), GDB will
1269 decide which technique to use to step over breakpoints depending on
1270 which of all-stop or non-stop mode is active --- displaced stepping
1271 in non-stop mode; hold-and-step in all-stop mode. */
1272
1273 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1274
1275 static void
1276 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1277 struct cmd_list_element *c,
1278 const char *value)
1279 {
1280 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1281 fprintf_filtered (file,
1282 _("Debugger's willingness to use displaced stepping "
1283 "to step over breakpoints is %s (currently %s).\n"),
1284 value, non_stop ? "on" : "off");
1285 else
1286 fprintf_filtered (file,
1287 _("Debugger's willingness to use displaced stepping "
1288 "to step over breakpoints is %s.\n"), value);
1289 }
1290
1291 /* Return non-zero if displaced stepping can/should be used to step
1292 over breakpoints. */
1293
1294 static int
1295 use_displaced_stepping (struct gdbarch *gdbarch)
1296 {
1297 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1298 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1299 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1300 && find_record_target () == NULL);
1301 }
1302
1303 /* Clean out any stray displaced stepping state. */
1304 static void
1305 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1306 {
1307 /* Indicate that there is no cleanup pending. */
1308 displaced->step_ptid = null_ptid;
1309
1310 if (displaced->step_closure)
1311 {
1312 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1313 displaced->step_closure);
1314 displaced->step_closure = NULL;
1315 }
1316 }
1317
1318 static void
1319 displaced_step_clear_cleanup (void *arg)
1320 {
1321 struct displaced_step_inferior_state *state = arg;
1322
1323 displaced_step_clear (state);
1324 }
1325
1326 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1327 void
1328 displaced_step_dump_bytes (struct ui_file *file,
1329 const gdb_byte *buf,
1330 size_t len)
1331 {
1332 int i;
1333
1334 for (i = 0; i < len; i++)
1335 fprintf_unfiltered (file, "%02x ", buf[i]);
1336 fputs_unfiltered ("\n", file);
1337 }
1338
1339 /* Prepare to single-step, using displaced stepping.
1340
1341 Note that we cannot use displaced stepping when we have a signal to
1342 deliver. If we have a signal to deliver and an instruction to step
1343 over, then after the step, there will be no indication from the
1344 target whether the thread entered a signal handler or ignored the
1345 signal and stepped over the instruction successfully --- both cases
1346 result in a simple SIGTRAP. In the first case we mustn't do a
1347 fixup, and in the second case we must --- but we can't tell which.
1348 Comments in the code for 'random signals' in handle_inferior_event
1349 explain how we handle this case instead.
1350
1351 Returns 1 if preparing was successful -- this thread is going to be
1352 stepped now; or 0 if displaced stepping this thread got queued. */
1353 static int
1354 displaced_step_prepare (ptid_t ptid)
1355 {
1356 struct cleanup *old_cleanups, *ignore_cleanups;
1357 struct thread_info *tp = find_thread_ptid (ptid);
1358 struct regcache *regcache = get_thread_regcache (ptid);
1359 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1360 CORE_ADDR original, copy;
1361 ULONGEST len;
1362 struct displaced_step_closure *closure;
1363 struct displaced_step_inferior_state *displaced;
1364 int status;
1365
1366 /* We should never reach this function if the architecture does not
1367 support displaced stepping. */
1368 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1369
1370 /* Disable range stepping while executing in the scratch pad. We
1371 want a single-step even if executing the displaced instruction in
1372 the scratch buffer lands within the stepping range (e.g., a
1373 jump/branch). */
1374 tp->control.may_range_step = 0;
1375
1376 /* We have to displaced step one thread at a time, as we only have
1377 access to a single scratch space per inferior. */
1378
1379 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1380
1381 if (!ptid_equal (displaced->step_ptid, null_ptid))
1382 {
1383 /* Already waiting for a displaced step to finish. Defer this
1384 request and place it in the queue. */
1385 struct displaced_step_request *req, *new_req;
1386
1387 if (debug_displaced)
1388 fprintf_unfiltered (gdb_stdlog,
1389 "displaced: defering step of %s\n",
1390 target_pid_to_str (ptid));
1391
1392 new_req = xmalloc (sizeof (*new_req));
1393 new_req->ptid = ptid;
1394 new_req->next = NULL;
1395
1396 if (displaced->step_request_queue)
1397 {
1398 for (req = displaced->step_request_queue;
1399 req && req->next;
1400 req = req->next)
1401 ;
1402 req->next = new_req;
1403 }
1404 else
1405 displaced->step_request_queue = new_req;
1406
1407 return 0;
1408 }
1409 else
1410 {
1411 if (debug_displaced)
1412 fprintf_unfiltered (gdb_stdlog,
1413 "displaced: stepping %s now\n",
1414 target_pid_to_str (ptid));
1415 }
1416
1417 displaced_step_clear (displaced);
1418
1419 old_cleanups = save_inferior_ptid ();
1420 inferior_ptid = ptid;
1421
1422 original = regcache_read_pc (regcache);
1423
1424 copy = gdbarch_displaced_step_location (gdbarch);
1425 len = gdbarch_max_insn_length (gdbarch);
1426
1427 /* Save the original contents of the copy area. */
1428 displaced->step_saved_copy = xmalloc (len);
1429 ignore_cleanups = make_cleanup (free_current_contents,
1430 &displaced->step_saved_copy);
1431 status = target_read_memory (copy, displaced->step_saved_copy, len);
1432 if (status != 0)
1433 throw_error (MEMORY_ERROR,
1434 _("Error accessing memory address %s (%s) for "
1435 "displaced-stepping scratch space."),
1436 paddress (gdbarch, copy), safe_strerror (status));
1437 if (debug_displaced)
1438 {
1439 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1440 paddress (gdbarch, copy));
1441 displaced_step_dump_bytes (gdb_stdlog,
1442 displaced->step_saved_copy,
1443 len);
1444 };
1445
1446 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1447 original, copy, regcache);
1448
1449 /* We don't support the fully-simulated case at present. */
1450 gdb_assert (closure);
1451
1452 /* Save the information we need to fix things up if the step
1453 succeeds. */
1454 displaced->step_ptid = ptid;
1455 displaced->step_gdbarch = gdbarch;
1456 displaced->step_closure = closure;
1457 displaced->step_original = original;
1458 displaced->step_copy = copy;
1459
1460 make_cleanup (displaced_step_clear_cleanup, displaced);
1461
1462 /* Resume execution at the copy. */
1463 regcache_write_pc (regcache, copy);
1464
1465 discard_cleanups (ignore_cleanups);
1466
1467 do_cleanups (old_cleanups);
1468
1469 if (debug_displaced)
1470 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1471 paddress (gdbarch, copy));
1472
1473 return 1;
1474 }
1475
1476 static void
1477 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1478 const gdb_byte *myaddr, int len)
1479 {
1480 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1481
1482 inferior_ptid = ptid;
1483 write_memory (memaddr, myaddr, len);
1484 do_cleanups (ptid_cleanup);
1485 }
1486
1487 /* Restore the contents of the copy area for thread PTID. */
1488
1489 static void
1490 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1491 ptid_t ptid)
1492 {
1493 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1494
1495 write_memory_ptid (ptid, displaced->step_copy,
1496 displaced->step_saved_copy, len);
1497 if (debug_displaced)
1498 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1499 target_pid_to_str (ptid),
1500 paddress (displaced->step_gdbarch,
1501 displaced->step_copy));
1502 }
1503
1504 static void
1505 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1506 {
1507 struct cleanup *old_cleanups;
1508 struct displaced_step_inferior_state *displaced
1509 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1510
1511 /* Was any thread of this process doing a displaced step? */
1512 if (displaced == NULL)
1513 return;
1514
1515 /* Was this event for the pid we displaced? */
1516 if (ptid_equal (displaced->step_ptid, null_ptid)
1517 || ! ptid_equal (displaced->step_ptid, event_ptid))
1518 return;
1519
1520 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1521
1522 displaced_step_restore (displaced, displaced->step_ptid);
1523
1524 /* Did the instruction complete successfully? */
1525 if (signal == GDB_SIGNAL_TRAP)
1526 {
1527 /* Fix up the resulting state. */
1528 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1529 displaced->step_closure,
1530 displaced->step_original,
1531 displaced->step_copy,
1532 get_thread_regcache (displaced->step_ptid));
1533 }
1534 else
1535 {
1536 /* Since the instruction didn't complete, all we can do is
1537 relocate the PC. */
1538 struct regcache *regcache = get_thread_regcache (event_ptid);
1539 CORE_ADDR pc = regcache_read_pc (regcache);
1540
1541 pc = displaced->step_original + (pc - displaced->step_copy);
1542 regcache_write_pc (regcache, pc);
1543 }
1544
1545 do_cleanups (old_cleanups);
1546
1547 displaced->step_ptid = null_ptid;
1548
1549 /* Are there any pending displaced stepping requests? If so, run
1550 one now. Leave the state object around, since we're likely to
1551 need it again soon. */
1552 while (displaced->step_request_queue)
1553 {
1554 struct displaced_step_request *head;
1555 ptid_t ptid;
1556 struct regcache *regcache;
1557 struct gdbarch *gdbarch;
1558 CORE_ADDR actual_pc;
1559 struct address_space *aspace;
1560
1561 head = displaced->step_request_queue;
1562 ptid = head->ptid;
1563 displaced->step_request_queue = head->next;
1564 xfree (head);
1565
1566 context_switch (ptid);
1567
1568 regcache = get_thread_regcache (ptid);
1569 actual_pc = regcache_read_pc (regcache);
1570 aspace = get_regcache_aspace (regcache);
1571
1572 if (breakpoint_here_p (aspace, actual_pc))
1573 {
1574 if (debug_displaced)
1575 fprintf_unfiltered (gdb_stdlog,
1576 "displaced: stepping queued %s now\n",
1577 target_pid_to_str (ptid));
1578
1579 displaced_step_prepare (ptid);
1580
1581 gdbarch = get_regcache_arch (regcache);
1582
1583 if (debug_displaced)
1584 {
1585 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1586 gdb_byte buf[4];
1587
1588 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1589 paddress (gdbarch, actual_pc));
1590 read_memory (actual_pc, buf, sizeof (buf));
1591 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1592 }
1593
1594 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1595 displaced->step_closure))
1596 target_resume (ptid, 1, GDB_SIGNAL_0);
1597 else
1598 target_resume (ptid, 0, GDB_SIGNAL_0);
1599
1600 /* Done, we're stepping a thread. */
1601 break;
1602 }
1603 else
1604 {
1605 int step;
1606 struct thread_info *tp = inferior_thread ();
1607
1608 /* The breakpoint we were sitting under has since been
1609 removed. */
1610 tp->control.trap_expected = 0;
1611
1612 /* Go back to what we were trying to do. */
1613 step = currently_stepping (tp);
1614
1615 if (debug_displaced)
1616 fprintf_unfiltered (gdb_stdlog,
1617 "displaced: breakpoint is gone: %s, step(%d)\n",
1618 target_pid_to_str (tp->ptid), step);
1619
1620 target_resume (ptid, step, GDB_SIGNAL_0);
1621 tp->suspend.stop_signal = GDB_SIGNAL_0;
1622
1623 /* This request was discarded. See if there's any other
1624 thread waiting for its turn. */
1625 }
1626 }
1627 }
1628
1629 /* Update global variables holding ptids to hold NEW_PTID if they were
1630 holding OLD_PTID. */
1631 static void
1632 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1633 {
1634 struct displaced_step_request *it;
1635 struct displaced_step_inferior_state *displaced;
1636
1637 if (ptid_equal (inferior_ptid, old_ptid))
1638 inferior_ptid = new_ptid;
1639
1640 if (ptid_equal (singlestep_ptid, old_ptid))
1641 singlestep_ptid = new_ptid;
1642
1643 for (displaced = displaced_step_inferior_states;
1644 displaced;
1645 displaced = displaced->next)
1646 {
1647 if (ptid_equal (displaced->step_ptid, old_ptid))
1648 displaced->step_ptid = new_ptid;
1649
1650 for (it = displaced->step_request_queue; it; it = it->next)
1651 if (ptid_equal (it->ptid, old_ptid))
1652 it->ptid = new_ptid;
1653 }
1654 }
1655
1656 \f
1657 /* Resuming. */
1658
1659 /* Things to clean up if we QUIT out of resume (). */
1660 static void
1661 resume_cleanups (void *ignore)
1662 {
1663 normal_stop ();
1664 }
1665
1666 static const char schedlock_off[] = "off";
1667 static const char schedlock_on[] = "on";
1668 static const char schedlock_step[] = "step";
1669 static const char *const scheduler_enums[] = {
1670 schedlock_off,
1671 schedlock_on,
1672 schedlock_step,
1673 NULL
1674 };
1675 static const char *scheduler_mode = schedlock_off;
1676 static void
1677 show_scheduler_mode (struct ui_file *file, int from_tty,
1678 struct cmd_list_element *c, const char *value)
1679 {
1680 fprintf_filtered (file,
1681 _("Mode for locking scheduler "
1682 "during execution is \"%s\".\n"),
1683 value);
1684 }
1685
1686 static void
1687 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1688 {
1689 if (!target_can_lock_scheduler)
1690 {
1691 scheduler_mode = schedlock_off;
1692 error (_("Target '%s' cannot support this command."), target_shortname);
1693 }
1694 }
1695
1696 /* True if execution commands resume all threads of all processes by
1697 default; otherwise, resume only threads of the current inferior
1698 process. */
1699 int sched_multi = 0;
1700
1701 /* Try to set up for software single stepping over the specified location.
1702 Return 1 if target_resume() should use hardware single step.
1703
1704 GDBARCH the current gdbarch.
1705 PC the location to step over. */
1706
1707 static int
1708 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1709 {
1710 int hw_step = 1;
1711
1712 if (execution_direction == EXEC_FORWARD
1713 && gdbarch_software_single_step_p (gdbarch)
1714 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1715 {
1716 hw_step = 0;
1717 /* Do not pull these breakpoints until after a `wait' in
1718 `wait_for_inferior'. */
1719 singlestep_breakpoints_inserted_p = 1;
1720 singlestep_ptid = inferior_ptid;
1721 singlestep_pc = pc;
1722 }
1723 return hw_step;
1724 }
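/* Illustrative sketch, compiled out: the typical caller pattern for
   maybe_software_singlestep above -- try to plant software
   single-step breakpoints first, and request a hardware single-step
   from the target only if that was not possible.  The helper name is
   hypothetical; the calls mirror how resume uses the function.  */
#if 0
static void
example_single_step_current_thread (struct gdbarch *gdbarch)
{
  CORE_ADDR pc = regcache_read_pc (get_current_regcache ());
  int hw_step = maybe_software_singlestep (gdbarch, pc);

  /* HW_STEP is 0 when software single-step breakpoints were planted;
     the thread is then simply continued into them.  */
  target_resume (inferior_ptid, hw_step, GDB_SIGNAL_0);
}
#endif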
1725
1726 /* Return a ptid representing the set of threads that we will proceed,
1727 in the perspective of the user/frontend. We may actually resume
1728 fewer threads at first, e.g., if a thread is stopped at a
1729 breakpoint that needs stepping-off, but that should not be visible
1730 to the user/frontend, and neither should the frontend/user be
1731 allowed to proceed any of the threads that happen to be stopped for
1732 internal run control handling, if a previous command wanted them
1733 resumed. */
1734
1735 ptid_t
1736 user_visible_resume_ptid (int step)
1737 {
1738 /* By default, resume all threads of all processes. */
1739 ptid_t resume_ptid = RESUME_ALL;
1740
1741 /* Maybe resume only all threads of the current process. */
1742 if (!sched_multi && target_supports_multi_process ())
1743 {
1744 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1745 }
1746
1747 /* Maybe resume a single thread after all. */
1748 if (non_stop)
1749 {
1750 /* With non-stop mode on, threads are always handled
1751 individually. */
1752 resume_ptid = inferior_ptid;
1753 }
1754 else if ((scheduler_mode == schedlock_on)
1755 || (scheduler_mode == schedlock_step
1756 && (step || singlestep_breakpoints_inserted_p)))
1757 {
1758 /* User-settable 'scheduler' mode requires solo thread resume. */
1759 resume_ptid = inferior_ptid;
1760 }
1761
1762 return resume_ptid;
1763 }
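/* Illustrative sketch, compiled out: a typical caller computes the
   user-visible resume set, marks it running for the frontend, and
   only then asks the target to resume (possibly a narrower set).
   The helper name is hypothetical; the individual calls mirror what
   resume and the execution commands do.  */
#if 0
static void
example_resume_user_visible_set (int step)
{
  ptid_t resume_ptid = user_visible_resume_ptid (step);

  /* From the frontend's point of view all of these threads are now
     running, even if the target initially resumes fewer of them.  */
  set_running (resume_ptid, 1);
  target_resume (resume_ptid, step, GDB_SIGNAL_0);
}
#endif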
1764
1765 /* Resume the inferior, but allow a QUIT. This is useful if the user
1766 wants to interrupt some lengthy single-stepping operation
1767 (for child processes, the SIGINT goes to the inferior, and so
1768 we get a SIGINT random_signal, but for remote debugging and perhaps
1769 other targets, that's not true).
1770
1771 STEP nonzero if we should step (zero to continue instead).
1772 SIG is the signal to give the inferior (zero for none). */
1773 void
1774 resume (int step, enum gdb_signal sig)
1775 {
1776 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1777 struct regcache *regcache = get_current_regcache ();
1778 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1779 struct thread_info *tp = inferior_thread ();
1780 CORE_ADDR pc = regcache_read_pc (regcache);
1781 struct address_space *aspace = get_regcache_aspace (regcache);
1782 ptid_t resume_ptid;
1783
1784 QUIT;
1785
1786 if (current_inferior ()->waiting_for_vfork_done)
1787 {
1788 /* Don't try to single-step a vfork parent that is waiting for
1789 the child to get out of the shared memory region (by exec'ing
1790 or exiting). This is particularly important on software
1791 single-step archs, as the child process would trip on the
1792 software single step breakpoint inserted for the parent
1793 process. Since the parent will not actually execute any
1794 instruction until the child is out of the shared region (such
1795 are vfork's semantics), it is safe to simply continue it.
1796 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1797 the parent, and tell it to `keep_going', which automatically
1798 sets it stepping again. */
1799 if (debug_infrun)
1800 fprintf_unfiltered (gdb_stdlog,
1801 "infrun: resume : clear step\n");
1802 step = 0;
1803 }
1804
1805 if (debug_infrun)
1806 fprintf_unfiltered (gdb_stdlog,
1807 "infrun: resume (step=%d, signal=%s), "
1808 "trap_expected=%d, current thread [%s] at %s\n",
1809 step, gdb_signal_to_symbol_string (sig),
1810 tp->control.trap_expected,
1811 target_pid_to_str (inferior_ptid),
1812 paddress (gdbarch, pc));
1813
1814 /* Normally, by the time we reach `resume', the breakpoints are either
1815 removed or inserted, as appropriate. The exception is if we're sitting
1816 at a permanent breakpoint; we need to step over it, but permanent
1817 breakpoints can't be removed. So we have to test for it here. */
1818 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1819 {
1820 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1821 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1822 else
1823 error (_("\
1824 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1825 how to step past a permanent breakpoint on this architecture. Try using\n\
1826 a command like `return' or `jump' to continue execution."));
1827 }
1828
1829 /* If we have a breakpoint to step over, make sure to do a single
1830 step only. Same if we have software watchpoints. */
1831 if (tp->control.trap_expected || bpstat_should_step ())
1832 tp->control.may_range_step = 0;
1833
1834 /* If enabled, step over breakpoints by executing a copy of the
1835 instruction at a different address.
1836
1837 We can't use displaced stepping when we have a signal to deliver;
1838 the comments for displaced_step_prepare explain why. The
1839 comments in the handle_inferior event for dealing with 'random
1840 signals' explain what we do instead.
1841
1842 We can't use displaced stepping when we are waiting for a vfork_done
1843 event; displaced stepping breaks the vfork child in the same way a
1844 software single-step breakpoint does. */
1845 if (use_displaced_stepping (gdbarch)
1846 && (tp->control.trap_expected
1847 || (step && gdbarch_software_single_step_p (gdbarch)))
1848 && sig == GDB_SIGNAL_0
1849 && !current_inferior ()->waiting_for_vfork_done)
1850 {
1851 struct displaced_step_inferior_state *displaced;
1852
1853 if (!displaced_step_prepare (inferior_ptid))
1854 {
1855 /* Got placed in displaced stepping queue. Will be resumed
1856 later when all the currently queued displaced stepping
1857 requests finish. The thread is not executing at this point,
1858 and the call to set_executing will be made later. But we
1859 need to call set_running here, since from the frontend's point of view,
1860 the thread is running. */
1861 set_running (inferior_ptid, 1);
1862 discard_cleanups (old_cleanups);
1863 return;
1864 }
1865
1866 /* Update pc to reflect the new address from which we will execute
1867 instructions due to displaced stepping. */
1868 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1869
1870 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1871 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1872 displaced->step_closure);
1873 }
1874
1875 /* Do we need to do it the hard way, w/temp breakpoints? */
1876 else if (step)
1877 step = maybe_software_singlestep (gdbarch, pc);
1878
1879 /* Currently, our software single-step implementation leads to different
1880 results than hardware single-stepping in one situation: when stepping
1881 into delivering a signal which has an associated signal handler,
1882 hardware single-step will stop at the first instruction of the handler,
1883 while software single-step will simply skip execution of the handler.
1884
1885 For now, this difference in behavior is accepted since there is no
1886 easy way to actually implement single-stepping into a signal handler
1887 without kernel support.
1888
1889 However, there is one scenario where this difference leads to follow-on
1890 problems: if we're stepping off a breakpoint by removing all breakpoints
1891 and then single-stepping. In this case, the software single-step
1892 behavior means that even if there is a *breakpoint* in the signal
1893 handler, GDB still would not stop.
1894
1895 Fortunately, we can at least fix this particular issue. We detect
1896 here the case where we are about to deliver a signal while software
1897 single-stepping with breakpoints removed. In this situation, we
1898 revert the decisions to remove all breakpoints and insert single-
1899 step breakpoints, and instead we install a step-resume breakpoint
1900 at the current address, deliver the signal without stepping, and
1901 once we arrive back at the step-resume breakpoint, actually step
1902 over the breakpoint we originally wanted to step over. */
1903 if (singlestep_breakpoints_inserted_p
1904 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1905 {
1906 /* If we have nested signals or a pending signal is delivered
1907 immediately after a handler returns, we might already have
1908 a step-resume breakpoint set on the earlier handler. We cannot
1909 set another step-resume breakpoint; just continue on until the
1910 original breakpoint is hit. */
1911 if (tp->control.step_resume_breakpoint == NULL)
1912 {
1913 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1914 tp->step_after_step_resume_breakpoint = 1;
1915 }
1916
1917 remove_single_step_breakpoints ();
1918 singlestep_breakpoints_inserted_p = 0;
1919
1920 clear_step_over_info ();
1921 tp->control.trap_expected = 0;
1922
1923 insert_breakpoints ();
1924 }
1925
1926 /* If STEP is set, it's a request to use hardware stepping
1927 facilities. But in that case, we should never
1928 use single-step breakpoints. */
1929 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1930
1931 /* Decide the set of threads to ask the target to resume. Start
1932 by assuming everything will be resumed, then narrow the set
1933 by applying increasingly restrictive conditions. */
1934 resume_ptid = user_visible_resume_ptid (step);
1935
1936 /* Maybe resume a single thread after all. */
1937 if ((step || singlestep_breakpoints_inserted_p)
1938 && tp->control.trap_expected)
1939 {
1940 /* We're allowing a thread to run past a breakpoint it has
1941 hit, by single-stepping the thread with the breakpoint
1942 removed. In which case, we need to single-step only this
1943 thread, and keep others stopped, as they can miss this
1944 breakpoint if allowed to run. */
1945 resume_ptid = inferior_ptid;
1946 }
1947
1948 if (gdbarch_cannot_step_breakpoint (gdbarch))
1949 {
1950 /* Most targets can step a breakpoint instruction, thus
1951 executing it normally. But if this one cannot, just
1952 continue and we will hit it anyway. */
1953 if (step && breakpoint_inserted_here_p (aspace, pc))
1954 step = 0;
1955 }
1956
1957 if (debug_displaced
1958 && use_displaced_stepping (gdbarch)
1959 && tp->control.trap_expected)
1960 {
1961 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1962 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1963 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1964 gdb_byte buf[4];
1965
1966 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1967 paddress (resume_gdbarch, actual_pc));
1968 read_memory (actual_pc, buf, sizeof (buf));
1969 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1970 }
1971
1972 if (tp->control.may_range_step)
1973 {
1974 /* If we're resuming a thread with the PC out of the step
1975 range, then we're doing some nested/finer run control
1976 operation, like stepping the thread out of the dynamic
1977 linker or the displaced stepping scratch pad. We
1978 shouldn't have allowed a range step then. */
1979 gdb_assert (pc_in_thread_step_range (pc, tp));
1980 }
1981
1982 /* Install inferior's terminal modes. */
1983 target_terminal_inferior ();
1984
1985 /* Avoid confusing the next resume, if the next stop/resume
1986 happens to apply to another thread. */
1987 tp->suspend.stop_signal = GDB_SIGNAL_0;
1988
1989 /* Advise target which signals may be handled silently. If we have
1990 removed breakpoints because we are stepping over one (which can
1991 happen only if we are not using displaced stepping), we need to
1992 receive all signals to avoid accidentally skipping a breakpoint
1993 during execution of a signal handler. */
1994 if ((step || singlestep_breakpoints_inserted_p)
1995 && tp->control.trap_expected
1996 && !use_displaced_stepping (gdbarch))
1997 target_pass_signals (0, NULL);
1998 else
1999 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2000
2001 target_resume (resume_ptid, step, sig);
2002
2003 discard_cleanups (old_cleanups);
2004 }
2005 \f
2006 /* Proceeding. */
2007
2008 /* Clear out all variables saying what to do when inferior is continued.
2009 First do this, then set the ones you want, then call `proceed'. */
2010
2011 static void
2012 clear_proceed_status_thread (struct thread_info *tp)
2013 {
2014 if (debug_infrun)
2015 fprintf_unfiltered (gdb_stdlog,
2016 "infrun: clear_proceed_status_thread (%s)\n",
2017 target_pid_to_str (tp->ptid));
2018
2019 tp->control.trap_expected = 0;
2020 tp->control.step_range_start = 0;
2021 tp->control.step_range_end = 0;
2022 tp->control.may_range_step = 0;
2023 tp->control.step_frame_id = null_frame_id;
2024 tp->control.step_stack_frame_id = null_frame_id;
2025 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2026 tp->stop_requested = 0;
2027
2028 tp->control.stop_step = 0;
2029
2030 tp->control.proceed_to_finish = 0;
2031
2032 tp->control.command_interp = NULL;
2033
2034 /* Discard any remaining commands or status from previous stop. */
2035 bpstat_clear (&tp->control.stop_bpstat);
2036 }
2037
2038 static int
2039 clear_proceed_status_callback (struct thread_info *tp, void *data)
2040 {
2041 if (is_exited (tp->ptid))
2042 return 0;
2043
2044 clear_proceed_status_thread (tp);
2045 return 0;
2046 }
2047
2048 void
2049 clear_proceed_status (void)
2050 {
2051 if (!non_stop)
2052 {
2053 /* In all-stop mode, delete the per-thread status of all
2054 threads; even if inferior_ptid is null_ptid, there may be
2055 threads on the list. E.g., we may be launching a new
2056 process while selecting the executable. */
2057 iterate_over_threads (clear_proceed_status_callback, NULL);
2058 }
2059
2060 if (!ptid_equal (inferior_ptid, null_ptid))
2061 {
2062 struct inferior *inferior;
2063
2064 if (non_stop)
2065 {
2066 /* If in non-stop mode, only delete the per-thread status of
2067 the current thread. */
2068 clear_proceed_status_thread (inferior_thread ());
2069 }
2070
2071 inferior = current_inferior ();
2072 inferior->control.stop_soon = NO_STOP_QUIETLY;
2073 }
2074
2075 stop_after_trap = 0;
2076
2077 clear_step_over_info ();
2078
2079 observer_notify_about_to_proceed ();
2080
2081 if (stop_registers)
2082 {
2083 regcache_xfree (stop_registers);
2084 stop_registers = NULL;
2085 }
2086 }
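/* Illustrative sketch, compiled out: the calling contract described
   above -- clear the per-thread state first, set whatever step_...
   variables the command needs, and only then call proceed.  The
   helper is hypothetical and shows the plain "continue where
   stopped" shape.  */
#if 0
static void
example_continue_current_thread (void)
{
  clear_proceed_status ();

  /* A stepping command would additionally fill in
     tp->control.step_range_start/step_range_end and related fields
     here, before resuming with STEP set to 1.  */

  /* Resume where stopped, with the default signal disposition, not
     trapping after the first instruction.  */
  proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
}
#endif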
2087
2088 /* Returns true if TP is still stopped at a breakpoint that needs
2089 stepping-over in order to make progress. If the breakpoint is gone
2090 meanwhile, we can skip the whole step-over dance. */
2091
2092 static int
2093 thread_still_needs_step_over (struct thread_info *tp)
2094 {
2095 if (tp->stepping_over_breakpoint)
2096 {
2097 struct regcache *regcache = get_thread_regcache (tp->ptid);
2098
2099 if (breakpoint_here_p (get_regcache_aspace (regcache),
2100 regcache_read_pc (regcache)))
2101 return 1;
2102
2103 tp->stepping_over_breakpoint = 0;
2104 }
2105
2106 return 0;
2107 }
2108
2109 /* Returns true if scheduler locking applies. STEP indicates whether
2110 we're about to do a step/next-like command to a thread. */
2111
2112 static int
2113 schedlock_applies (int step)
2114 {
2115 return (scheduler_mode == schedlock_on
2116 || (scheduler_mode == schedlock_step
2117 && step));
2118 }
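/* Illustrative sketch, compiled out: the predicate above in terms of
   the user-visible "set scheduler-locking" setting.  The helper is
   hypothetical and assumes the mode has been set to "step".  */
#if 0
static void
example_schedlock_step_mode (void)
{
  /* With "set scheduler-locking step", locking applies to stepping
     commands only.  */
  int locks_when_stepping = schedlock_applies (1);	/* 1 */
  int locks_when_continuing = schedlock_applies (0);	/* 0 */
}
#endif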
2119
2120 /* Look for a thread other than EXCEPT that has previously reported a
2121 breakpoint event, and thus needs a step-over in order to make
2122 progress. Returns NULL if none is found. STEP indicates whether
2123 we're about to step the current thread, in order to decide whether
2124 "set scheduler-locking step" applies. */
2125
2126 static struct thread_info *
2127 find_thread_needs_step_over (int step, struct thread_info *except)
2128 {
2129 struct thread_info *tp, *current;
2130
2131 /* With non-stop mode on, threads are always handled individually. */
2132 gdb_assert (! non_stop);
2133
2134 current = inferior_thread ();
2135
2136 /* If scheduler locking applies, we can avoid iterating over all
2137 threads. */
2138 if (schedlock_applies (step))
2139 {
2140 if (except != current
2141 && thread_still_needs_step_over (current))
2142 return current;
2143
2144 return NULL;
2145 }
2146
2147 ALL_THREADS (tp)
2148 {
2149 /* Ignore the EXCEPT thread. */
2150 if (tp == except)
2151 continue;
2152 /* Ignore threads of processes we're not resuming. */
2153 if (!sched_multi
2154 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2155 continue;
2156
2157 if (thread_still_needs_step_over (tp))
2158 return tp;
2159 }
2160
2161 return NULL;
2162 }
2163
2164 /* Basic routine for continuing the program in various fashions.
2165
2166 ADDR is the address to resume at, or -1 for resume where stopped.
2167 SIGGNAL is the signal to give it, or 0 for none,
2168 or -1 for act according to how it stopped.
2169 STEP is nonzero if we should trap after one instruction.
2170 -1 means return after that and print nothing.
2171 You should probably set various step_... variables
2172 before calling here, if you are stepping.
2173
2174 You should call clear_proceed_status before calling proceed. */
2175
2176 void
2177 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2178 {
2179 struct regcache *regcache;
2180 struct gdbarch *gdbarch;
2181 struct thread_info *tp;
2182 CORE_ADDR pc;
2183 struct address_space *aspace;
2184
2185 /* If we're stopped at a fork/vfork, follow the branch set by the
2186 "set follow-fork-mode" command; otherwise, we'll just proceed
2187 resuming the current thread. */
2188 if (!follow_fork ())
2189 {
2190 /* The target for some reason decided not to resume. */
2191 normal_stop ();
2192 if (target_can_async_p ())
2193 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2194 return;
2195 }
2196
2197 /* We'll update this if & when we switch to a new thread. */
2198 previous_inferior_ptid = inferior_ptid;
2199
2200 regcache = get_current_regcache ();
2201 gdbarch = get_regcache_arch (regcache);
2202 aspace = get_regcache_aspace (regcache);
2203 pc = regcache_read_pc (regcache);
2204 tp = inferior_thread ();
2205
2206 if (step > 0)
2207 step_start_function = find_pc_function (pc);
2208 if (step < 0)
2209 stop_after_trap = 1;
2210
2211 /* Fill in with reasonable starting values. */
2212 init_thread_stepping_state (tp);
2213
2214 if (addr == (CORE_ADDR) -1)
2215 {
2216 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2217 && execution_direction != EXEC_REVERSE)
2218 /* There is a breakpoint at the address we will resume at,
2219 step one instruction before inserting breakpoints so that
2220 we do not stop right away (and report a second hit at this
2221 breakpoint).
2222
2223 Note, we don't do this in reverse, because we won't
2224 actually be executing the breakpoint insn anyway.
2225 We'll be (un-)executing the previous instruction. */
2226 tp->stepping_over_breakpoint = 1;
2227 else if (gdbarch_single_step_through_delay_p (gdbarch)
2228 && gdbarch_single_step_through_delay (gdbarch,
2229 get_current_frame ()))
2230 /* We stepped onto an instruction that needs to be stepped
2231 again before re-inserting the breakpoint, do so. */
2232 tp->stepping_over_breakpoint = 1;
2233 }
2234 else
2235 {
2236 regcache_write_pc (regcache, addr);
2237 }
2238
2239 /* Record the interpreter that issued the execution command that
2240 caused this thread to resume. If the top level interpreter is
2241 MI/async, and the execution command was a CLI command
2242 (next/step/etc.), we'll want to print stop event output to the MI
2243 console channel (the stepped-to line, etc.), as if the user
2244 entered the execution command on a real GDB console. */
2245 inferior_thread ()->control.command_interp = command_interp ();
2246
2247 if (debug_infrun)
2248 fprintf_unfiltered (gdb_stdlog,
2249 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2250 paddress (gdbarch, addr),
2251 gdb_signal_to_symbol_string (siggnal), step);
2252
2253 if (non_stop)
2254 /* In non-stop, each thread is handled individually. The context
2255 must already be set to the right thread here. */
2256 ;
2257 else
2258 {
2259 struct thread_info *step_over;
2260
2261 /* In a multi-threaded task we may select another thread and
2262 then continue or step.
2263
2264 But if the old thread was stopped at a breakpoint, it will
2265 immediately cause another breakpoint stop without any
2266 execution (i.e. it will report a breakpoint hit incorrectly).
2267 So we must step over it first.
2268
2269 Look for a thread other than the current (TP) that reported a
2270 breakpoint hit and hasn't been resumed yet since. */
2271 step_over = find_thread_needs_step_over (step, tp);
2272 if (step_over != NULL)
2273 {
2274 if (debug_infrun)
2275 fprintf_unfiltered (gdb_stdlog,
2276 "infrun: need to step-over [%s] first\n",
2277 target_pid_to_str (step_over->ptid));
2278
2279 /* Store the prev_pc for the stepping thread too, needed by
2280 switch_back_to_stepped_thread. */
2281 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2282 switch_to_thread (step_over->ptid);
2283 tp = step_over;
2284 }
2285 }
2286
2287 /* If we need to step over a breakpoint, and we're not using
2288 displaced stepping to do so, insert all breakpoints (watchpoints,
2289 etc.) but the one we're stepping over, step one instruction, and
2290 then re-insert the breakpoint when that step is finished. */
2291 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2292 {
2293 struct regcache *regcache = get_current_regcache ();
2294
2295 set_step_over_info (get_regcache_aspace (regcache),
2296 regcache_read_pc (regcache));
2297 }
2298 else
2299 clear_step_over_info ();
2300
2301 insert_breakpoints ();
2302
2303 tp->control.trap_expected = tp->stepping_over_breakpoint;
2304
2305 if (!non_stop)
2306 {
2307 /* Pass the last stop signal to the thread we're resuming,
2308 irrespective of whether the current thread is the thread that
2309 got the last event or not. This was historically GDB's
2310 behaviour before keeping a stop_signal per thread. */
2311
2312 struct thread_info *last_thread;
2313 ptid_t last_ptid;
2314 struct target_waitstatus last_status;
2315
2316 get_last_target_status (&last_ptid, &last_status);
2317 if (!ptid_equal (inferior_ptid, last_ptid)
2318 && !ptid_equal (last_ptid, null_ptid)
2319 && !ptid_equal (last_ptid, minus_one_ptid))
2320 {
2321 last_thread = find_thread_ptid (last_ptid);
2322 if (last_thread)
2323 {
2324 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2325 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2326 }
2327 }
2328 }
2329
2330 if (siggnal != GDB_SIGNAL_DEFAULT)
2331 tp->suspend.stop_signal = siggnal;
2332 /* If this signal should not be seen by program,
2333 give it zero. Used for debugging signals. */
2334 else if (!signal_program[tp->suspend.stop_signal])
2335 tp->suspend.stop_signal = GDB_SIGNAL_0;
2336
2337 annotate_starting ();
2338
2339 /* Make sure that output from GDB appears before output from the
2340 inferior. */
2341 gdb_flush (gdb_stdout);
2342
2343 /* Refresh prev_pc value just prior to resuming. This used to be
2344 done in stop_stepping, however, setting prev_pc there did not handle
2345 scenarios such as inferior function calls or returning from
2346 a function via the return command. In those cases, the prev_pc
2347 value was not set properly for subsequent commands. The prev_pc value
2348 is used to initialize the starting line number in the ecs. With an
2349 invalid value, the gdb next command ends up stopping at the position
2350 represented by the next line table entry past our start position.
2351 On platforms that generate one line table entry per line, this
2352 is not a problem. However, on the ia64, the compiler generates
2353 extraneous line table entries that do not increase the line number.
2354 When we issue the gdb next command on the ia64 after an inferior call
2355 or a return command, we often end up a few instructions forward, still
2356 within the original line we started.
2357
2358 An attempt was made to refresh the prev_pc at the same time the
2359 execution_control_state is initialized (for instance, just before
2360 waiting for an inferior event). But this approach did not work
2361 because of platforms that use ptrace, where the pc register cannot
2362 be read unless the inferior is stopped. At that point, we are not
2363 guaranteed the inferior is stopped and so the regcache_read_pc() call
2364 can fail. Setting the prev_pc value here ensures the value is updated
2365 correctly when the inferior is stopped. */
2366 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2367
2368 /* Reset to normal state. */
2369 init_infwait_state ();
2370
2371 /* Resume inferior. */
2372 resume (tp->control.trap_expected || step || bpstat_should_step (),
2373 tp->suspend.stop_signal);
2374
2375 /* Wait for it to stop (if not standalone)
2376 and in any case decode why it stopped, and act accordingly. */
2377 /* Do this only if we are not using the event loop, or if the target
2378 does not support asynchronous execution. */
2379 if (!target_can_async_p ())
2380 {
2381 wait_for_inferior ();
2382 normal_stop ();
2383 }
2384 }
2385 \f
2386
2387 /* Start remote-debugging of a machine over a serial link. */
2388
2389 void
2390 start_remote (int from_tty)
2391 {
2392 struct inferior *inferior;
2393
2394 inferior = current_inferior ();
2395 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2396
2397 /* Always go on waiting for the target, regardless of the mode. */
2398 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2399 indicate to wait_for_inferior that a target should timeout if
2400 nothing is returned (instead of just blocking). Because of this,
2401 targets expecting an immediate response need to, internally, set
2402 things up so that the target_wait() is forced to eventually
2403 timeout. */
2404 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2405 differentiate to its caller what the state of the target is after
2406 the initial open has been performed. Here we're assuming that
2407 the target has stopped. It should be possible to eventually have
2408 target_open() return to the caller an indication that the target
2409 is currently running and GDB state should be set to the same as
2410 for an async run. */
2411 wait_for_inferior ();
2412
2413 /* Now that the inferior has stopped, do any bookkeeping like
2414 loading shared libraries. We want to do this before normal_stop,
2415 so that the displayed frame is up to date. */
2416 post_create_inferior (&current_target, from_tty);
2417
2418 normal_stop ();
2419 }
2420
2421 /* Initialize static vars when a new inferior begins. */
2422
2423 void
2424 init_wait_for_inferior (void)
2425 {
2426 /* These are meaningless until the first time through wait_for_inferior. */
2427
2428 breakpoint_init_inferior (inf_starting);
2429
2430 clear_proceed_status ();
2431
2432 target_last_wait_ptid = minus_one_ptid;
2433
2434 previous_inferior_ptid = inferior_ptid;
2435 init_infwait_state ();
2436
2437 /* Discard any skipped inlined frames. */
2438 clear_inline_frame_state (minus_one_ptid);
2439
2440 singlestep_ptid = null_ptid;
2441 singlestep_pc = 0;
2442 }
2443
2444 \f
2445 /* This enum encodes possible reasons for doing a target_wait, so that
2446 wfi can call target_wait in one place. (Ultimately the call will be
2447 moved out of the infinite loop entirely.) */
2448
2449 enum infwait_states
2450 {
2451 infwait_normal_state,
2452 infwait_step_watch_state,
2453 infwait_nonstep_watch_state
2454 };
2455
2456 /* The PTID we'll do a target_wait on. */
2457 ptid_t waiton_ptid;
2458
2459 /* Current inferior wait state. */
2460 static enum infwait_states infwait_state;
2461
2462 /* Data to be passed around while handling an event. This data is
2463 discarded between events. */
2464 struct execution_control_state
2465 {
2466 ptid_t ptid;
2467 /* The thread that got the event, if this was a thread event; NULL
2468 otherwise. */
2469 struct thread_info *event_thread;
2470
2471 struct target_waitstatus ws;
2472 int stop_func_filled_in;
2473 CORE_ADDR stop_func_start;
2474 CORE_ADDR stop_func_end;
2475 const char *stop_func_name;
2476 int wait_some_more;
2477
2478 /* We were in infwait_step_watch_state or
2479 infwait_nonstep_watch_state state, and the thread reported an
2480 event. */
2481 int stepped_after_stopped_by_watchpoint;
2482
2483 /* True if the event thread hit the single-step breakpoint of
2484 another thread. Thus the event doesn't cause a stop, the thread
2485 needs to be single-stepped past the single-step breakpoint before
2486 we can switch back to the original stepping thread. */
2487 int hit_singlestep_breakpoint;
2488 };
2489
2490 static void handle_inferior_event (struct execution_control_state *ecs);
2491
2492 static void handle_step_into_function (struct gdbarch *gdbarch,
2493 struct execution_control_state *ecs);
2494 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2495 struct execution_control_state *ecs);
2496 static void handle_signal_stop (struct execution_control_state *ecs);
2497 static void check_exception_resume (struct execution_control_state *,
2498 struct frame_info *);
2499
2500 static void stop_stepping (struct execution_control_state *ecs);
2501 static void prepare_to_wait (struct execution_control_state *ecs);
2502 static void keep_going (struct execution_control_state *ecs);
2503 static void process_event_stop_test (struct execution_control_state *ecs);
2504 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2505
2506 /* Callback for iterate over threads. If the thread is stopped, but
2507 the user/frontend doesn't know about that yet, go through
2508 normal_stop, as if the thread had just stopped now. ARG points at
2509 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2510 ptid_is_pid(PTID) is true, applies to all threads of the process
2511 pointed at by PTID. Otherwise, apply only to the thread pointed by
2512 PTID. */
2513
2514 static int
2515 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2516 {
2517 ptid_t ptid = * (ptid_t *) arg;
2518
2519 if ((ptid_equal (info->ptid, ptid)
2520 || ptid_equal (minus_one_ptid, ptid)
2521 || (ptid_is_pid (ptid)
2522 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2523 && is_running (info->ptid)
2524 && !is_executing (info->ptid))
2525 {
2526 struct cleanup *old_chain;
2527 struct execution_control_state ecss;
2528 struct execution_control_state *ecs = &ecss;
2529
2530 memset (ecs, 0, sizeof (*ecs));
2531
2532 old_chain = make_cleanup_restore_current_thread ();
2533
2534 overlay_cache_invalid = 1;
2535 /* Flush target cache before starting to handle each event.
2536 Target was running and cache could be stale. This is just a
2537 heuristic. Running threads may modify target memory, but we
2538 don't get any event. */
2539 target_dcache_invalidate ();
2540
2541 /* Go through handle_inferior_event/normal_stop, so we always
2542 have consistent output as if the stop event had been
2543 reported. */
2544 ecs->ptid = info->ptid;
2545 ecs->event_thread = find_thread_ptid (info->ptid);
2546 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2547 ecs->ws.value.sig = GDB_SIGNAL_0;
2548
2549 handle_inferior_event (ecs);
2550
2551 if (!ecs->wait_some_more)
2552 {
2553 struct thread_info *tp;
2554
2555 normal_stop ();
2556
2557 /* Finish off the continuations. */
2558 tp = inferior_thread ();
2559 do_all_intermediate_continuations_thread (tp, 1);
2560 do_all_continuations_thread (tp, 1);
2561 }
2562
2563 do_cleanups (old_chain);
2564 }
2565
2566 return 0;
2567 }
2568
2569 /* This function is attached as a "thread_stop_requested" observer.
2570 Cleanup local state that assumed the PTID was to be resumed, and
2571 report the stop to the frontend. */
2572
2573 static void
2574 infrun_thread_stop_requested (ptid_t ptid)
2575 {
2576 struct displaced_step_inferior_state *displaced;
2577
2578 /* PTID was requested to stop. Remove it from the displaced
2579 stepping queue, so we don't try to resume it automatically. */
2580
2581 for (displaced = displaced_step_inferior_states;
2582 displaced;
2583 displaced = displaced->next)
2584 {
2585 struct displaced_step_request *it, **prev_next_p;
2586
2587 it = displaced->step_request_queue;
2588 prev_next_p = &displaced->step_request_queue;
2589 while (it)
2590 {
2591 if (ptid_match (it->ptid, ptid))
2592 {
2593 *prev_next_p = it->next;
2594 it->next = NULL;
2595 xfree (it);
2596 }
2597 else
2598 {
2599 prev_next_p = &it->next;
2600 }
2601
2602 it = *prev_next_p;
2603 }
2604 }
2605
2606 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2607 }
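/* Illustrative sketch, compiled out: the removal loop above uses the
   pointer-to-pointer idiom -- PREV_NEXT_P always points at the "next"
   field that currently links to the node under inspection, so
   matching nodes can be unlinked without special-casing the list
   head.  The node type and helper below are made up for exposition
   and only restate that shape.  */
#if 0
struct example_node
{
  struct example_node *next;
  int key;
};

static void
example_unlink_matching (struct example_node **head, int key)
{
  struct example_node *it, **prev_next_p;

  it = *head;
  prev_next_p = head;
  while (it)
    {
      if (it->key == key)
	{
	  *prev_next_p = it->next;
	  it->next = NULL;
	  xfree (it);
	}
      else
	prev_next_p = &it->next;

      it = *prev_next_p;
    }
}
#endif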
2608
2609 static void
2610 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2611 {
2612 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2613 nullify_last_target_wait_ptid ();
2614 }
2615
2616 /* Callback for iterate_over_threads. */
2617
2618 static int
2619 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2620 {
2621 if (is_exited (info->ptid))
2622 return 0;
2623
2624 delete_step_resume_breakpoint (info);
2625 delete_exception_resume_breakpoint (info);
2626 return 0;
2627 }
2628
2629 /* In all-stop, delete the step resume breakpoint of any thread that
2630 had one. In non-stop, delete the step resume breakpoint of the
2631 thread that just stopped. */
2632
2633 static void
2634 delete_step_thread_step_resume_breakpoint (void)
2635 {
2636 if (!target_has_execution
2637 || ptid_equal (inferior_ptid, null_ptid))
2638 /* If the inferior has exited, we have already deleted the step
2639 resume breakpoints out of GDB's lists. */
2640 return;
2641
2642 if (non_stop)
2643 {
2644 /* If in non-stop mode, only delete the step-resume or
2645 longjmp-resume breakpoint of the thread that just stopped
2646 stepping. */
2647 struct thread_info *tp = inferior_thread ();
2648
2649 delete_step_resume_breakpoint (tp);
2650 delete_exception_resume_breakpoint (tp);
2651 }
2652 else
2653 /* In all-stop mode, delete all step-resume and longjmp-resume
2654 breakpoints of any thread that had them. */
2655 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2656 }
2657
2658 /* A cleanup wrapper. */
2659
2660 static void
2661 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2662 {
2663 delete_step_thread_step_resume_breakpoint ();
2664 }
2665
2666 /* Pretty print the results of target_wait, for debugging purposes. */
2667
2668 static void
2669 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2670 const struct target_waitstatus *ws)
2671 {
2672 char *status_string = target_waitstatus_to_string (ws);
2673 struct ui_file *tmp_stream = mem_fileopen ();
2674 char *text;
2675
2676 /* The text is split over several lines because it was getting too long.
2677 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2678 output as a unit; we want only one timestamp printed if debug_timestamp
2679 is set. */
2680
2681 fprintf_unfiltered (tmp_stream,
2682 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2683 if (ptid_get_pid (waiton_ptid) != -1)
2684 fprintf_unfiltered (tmp_stream,
2685 " [%s]", target_pid_to_str (waiton_ptid));
2686 fprintf_unfiltered (tmp_stream, ", status) =\n");
2687 fprintf_unfiltered (tmp_stream,
2688 "infrun: %d [%s],\n",
2689 ptid_get_pid (result_ptid),
2690 target_pid_to_str (result_ptid));
2691 fprintf_unfiltered (tmp_stream,
2692 "infrun: %s\n",
2693 status_string);
2694
2695 text = ui_file_xstrdup (tmp_stream, NULL);
2696
2697 /* This uses %s in part to handle %'s in the text, but also to avoid
2698 a gcc error: the format attribute requires a string literal. */
2699 fprintf_unfiltered (gdb_stdlog, "%s", text);
2700
2701 xfree (status_string);
2702 xfree (text);
2703 ui_file_delete (tmp_stream);
2704 }
2705
2706 /* Prepare and stabilize the inferior for detaching it. E.g.,
2707 detaching while a thread is displaced stepping is a recipe for
2708 crashing it, as nothing would readjust the PC out of the scratch
2709 pad. */
2710
2711 void
2712 prepare_for_detach (void)
2713 {
2714 struct inferior *inf = current_inferior ();
2715 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2716 struct cleanup *old_chain_1;
2717 struct displaced_step_inferior_state *displaced;
2718
2719 displaced = get_displaced_stepping_state (inf->pid);
2720
2721 /* Is any thread of this process displaced stepping? If not,
2722 there's nothing else to do. */
2723 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2724 return;
2725
2726 if (debug_infrun)
2727 fprintf_unfiltered (gdb_stdlog,
2728 "displaced-stepping in-process while detaching");
2729
2730 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2731 inf->detaching = 1;
2732
2733 while (!ptid_equal (displaced->step_ptid, null_ptid))
2734 {
2735 struct cleanup *old_chain_2;
2736 struct execution_control_state ecss;
2737 struct execution_control_state *ecs;
2738
2739 ecs = &ecss;
2740 memset (ecs, 0, sizeof (*ecs));
2741
2742 overlay_cache_invalid = 1;
2743 /* Flush target cache before starting to handle each event.
2744 Target was running and cache could be stale. This is just a
2745 heuristic. Running threads may modify target memory, but we
2746 don't get any event. */
2747 target_dcache_invalidate ();
2748
2749 if (deprecated_target_wait_hook)
2750 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2751 else
2752 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2753
2754 if (debug_infrun)
2755 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2756
2757 /* If an error happens while handling the event, propagate GDB's
2758 knowledge of the executing state to the frontend/user running
2759 state. */
2760 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2761 &minus_one_ptid);
2762
2763 /* Now figure out what to do with the result of the wait. */
2764 handle_inferior_event (ecs);
2765
2766 /* No error, don't finish the state yet. */
2767 discard_cleanups (old_chain_2);
2768
2769 /* Breakpoints and watchpoints are not installed on the target
2770 at this point, and signals are passed directly to the
2771 inferior, so this must mean the process is gone. */
2772 if (!ecs->wait_some_more)
2773 {
2774 discard_cleanups (old_chain_1);
2775 error (_("Program exited while detaching"));
2776 }
2777 }
2778
2779 discard_cleanups (old_chain_1);
2780 }
2781
2782 /* Wait for control to return from inferior to debugger.
2783
2784 If inferior gets a signal, we may decide to start it up again
2785 instead of returning. That is why there is a loop in this function.
2786 When this function actually returns it means the inferior
2787 should be left stopped and GDB should read more commands. */
2788
2789 void
2790 wait_for_inferior (void)
2791 {
2792 struct cleanup *old_cleanups;
2793
2794 if (debug_infrun)
2795 fprintf_unfiltered
2796 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2797
2798 old_cleanups =
2799 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2800
2801 while (1)
2802 {
2803 struct execution_control_state ecss;
2804 struct execution_control_state *ecs = &ecss;
2805 struct cleanup *old_chain;
2806
2807 memset (ecs, 0, sizeof (*ecs));
2808
2809 overlay_cache_invalid = 1;
2810
2811 /* Flush target cache before starting to handle each event.
2812 Target was running and cache could be stale. This is just a
2813 heuristic. Running threads may modify target memory, but we
2814 don't get any event. */
2815 target_dcache_invalidate ();
2816
2817 if (deprecated_target_wait_hook)
2818 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2819 else
2820 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2821
2822 if (debug_infrun)
2823 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2824
2825 /* If an error happens while handling the event, propagate GDB's
2826 knowledge of the executing state to the frontend/user running
2827 state. */
2828 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2829
2830 /* Now figure out what to do with the result of the wait. */
2831 handle_inferior_event (ecs);
2832
2833 /* No error, don't finish the state yet. */
2834 discard_cleanups (old_chain);
2835
2836 if (!ecs->wait_some_more)
2837 break;
2838 }
2839
2840 do_cleanups (old_cleanups);
2841 }
2842
2843 /* Asynchronous version of wait_for_inferior. It is called by the
2844 event loop whenever a change of state is detected on the file
2845 descriptor corresponding to the target. It can be called more than
2846 once to complete a single execution command. In such cases we need
2847 to keep the state in a global variable ECSS. If it is the last time
2848 that this function is called for a single execution command, then
2849 report to the user that the inferior has stopped, and do the
2850 necessary cleanups. */
2851
2852 void
2853 fetch_inferior_event (void *client_data)
2854 {
2855 struct execution_control_state ecss;
2856 struct execution_control_state *ecs = &ecss;
2857 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2858 struct cleanup *ts_old_chain;
2859 int was_sync = sync_execution;
2860 int cmd_done = 0;
2861
2862 memset (ecs, 0, sizeof (*ecs));
2863
2864 /* We're handling a live event, so make sure we're doing live
2865 debugging. If we're looking at traceframes while the target is
2866 running, we're going to need to get back to that mode after
2867 handling the event. */
2868 if (non_stop)
2869 {
2870 make_cleanup_restore_current_traceframe ();
2871 set_current_traceframe (-1);
2872 }
2873
2874 if (non_stop)
2875 /* In non-stop mode, the user/frontend should not notice a thread
2876 switch due to internal events. Make sure we revert to the
2877 user-selected thread and frame after handling the event and
2878 running any breakpoint commands. */
2879 make_cleanup_restore_current_thread ();
2880
2881 overlay_cache_invalid = 1;
2882 /* Flush target cache before starting to handle each event. Target
2883 was running and cache could be stale. This is just a heuristic.
2884 Running threads may modify target memory, but we don't get any
2885 event. */
2886 target_dcache_invalidate ();
2887
2888 make_cleanup_restore_integer (&execution_direction);
2889 execution_direction = target_execution_direction ();
2890
2891 if (deprecated_target_wait_hook)
2892 ecs->ptid =
2893 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2894 else
2895 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2896
2897 if (debug_infrun)
2898 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2899
2900 /* If an error happens while handling the event, propagate GDB's
2901 knowledge of the executing state to the frontend/user running
2902 state. */
2903 if (!non_stop)
2904 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2905 else
2906 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2907
2908 /* This cleanup runs before the make_cleanup_restore_current_thread cleanup
2909 above, so it still applies to the thread which has thrown the exception. */
2910 make_bpstat_clear_actions_cleanup ();
2911
2912 /* Now figure out what to do with the result of the wait. */
2913 handle_inferior_event (ecs);
2914
2915 if (!ecs->wait_some_more)
2916 {
2917 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2918
2919 delete_step_thread_step_resume_breakpoint ();
2920
2921 /* We may not find an inferior if this was a process exit. */
2922 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2923 normal_stop ();
2924
2925 if (target_has_execution
2926 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2927 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2928 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2929 && ecs->event_thread->step_multi
2930 && ecs->event_thread->control.stop_step)
2931 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2932 else
2933 {
2934 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2935 cmd_done = 1;
2936 }
2937 }
2938
2939 /* No error, don't finish the thread states yet. */
2940 discard_cleanups (ts_old_chain);
2941
2942 /* Revert thread and frame. */
2943 do_cleanups (old_chain);
2944
2945 /* If the inferior was in sync execution mode, and now isn't,
2946 restore the prompt (a synchronous execution command has finished,
2947 and we're ready for input). */
2948 if (interpreter_async && was_sync && !sync_execution)
2949 display_gdb_prompt (0);
2950
2951 if (cmd_done
2952 && !was_sync
2953 && exec_done_display_p
2954 && (ptid_equal (inferior_ptid, null_ptid)
2955 || !is_running (inferior_ptid)))
2956 printf_unfiltered (_("completed.\n"));
2957 }
2958
2959 /* Record the frame and location we're currently stepping through. */
2960 void
2961 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2962 {
2963 struct thread_info *tp = inferior_thread ();
2964
2965 tp->control.step_frame_id = get_frame_id (frame);
2966 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2967
2968 tp->current_symtab = sal.symtab;
2969 tp->current_line = sal.line;
2970 }
2971
2972 /* Clear context switchable stepping state. */
2973
2974 void
2975 init_thread_stepping_state (struct thread_info *tss)
2976 {
2977 tss->stepping_over_breakpoint = 0;
2978 tss->step_after_step_resume_breakpoint = 0;
2979 }
2980
2981 /* Set the cached copy of the last ptid/waitstatus. */
2982
2983 static void
2984 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2985 {
2986 target_last_wait_ptid = ptid;
2987 target_last_waitstatus = status;
2988 }
2989
2990 /* Return the cached copy of the last pid/waitstatus returned by
2991 target_wait()/deprecated_target_wait_hook(). The data is actually
2992 cached by handle_inferior_event(), which gets called immediately
2993 after target_wait()/deprecated_target_wait_hook(). */
2994
2995 void
2996 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2997 {
2998 *ptidp = target_last_wait_ptid;
2999 *status = target_last_waitstatus;
3000 }
3001
3002 void
3003 nullify_last_target_wait_ptid (void)
3004 {
3005 target_last_wait_ptid = minus_one_ptid;
3006 }
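/* Illustrative sketch, compiled out: typical use of the accessors
   above.  Callers check for the "no last event" sentinels before
   trusting the ptid, much as proceed does.  The helper name is
   hypothetical.  */
#if 0
static void
example_inspect_last_stop (void)
{
  ptid_t last_ptid;
  struct target_waitstatus last_status;

  get_last_target_status (&last_ptid, &last_status);
  if (!ptid_equal (last_ptid, null_ptid)
      && !ptid_equal (last_ptid, minus_one_ptid))
    {
      /* LAST_PTID identifies the thread that reported the most recent
	 event; LAST_STATUS describes why it stopped.  */
    }
}
#endif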
3007
3008 /* Switch thread contexts. */
3009
3010 static void
3011 context_switch (ptid_t ptid)
3012 {
3013 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3014 {
3015 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3016 target_pid_to_str (inferior_ptid));
3017 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3018 target_pid_to_str (ptid));
3019 }
3020
3021 switch_to_thread (ptid);
3022 }
3023
3024 static void
3025 adjust_pc_after_break (struct execution_control_state *ecs)
3026 {
3027 struct regcache *regcache;
3028 struct gdbarch *gdbarch;
3029 struct address_space *aspace;
3030 CORE_ADDR breakpoint_pc, decr_pc;
3031
3032 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3033 we aren't, just return.
3034
3035 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3036 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3037 implemented by software breakpoints should be handled through the normal
3038 breakpoint layer.
3039
3040 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3041 different signals (SIGILL or SIGEMT for instance), but it is less
3042 clear where the PC is pointing afterwards. It may not match
3043 gdbarch_decr_pc_after_break. I don't know any specific target that
3044 generates these signals at breakpoints (the code has been in GDB since at
3045 least 1992) so I can not guess how to handle them here.
3046
3047 In earlier versions of GDB, a target with
3048 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3049 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3050 target with both of these set in GDB history, and it seems unlikely to be
3051 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3052
3053 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3054 return;
3055
3056 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3057 return;
3058
3059 /* In reverse execution, when a breakpoint is hit, the instruction
3060 under it has already been de-executed. The reported PC always
3061 points at the breakpoint address, so adjusting it further would
3062 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3063 architecture:
3064
3065 B1 0x08000000 : INSN1
3066 B2 0x08000001 : INSN2
3067 0x08000002 : INSN3
3068 PC -> 0x08000003 : INSN4
3069
3070 Say you're stopped at 0x08000003 as above. Reverse continuing
3071 from that point should hit B2 as below. Reading the PC when the
3072 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3073 been de-executed already.
3074
3075 B1 0x08000000 : INSN1
3076 B2 PC -> 0x08000001 : INSN2
3077 0x08000002 : INSN3
3078 0x08000003 : INSN4
3079
3080 We can't apply the same logic as for forward execution, because
3081 we would wrongly adjust the PC to 0x08000000, since there's a
3082 breakpoint at PC - 1. We'd then report a hit on B1, although
3083 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3084 behaviour. */
3085 if (execution_direction == EXEC_REVERSE)
3086 return;
3087
3088 /* If this target does not decrement the PC after breakpoints, then
3089 we have nothing to do. */
3090 regcache = get_thread_regcache (ecs->ptid);
3091 gdbarch = get_regcache_arch (regcache);
3092
3093 decr_pc = target_decr_pc_after_break (gdbarch);
3094 if (decr_pc == 0)
3095 return;
3096
3097 aspace = get_regcache_aspace (regcache);
3098
3099 /* Find the location where (if we've hit a breakpoint) the
3100 breakpoint would be. */
3101 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3102
3103 /* Check whether there actually is a software breakpoint inserted at
3104 that location.
3105
3106 If in non-stop mode, a race condition is possible where we've
3107 removed a breakpoint, but stop events for that breakpoint were
3108 already queued and arrive later. To suppress those spurious
3109 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3110 and retire them after a number of stop events are reported. */
3111 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3112 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3113 {
3114 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3115
3116 if (record_full_is_used ())
3117 record_full_gdb_operation_disable_set ();
3118
3119 /* When using hardware single-step, a SIGTRAP is reported for both
3120 a completed single-step and a software breakpoint. Need to
3121 differentiate between the two, as the latter needs adjusting
3122 but the former does not.
3123
3124 The SIGTRAP can be due to a completed hardware single-step only if
3125 - we didn't insert software single-step breakpoints
3126 - the thread to be examined is still the current thread
3127 - this thread is currently being stepped
3128
3129 If any of these events did not occur, we must have stopped due
3130 to hitting a software breakpoint, and have to back up to the
3131 breakpoint address.
3132
3133 As a special case, we could have hardware single-stepped a
3134 software breakpoint. In this case (prev_pc == breakpoint_pc),
3135 we also need to back up to the breakpoint address. */
3136
3137 if (singlestep_breakpoints_inserted_p
3138 || !ptid_equal (ecs->ptid, inferior_ptid)
3139 || !currently_stepping (ecs->event_thread)
3140 || ecs->event_thread->prev_pc == breakpoint_pc)
3141 regcache_write_pc (regcache, breakpoint_pc);
3142
3143 do_cleanups (old_cleanups);
3144 }
3145 }
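/* Illustrative sketch, compiled out: the arithmetic performed above
   on a target whose decr_pc_after_break is 1 (x86-like; the addresses
   below are made up).  A software breakpoint planted at 0x08048500
   traps with the reported PC already past the breakpoint instruction;
   rewinding by the target's decrement recovers the address the
   breakpoint layer expects.  */
#if 0
static void
example_decr_pc_arithmetic (void)
{
  CORE_ADDR reported_pc = 0x08048501;	/* PC as read after the trap.  */
  CORE_ADDR decr_pc = 1;		/* target_decr_pc_after_break.  */
  CORE_ADDR breakpoint_pc = reported_pc - decr_pc;	/* 0x08048500.  */
}
#endif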
3146
3147 static void
3148 init_infwait_state (void)
3149 {
3150 waiton_ptid = pid_to_ptid (-1);
3151 infwait_state = infwait_normal_state;
3152 }
3153
3154 static int
3155 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3156 {
3157 for (frame = get_prev_frame (frame);
3158 frame != NULL;
3159 frame = get_prev_frame (frame))
3160 {
3161 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3162 return 1;
3163 if (get_frame_type (frame) != INLINE_FRAME)
3164 break;
3165 }
3166
3167 return 0;
3168 }
3169
3170 /* Auxiliary function that handles syscall entry/return events.
3171 It returns 1 if the inferior should keep going (and GDB
3172 should ignore the event), or 0 if the event deserves to be
3173 processed. */
3174
3175 static int
3176 handle_syscall_event (struct execution_control_state *ecs)
3177 {
3178 struct regcache *regcache;
3179 int syscall_number;
3180
3181 if (!ptid_equal (ecs->ptid, inferior_ptid))
3182 context_switch (ecs->ptid);
3183
3184 regcache = get_thread_regcache (ecs->ptid);
3185 syscall_number = ecs->ws.value.syscall_number;
3186 stop_pc = regcache_read_pc (regcache);
3187
3188 if (catch_syscall_enabled () > 0
3189 && catching_syscall_number (syscall_number) > 0)
3190 {
3191 if (debug_infrun)
3192 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3193 syscall_number);
3194
3195 ecs->event_thread->control.stop_bpstat
3196 = bpstat_stop_status (get_regcache_aspace (regcache),
3197 stop_pc, ecs->ptid, &ecs->ws);
3198
3199 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3200 {
3201 /* Catchpoint hit. */
3202 return 0;
3203 }
3204 }
3205
3206 /* If no catchpoint triggered for this, then keep going. */
3207 keep_going (ecs);
3208 return 1;
3209 }
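/* Illustrative sketch, compiled out: how the return value of
   handle_syscall_event is meant to be consumed.  This is a
   statement-level fragment that assumes an execution_control_state
   pointer ECS is in scope, as in the event handler below.  */
#if 0
  if (handle_syscall_event (ecs))
    return;		/* Uninteresting; the inferior was kept going.  */

  /* Otherwise a syscall catchpoint triggered; fall through and
     present the stop to the user.  */
#endif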
3210
3211 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3212
3213 static void
3214 fill_in_stop_func (struct gdbarch *gdbarch,
3215 struct execution_control_state *ecs)
3216 {
3217 if (!ecs->stop_func_filled_in)
3218 {
3219 /* Don't care about return value; stop_func_start and stop_func_name
3220 will both be 0 if it doesn't work. */
3221 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3222 &ecs->stop_func_start, &ecs->stop_func_end);
3223 ecs->stop_func_start
3224 += gdbarch_deprecated_function_start_offset (gdbarch);
3225
3226 if (gdbarch_skip_entrypoint_p (gdbarch))
3227 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3228 ecs->stop_func_start);
3229
3230 ecs->stop_func_filled_in = 1;
3231 }
3232 }
3233
3234
3235 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3236
3237 static enum stop_kind
3238 get_inferior_stop_soon (ptid_t ptid)
3239 {
3240 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3241
3242 gdb_assert (inf != NULL);
3243 return inf->control.stop_soon;
3244 }
3245
3246 /* Given an execution control state that has been freshly filled in by
3247 an event from the inferior, figure out what it means and take
3248 appropriate action.
3249
3250 The alternatives are:
3251
3252 1) stop_stepping and return; to really stop and return to the
3253 debugger.
3254
3255 2) keep_going and return; to wait for the next event (set
3256 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3257 once). */
3258
3259 static void
3260 handle_inferior_event (struct execution_control_state *ecs)
3261 {
3262 enum stop_kind stop_soon;
3263
3264 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3265 {
3266 /* We had an event in the inferior, but we are not interested in
3267 handling it at this level. The lower layers have already
3268 done what needs to be done, if anything.
3269
3270 One of the possible circumstances for this is when the
3271 inferior produces output for the console. The inferior has
3272 not stopped, and we are ignoring the event. Another possible
3273 circumstance is any event which the lower level knows will be
3274 reported multiple times without an intervening resume. */
3275 if (debug_infrun)
3276 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3277 prepare_to_wait (ecs);
3278 return;
3279 }
3280
3281 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3282 && target_can_async_p () && !sync_execution)
3283 {
3284 /* There were no unwaited-for children left in the target, but,
3285 we're not synchronously waiting for events either. Just
3286 ignore. Otherwise, if we were running a synchronous
3287 execution command, we need to cancel it and give the user
3288 back the terminal. */
3289 if (debug_infrun)
3290 fprintf_unfiltered (gdb_stdlog,
3291 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3292 prepare_to_wait (ecs);
3293 return;
3294 }
3295
3296 /* Cache the last pid/waitstatus. */
3297 set_last_target_status (ecs->ptid, ecs->ws);
3298
3299 /* Always clear state belonging to the previous time we stopped. */
3300 stop_stack_dummy = STOP_NONE;
3301
3302 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3303 {
3304 /* No unwaited-for children left. IOW, all resumed children
3305 have exited. */
3306 if (debug_infrun)
3307 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3308
3309 stop_print_frame = 0;
3310 stop_stepping (ecs);
3311 return;
3312 }
3313
3314 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3315 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3316 {
3317 ecs->event_thread = find_thread_ptid (ecs->ptid);
3318 /* If it's a new thread, add it to the thread database. */
3319 if (ecs->event_thread == NULL)
3320 ecs->event_thread = add_thread (ecs->ptid);
3321
3322 /* Disable range stepping. If the next step request could use a
3323 range, this will end up re-enabled then. */
3324 ecs->event_thread->control.may_range_step = 0;
3325 }
3326
3327 /* Dependent on valid ECS->EVENT_THREAD. */
3328 adjust_pc_after_break (ecs);
3329
3330 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3331 reinit_frame_cache ();
3332
3333 breakpoint_retire_moribund ();
3334
3335 /* First, distinguish signals caused by the debugger from signals
3336 that have to do with the program's own actions. Note that
3337 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3338 on the operating system version. Here we detect when a SIGILL or
3339 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3340 something similar for SIGSEGV, since a SIGSEGV will be generated
3341 when we're trying to execute a breakpoint instruction on a
3342 non-executable stack. This happens for call dummy breakpoints
3343 for architectures like SPARC that place call dummies on the
3344 stack. */
3345 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3346 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3347 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3348 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3349 {
3350 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3351
3352 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3353 regcache_read_pc (regcache)))
3354 {
3355 if (debug_infrun)
3356 fprintf_unfiltered (gdb_stdlog,
3357 "infrun: Treating signal as SIGTRAP\n");
3358 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3359 }
3360 }
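/* Editorial note, not from the original source: a minimal illustration of
   the remapping above.  With an inferior function call such as

     (gdb) call puts ("hello")

   on a SPARC-like target, the call dummy and its breakpoint live on the
   (possibly non-executable) stack, so hitting that breakpoint may surface
   as SIGSEGV or SIGILL rather than SIGTRAP; the check above folds such a
   signal back into GDB_SIGNAL_TRAP when a breakpoint is inserted at the
   stop PC.  */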
3361
3362 /* Mark the non-executing threads accordingly. In all-stop, all
3363 threads of all processes are stopped when we get any event
3364 reported. In non-stop mode, only the event thread stops. If
3365 we're handling a process exit in non-stop mode, there's nothing
3366 to do, as threads of the dead process are gone, and threads of
3367 any other process were left running. */
3368 if (!non_stop)
3369 set_executing (minus_one_ptid, 0);
3370 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3371 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3372 set_executing (ecs->ptid, 0);
3373
3374 switch (infwait_state)
3375 {
3376 case infwait_normal_state:
3377 if (debug_infrun)
3378 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3379 break;
3380
3381 case infwait_step_watch_state:
3382 if (debug_infrun)
3383 fprintf_unfiltered (gdb_stdlog,
3384 "infrun: infwait_step_watch_state\n");
3385
3386 ecs->stepped_after_stopped_by_watchpoint = 1;
3387 break;
3388
3389 case infwait_nonstep_watch_state:
3390 if (debug_infrun)
3391 fprintf_unfiltered (gdb_stdlog,
3392 "infrun: infwait_nonstep_watch_state\n");
3393 insert_breakpoints ();
3394
3395 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3396 handle things like signals arriving and other things happening
3397 in combination correctly? */
3398 ecs->stepped_after_stopped_by_watchpoint = 1;
3399 break;
3400
3401 default:
3402 internal_error (__FILE__, __LINE__, _("bad switch"));
3403 }
3404
3405 infwait_state = infwait_normal_state;
3406 waiton_ptid = pid_to_ptid (-1);
3407
3408 switch (ecs->ws.kind)
3409 {
3410 case TARGET_WAITKIND_LOADED:
3411 if (debug_infrun)
3412 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3413 if (!ptid_equal (ecs->ptid, inferior_ptid))
3414 context_switch (ecs->ptid);
3415 /* Ignore gracefully during startup of the inferior, as it might
3416 be the shell which has just loaded some objects; otherwise,
3417 add the symbols for the newly loaded objects. Also ignore at
3418 the beginning of an attach or remote session; we will query
3419 the full list of libraries once the connection is
3420 established. */
3421
3422 stop_soon = get_inferior_stop_soon (ecs->ptid);
3423 if (stop_soon == NO_STOP_QUIETLY)
3424 {
3425 struct regcache *regcache;
3426
3427 regcache = get_thread_regcache (ecs->ptid);
3428
3429 handle_solib_event ();
3430
3431 ecs->event_thread->control.stop_bpstat
3432 = bpstat_stop_status (get_regcache_aspace (regcache),
3433 stop_pc, ecs->ptid, &ecs->ws);
3434
3435 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3436 {
3437 /* A catchpoint triggered. */
3438 process_event_stop_test (ecs);
3439 return;
3440 }
3441
3442 /* If requested, stop when the dynamic linker notifies
3443 gdb of events. This allows the user to get control
3444 and place breakpoints in initializer routines for
3445 dynamically loaded objects (among other things). */
3446 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3447 if (stop_on_solib_events)
3448 {
3449 /* Make sure we print "Stopped due to solib-event" in
3450 normal_stop. */
3451 stop_print_frame = 1;
3452
3453 stop_stepping (ecs);
3454 return;
3455 }
3456 }
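/* Editorial aside: the knob checked just above is the user-visible
   setting

     (gdb) set stop-on-solib-events 1
     (gdb) run

   with which GDB stops at each shared library load/unload, letting the
   user place breakpoints in initializers of dynamically loaded objects
   before they run.  */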
3457
3458 /* If we are skipping through a shell, or through shared library
3459 loading that we aren't interested in, resume the program. If
3460 we're running the program normally, also resume. */
3461 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3462 {
3463 /* Loading of shared libraries might have changed breakpoint
3464 addresses. Make sure new breakpoints are inserted. */
3465 if (stop_soon == NO_STOP_QUIETLY
3466 && !breakpoints_always_inserted_mode ())
3467 insert_breakpoints ();
3468 resume (0, GDB_SIGNAL_0);
3469 prepare_to_wait (ecs);
3470 return;
3471 }
3472
3473 /* But stop if we're attaching or setting up a remote
3474 connection. */
3475 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3476 || stop_soon == STOP_QUIETLY_REMOTE)
3477 {
3478 if (debug_infrun)
3479 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3480 stop_stepping (ecs);
3481 return;
3482 }
3483
3484 internal_error (__FILE__, __LINE__,
3485 _("unhandled stop_soon: %d"), (int) stop_soon);
3486
3487 case TARGET_WAITKIND_SPURIOUS:
3488 if (debug_infrun)
3489 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3490 if (!ptid_equal (ecs->ptid, inferior_ptid))
3491 context_switch (ecs->ptid);
3492 resume (0, GDB_SIGNAL_0);
3493 prepare_to_wait (ecs);
3494 return;
3495
3496 case TARGET_WAITKIND_EXITED:
3497 case TARGET_WAITKIND_SIGNALLED:
3498 if (debug_infrun)
3499 {
3500 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3501 fprintf_unfiltered (gdb_stdlog,
3502 "infrun: TARGET_WAITKIND_EXITED\n");
3503 else
3504 fprintf_unfiltered (gdb_stdlog,
3505 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3506 }
3507
3508 inferior_ptid = ecs->ptid;
3509 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3510 set_current_program_space (current_inferior ()->pspace);
3511 handle_vfork_child_exec_or_exit (0);
3512 target_terminal_ours (); /* Must do this before mourn anyway. */
3513
3514 /* Clear any previous state of convenience variables. */
3515 clear_exit_convenience_vars ();
3516
3517 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3518 {
3519 /* Record the exit code in the convenience variable $_exitcode, so
3520 that the user can inspect this again later. */
3521 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3522 (LONGEST) ecs->ws.value.integer);
3523
3524 /* Also record this in the inferior itself. */
3525 current_inferior ()->has_exit_code = 1;
3526 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3527
3528 /* Support the --return-child-result option. */
3529 return_child_result_value = ecs->ws.value.integer;
3530
3531 print_exited_reason (ecs->ws.value.integer);
3532 }
3533 else
3534 {
3535 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3536 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3537
3538 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3539 {
3540 /* Set the value of the internal variable $_exitsignal,
3541 which holds the signal uncaught by the inferior. */
3542 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3543 gdbarch_gdb_signal_to_target (gdbarch,
3544 ecs->ws.value.sig));
3545 }
3546 else
3547 {
3548 /* We don't have access to the target's method used for
3549 converting between signal numbers (GDB's internal
3550 representation <-> target's representation).
3551 Therefore, we cannot do a good job at displaying this
3552 information to the user. It's better to just warn
3553 her about it (if infrun debugging is enabled), and
3554 give up. */
3555 if (debug_infrun)
3556 fprintf_filtered (gdb_stdlog, _("\
3557 Cannot fill $_exitsignal with the correct signal number.\n"));
3558 }
3559
3560 print_signal_exited_reason (ecs->ws.value.sig);
3561 }
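/* Editorial example of inspecting the convenience variables set above
   once the inferior is gone:

     (gdb) run
     (gdb) print $_exitcode
     (gdb) print $_exitsignal

   $_exitcode is set on a normal exit and $_exitsignal when the inferior
   was killed by an uncaught signal; only one of the two is meaningful
   for a given run.  */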
3562
3563 gdb_flush (gdb_stdout);
3564 target_mourn_inferior ();
3565 singlestep_breakpoints_inserted_p = 0;
3566 cancel_single_step_breakpoints ();
3567 stop_print_frame = 0;
3568 stop_stepping (ecs);
3569 return;
3570
3571 /* The following are the only cases in which we keep going;
3572 the above cases all end in a return. */
3573 case TARGET_WAITKIND_FORKED:
3574 case TARGET_WAITKIND_VFORKED:
3575 if (debug_infrun)
3576 {
3577 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3578 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3579 else
3580 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3581 }
3582
3583 /* Check whether the inferior is displaced stepping. */
3584 {
3585 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3586 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3587 struct displaced_step_inferior_state *displaced
3588 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3589
3590 /* If displaced stepping is supported, and thread ecs->ptid is
3591 displaced stepping, fix up its state now. */
3592 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3593 {
3594 struct inferior *parent_inf
3595 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3596 struct regcache *child_regcache;
3597 CORE_ADDR parent_pc;
3598
3599 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3600 indicating that the displaced stepping of syscall instruction
3601 has been done. Perform cleanup for parent process here. Note
3602 that this operation also cleans up the child process for vfork,
3603 because their pages are shared. */
3604 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3605
3606 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3607 {
3608 /* Restore scratch pad for child process. */
3609 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3610 }
3611
3612 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3613 the child's PC is also within the scratchpad. Set the child's PC
3614 to the parent's PC value, which has already been fixed up.
3615 FIXME: we use the parent's aspace here, although we're touching
3616 the child, because the child hasn't been added to the inferior
3617 list yet at this point. */
3618
3619 child_regcache
3620 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3621 gdbarch,
3622 parent_inf->aspace);
3623 /* Read PC value of parent process. */
3624 parent_pc = regcache_read_pc (regcache);
3625
3626 if (debug_displaced)
3627 fprintf_unfiltered (gdb_stdlog,
3628 "displaced: write child pc from %s to %s\n",
3629 paddress (gdbarch,
3630 regcache_read_pc (child_regcache)),
3631 paddress (gdbarch, parent_pc));
3632
3633 regcache_write_pc (child_regcache, parent_pc);
3634 }
3635 }
3636
3637 if (!ptid_equal (ecs->ptid, inferior_ptid))
3638 context_switch (ecs->ptid);
3639
3640 /* Immediately detach breakpoints from the child before there's
3641 any chance of letting the user delete breakpoints from the
3642 breakpoint lists. If we don't do this early, it's easy to
3643 leave leftover traps in the child, viz: "break foo; catch
3644 fork; c; <fork>; del; c; <child calls foo>". We only follow
3645 the fork on the last `continue', and by that time the
3646 breakpoint at "foo" is long gone from the breakpoint table.
3647 If we vforked, then we don't need to unpatch here, since both
3648 parent and child are sharing the same memory pages; we'll
3649 need to unpatch at follow/detach time instead to be certain
3650 that new breakpoints added between catchpoint hit time and
3651 vfork follow are detached. */
3652 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3653 {
3654 /* This won't actually modify the breakpoint list, but will
3655 physically remove the breakpoints from the child. */
3656 detach_breakpoints (ecs->ws.value.related_pid);
3657 }
3658
3659 if (singlestep_breakpoints_inserted_p)
3660 {
3661 /* Pull the single step breakpoints out of the target. */
3662 remove_single_step_breakpoints ();
3663 singlestep_breakpoints_inserted_p = 0;
3664 }
3665
3666 /* In case the event is caught by a catchpoint, remember that
3667 the event is to be followed at the next resume of the thread,
3668 and not immediately. */
3669 ecs->event_thread->pending_follow = ecs->ws;
3670
3671 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3672
3673 ecs->event_thread->control.stop_bpstat
3674 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3675 stop_pc, ecs->ptid, &ecs->ws);
3676
3677 /* If no catchpoint triggered for this, then keep going. Note
3678 that we're interested in knowing the bpstat actually causes a
3679 stop, not just if it may explain the signal. Software
3680 watchpoints, for example, always appear in the bpstat. */
3681 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3682 {
3683 ptid_t parent;
3684 ptid_t child;
3685 int should_resume;
3686 int follow_child
3687 = (follow_fork_mode_string == follow_fork_mode_child);
3688
3689 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3690
3691 should_resume = follow_fork ();
3692
3693 parent = ecs->ptid;
3694 child = ecs->ws.value.related_pid;
3695
3696 /* In non-stop mode, also resume the other branch. */
3697 if (non_stop && !detach_fork)
3698 {
3699 if (follow_child)
3700 switch_to_thread (parent);
3701 else
3702 switch_to_thread (child);
3703
3704 ecs->event_thread = inferior_thread ();
3705 ecs->ptid = inferior_ptid;
3706 keep_going (ecs);
3707 }
3708
3709 if (follow_child)
3710 switch_to_thread (child);
3711 else
3712 switch_to_thread (parent);
3713
3714 ecs->event_thread = inferior_thread ();
3715 ecs->ptid = inferior_ptid;
3716
3717 if (should_resume)
3718 keep_going (ecs);
3719 else
3720 stop_stepping (ecs);
3721 return;
3722 }
3723 process_event_stop_test (ecs);
3724 return;
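/* Editorial example of the user-visible knobs consulted above, assuming
   a forking inferior:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off
     (gdb) catch fork
     (gdb) run
     (gdb) info inferiors

   With detach-on-fork off, both parent and child stay under GDB's
   control, which corresponds to the "resume the other branch" handling
   above; follow-fork-mode decides which side ends up as the current
   thread.  */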
3725
3726 case TARGET_WAITKIND_VFORK_DONE:
3727 /* Done with the shared memory region. Re-insert breakpoints in
3728 the parent, and keep going. */
3729
3730 if (debug_infrun)
3731 fprintf_unfiltered (gdb_stdlog,
3732 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3733
3734 if (!ptid_equal (ecs->ptid, inferior_ptid))
3735 context_switch (ecs->ptid);
3736
3737 current_inferior ()->waiting_for_vfork_done = 0;
3738 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3739 /* This also takes care of reinserting breakpoints in the
3740 previously locked inferior. */
3741 keep_going (ecs);
3742 return;
3743
3744 case TARGET_WAITKIND_EXECD:
3745 if (debug_infrun)
3746 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3747
3748 if (!ptid_equal (ecs->ptid, inferior_ptid))
3749 context_switch (ecs->ptid);
3750
3751 singlestep_breakpoints_inserted_p = 0;
3752 cancel_single_step_breakpoints ();
3753
3754 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3755
3756 /* Do whatever is necessary to the parent branch of the vfork. */
3757 handle_vfork_child_exec_or_exit (1);
3758
3759 /* This causes the eventpoints and symbol table to be reset.
3760 Must do this now, before trying to determine whether to
3761 stop. */
3762 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3763
3764 ecs->event_thread->control.stop_bpstat
3765 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3766 stop_pc, ecs->ptid, &ecs->ws);
3767
3768 /* Note that this may be referenced from inside
3769 bpstat_stop_status above, through inferior_has_execd. */
3770 xfree (ecs->ws.value.execd_pathname);
3771 ecs->ws.value.execd_pathname = NULL;
3772
3773 /* If no catchpoint triggered for this, then keep going. */
3774 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3775 {
3776 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3777 keep_going (ecs);
3778 return;
3779 }
3780 process_event_stop_test (ecs);
3781 return;
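/* Editorial aside: the catchpoint consulted above is the one the user
   creates with, e.g.:

     (gdb) catch exec
     (gdb) run

   which stops at the exec event instead of silently following the newly
   exec'd image.  */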
3782
3783 /* Be careful not to try to gather much state about a thread
3784 that's in a syscall. It's frequently a losing proposition. */
3785 case TARGET_WAITKIND_SYSCALL_ENTRY:
3786 if (debug_infrun)
3787 fprintf_unfiltered (gdb_stdlog,
3788 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3789 /* Get the current syscall number. */
3790 if (handle_syscall_event (ecs) == 0)
3791 process_event_stop_test (ecs);
3792 return;
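/* Editorial aside: syscall entry/return events are only reported when
   the user has asked for them, typically via a syscall catchpoint:

     (gdb) catch syscall nanosleep
     (gdb) run

   (naming an individual syscall, as in this example, is optional and
   target-dependent).  */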
3793
3794 /* Before examining the threads further, step this thread to
3795 get it entirely out of the syscall. (We get notice of the
3796 event when the thread is just on the verge of exiting a
3797 syscall. Stepping one instruction seems to get it back
3798 into user code.) */
3799 case TARGET_WAITKIND_SYSCALL_RETURN:
3800 if (debug_infrun)
3801 fprintf_unfiltered (gdb_stdlog,
3802 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3803 if (handle_syscall_event (ecs) == 0)
3804 process_event_stop_test (ecs);
3805 return;
3806
3807 case TARGET_WAITKIND_STOPPED:
3808 if (debug_infrun)
3809 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3810 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3811 handle_signal_stop (ecs);
3812 return;
3813
3814 case TARGET_WAITKIND_NO_HISTORY:
3815 if (debug_infrun)
3816 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3817 /* Reverse execution: target ran out of history info. */
3818
3819 /* Pull the single step breakpoints out of the target. */
3820 if (singlestep_breakpoints_inserted_p)
3821 {
3822 if (!ptid_equal (ecs->ptid, inferior_ptid))
3823 context_switch (ecs->ptid);
3824 remove_single_step_breakpoints ();
3825 singlestep_breakpoints_inserted_p = 0;
3826 }
3827 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3828 print_no_history_reason ();
3829 stop_stepping (ecs);
3830 return;
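/* Editorial aside: a typical way to reach the NO_HISTORY path above is

     (gdb) record full
     ... let the program run for a while ...
     (gdb) reverse-continue

   with the recorded log exhausted before any breakpoint is hit; GDB
   then stops and reports that no more reverse-execution history is
   available.  */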
3831 }
3832 }
3833
3834 /* Come here when the program has stopped with a signal. */
3835
3836 static void
3837 handle_signal_stop (struct execution_control_state *ecs)
3838 {
3839 struct frame_info *frame;
3840 struct gdbarch *gdbarch;
3841 int stopped_by_watchpoint;
3842 enum stop_kind stop_soon;
3843 int random_signal;
3844
3845 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3846
3847 /* Do we need to clean up the state of a thread that has
3848 completed a displaced single-step? (Doing so usually affects
3849 the PC, so do it here, before we set stop_pc.) */
3850 displaced_step_fixup (ecs->ptid,
3851 ecs->event_thread->suspend.stop_signal);
3852
3853 /* If we either finished a single-step or hit a breakpoint, but
3854 the user wanted this thread to be stopped, pretend we got a
3855 SIG0 (generic unsignaled stop). */
3856 if (ecs->event_thread->stop_requested
3857 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3858 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3859
3860 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3861
3862 if (debug_infrun)
3863 {
3864 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3865 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3866 struct cleanup *old_chain = save_inferior_ptid ();
3867
3868 inferior_ptid = ecs->ptid;
3869
3870 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3871 paddress (gdbarch, stop_pc));
3872 if (target_stopped_by_watchpoint ())
3873 {
3874 CORE_ADDR addr;
3875
3876 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3877
3878 if (target_stopped_data_address (&current_target, &addr))
3879 fprintf_unfiltered (gdb_stdlog,
3880 "infrun: stopped data address = %s\n",
3881 paddress (gdbarch, addr));
3882 else
3883 fprintf_unfiltered (gdb_stdlog,
3884 "infrun: (no data address available)\n");
3885 }
3886
3887 do_cleanups (old_chain);
3888 }
3889
3890 /* This originates from start_remote(), start_inferior() and
3891 shared library hook functions. */
3892 stop_soon = get_inferior_stop_soon (ecs->ptid);
3893 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3894 {
3895 if (!ptid_equal (ecs->ptid, inferior_ptid))
3896 context_switch (ecs->ptid);
3897 if (debug_infrun)
3898 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3899 stop_print_frame = 1;
3900 stop_stepping (ecs);
3901 return;
3902 }
3903
3904 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3905 && stop_after_trap)
3906 {
3907 if (!ptid_equal (ecs->ptid, inferior_ptid))
3908 context_switch (ecs->ptid);
3909 if (debug_infrun)
3910 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3911 stop_print_frame = 0;
3912 stop_stepping (ecs);
3913 return;
3914 }
3915
3916 /* This originates from attach_command(). We need to overwrite
3917 the stop_signal here, because some kernels don't ignore a
3918 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3919 See more comments in inferior.h. On the other hand, if we
3920 get a non-SIGSTOP, report it to the user - assume the backend
3921 will handle the SIGSTOP if it should show up later.
3922
3923 Also consider that the attach is complete when we see a
3924 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3925 target extended-remote report it instead of a SIGSTOP
3926 (e.g. gdbserver). We already rely on SIGTRAP being our
3927 signal, so this is no exception.
3928
3929 Also consider that the attach is complete when we see a
3930 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3931 the target to stop all threads of the inferior, in case the
3932 low level attach operation doesn't stop them implicitly. If
3933 they weren't stopped implicitly, then the stub will report a
3934 GDB_SIGNAL_0, meaning: stopped for no particular reason
3935 other than GDB's request. */
3936 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3937 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3938 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3939 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3940 {
3941 stop_print_frame = 1;
3942 stop_stepping (ecs);
3943 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3944 return;
3945 }
3946
3947 /* See if something interesting happened to the non-current thread. If
3948 so, then switch to that thread. */
3949 if (!ptid_equal (ecs->ptid, inferior_ptid))
3950 {
3951 if (debug_infrun)
3952 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3953
3954 context_switch (ecs->ptid);
3955
3956 if (deprecated_context_hook)
3957 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3958 }
3959
3960 /* At this point, get hold of the now-current thread's frame. */
3961 frame = get_current_frame ();
3962 gdbarch = get_frame_arch (frame);
3963
3964 /* Pull the single step breakpoints out of the target. */
3965 if (singlestep_breakpoints_inserted_p)
3966 {
3967 /* However, before doing so, if this single-step breakpoint was
3968 actually for another thread, set this thread up for moving
3969 past it. */
3970 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3971 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3972 {
3973 struct regcache *regcache;
3974 struct address_space *aspace;
3975 CORE_ADDR pc;
3976
3977 regcache = get_thread_regcache (ecs->ptid);
3978 aspace = get_regcache_aspace (regcache);
3979 pc = regcache_read_pc (regcache);
3980 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3981 {
3982 if (debug_infrun)
3983 {
3984 fprintf_unfiltered (gdb_stdlog,
3985 "infrun: [%s] hit step over single-step"
3986 " breakpoint of [%s]\n",
3987 target_pid_to_str (ecs->ptid),
3988 target_pid_to_str (singlestep_ptid));
3989 }
3990 ecs->hit_singlestep_breakpoint = 1;
3991 }
3992 }
3993
3994 remove_single_step_breakpoints ();
3995 singlestep_breakpoints_inserted_p = 0;
3996 }
3997
3998 if (ecs->stepped_after_stopped_by_watchpoint)
3999 stopped_by_watchpoint = 0;
4000 else
4001 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4002
4003 /* If necessary, step over this watchpoint. We'll be back to display
4004 it in a moment. */
4005 if (stopped_by_watchpoint
4006 && (target_have_steppable_watchpoint
4007 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4008 {
4009 /* At this point, we are stopped at an instruction which has
4010 attempted to write to a piece of memory under control of
4011 a watchpoint. The instruction hasn't actually executed
4012 yet. If we were to evaluate the watchpoint expression
4013 now, we would get the old value, and therefore no change
4014 would seem to have occurred.
4015
4016 In order to make watchpoints work `right', we really need
4017 to complete the memory write, and then evaluate the
4018 watchpoint expression. We do this by single-stepping the
4019 target.
4020
4021 It may not be necessary to disable the watchpoint to stop over
4022 it. For example, the PA can (with some kernel cooperation)
4023 single step over a watchpoint without disabling the watchpoint.
4024
4025 It is far more common to need to disable a watchpoint to step
4026 the inferior over it. If we have non-steppable watchpoints,
4027 we must disable the current watchpoint; it's simplest to
4028 disable all watchpoints and breakpoints. */
4029 int hw_step = 1;
4030
4031 if (!target_have_steppable_watchpoint)
4032 {
4033 remove_breakpoints ();
4034 /* See comment in resume why we need to stop bypassing signals
4035 while breakpoints have been removed. */
4036 target_pass_signals (0, NULL);
4037 }
4038 /* Single step */
4039 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4040 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4041 waiton_ptid = ecs->ptid;
4042 if (target_have_steppable_watchpoint)
4043 infwait_state = infwait_step_watch_state;
4044 else
4045 infwait_state = infwait_nonstep_watch_state;
4046 prepare_to_wait (ecs);
4047 return;
4048 }
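/* Editorial sketch of the non-steppable case above, assuming the user
   did "watch x" on a target that traps before the store retires:

     x = 42;    <- watchpoint trap delivered here, store not yet done

   Evaluating the watch expression at trap time would still read the old
   value of x, so the instruction is single-stepped first (with
   breakpoints, and possibly all watchpoints, pulled out) and the
   old/new comparison happens only afterwards.  */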
4049
4050 ecs->event_thread->stepping_over_breakpoint = 0;
4051 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4052 ecs->event_thread->control.stop_step = 0;
4053 stop_print_frame = 1;
4054 stopped_by_random_signal = 0;
4055
4056 /* Hide inlined functions starting here, unless we just performed stepi or
4057 nexti. After stepi and nexti, always show the innermost frame (not any
4058 inline function call sites). */
4059 if (ecs->event_thread->control.step_range_end != 1)
4060 {
4061 struct address_space *aspace =
4062 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4063
4064 /* skip_inline_frames is expensive, so we avoid it if we can
4065 determine that the address is one where functions cannot have
4066 been inlined. This improves performance with inferiors that
4067 load a lot of shared libraries, because the solib event
4068 breakpoint is defined as the address of a function (i.e. not
4069 inline). Note that we have to check the previous PC as well
4070 as the current one to catch cases when we have just
4071 single-stepped off a breakpoint prior to reinstating it.
4072 Note that we're assuming that the code we single-step to is
4073 not inline, but that's not definitive: there's nothing
4074 preventing the event breakpoint function from containing
4075 inlined code, and the single-step ending up there. If the
4076 user had set a breakpoint on that inlined code, the missing
4077 skip_inline_frames call would break things. Fortunately
4078 that's an extremely unlikely scenario. */
4079 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4080 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4081 && ecs->event_thread->control.trap_expected
4082 && pc_at_non_inline_function (aspace,
4083 ecs->event_thread->prev_pc,
4084 &ecs->ws)))
4085 {
4086 skip_inline_frames (ecs->ptid);
4087
4088 /* Re-fetch current thread's frame in case that invalidated
4089 the frame cache. */
4090 frame = get_current_frame ();
4091 gdbarch = get_frame_arch (frame);
4092 }
4093 }
4094
4095 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4096 && ecs->event_thread->control.trap_expected
4097 && gdbarch_single_step_through_delay_p (gdbarch)
4098 && currently_stepping (ecs->event_thread))
4099 {
4100 /* We're trying to step off a breakpoint. Turns out that we're
4101 also on an instruction that needs to be stepped multiple
4102 times before it has been fully executed. E.g., architectures
4103 with a delay slot. It needs to be stepped twice, once for
4104 the instruction and once for the delay slot. */
4105 int step_through_delay
4106 = gdbarch_single_step_through_delay (gdbarch, frame);
4107
4108 if (debug_infrun && step_through_delay)
4109 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4110 if (ecs->event_thread->control.step_range_end == 0
4111 && step_through_delay)
4112 {
4113 /* The user issued a continue when stopped at a breakpoint.
4114 Set up for another trap and get out of here. */
4115 ecs->event_thread->stepping_over_breakpoint = 1;
4116 keep_going (ecs);
4117 return;
4118 }
4119 else if (step_through_delay)
4120 {
4121 /* The user issued a step when stopped at a breakpoint.
4122 Maybe we should stop, maybe we should not - the delay
4123 slot *might* correspond to a line of source. In any
4124 case, don't decide that here, just set
4125 ecs->stepping_over_breakpoint, making sure we
4126 single-step again before breakpoints are re-inserted. */
4127 ecs->event_thread->stepping_over_breakpoint = 1;
4128 }
4129 }
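/* Editorial illustration of the delay-slot case above, using MIPS-style
   assembly as an example:

     beq   $t0, $t1, target
     addiu $a0, $a0, 1        # delay slot, executes with the branch

   A single hardware step from the branch may leave the delay-slot
   instruction still pending, hence the extra step arranged above.  */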
4130
4131 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4132 handles this event. */
4133 ecs->event_thread->control.stop_bpstat
4134 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4135 stop_pc, ecs->ptid, &ecs->ws);
4136
4137 /* The following is in case the break condition called a
4138 function. */
4139 stop_print_frame = 1;
4140
4141 /* This is where we handle "moribund" watchpoints. Unlike
4142 software breakpoints traps, hardware watchpoint traps are
4143 always distinguishable from random traps. If no high-level
4144 watchpoint is associated with the reported stop data address
4145 anymore, then the bpstat does not explain the signal ---
4146 simply make sure to ignore it if `stopped_by_watchpoint' is
4147 set. */
4148
4149 if (debug_infrun
4150 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4151 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4152 GDB_SIGNAL_TRAP)
4153 && stopped_by_watchpoint)
4154 fprintf_unfiltered (gdb_stdlog,
4155 "infrun: no user watchpoint explains "
4156 "watchpoint SIGTRAP, ignoring\n");
4157
4158 /* NOTE: cagney/2003-03-29: These checks for a random signal
4159 at one stage in the past included checks for an inferior
4160 function call's call dummy's return breakpoint. The original
4161 comment, that went with the test, read:
4162
4163 ``End of a stack dummy. Some systems (e.g. Sony news) give
4164 another signal besides SIGTRAP, so check here as well as
4165 above.''
4166
4167 If someone ever tries to get call dummies on a
4168 non-executable stack to work (where the target would stop
4169 with something like a SIGSEGV), then those tests might need
4170 to be re-instated. Given, however, that the tests were only
4171 enabled when momentary breakpoints were not being used, I
4172 suspect that it won't be the case.
4173
4174 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4175 be necessary for call dummies on a non-executable stack on
4176 SPARC. */
4177
4178 /* See if the breakpoints module can explain the signal. */
4179 random_signal
4180 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4181 ecs->event_thread->suspend.stop_signal);
4182
4183 /* If not, perhaps stepping/nexting can. */
4184 if (random_signal)
4185 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4186 && currently_stepping (ecs->event_thread));
4187
4188 /* Perhaps the thread hit a single-step breakpoint of _another_
4189 thread. Single-step breakpoints are transparent to the
4190 breakpoints module. */
4191 if (random_signal)
4192 random_signal = !ecs->hit_singlestep_breakpoint;
4193
4194 /* No? Perhaps we got a moribund watchpoint. */
4195 if (random_signal)
4196 random_signal = !stopped_by_watchpoint;
4197
4198 /* For the program's own signals, act according to
4199 the signal handling tables. */
4200
4201 if (random_signal)
4202 {
4203 /* Signal not for debugging purposes. */
4204 int printed = 0;
4205 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4206 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4207
4208 if (debug_infrun)
4209 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4210 gdb_signal_to_symbol_string (stop_signal));
4211
4212 stopped_by_random_signal = 1;
4213
4214 if (signal_print[ecs->event_thread->suspend.stop_signal])
4215 {
4216 printed = 1;
4217 target_terminal_ours_for_output ();
4218 print_signal_received_reason
4219 (ecs->event_thread->suspend.stop_signal);
4220 }
4221 /* Always stop on signals if we're either just gaining control
4222 of the program, or the user explicitly requested this thread
4223 to remain stopped. */
4224 if (stop_soon != NO_STOP_QUIETLY
4225 || ecs->event_thread->stop_requested
4226 || (!inf->detaching
4227 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4228 {
4229 stop_stepping (ecs);
4230 return;
4231 }
4232 /* If not going to stop, give terminal back
4233 if we took it away. */
4234 else if (printed)
4235 target_terminal_inferior ();
4236
4237 /* Clear the signal if it should not be passed. */
4238 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4239 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
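/* Editorial aside: signal_stop, signal_print and signal_program above
   are the tables behind the "handle" command, e.g.:

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) info signals SIGUSR1

   With those settings a SIGUSR1 is quietly forwarded to the inferior
   here instead of stopping it.  */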
4240
4241 if (ecs->event_thread->prev_pc == stop_pc
4242 && ecs->event_thread->control.trap_expected
4243 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4244 {
4245 /* We were just starting a new sequence, attempting to
4246 single-step off of a breakpoint and expecting a SIGTRAP.
4247 Instead this signal arrives. This signal will take us out
4248 of the stepping range so GDB needs to remember to, when
4249 the signal handler returns, resume stepping off that
4250 breakpoint. */
4251 /* To simplify things, "continue" is forced to use the same
4252 code paths as single-step - set a breakpoint at the
4253 signal return address and then, once hit, step off that
4254 breakpoint. */
4255 if (debug_infrun)
4256 fprintf_unfiltered (gdb_stdlog,
4257 "infrun: signal arrived while stepping over "
4258 "breakpoint\n");
4259
4260 insert_hp_step_resume_breakpoint_at_frame (frame);
4261 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4262 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4263 ecs->event_thread->control.trap_expected = 0;
4264
4265 /* If we were nexting/stepping some other thread, switch to
4266 it, so that we don't continue it, losing control. */
4267 if (!switch_back_to_stepped_thread (ecs))
4268 keep_going (ecs);
4269 return;
4270 }
4271
4272 if (ecs->event_thread->control.step_range_end != 0
4273 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4274 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4275 && frame_id_eq (get_stack_frame_id (frame),
4276 ecs->event_thread->control.step_stack_frame_id)
4277 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4278 {
4279 /* The inferior is about to take a signal that will take it
4280 out of the single step range. Set a breakpoint at the
4281 current PC (which is presumably where the signal handler
4282 will eventually return) and then allow the inferior to
4283 run free.
4284
4285 Note that this is only needed for a signal delivered
4286 while in the single-step range. Nested signals aren't a
4287 problem as they eventually all return. */
4288 if (debug_infrun)
4289 fprintf_unfiltered (gdb_stdlog,
4290 "infrun: signal may take us out of "
4291 "single-step range\n");
4292
4293 insert_hp_step_resume_breakpoint_at_frame (frame);
4294 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4295 ecs->event_thread->control.trap_expected = 0;
4296 keep_going (ecs);
4297 return;
4298 }
4299
4300 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4301 when either there's a nested signal, or when there's a
4302 pending signal enabled just as the signal handler returns
4303 (leaving the inferior at the step-resume-breakpoint without
4304 actually executing it). Either way continue until the
4305 breakpoint is really hit. */
4306
4307 if (!switch_back_to_stepped_thread (ecs))
4308 {
4309 if (debug_infrun)
4310 fprintf_unfiltered (gdb_stdlog,
4311 "infrun: random signal, keep going\n");
4312
4313 keep_going (ecs);
4314 }
4315 return;
4316 }
4317
4318 process_event_stop_test (ecs);
4319 }
4320
4321 /* Come here when we've got some debug event / signal we can explain
4322 (IOW, not a random signal), and test whether it should cause a
4323 stop, or whether we should resume the inferior (transparently).
4324 E.g., could be a breakpoint whose condition evaluates false; we
4325 could be still stepping within the line; etc. */
4326
4327 static void
4328 process_event_stop_test (struct execution_control_state *ecs)
4329 {
4330 struct symtab_and_line stop_pc_sal;
4331 struct frame_info *frame;
4332 struct gdbarch *gdbarch;
4333 CORE_ADDR jmp_buf_pc;
4334 struct bpstat_what what;
4335
4336 /* Handle cases caused by hitting a breakpoint. */
4337
4338 frame = get_current_frame ();
4339 gdbarch = get_frame_arch (frame);
4340
4341 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4342
4343 if (what.call_dummy)
4344 {
4345 stop_stack_dummy = what.call_dummy;
4346 }
4347
4348 /* If we hit an internal event that triggers symbol changes, the
4349 current frame will be invalidated within bpstat_what (e.g., if we
4350 hit an internal solib event). Re-fetch it. */
4351 frame = get_current_frame ();
4352 gdbarch = get_frame_arch (frame);
4353
4354 switch (what.main_action)
4355 {
4356 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4357 /* If we hit the breakpoint at longjmp while stepping, we
4358 install a momentary breakpoint at the target of the
4359 jmp_buf. */
4360
4361 if (debug_infrun)
4362 fprintf_unfiltered (gdb_stdlog,
4363 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4364
4365 ecs->event_thread->stepping_over_breakpoint = 1;
4366
4367 if (what.is_longjmp)
4368 {
4369 struct value *arg_value;
4370
4371 /* If we set the longjmp breakpoint via a SystemTap probe,
4372 then use it to extract the arguments. The destination PC
4373 is the third argument to the probe. */
4374 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4375 if (arg_value)
4376 jmp_buf_pc = value_as_address (arg_value);
4377 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4378 || !gdbarch_get_longjmp_target (gdbarch,
4379 frame, &jmp_buf_pc))
4380 {
4381 if (debug_infrun)
4382 fprintf_unfiltered (gdb_stdlog,
4383 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4384 "(!gdbarch_get_longjmp_target)\n");
4385 keep_going (ecs);
4386 return;
4387 }
4388
4389 /* Insert a breakpoint at resume address. */
4390 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4391 }
4392 else
4393 check_exception_resume (ecs, frame);
4394 keep_going (ecs);
4395 return;
4396
4397 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4398 {
4399 struct frame_info *init_frame;
4400
4401 /* There are several cases to consider.
4402
4403 1. The initiating frame no longer exists. In this case we
4404 must stop, because the exception or longjmp has gone too
4405 far.
4406
4407 2. The initiating frame exists, and is the same as the
4408 current frame. We stop, because the exception or longjmp
4409 has been caught.
4410
4411 3. The initiating frame exists and is different from the
4412 current frame. This means the exception or longjmp has
4413 been caught beneath the initiating frame, so keep going.
4414
4415 4. The longjmp breakpoint has been placed just to protect
4416 against stale dummy frames, and the user is not interested
4417 in stopping around longjmps. */
4418
4419 if (debug_infrun)
4420 fprintf_unfiltered (gdb_stdlog,
4421 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4422
4423 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4424 != NULL);
4425 delete_exception_resume_breakpoint (ecs->event_thread);
4426
4427 if (what.is_longjmp)
4428 {
4429 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4430
4431 if (!frame_id_p (ecs->event_thread->initiating_frame))
4432 {
4433 /* Case 4. */
4434 keep_going (ecs);
4435 return;
4436 }
4437 }
4438
4439 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4440
4441 if (init_frame)
4442 {
4443 struct frame_id current_id
4444 = get_frame_id (get_current_frame ());
4445 if (frame_id_eq (current_id,
4446 ecs->event_thread->initiating_frame))
4447 {
4448 /* Case 2. Fall through. */
4449 }
4450 else
4451 {
4452 /* Case 3. */
4453 keep_going (ecs);
4454 return;
4455 }
4456 }
4457
4458 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4459 exists. */
4460 delete_step_resume_breakpoint (ecs->event_thread);
4461
4462 ecs->event_thread->control.stop_step = 1;
4463 print_end_stepping_range_reason ();
4464 stop_stepping (ecs);
4465 }
4466 return;
4467
4468 case BPSTAT_WHAT_SINGLE:
4469 if (debug_infrun)
4470 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4471 ecs->event_thread->stepping_over_breakpoint = 1;
4472 /* Still need to check other stuff, at least the case where we
4473 are stepping and step out of the right range. */
4474 break;
4475
4476 case BPSTAT_WHAT_STEP_RESUME:
4477 if (debug_infrun)
4478 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4479
4480 delete_step_resume_breakpoint (ecs->event_thread);
4481 if (ecs->event_thread->control.proceed_to_finish
4482 && execution_direction == EXEC_REVERSE)
4483 {
4484 struct thread_info *tp = ecs->event_thread;
4485
4486 /* We are finishing a function in reverse, and just hit the
4487 step-resume breakpoint at the start address of the
4488 function, and we're almost there -- just need to back up
4489 by one more single-step, which should take us back to the
4490 function call. */
4491 tp->control.step_range_start = tp->control.step_range_end = 1;
4492 keep_going (ecs);
4493 return;
4494 }
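/* Editorial aside: the proceed_to_finish-in-reverse case above is what
   a session like this exercises (assuming a record-capable target):

     (gdb) record full
     ... run forward into some callee ...
     (gdb) reverse-finish

   The step-resume breakpoint sits at the callee's entry; the single
   backward step requested above then lands back on the call.  */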
4495 fill_in_stop_func (gdbarch, ecs);
4496 if (stop_pc == ecs->stop_func_start
4497 && execution_direction == EXEC_REVERSE)
4498 {
4499 /* We are stepping over a function call in reverse, and just
4500 hit the step-resume breakpoint at the start address of
4501 the function. Go back to single-stepping, which should
4502 take us back to the function call. */
4503 ecs->event_thread->stepping_over_breakpoint = 1;
4504 keep_going (ecs);
4505 return;
4506 }
4507 break;
4508
4509 case BPSTAT_WHAT_STOP_NOISY:
4510 if (debug_infrun)
4511 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4512 stop_print_frame = 1;
4513
4514 /* Assume the thread stopped for a breakpoint. We'll still check
4515 whether a/the breakpoint is there when the thread is next
4516 resumed. */
4517 ecs->event_thread->stepping_over_breakpoint = 1;
4518
4519 stop_stepping (ecs);
4520 return;
4521
4522 case BPSTAT_WHAT_STOP_SILENT:
4523 if (debug_infrun)
4524 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4525 stop_print_frame = 0;
4526
4527 /* Assume the thread stopped for a breakpoint. We'll still check
4528 whether a/the breakpoint is there when the thread is next
4529 resumed. */
4530 ecs->event_thread->stepping_over_breakpoint = 1;
4531 stop_stepping (ecs);
4532 return;
4533
4534 case BPSTAT_WHAT_HP_STEP_RESUME:
4535 if (debug_infrun)
4536 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4537
4538 delete_step_resume_breakpoint (ecs->event_thread);
4539 if (ecs->event_thread->step_after_step_resume_breakpoint)
4540 {
4541 /* Back when the step-resume breakpoint was inserted, we
4542 were trying to single-step off a breakpoint. Go back to
4543 doing that. */
4544 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4545 ecs->event_thread->stepping_over_breakpoint = 1;
4546 keep_going (ecs);
4547 return;
4548 }
4549 break;
4550
4551 case BPSTAT_WHAT_KEEP_CHECKING:
4552 break;
4553 }
4554
4555 /* We come here if we hit a breakpoint but should not stop for it.
4556 Possibly we also were stepping and should stop for that. So fall
4557 through and test for stepping. But, if not stepping, do not
4558 stop. */
4559
4560 /* In all-stop mode, if we're currently stepping but have stopped in
4561 some other thread, we need to switch back to the stepped thread. */
4562 if (switch_back_to_stepped_thread (ecs))
4563 return;
4564
4565 if (ecs->event_thread->control.step_resume_breakpoint)
4566 {
4567 if (debug_infrun)
4568 fprintf_unfiltered (gdb_stdlog,
4569 "infrun: step-resume breakpoint is inserted\n");
4570
4571 /* Having a step-resume breakpoint overrides anything
4572 else having to do with stepping commands until
4573 that breakpoint is reached. */
4574 keep_going (ecs);
4575 return;
4576 }
4577
4578 if (ecs->event_thread->control.step_range_end == 0)
4579 {
4580 if (debug_infrun)
4581 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4582 /* Likewise if we aren't even stepping. */
4583 keep_going (ecs);
4584 return;
4585 }
4586
4587 /* Re-fetch current thread's frame in case the code above caused
4588 the frame cache to be re-initialized, making our FRAME variable
4589 a dangling pointer. */
4590 frame = get_current_frame ();
4591 gdbarch = get_frame_arch (frame);
4592 fill_in_stop_func (gdbarch, ecs);
4593
4594 /* If stepping through a line, keep going if still within it.
4595
4596 Note that step_range_end is the address of the first instruction
4597 beyond the step range, and NOT the address of the last instruction
4598 within it!
4599
4600 Note also that during reverse execution, we may be stepping
4601 through a function epilogue and therefore must detect when
4602 the current-frame changes in the middle of a line. */
4603
4604 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4605 && (execution_direction != EXEC_REVERSE
4606 || frame_id_eq (get_frame_id (frame),
4607 ecs->event_thread->control.step_frame_id)))
4608 {
4609 if (debug_infrun)
4610 fprintf_unfiltered
4611 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4612 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4613 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4614
4615 /* Tentatively re-enable range stepping; `resume' disables it if
4616 necessary (e.g., if we're stepping over a breakpoint or we
4617 have software watchpoints). */
4618 ecs->event_thread->control.may_range_step = 1;
4619
4620 /* When stepping backward, stop at beginning of line range
4621 (unless it's the function entry point, in which case
4622 keep going back to the call point). */
4623 if (stop_pc == ecs->event_thread->control.step_range_start
4624 && stop_pc != ecs->stop_func_start
4625 && execution_direction == EXEC_REVERSE)
4626 {
4627 ecs->event_thread->control.stop_step = 1;
4628 print_end_stepping_range_reason ();
4629 stop_stepping (ecs);
4630 }
4631 else
4632 keep_going (ecs);
4633
4634 return;
4635 }
4636
4637 /* We stepped out of the stepping range. */
4638
4639 /* If we are stepping at the source level and entered the runtime
4640 loader dynamic symbol resolution code...
4641
4642 EXEC_FORWARD: we keep on single stepping until we exit the run
4643 time loader code and reach the callee's address.
4644
4645 EXEC_REVERSE: we've already executed the callee (backward), and
4646 the runtime loader code is handled just like any other
4647 undebuggable function call. Now we need only keep stepping
4648 backward through the trampoline code, and that's handled further
4649 down, so there is nothing for us to do here. */
4650
4651 if (execution_direction != EXEC_REVERSE
4652 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4653 && in_solib_dynsym_resolve_code (stop_pc))
4654 {
4655 CORE_ADDR pc_after_resolver =
4656 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4657
4658 if (debug_infrun)
4659 fprintf_unfiltered (gdb_stdlog,
4660 "infrun: stepped into dynsym resolve code\n");
4661
4662 if (pc_after_resolver)
4663 {
4664 /* Set up a step-resume breakpoint at the address
4665 indicated by SKIP_SOLIB_RESOLVER. */
4666 struct symtab_and_line sr_sal;
4667
4668 init_sal (&sr_sal);
4669 sr_sal.pc = pc_after_resolver;
4670 sr_sal.pspace = get_frame_program_space (frame);
4671
4672 insert_step_resume_breakpoint_at_sal (gdbarch,
4673 sr_sal, null_frame_id);
4674 }
4675
4676 keep_going (ecs);
4677 return;
4678 }
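/* Editorial aside: the usual instance of the dynsym-resolve case above
   is stepping into a not-yet-resolved PLT entry, where the first call
   detours through the dynamic linker's lazy resolver (on GNU/Linux,
   typically _dl_runtime_resolve).  gdbarch_skip_solib_resolver yields
   the address where execution resumes once resolution is done, and the
   step-resume breakpoint placed above carries the "step" there.  */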
4679
4680 if (ecs->event_thread->control.step_range_end != 1
4681 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4682 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4683 && get_frame_type (frame) == SIGTRAMP_FRAME)
4684 {
4685 if (debug_infrun)
4686 fprintf_unfiltered (gdb_stdlog,
4687 "infrun: stepped into signal trampoline\n");
4688 /* The inferior, while doing a "step" or "next", has ended up in
4689 a signal trampoline (either by a signal being delivered or by
4690 the signal handler returning). Just single-step until the
4691 inferior leaves the trampoline (either by calling the handler
4692 or returning). */
4693 keep_going (ecs);
4694 return;
4695 }
4696
4697 /* If we're in the return path from a shared library trampoline,
4698 we want to proceed through the trampoline when stepping. */
4699 /* macro/2012-04-25: This needs to come before the subroutine
4700 call check below as on some targets return trampolines look
4701 like subroutine calls (MIPS16 return thunks). */
4702 if (gdbarch_in_solib_return_trampoline (gdbarch,
4703 stop_pc, ecs->stop_func_name)
4704 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4705 {
4706 /* Determine where this trampoline returns. */
4707 CORE_ADDR real_stop_pc;
4708
4709 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4710
4711 if (debug_infrun)
4712 fprintf_unfiltered (gdb_stdlog,
4713 "infrun: stepped into solib return tramp\n");
4714
4715 /* Only proceed through if we know where it's going. */
4716 if (real_stop_pc)
4717 {
4718 /* And put the step-breakpoint there and go until there. */
4719 struct symtab_and_line sr_sal;
4720
4721 init_sal (&sr_sal); /* initialize to zeroes */
4722 sr_sal.pc = real_stop_pc;
4723 sr_sal.section = find_pc_overlay (sr_sal.pc);
4724 sr_sal.pspace = get_frame_program_space (frame);
4725
4726 /* Do not specify what the fp should be when we stop since
4727 on some machines the prologue is where the new fp value
4728 is established. */
4729 insert_step_resume_breakpoint_at_sal (gdbarch,
4730 sr_sal, null_frame_id);
4731
4732 /* Restart without fiddling with the step ranges or
4733 other state. */
4734 keep_going (ecs);
4735 return;
4736 }
4737 }
4738
4739 /* Check for subroutine calls. The check for the current frame
4740 equalling the step ID is not necessary - the check of the
4741 previous frame's ID is sufficient - but it is a common case and
4742 cheaper than checking the previous frame's ID.
4743
4744 NOTE: frame_id_eq will never report two invalid frame IDs as
4745 being equal, so to get into this block, both the current and
4746 previous frame must have valid frame IDs. */
4747 /* The outer_frame_id check is a heuristic to detect stepping
4748 through startup code. If we step over an instruction which
4749 sets the stack pointer from an invalid value to a valid value,
4750 we may detect that as a subroutine call from the mythical
4751 "outermost" function. This could be fixed by marking
4752 outermost frames as !stack_p,code_p,special_p. Then the
4753 initial outermost frame, before sp was valid, would
4754 have code_addr == &_start. See the comment in frame_id_eq
4755 for more. */
4756 if (!frame_id_eq (get_stack_frame_id (frame),
4757 ecs->event_thread->control.step_stack_frame_id)
4758 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4759 ecs->event_thread->control.step_stack_frame_id)
4760 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4761 outer_frame_id)
4762 || step_start_function != find_pc_function (stop_pc))))
4763 {
4764 CORE_ADDR real_stop_pc;
4765
4766 if (debug_infrun)
4767 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4768
4769 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4770 || ((ecs->event_thread->control.step_range_end == 1)
4771 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4772 ecs->stop_func_start)))
4773 {
4774 /* I presume that step_over_calls is only 0 when we're
4775 supposed to be stepping at the assembly language level
4776 ("stepi"). Just stop. */
4777 /* Also, maybe we just did a "nexti" inside a prologue, so we
4778 thought it was a subroutine call but it was not. Stop as
4779 well. FENN */
4780 /* And this works the same backward as frontward. MVS */
4781 ecs->event_thread->control.stop_step = 1;
4782 print_end_stepping_range_reason ();
4783 stop_stepping (ecs);
4784 return;
4785 }
4786
4787 /* Reverse stepping through solib trampolines. */
4788
4789 if (execution_direction == EXEC_REVERSE
4790 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4791 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4792 || (ecs->stop_func_start == 0
4793 && in_solib_dynsym_resolve_code (stop_pc))))
4794 {
4795 /* Any solib trampoline code can be handled in reverse
4796 by simply continuing to single-step. We have already
4797 executed the solib function (backwards), and a few
4798 steps will take us back through the trampoline to the
4799 caller. */
4800 keep_going (ecs);
4801 return;
4802 }
4803
4804 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4805 {
4806 /* We're doing a "next".
4807
4808 Normal (forward) execution: set a breakpoint at the
4809 callee's return address (the address at which the caller
4810 will resume).
4811
4812 Reverse (backward) execution: set the step-resume
4813 breakpoint at the start of the function that we just
4814 stepped into (backwards), and continue to there. When we
4815 get there, we'll need to single-step back to the caller. */
4816
4817 if (execution_direction == EXEC_REVERSE)
4818 {
4819 /* If we're already at the start of the function, we've either
4820 just stepped backward into a single instruction function,
4821 or stepped back out of a signal handler to the first instruction
4822 of the function. Just keep going, which will single-step back
4823 to the caller. */
4824 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4825 {
4826 struct symtab_and_line sr_sal;
4827
4828 /* Normal function call return (static or dynamic). */
4829 init_sal (&sr_sal);
4830 sr_sal.pc = ecs->stop_func_start;
4831 sr_sal.pspace = get_frame_program_space (frame);
4832 insert_step_resume_breakpoint_at_sal (gdbarch,
4833 sr_sal, null_frame_id);
4834 }
4835 }
4836 else
4837 insert_step_resume_breakpoint_at_caller (frame);
4838
4839 keep_going (ecs);
4840 return;
4841 }
4842
4843 /* If we are in a function call trampoline (a stub between the
4844 calling routine and the real function), locate the real
4845 function. That's what tells us (a) whether we want to step
4846 into it at all, and (b) what prologue we want to run to the
4847 end of, if we do step into it. */
4848 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4849 if (real_stop_pc == 0)
4850 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4851 if (real_stop_pc != 0)
4852 ecs->stop_func_start = real_stop_pc;
4853
4854 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4855 {
4856 struct symtab_and_line sr_sal;
4857
4858 init_sal (&sr_sal);
4859 sr_sal.pc = ecs->stop_func_start;
4860 sr_sal.pspace = get_frame_program_space (frame);
4861
4862 insert_step_resume_breakpoint_at_sal (gdbarch,
4863 sr_sal, null_frame_id);
4864 keep_going (ecs);
4865 return;
4866 }
4867
4868 /* If we have line number information for the function we are
4869 thinking of stepping into and the function isn't on the skip
4870 list, step into it.
4871
4872 If there are several symtabs at that PC (e.g. with include
4873 files), we just want to know whether *any* of them have line
4874 numbers. find_pc_line handles this. */
4875 {
4876 struct symtab_and_line tmp_sal;
4877
4878 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4879 if (tmp_sal.line != 0
4880 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4881 &tmp_sal))
4882 {
4883 if (execution_direction == EXEC_REVERSE)
4884 handle_step_into_function_backward (gdbarch, ecs);
4885 else
4886 handle_step_into_function (gdbarch, ecs);
4887 return;
4888 }
4889 }
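/* Editorial aside: function_name_is_marked_for_skip above honours the
   user's "skip" list, e.g.:

     (gdb) skip function boring_helper    # hypothetical function name
     (gdb) step

   which makes "step" treat that function like code without line info
   and carry on past it rather than stopping inside.  */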
4890
4891 /* If we have no line number and the step-stop-if-no-debug is
4892 set, we stop the step so that the user has a chance to switch
4893 to assembly mode. */
4894 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4895 && step_stop_if_no_debug)
4896 {
4897 ecs->event_thread->control.stop_step = 1;
4898 print_end_stepping_range_reason ();
4899 stop_stepping (ecs);
4900 return;
4901 }
4902
4903 if (execution_direction == EXEC_REVERSE)
4904 {
4905 /* If we're already at the start of the function, we've either just
4906 stepped backward into a single instruction function without line
4907 number info, or stepped back out of a signal handler to the first
4908 instruction of the function without line number info. Just keep
4909 going, which will single-step back to the caller. */
4910 if (ecs->stop_func_start != stop_pc)
4911 {
4912 /* Set a breakpoint at callee's start address.
4913 From there we can step once and be back in the caller. */
4914 struct symtab_and_line sr_sal;
4915
4916 init_sal (&sr_sal);
4917 sr_sal.pc = ecs->stop_func_start;
4918 sr_sal.pspace = get_frame_program_space (frame);
4919 insert_step_resume_breakpoint_at_sal (gdbarch,
4920 sr_sal, null_frame_id);
4921 }
4922 }
4923 else
4924 /* Set a breakpoint at callee's return address (the address
4925 at which the caller will resume). */
4926 insert_step_resume_breakpoint_at_caller (frame);
4927
4928 keep_going (ecs);
4929 return;
4930 }
4931
4932 /* Reverse stepping through solib trampolines. */
4933
4934 if (execution_direction == EXEC_REVERSE
4935 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4936 {
4937 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4938 || (ecs->stop_func_start == 0
4939 && in_solib_dynsym_resolve_code (stop_pc)))
4940 {
4941 /* Any solib trampoline code can be handled in reverse
4942 by simply continuing to single-step. We have already
4943 executed the solib function (backwards), and a few
4944 steps will take us back through the trampoline to the
4945 caller. */
4946 keep_going (ecs);
4947 return;
4948 }
4949 else if (in_solib_dynsym_resolve_code (stop_pc))
4950 {
4951 /* Stepped backward into the solib dynsym resolver.
4952 Set a breakpoint at its start and continue, then
4953 one more step will take us out. */
4954 struct symtab_and_line sr_sal;
4955
4956 init_sal (&sr_sal);
4957 sr_sal.pc = ecs->stop_func_start;
4958 sr_sal.pspace = get_frame_program_space (frame);
4959 insert_step_resume_breakpoint_at_sal (gdbarch,
4960 sr_sal, null_frame_id);
4961 keep_going (ecs);
4962 return;
4963 }
4964 }
4965
4966 stop_pc_sal = find_pc_line (stop_pc, 0);
4967
4968 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4969 the trampoline processing logic; however, there are some trampolines
4970 that have no names, so we should do trampoline handling first. */
4971 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4972 && ecs->stop_func_name == NULL
4973 && stop_pc_sal.line == 0)
4974 {
4975 if (debug_infrun)
4976 fprintf_unfiltered (gdb_stdlog,
4977 "infrun: stepped into undebuggable function\n");
4978
4979 /* The inferior just stepped into, or returned to, an
4980 undebuggable function (where there is no debugging information
4981 and no line number corresponding to the address where the
4982 inferior stopped). Since we want to skip this kind of code,
4983 we keep going until the inferior returns from this
4984 function - unless the user has asked us not to (via
4985 set step-mode) or we no longer know how to get back
4986 to the call site. */
4987 if (step_stop_if_no_debug
4988 || !frame_id_p (frame_unwind_caller_id (frame)))
4989 {
4990 /* If we have no line number and the step-stop-if-no-debug
4991 is set, we stop the step so that the user has a chance to
4992 switch to assembly mode. */
4993 ecs->event_thread->control.stop_step = 1;
4994 print_end_stepping_range_reason ();
4995 stop_stepping (ecs);
4996 return;
4997 }
4998 else
4999 {
5000 /* Set a breakpoint at callee's return address (the address
5001 at which the caller will resume). */
5002 insert_step_resume_breakpoint_at_caller (frame);
5003 keep_going (ecs);
5004 return;
5005 }
5006 }
5007
5008 if (ecs->event_thread->control.step_range_end == 1)
5009 {
5010 /* It is stepi or nexti. We always want to stop stepping after
5011 one instruction. */
5012 if (debug_infrun)
5013 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5014 ecs->event_thread->control.stop_step = 1;
5015 print_end_stepping_range_reason ();
5016 stop_stepping (ecs);
5017 return;
5018 }
5019
5020 if (stop_pc_sal.line == 0)
5021 {
5022 /* We have no line number information. That means to stop
5023 stepping (does this always happen right after one instruction,
5024 when we do "s" in a function with no line numbers,
5025 or can this happen as a result of a return or longjmp?). */
5026 if (debug_infrun)
5027 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5028 ecs->event_thread->control.stop_step = 1;
5029 print_end_stepping_range_reason ();
5030 stop_stepping (ecs);
5031 return;
5032 }
5033
5034 /* Look for "calls" to inlined functions, part one. If the inline
5035 frame machinery detected some skipped call sites, we have entered
5036 a new inline function. */
5037
5038 if (frame_id_eq (get_frame_id (get_current_frame ()),
5039 ecs->event_thread->control.step_frame_id)
5040 && inline_skipped_frames (ecs->ptid))
5041 {
5042 struct symtab_and_line call_sal;
5043
5044 if (debug_infrun)
5045 fprintf_unfiltered (gdb_stdlog,
5046 "infrun: stepped into inlined function\n");
5047
5048 find_frame_sal (get_current_frame (), &call_sal);
5049
5050 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5051 {
5052 /* For "step", we're going to stop. But if the call site
5053 for this inlined function is on the same source line as
5054 we were previously stepping, go down into the function
5055 first. Otherwise stop at the call site. */
5056
5057 if (call_sal.line == ecs->event_thread->current_line
5058 && call_sal.symtab == ecs->event_thread->current_symtab)
5059 step_into_inline_frame (ecs->ptid);
5060
5061 ecs->event_thread->control.stop_step = 1;
5062 print_end_stepping_range_reason ();
5063 stop_stepping (ecs);
5064 return;
5065 }
5066 else
5067 {
5068 /* For "next", we should stop at the call site if it is on a
5069 different source line. Otherwise continue through the
5070 inlined function. */
5071 if (call_sal.line == ecs->event_thread->current_line
5072 && call_sal.symtab == ecs->event_thread->current_symtab)
5073 keep_going (ecs);
5074 else
5075 {
5076 ecs->event_thread->control.stop_step = 1;
5077 print_end_stepping_range_reason ();
5078 stop_stepping (ecs);
5079 }
5080 return;
5081 }
5082 }
5083
5084 /* Look for "calls" to inlined functions, part two. If we are still
5085 in the same real function we were stepping through, but we have
5086 to go further up to find the exact frame ID, we are stepping
5087 through a more inlined call beyond its call site. */
5088
5089 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5090 && !frame_id_eq (get_frame_id (get_current_frame ()),
5091 ecs->event_thread->control.step_frame_id)
5092 && stepped_in_from (get_current_frame (),
5093 ecs->event_thread->control.step_frame_id))
5094 {
5095 if (debug_infrun)
5096 fprintf_unfiltered (gdb_stdlog,
5097 "infrun: stepping through inlined function\n");
5098
5099 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5100 keep_going (ecs);
5101 else
5102 {
5103 ecs->event_thread->control.stop_step = 1;
5104 print_end_stepping_range_reason ();
5105 stop_stepping (ecs);
5106 }
5107 return;
5108 }
5109
5110 if ((stop_pc == stop_pc_sal.pc)
5111 && (ecs->event_thread->current_line != stop_pc_sal.line
5112 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5113 {
5114 /* We are at the start of a different line. So stop. Note that
5115 we don't stop if we step into the middle of a different line.
5116 That is said to make things like for (;;) statements work
5117 better. */
5118 if (debug_infrun)
5119 fprintf_unfiltered (gdb_stdlog,
5120 "infrun: stepped to a different line\n");
5121 ecs->event_thread->control.stop_step = 1;
5122 print_end_stepping_range_reason ();
5123 stop_stepping (ecs);
5124 return;
5125 }
5126
5127 /* We aren't done stepping.
5128
5129 Optimize by setting the stepping range to the line.
5130 (We might not be in the original line, but if we entered a
5131 new line in mid-statement, we continue stepping. This makes
5132 things like for(;;) statements work better.) */
5133
5134 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5135 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5136 ecs->event_thread->control.may_range_step = 1;
5137 set_step_info (frame, stop_pc_sal);
5138
5139 if (debug_infrun)
5140 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5141 keep_going (ecs);
5142 }
5143
5144 /* In all-stop mode, if we're currently stepping but have stopped in
5145 some other thread, we may need to switch back to the stepped
5146 thread. Returns true if we set the inferior running, false if we left
5147 it stopped (and the event needs further processing). */
5148
5149 static int
5150 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5151 {
5152 if (!non_stop)
5153 {
5154 struct thread_info *tp;
5155 struct thread_info *stepping_thread;
5156 struct thread_info *step_over;
5157
5158 /* If any thread is blocked on some internal breakpoint, and we
5159 simply need to step over that breakpoint to get it going
5160 again, do that first. */
5161
5162 /* However, if we see an event for the stepping thread, then we
5163 know all other threads have been moved past their breakpoints
5164 already. Let the caller check whether the step is finished,
5165 etc., before deciding to move it past a breakpoint. */
5166 if (ecs->event_thread->control.step_range_end != 0)
5167 return 0;
5168
5169 /* Check if the current thread is blocked on an incomplete
5170 step-over, interrupted by a random signal. */
5171 if (ecs->event_thread->control.trap_expected
5172 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5173 {
5174 if (debug_infrun)
5175 {
5176 fprintf_unfiltered (gdb_stdlog,
5177 "infrun: need to finish step-over of [%s]\n",
5178 target_pid_to_str (ecs->event_thread->ptid));
5179 }
5180 keep_going (ecs);
5181 return 1;
5182 }
5183
5184 /* Check if the current thread is blocked by a single-step
5185 breakpoint of another thread. */
5186 if (ecs->hit_singlestep_breakpoint)
5187 {
5188 if (debug_infrun)
5189 {
5190 fprintf_unfiltered (gdb_stdlog,
5191 "infrun: need to step [%s] over single-step "
5192 "breakpoint\n",
5193 target_pid_to_str (ecs->ptid));
5194 }
5195 keep_going (ecs);
5196 return 1;
5197 }
5198
5199 /* Otherwise, we no longer expect a trap in the current thread.
5200 Clear the trap_expected flag before switching back -- this is
5201 what keep_going does as well, if we call it. */
5202 ecs->event_thread->control.trap_expected = 0;
5203
5204 /* If scheduler locking applies even if not stepping, there's no
5205 need to walk over threads. Above we've checked whether the
5206 current thread is stepping. If some thread other than the
5207 event thread is stepping, then it must be that scheduler
5208 locking is not in effect. */
5209 if (schedlock_applies (0))
5210 return 0;
5211
5212 /* Look for the stepping/nexting thread, and check if any
5213 thread other than the stepping thread needs to start a
5214 step-over. Do all step-overs before actually proceeding with
5215 step/next/etc. */
5216 stepping_thread = NULL;
5217 step_over = NULL;
5218 ALL_THREADS (tp)
5219 {
5220 /* Ignore threads of processes we're not resuming. */
5221 if (!sched_multi
5222 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5223 continue;
5224
5225 /* When stepping over a breakpoint, we lock all threads
5226 except the one that needs to move past the breakpoint.
5227 If a non-event thread has this set, the "incomplete
5228 step-over" check above should have caught it earlier. */
5229 gdb_assert (!tp->control.trap_expected);
5230
5231 /* Did we find the stepping thread? */
5232 if (tp->control.step_range_end)
5233 {
5234 /* Yep. There should only be one, though. */
5235 gdb_assert (stepping_thread == NULL);
5236
5237 /* The event thread is handled at the top, before we
5238 enter this loop. */
5239 gdb_assert (tp != ecs->event_thread);
5240
5241 /* If some thread other than the event thread is
5242 stepping, then scheduler locking can't be in effect,
5243 otherwise we wouldn't have resumed the current event
5244 thread in the first place. */
5245 gdb_assert (!schedlock_applies (1));
5246
5247 stepping_thread = tp;
5248 }
5249 else if (thread_still_needs_step_over (tp))
5250 {
5251 step_over = tp;
5252
5253 /* At the top we've returned early if the event thread
5254 is stepping. If some thread other than the event
5255 thread is stepping, then scheduler locking can't be
5256 in effect, and we can resume this thread. No need to
5257 keep looking for the stepping thread then. */
5258 break;
5259 }
5260 }
5261
5262 if (step_over != NULL)
5263 {
5264 tp = step_over;
5265 if (debug_infrun)
5266 {
5267 fprintf_unfiltered (gdb_stdlog,
5268 "infrun: need to step-over [%s]\n",
5269 target_pid_to_str (tp->ptid));
5270 }
5271
5272 /* Only the stepping thread should have this set. */
5273 gdb_assert (tp->control.step_range_end == 0);
5274
5275 ecs->ptid = tp->ptid;
5276 ecs->event_thread = tp;
5277 switch_to_thread (ecs->ptid);
5278 keep_going (ecs);
5279 return 1;
5280 }
5281
5282 if (stepping_thread != NULL)
5283 {
5284 struct frame_info *frame;
5285 struct gdbarch *gdbarch;
5286
5287 tp = stepping_thread;
5288
5289 /* If the stepping thread exited, then don't try to switch
5290 back and resume it, which could fail in several different
5291 ways depending on the target. Instead, just keep going.
5292
5293 We can find a stepping dead thread in the thread list in
5294 two cases:
5295
5296 - The target supports thread exit events, and when the
5297 target tries to delete the thread from the thread list,
5298 inferior_ptid pointed at the exiting thread. In that
5299 case, calling delete_thread does not really remove the
5300 thread from the list; instead, the thread is left listed,
5301 with 'exited' state.
5302
5303 - The target's debug interface does not support thread
5304 exit events, and so we have no idea whatsoever if the
5305 previously stepping thread is still alive. For that
5306 reason, we need to synchronously query the target
5307 now. */
5308 if (is_exited (tp->ptid)
5309 || !target_thread_alive (tp->ptid))
5310 {
5311 if (debug_infrun)
5312 fprintf_unfiltered (gdb_stdlog,
5313 "infrun: not switching back to "
5314 "stepped thread, it has vanished\n");
5315
5316 delete_thread (tp->ptid);
5317 keep_going (ecs);
5318 return 1;
5319 }
5320
5321 if (debug_infrun)
5322 fprintf_unfiltered (gdb_stdlog,
5323 "infrun: switching back to stepped thread\n");
5324
5325 ecs->event_thread = tp;
5326 ecs->ptid = tp->ptid;
5327 context_switch (ecs->ptid);
5328
5329 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5330 frame = get_current_frame ();
5331 gdbarch = get_frame_arch (frame);
5332
5333 /* If the PC of the thread we were trying to single-step has
5334 changed, then that thread has trapped or been signaled,
5335 but the event has not been reported to GDB yet. Re-poll
5336 the target looking for this particular thread's event
5337 (i.e. temporarily enable schedlock) by:
5338
5339 - setting a break at the current PC
5340 - resuming that particular thread, only (by setting
5341 trap expected)
5342
5343 This prevents us from continuously moving the single-step
5344 breakpoint forward, one instruction at a time,
5345 overstepping. */
5346
5347 if (gdbarch_software_single_step_p (gdbarch)
5348 && stop_pc != tp->prev_pc)
5349 {
5350 if (debug_infrun)
5351 fprintf_unfiltered (gdb_stdlog,
5352 "infrun: expected thread advanced also\n");
5353
5354 insert_single_step_breakpoint (get_frame_arch (frame),
5355 get_frame_address_space (frame),
5356 stop_pc);
5357 singlestep_breakpoints_inserted_p = 1;
5358 ecs->event_thread->control.trap_expected = 1;
5359 singlestep_ptid = inferior_ptid;
5360 singlestep_pc = stop_pc;
5361
5362 resume (0, GDB_SIGNAL_0);
5363 prepare_to_wait (ecs);
5364 }
5365 else
5366 {
5367 if (debug_infrun)
5368 fprintf_unfiltered (gdb_stdlog,
5369 "infrun: expected thread still "
5370 "hasn't advanced\n");
5371 keep_going (ecs);
5372 }
5373
5374 return 1;
5375 }
5376 }
5377 return 0;
5378 }
5379
5380 /* Is thread TP in the middle of single-stepping? */
5381
5382 static int
5383 currently_stepping (struct thread_info *tp)
5384 {
5385 return ((tp->control.step_range_end
5386 && tp->control.step_resume_breakpoint == NULL)
5387 || tp->control.trap_expected
5388 || bpstat_should_step ());
5389 }
5390
5391 /* Inferior has stepped into a subroutine call with source code that
5392 we should not step over. Do step to the first line of code in
5393 it. */
5394
5395 static void
5396 handle_step_into_function (struct gdbarch *gdbarch,
5397 struct execution_control_state *ecs)
5398 {
5399 struct symtab *s;
5400 struct symtab_and_line stop_func_sal, sr_sal;
5401
5402 fill_in_stop_func (gdbarch, ecs);
5403
5404 s = find_pc_symtab (stop_pc);
5405 if (s && s->language != language_asm)
5406 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5407 ecs->stop_func_start);
5408
5409 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5410 /* Use the step_resume_break to step until the end of the prologue,
5411 even if that involves jumps (as it seems to on the vax under
5412 4.2). */
5413 /* If the prologue ends in the middle of a source line, continue to
5414 the end of that source line (if it is still within the function).
5415 Otherwise, just go to end of prologue. */
5416 if (stop_func_sal.end
5417 && stop_func_sal.pc != ecs->stop_func_start
5418 && stop_func_sal.end < ecs->stop_func_end)
5419 ecs->stop_func_start = stop_func_sal.end;
5420
5421 /* Architectures which require breakpoint adjustment might not be able
5422 to place a breakpoint at the computed address. If so, the test
5423 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5424 ecs->stop_func_start to an address at which a breakpoint may be
5425 legitimately placed.
5426
5427 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5428 made, GDB will enter an infinite loop when stepping through
5429 optimized code consisting of VLIW instructions which contain
5430 subinstructions corresponding to different source lines. On
5431 FR-V, it's not permitted to place a breakpoint on any but the
5432 first subinstruction of a VLIW instruction. When a breakpoint is
5433 set, GDB will adjust the breakpoint address to the beginning of
5434 the VLIW instruction. Thus, we need to make the corresponding
5435 adjustment here when computing the stop address. */
5436
5437 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5438 {
5439 ecs->stop_func_start
5440 = gdbarch_adjust_breakpoint_address (gdbarch,
5441 ecs->stop_func_start);
5442 }
5443
5444 if (ecs->stop_func_start == stop_pc)
5445 {
5446 /* We are already there: stop now. */
5447 ecs->event_thread->control.stop_step = 1;
5448 print_end_stepping_range_reason ();
5449 stop_stepping (ecs);
5450 return;
5451 }
5452 else
5453 {
5454 /* Put the step-breakpoint there and go until there. */
5455 init_sal (&sr_sal); /* initialize to zeroes */
5456 sr_sal.pc = ecs->stop_func_start;
5457 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5458 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5459
5460 /* Do not specify what the fp should be when we stop since on
5461 some machines the prologue is where the new fp value is
5462 established. */
5463 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5464
5465 /* And make sure stepping stops right away then. */
5466 ecs->event_thread->control.step_range_end
5467 = ecs->event_thread->control.step_range_start;
5468 }
5469 keep_going (ecs);
5470 }
5471
5472 /* Inferior has stepped backward into a subroutine call with source
5473 code that we should not step over. Do step to the beginning of the
5474 last line of code in it. */
5475
5476 static void
5477 handle_step_into_function_backward (struct gdbarch *gdbarch,
5478 struct execution_control_state *ecs)
5479 {
5480 struct symtab *s;
5481 struct symtab_and_line stop_func_sal;
5482
5483 fill_in_stop_func (gdbarch, ecs);
5484
5485 s = find_pc_symtab (stop_pc);
5486 if (s && s->language != language_asm)
5487 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5488 ecs->stop_func_start);
5489
5490 stop_func_sal = find_pc_line (stop_pc, 0);
5491
5492 /* OK, we're just going to keep stepping here. */
5493 if (stop_func_sal.pc == stop_pc)
5494 {
5495 /* We're there already. Just stop stepping now. */
5496 ecs->event_thread->control.stop_step = 1;
5497 print_end_stepping_range_reason ();
5498 stop_stepping (ecs);
5499 }
5500 else
5501 {
5502 /* Else just reset the step range and keep going.
5503 No step-resume breakpoint, they don't work for
5504 epilogues, which can have multiple entry paths. */
5505 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5506 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5507 keep_going (ecs);
5508 }
5509 return;
5510 }
5511
5512 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5513 This is used both to step over functions and to skip over other code. */
5514
5515 static void
5516 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5517 struct symtab_and_line sr_sal,
5518 struct frame_id sr_id,
5519 enum bptype sr_type)
5520 {
5521 /* There should never be more than one step-resume or longjmp-resume
5522 breakpoint per thread, so we should never be setting a new
5523 step_resume_breakpoint when one is already active. */
5524 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5525 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5526
5527 if (debug_infrun)
5528 fprintf_unfiltered (gdb_stdlog,
5529 "infrun: inserting step-resume breakpoint at %s\n",
5530 paddress (gdbarch, sr_sal.pc));
5531
5532 inferior_thread ()->control.step_resume_breakpoint
5533 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5534 }
5535
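/* Insert an ordinary (bp_step_resume) step-resume breakpoint at SR_SAL
with frame ID SR_ID. */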
5536 void
5537 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5538 struct symtab_and_line sr_sal,
5539 struct frame_id sr_id)
5540 {
5541 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5542 sr_sal, sr_id,
5543 bp_step_resume);
5544 }
5545
5546 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5547 This is used to skip a potential signal handler.
5548
5549 This is called with the interrupted function's frame. The signal
5550 handler, when it returns, will resume the interrupted function at
5551 RETURN_FRAME.pc. */
5552
5553 static void
5554 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5555 {
5556 struct symtab_and_line sr_sal;
5557 struct gdbarch *gdbarch;
5558
5559 gdb_assert (return_frame != NULL);
5560 init_sal (&sr_sal); /* initialize to zeros */
5561
5562 gdbarch = get_frame_arch (return_frame);
5563 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5564 sr_sal.section = find_pc_overlay (sr_sal.pc);
5565 sr_sal.pspace = get_frame_program_space (return_frame);
5566
5567 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5568 get_stack_frame_id (return_frame),
5569 bp_hp_step_resume);
5570 }
5571
5572 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5573 is used to skip a function after stepping into it (for "next" or if
5574 the called function has no debugging information).
5575
5576 The current function has almost always been reached by single
5577 stepping a call or return instruction. NEXT_FRAME belongs to the
5578 current function, and the breakpoint will be set at the caller's
5579 resume address.
5580
5581 This is a separate function rather than reusing
5582 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5583 get_prev_frame, which may stop prematurely (see the implementation
5584 of frame_unwind_caller_id for an example). */
5585
5586 static void
5587 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5588 {
5589 struct symtab_and_line sr_sal;
5590 struct gdbarch *gdbarch;
5591
5592 /* We shouldn't have gotten here if we don't know where the call site
5593 is. */
5594 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5595
5596 init_sal (&sr_sal); /* initialize to zeros */
5597
5598 gdbarch = frame_unwind_caller_arch (next_frame);
5599 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5600 frame_unwind_caller_pc (next_frame));
5601 sr_sal.section = find_pc_overlay (sr_sal.pc);
5602 sr_sal.pspace = frame_unwind_program_space (next_frame);
5603
5604 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5605 frame_unwind_caller_id (next_frame));
5606 }
5607
5608 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5609 new breakpoint at the target of a jmp_buf. The handling of
5610 longjmp-resume uses the same mechanisms used for handling
5611 "step-resume" breakpoints. */
5612
5613 static void
5614 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5615 {
5616 /* There should never be more than one longjmp-resume breakpoint per
5617 thread, so we should never be setting a new
5618 longjmp_resume_breakpoint when one is already active. */
5619 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5620
5621 if (debug_infrun)
5622 fprintf_unfiltered (gdb_stdlog,
5623 "infrun: inserting longjmp-resume breakpoint at %s\n",
5624 paddress (gdbarch, pc));
5625
5626 inferior_thread ()->control.exception_resume_breakpoint =
5627 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5628 }
5629
5630 /* Insert an exception resume breakpoint. TP is the thread throwing
5631 the exception. The block B is the block of the unwinder debug hook
5632 function. FRAME is the frame corresponding to the call to this
5633 function. SYM is the symbol of the function argument holding the
5634 target PC of the exception. */
5635
5636 static void
5637 insert_exception_resume_breakpoint (struct thread_info *tp,
5638 struct block *b,
5639 struct frame_info *frame,
5640 struct symbol *sym)
5641 {
5642 volatile struct gdb_exception e;
5643
5644 /* We want to ignore errors here. */
5645 TRY_CATCH (e, RETURN_MASK_ERROR)
5646 {
5647 struct symbol *vsym;
5648 struct value *value;
5649 CORE_ADDR handler;
5650 struct breakpoint *bp;
5651
5652 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5653 value = read_var_value (vsym, frame);
5654 /* If the value was optimized out, revert to the old behavior. */
5655 if (! value_optimized_out (value))
5656 {
5657 handler = value_as_address (value);
5658
5659 if (debug_infrun)
5660 fprintf_unfiltered (gdb_stdlog,
5661 "infrun: exception resume at %lx\n",
5662 (unsigned long) handler);
5663
5664 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5665 handler, bp_exception_resume);
5666
5667 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5668 frame = NULL;
5669
5670 bp->thread = tp->num;
5671 inferior_thread ()->control.exception_resume_breakpoint = bp;
5672 }
5673 }
5674 }
5675
5676 /* A helper for check_exception_resume that sets an
5677 exception-breakpoint based on a SystemTap probe. */
5678
5679 static void
5680 insert_exception_resume_from_probe (struct thread_info *tp,
5681 const struct bound_probe *probe,
5682 struct frame_info *frame)
5683 {
5684 struct value *arg_value;
5685 CORE_ADDR handler;
5686 struct breakpoint *bp;
5687
5688 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5689 if (!arg_value)
5690 return;
5691
5692 handler = value_as_address (arg_value);
5693
5694 if (debug_infrun)
5695 fprintf_unfiltered (gdb_stdlog,
5696 "infrun: exception resume at %s\n",
5697 paddress (get_objfile_arch (probe->objfile),
5698 handler));
5699
5700 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5701 handler, bp_exception_resume);
5702 bp->thread = tp->num;
5703 inferior_thread ()->control.exception_resume_breakpoint = bp;
5704 }
5705
5706 /* This is called when an exception has been intercepted. Check to
5707 see whether the exception's destination is of interest, and if so,
5708 set an exception resume breakpoint there. */
5709
5710 static void
5711 check_exception_resume (struct execution_control_state *ecs,
5712 struct frame_info *frame)
5713 {
5714 volatile struct gdb_exception e;
5715 struct bound_probe probe;
5716 struct symbol *func;
5717
5718 /* First see if this exception unwinding breakpoint was set via a
5719 SystemTap probe point. If so, the probe has two arguments: the
5720 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5721 set a breakpoint there. */
5722 probe = find_probe_by_pc (get_frame_pc (frame));
5723 if (probe.probe)
5724 {
5725 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5726 return;
5727 }
5728
5729 func = get_frame_function (frame);
5730 if (!func)
5731 return;
5732
5733 TRY_CATCH (e, RETURN_MASK_ERROR)
5734 {
5735 struct block *b;
5736 struct block_iterator iter;
5737 struct symbol *sym;
5738 int argno = 0;
5739
5740 /* The exception breakpoint is a thread-specific breakpoint on
5741 the unwinder's debug hook, declared as:
5742
5743 void _Unwind_DebugHook (void *cfa, void *handler);
5744
5745 The CFA argument indicates the frame to which control is
5746 about to be transferred. HANDLER is the destination PC.
5747
5748 We ignore the CFA and set a temporary breakpoint at HANDLER.
5749 This is not extremely efficient but it avoids issues in gdb
5750 with computing the DWARF CFA, and it also works even in weird
5751 cases such as throwing an exception from inside a signal
5752 handler. */
5753
5754 b = SYMBOL_BLOCK_VALUE (func);
5755 ALL_BLOCK_SYMBOLS (b, iter, sym)
5756 {
5757 if (!SYMBOL_IS_ARGUMENT (sym))
5758 continue;
5759
5760 if (argno == 0)
5761 ++argno;
5762 else
5763 {
5764 insert_exception_resume_breakpoint (ecs->event_thread,
5765 b, frame, sym);
5766 break;
5767 }
5768 }
5769 }
5770 }
5771
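/* Stop the step/next state machine: discard any pending step-over
state and tell the caller not to wait for further events from the
inferior. */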
5772 static void
5773 stop_stepping (struct execution_control_state *ecs)
5774 {
5775 if (debug_infrun)
5776 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5777
5778 clear_step_over_info ();
5779
5780 /* Let callers know we don't want to wait for the inferior anymore. */
5781 ecs->wait_some_more = 0;
5782 }
5783
5784 /* Called when we should continue running the inferior, because the
5785 current event doesn't cause a user-visible stop. This does the
5786 resuming part; waiting for the next event is done elsewhere. */
5787
5788 static void
5789 keep_going (struct execution_control_state *ecs)
5790 {
5791 /* Make sure normal_stop is called if we get a QUIT handled before
5792 reaching resume. */
5793 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5794
5795 /* Save the pc before execution, to compare with pc after stop. */
5796 ecs->event_thread->prev_pc
5797 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5798
5799 if (ecs->event_thread->control.trap_expected
5800 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5801 {
5802 /* We haven't yet gotten our trap, and either: intercepted a
5803 non-signal event (e.g., a fork); or took a signal which we
5804 are supposed to pass through to the inferior. Simply
5805 continue. */
5806 discard_cleanups (old_cleanups);
5807 resume (currently_stepping (ecs->event_thread),
5808 ecs->event_thread->suspend.stop_signal);
5809 }
5810 else
5811 {
5812 volatile struct gdb_exception e;
5813 struct regcache *regcache = get_current_regcache ();
5814
5815 /* Either the trap was not expected, but we are continuing
5816 anyway (if we got a signal, the user asked it be passed to
5817 the child)
5818 -- or --
5819 We got our expected trap, but decided we should resume from
5820 it.
5821
5822 We're going to run this baby now!
5823
5824 Note that insert_breakpoints won't try to re-insert
5825 already inserted breakpoints. Therefore, we don't
5826 care if breakpoints were already inserted, or not. */
5827
5828 /* If we need to step over a breakpoint, and we're not using
5829 displaced stepping to do so, insert all breakpoints
5830 (watchpoints, etc.) but the one we're stepping over, step one
5831 instruction, and then re-insert the breakpoint when that step
5832 is finished. */
5833 if ((ecs->hit_singlestep_breakpoint
5834 || thread_still_needs_step_over (ecs->event_thread))
5835 && !use_displaced_stepping (get_regcache_arch (regcache)))
5836 {
5837 set_step_over_info (get_regcache_aspace (regcache),
5838 regcache_read_pc (regcache));
5839 }
5840 else
5841 clear_step_over_info ();
5842
5843 /* Stop stepping if inserting breakpoints fails. */
5844 TRY_CATCH (e, RETURN_MASK_ERROR)
5845 {
5846 insert_breakpoints ();
5847 }
5848 if (e.reason < 0)
5849 {
5850 exception_print (gdb_stderr, e);
5851 stop_stepping (ecs);
5852 return;
5853 }
5854
5855 ecs->event_thread->control.trap_expected
5856 = (ecs->event_thread->stepping_over_breakpoint
5857 || ecs->hit_singlestep_breakpoint);
5858
5859 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5860 explicitly specifies that such a signal should be delivered
5861 to the target program). Typically, that would occur when a
5862 user is debugging a target monitor on a simulator: the target
5863 monitor sets a breakpoint; the simulator encounters this
5864 breakpoint and halts the simulation handing control to GDB;
5865 GDB, noting that the stop address doesn't map to any known
5866 breakpoint, returns control back to the simulator; the
5867 simulator then delivers the hardware equivalent of a
5868 GDB_SIGNAL_TRAP to the program being debugged. */
5869 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5870 && !signal_program[ecs->event_thread->suspend.stop_signal])
5871 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5872
5873 discard_cleanups (old_cleanups);
5874 resume (currently_stepping (ecs->event_thread),
5875 ecs->event_thread->suspend.stop_signal);
5876 }
5877
5878 prepare_to_wait (ecs);
5879 }
5880
5881 /* This function normally comes after a resume, before
5882 handle_inferior_event exits. It takes care of any last bits of
5883 housekeeping, and sets the all-important wait_some_more flag. */
5884
5885 static void
5886 prepare_to_wait (struct execution_control_state *ecs)
5887 {
5888 if (debug_infrun)
5889 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5890
5891 /* This is the old end of the while loop. Let everybody know we
5892 want to wait for the inferior some more and get called again
5893 soon. */
5894 ecs->wait_some_more = 1;
5895 }
5896
5897 /* Several print_*_reason functions to print why the inferior has stopped.
5898 We always print something when the inferior exits, or receives a signal.
5899 The rest of the cases are dealt with later on in normal_stop and
5900 print_it_typical. Ideally there should be a call to one of these
5901 print_*_reason functions from handle_inferior_event each time
5902 stop_stepping is called. */
5903
5904 /* We are done with a step/next/si/ni command, print why the inferior has
5906 stopped. For now print nothing. Print a message only if not in the middle
5907 of doing a "step n" operation for n > 1. */
5908
5909 static void
5910 print_end_stepping_range_reason (void)
5911 {
5912 if ((!inferior_thread ()->step_multi
5913 || !inferior_thread ()->control.stop_step)
5914 && ui_out_is_mi_like_p (current_uiout))
5915 ui_out_field_string (current_uiout, "reason",
5916 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5917 }
5918
5919 /* The inferior was terminated by a signal, print why it stopped. */
5920
5921 static void
5922 print_signal_exited_reason (enum gdb_signal siggnal)
5923 {
5924 struct ui_out *uiout = current_uiout;
5925
5926 annotate_signalled ();
5927 if (ui_out_is_mi_like_p (uiout))
5928 ui_out_field_string
5929 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5930 ui_out_text (uiout, "\nProgram terminated with signal ");
5931 annotate_signal_name ();
5932 ui_out_field_string (uiout, "signal-name",
5933 gdb_signal_to_name (siggnal));
5934 annotate_signal_name_end ();
5935 ui_out_text (uiout, ", ");
5936 annotate_signal_string ();
5937 ui_out_field_string (uiout, "signal-meaning",
5938 gdb_signal_to_string (siggnal));
5939 annotate_signal_string_end ();
5940 ui_out_text (uiout, ".\n");
5941 ui_out_text (uiout, "The program no longer exists.\n");
5942 }
5943
5944 /* The inferior program is finished, print why it stopped. */
5945
5946 static void
5947 print_exited_reason (int exitstatus)
5948 {
5949 struct inferior *inf = current_inferior ();
5950 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5951 struct ui_out *uiout = current_uiout;
5952
5953 annotate_exited (exitstatus);
5954 if (exitstatus)
5955 {
5956 if (ui_out_is_mi_like_p (uiout))
5957 ui_out_field_string (uiout, "reason",
5958 async_reason_lookup (EXEC_ASYNC_EXITED));
5959 ui_out_text (uiout, "[Inferior ");
5960 ui_out_text (uiout, plongest (inf->num));
5961 ui_out_text (uiout, " (");
5962 ui_out_text (uiout, pidstr);
5963 ui_out_text (uiout, ") exited with code ");
5964 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5965 ui_out_text (uiout, "]\n");
5966 }
5967 else
5968 {
5969 if (ui_out_is_mi_like_p (uiout))
5970 ui_out_field_string
5971 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5972 ui_out_text (uiout, "[Inferior ");
5973 ui_out_text (uiout, plongest (inf->num));
5974 ui_out_text (uiout, " (");
5975 ui_out_text (uiout, pidstr);
5976 ui_out_text (uiout, ") exited normally]\n");
5977 }
5978 }
5979
5980 /* Signal received, print why the inferior has stopped. The signal table
5981 tells us to print about it. */
5982
5983 static void
5984 print_signal_received_reason (enum gdb_signal siggnal)
5985 {
5986 struct ui_out *uiout = current_uiout;
5987
5988 annotate_signal ();
5989
5990 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5991 {
5992 struct thread_info *t = inferior_thread ();
5993
5994 ui_out_text (uiout, "\n[");
5995 ui_out_field_string (uiout, "thread-name",
5996 target_pid_to_str (t->ptid));
5997 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5998 ui_out_text (uiout, " stopped");
5999 }
6000 else
6001 {
6002 ui_out_text (uiout, "\nProgram received signal ");
6003 annotate_signal_name ();
6004 if (ui_out_is_mi_like_p (uiout))
6005 ui_out_field_string
6006 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6007 ui_out_field_string (uiout, "signal-name",
6008 gdb_signal_to_name (siggnal));
6009 annotate_signal_name_end ();
6010 ui_out_text (uiout, ", ");
6011 annotate_signal_string ();
6012 ui_out_field_string (uiout, "signal-meaning",
6013 gdb_signal_to_string (siggnal));
6014 annotate_signal_string_end ();
6015 }
6016 ui_out_text (uiout, ".\n");
6017 }
6018
6019 /* Reverse execution: target ran out of history info, print why the inferior
6020 has stopped. */
6021
6022 static void
6023 print_no_history_reason (void)
6024 {
6025 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
6026 }
6027
6028 /* Print current location without a level number, if we have changed
6029 functions or hit a breakpoint. Print source line if we have one.
6030 bpstat_print contains the logic deciding in detail what to print,
6031 based on the event(s) that just occurred. */
6032
6033 void
6034 print_stop_event (struct target_waitstatus *ws)
6035 {
6036 int bpstat_ret;
6037 int source_flag;
6038 int do_frame_printing = 1;
6039 struct thread_info *tp = inferior_thread ();
6040
6041 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6042 switch (bpstat_ret)
6043 {
6044 case PRINT_UNKNOWN:
6045 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6046 should) carry around the function and does (or should) use
6047 that when doing a frame comparison. */
6048 if (tp->control.stop_step
6049 && frame_id_eq (tp->control.step_frame_id,
6050 get_frame_id (get_current_frame ()))
6051 && step_start_function == find_pc_function (stop_pc))
6052 {
6053 /* Finished step, just print source line. */
6054 source_flag = SRC_LINE;
6055 }
6056 else
6057 {
6058 /* Print location and source line. */
6059 source_flag = SRC_AND_LOC;
6060 }
6061 break;
6062 case PRINT_SRC_AND_LOC:
6063 /* Print location and source line. */
6064 source_flag = SRC_AND_LOC;
6065 break;
6066 case PRINT_SRC_ONLY:
6067 source_flag = SRC_LINE;
6068 break;
6069 case PRINT_NOTHING:
6070 /* Something bogus. */
6071 source_flag = SRC_LINE;
6072 do_frame_printing = 0;
6073 break;
6074 default:
6075 internal_error (__FILE__, __LINE__, _("Unknown value."));
6076 }
6077
6078 /* The behavior of this routine with respect to the source
6079 flag is:
6080 SRC_LINE: Print only source line
6081 LOCATION: Print only location
6082 SRC_AND_LOC: Print location and source line. */
6083 if (do_frame_printing)
6084 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6085
6086 /* Display the auto-display expressions. */
6087 do_displays ();
6088 }
6089
6090 /* Here to return control to GDB when the inferior stops for real.
6091 Print appropriate messages, remove breakpoints, give terminal our modes.
6092
6093 STOP_PRINT_FRAME nonzero means print the executing frame
6094 (pc, function, args, file, line number and line text).
6095 BREAKPOINTS_FAILED nonzero means stop was due to error
6096 attempting to insert breakpoints. */
6097
6098 void
6099 normal_stop (void)
6100 {
6101 struct target_waitstatus last;
6102 ptid_t last_ptid;
6103 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6104
6105 get_last_target_status (&last_ptid, &last);
6106
6107 /* If an exception is thrown from this point on, make sure to
6108 propagate GDB's knowledge of the executing state to the
6109 frontend/user running state. A QUIT is an easy exception to see
6110 here, so do this before any filtered output. */
6111 if (!non_stop)
6112 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6113 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6114 && last.kind != TARGET_WAITKIND_EXITED
6115 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6116 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6117
6118 /* As with the notification of thread events, we want to delay
6119 notifying the user that we've switched thread context until
6120 the inferior actually stops.
6121
6122 There's no point in saying anything if the inferior has exited.
6123 Note that SIGNALLED here means "exited with a signal", not
6124 "received a signal".
6125
6126 Also skip saying anything in non-stop mode. In that mode, as we
6127 don't want GDB to switch threads behind the user's back, to avoid
6128 races where the user is typing a command to apply to thread x,
6129 but GDB switches to thread y before the user finishes entering
6130 the command, fetch_inferior_event installs a cleanup to restore
6131 the current thread back to the thread the user had selected right
6132 after this event is handled, so we're not really switching, only
6133 informing of a stop. */
6134 if (!non_stop
6135 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6136 && target_has_execution
6137 && last.kind != TARGET_WAITKIND_SIGNALLED
6138 && last.kind != TARGET_WAITKIND_EXITED
6139 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6140 {
6141 target_terminal_ours_for_output ();
6142 printf_filtered (_("[Switching to %s]\n"),
6143 target_pid_to_str (inferior_ptid));
6144 annotate_thread_changed ();
6145 previous_inferior_ptid = inferior_ptid;
6146 }
6147
6148 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6149 {
6150 gdb_assert (sync_execution || !target_can_async_p ());
6151
6152 target_terminal_ours_for_output ();
6153 printf_filtered (_("No unwaited-for children left.\n"));
6154 }
6155
6156 if (!breakpoints_always_inserted_mode () && target_has_execution)
6157 {
6158 if (remove_breakpoints ())
6159 {
6160 target_terminal_ours_for_output ();
6161 printf_filtered (_("Cannot remove breakpoints because "
6162 "program is no longer writable.\nFurther "
6163 "execution is probably impossible.\n"));
6164 }
6165 }
6166
6167 /* If an auto-display called a function and that got a signal,
6168 delete that auto-display to avoid an infinite recursion. */
6169
6170 if (stopped_by_random_signal)
6171 disable_current_display ();
6172
6173 /* Don't print a message if in the middle of doing a "step n"
6174 operation for n > 1 */
6175 if (target_has_execution
6176 && last.kind != TARGET_WAITKIND_SIGNALLED
6177 && last.kind != TARGET_WAITKIND_EXITED
6178 && inferior_thread ()->step_multi
6179 && inferior_thread ()->control.stop_step)
6180 goto done;
6181
6182 target_terminal_ours ();
6183 async_enable_stdin ();
6184
6185 /* Set the current source location. This will also happen if we
6186 display the frame below, but the current SAL will be incorrect
6187 during a user hook-stop function. */
6188 if (has_stack_frames () && !stop_stack_dummy)
6189 set_current_sal_from_frame (get_current_frame ());
6190
6191 /* Let the user/frontend see the threads as stopped. */
6192 do_cleanups (old_chain);
6193
6194 /* Look up the hook_stop and run it (CLI internally handles problem
6195 of stop_command's pre-hook not existing). */
6196 if (stop_command)
6197 catch_errors (hook_stop_stub, stop_command,
6198 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6199
6200 if (!has_stack_frames ())
6201 goto done;
6202
6203 if (last.kind == TARGET_WAITKIND_SIGNALLED
6204 || last.kind == TARGET_WAITKIND_EXITED)
6205 goto done;
6206
6207 /* Select innermost stack frame - i.e., current frame is frame 0,
6208 and current location is based on that.
6209 Don't do this on return from a stack dummy routine,
6210 or if the program has exited. */
6211
6212 if (!stop_stack_dummy)
6213 {
6214 select_frame (get_current_frame ());
6215
6216 /* If --batch-silent is enabled then there's no need to print the current
6217 source location, and trying to do so risks causing an error message about
6218 missing source files. */
6219 if (stop_print_frame && !batch_silent)
6220 print_stop_event (&last);
6221 }
6222
6223 /* Save the function value return registers, if we care.
6224 We might be about to restore their previous contents. */
6225 if (inferior_thread ()->control.proceed_to_finish
6226 && execution_direction != EXEC_REVERSE)
6227 {
6228 /* This should not be necessary. */
6229 if (stop_registers)
6230 regcache_xfree (stop_registers);
6231
6232 /* NB: The copy goes through to the target picking up the value of
6233 all the registers. */
6234 stop_registers = regcache_dup (get_current_regcache ());
6235 }
6236
6237 if (stop_stack_dummy == STOP_STACK_DUMMY)
6238 {
6239 /* Pop the empty frame that contains the stack dummy.
6240 This also restores inferior state prior to the call
6241 (struct infcall_suspend_state). */
6242 struct frame_info *frame = get_current_frame ();
6243
6244 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6245 frame_pop (frame);
6246 /* frame_pop() calls reinit_frame_cache as the last thing it
6247 does which means there's currently no selected frame. We
6248 don't need to re-establish a selected frame if the dummy call
6249 returns normally, that will be done by
6250 restore_infcall_control_state. However, we do have to handle
6251 the case where the dummy call is returning after being
6252 stopped (e.g. the dummy call previously hit a breakpoint).
6253 We can't know which case we have so just always re-establish
6254 a selected frame here. */
6255 select_frame (get_current_frame ());
6256 }
6257
6258 done:
6259 annotate_stopped ();
6260
6261 /* Suppress the stop observer if we're in the middle of:
6262
6263 - a step n (n > 1), as there are still more steps to be done.
6264
6265 - a "finish" command, as the observer will be called in
6266 finish_command_continuation, so it can include the inferior
6267 function's return value.
6268
6269 - calling an inferior function, as we pretend the inferior didn't
6270 run at all. The return value of the call is handled by the
6271 expression evaluator, through call_function_by_hand. */
6272
6273 if (!target_has_execution
6274 || last.kind == TARGET_WAITKIND_SIGNALLED
6275 || last.kind == TARGET_WAITKIND_EXITED
6276 || last.kind == TARGET_WAITKIND_NO_RESUMED
6277 || (!(inferior_thread ()->step_multi
6278 && inferior_thread ()->control.stop_step)
6279 && !(inferior_thread ()->control.stop_bpstat
6280 && inferior_thread ()->control.proceed_to_finish)
6281 && !inferior_thread ()->control.in_infcall))
6282 {
6283 if (!ptid_equal (inferior_ptid, null_ptid))
6284 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6285 stop_print_frame);
6286 else
6287 observer_notify_normal_stop (NULL, stop_print_frame);
6288 }
6289
6290 if (target_has_execution)
6291 {
6292 if (last.kind != TARGET_WAITKIND_SIGNALLED
6293 && last.kind != TARGET_WAITKIND_EXITED)
6294 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6295 Delete any breakpoint that is to be deleted at the next stop. */
6296 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6297 }
6298
6299 /* Try to get rid of automatically added inferiors that are no
6300 longer needed. Keeping those around slows down things linearly.
6301 Note that this never removes the current inferior. */
6302 prune_inferiors ();
6303 }
6304
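/* Helper for normal_stop, called through catch_errors: run the pre-hook
of the "stop" command (the user-defined hook-stop, if any). */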
6305 static int
6306 hook_stop_stub (void *cmd)
6307 {
6308 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6309 return (0);
6310 }
6311 \f
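/* Accessors for the signal tables set by the "handle" command: return
whether GDB currently stops on, prints, or passes signal SIGNO to the
program. */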
6312 int
6313 signal_stop_state (int signo)
6314 {
6315 return signal_stop[signo];
6316 }
6317
6318 int
6319 signal_print_state (int signo)
6320 {
6321 return signal_print[signo];
6322 }
6323
6324 int
6325 signal_pass_state (int signo)
6326 {
6327 return signal_program[signo];
6328 }
6329
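/* Recompute the cached signal_pass entry for signal SIGNO, or for all
signals if SIGNO is -1. A signal may be delivered straight to the
target, without reporting it to GDB, only when it is set to be passed
to the program and GDB neither stops on it, prints it, nor catches
it. */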
6330 static void
6331 signal_cache_update (int signo)
6332 {
6333 if (signo == -1)
6334 {
6335 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6336 signal_cache_update (signo);
6337
6338 return;
6339 }
6340
6341 signal_pass[signo] = (signal_stop[signo] == 0
6342 && signal_print[signo] == 0
6343 && signal_program[signo] == 1
6344 && signal_catch[signo] == 0);
6345 }
6346
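/* Set the "stop" entry for signal SIGNO to STATE, refresh the cached
pass table, and return the previous setting. signal_print_update and
signal_pass_update below do the same for the print and pass-to-program
tables. */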
6347 int
6348 signal_stop_update (int signo, int state)
6349 {
6350 int ret = signal_stop[signo];
6351
6352 signal_stop[signo] = state;
6353 signal_cache_update (signo);
6354 return ret;
6355 }
6356
6357 int
6358 signal_print_update (int signo, int state)
6359 {
6360 int ret = signal_print[signo];
6361
6362 signal_print[signo] = state;
6363 signal_cache_update (signo);
6364 return ret;
6365 }
6366
6367 int
6368 signal_pass_update (int signo, int state)
6369 {
6370 int ret = signal_program[signo];
6371
6372 signal_program[signo] = state;
6373 signal_cache_update (signo);
6374 return ret;
6375 }
6376
6377 /* Update the global 'signal_catch' from INFO and notify the
6378 target. */
6379
6380 void
6381 signal_catch_update (const unsigned int *info)
6382 {
6383 int i;
6384
6385 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6386 signal_catch[i] = info[i] > 0;
6387 signal_cache_update (-1);
6388 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6389 }
6390
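/* Print the column headings for the signal table displayed by "info
signals" and the "handle" command. */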
6391 static void
6392 sig_print_header (void)
6393 {
6394 printf_filtered (_("Signal Stop\tPrint\tPass "
6395 "to program\tDescription\n"));
6396 }
6397
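/* Print a single row of the signal table: OURSIG's name, its current
stop/print/pass settings, and its description. */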
6398 static void
6399 sig_print_info (enum gdb_signal oursig)
6400 {
6401 const char *name = gdb_signal_to_name (oursig);
6402 int name_padding = 13 - strlen (name);
6403
6404 if (name_padding <= 0)
6405 name_padding = 0;
6406
6407 printf_filtered ("%s", name);
6408 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6409 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6410 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6411 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6412 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6413 }
6414
6415 /* Specify how various signals in the inferior should be handled. */
6416
6417 static void
6418 handle_command (char *args, int from_tty)
6419 {
6420 char **argv;
6421 int digits, wordlen;
6422 int sigfirst, signum, siglast;
6423 enum gdb_signal oursig;
6424 int allsigs;
6425 int nsigs;
6426 unsigned char *sigs;
6427 struct cleanup *old_chain;
6428
6429 if (args == NULL)
6430 {
6431 error_no_arg (_("signal to handle"));
6432 }
6433
6434 /* Allocate and zero an array of flags for which signals to handle. */
6435
6436 nsigs = (int) GDB_SIGNAL_LAST;
6437 sigs = (unsigned char *) alloca (nsigs);
6438 memset (sigs, 0, nsigs);
6439
6440 /* Break the command line up into args. */
6441
6442 argv = gdb_buildargv (args);
6443 old_chain = make_cleanup_freeargv (argv);
6444
6445 /* Walk through the args, looking for signal numbers, signal names, and
6446 actions. Signal numbers and signal names may be interspersed with
6447 actions, with the actions being performed for all signals cumulatively
6448 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
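/* For example, "handle SIGUSR1 nostop noprint pass" arranges for SIGUSR1
to be passed to the program silently, and "handle 14-15 stop" makes GDB
stop (and print) for the numeric signal range 14-15. */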
6449
6450 while (*argv != NULL)
6451 {
6452 wordlen = strlen (*argv);
6453 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6454 {;
6455 }
6456 allsigs = 0;
6457 sigfirst = siglast = -1;
6458
6459 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6460 {
6461 /* Apply action to all signals except those used by the
6462 debugger. Silently skip those. */
6463 allsigs = 1;
6464 sigfirst = 0;
6465 siglast = nsigs - 1;
6466 }
6467 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6468 {
6469 SET_SIGS (nsigs, sigs, signal_stop);
6470 SET_SIGS (nsigs, sigs, signal_print);
6471 }
6472 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6473 {
6474 UNSET_SIGS (nsigs, sigs, signal_program);
6475 }
6476 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6477 {
6478 SET_SIGS (nsigs, sigs, signal_print);
6479 }
6480 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6481 {
6482 SET_SIGS (nsigs, sigs, signal_program);
6483 }
6484 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6485 {
6486 UNSET_SIGS (nsigs, sigs, signal_stop);
6487 }
6488 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6489 {
6490 SET_SIGS (nsigs, sigs, signal_program);
6491 }
6492 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6493 {
6494 UNSET_SIGS (nsigs, sigs, signal_print);
6495 UNSET_SIGS (nsigs, sigs, signal_stop);
6496 }
6497 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6498 {
6499 UNSET_SIGS (nsigs, sigs, signal_program);
6500 }
6501 else if (digits > 0)
6502 {
6503 /* It is numeric. The numeric signal refers to our own
6504 internal signal numbering from target.h, not to host/target
6505 signal number. This is a feature; users really should be
6506 using symbolic names anyway, and the common ones like
6507 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6508
6509 sigfirst = siglast = (int)
6510 gdb_signal_from_command (atoi (*argv));
6511 if ((*argv)[digits] == '-')
6512 {
6513 siglast = (int)
6514 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6515 }
6516 if (sigfirst > siglast)
6517 {
6518 /* Bet he didn't figure we'd think of this case... */
6519 signum = sigfirst;
6520 sigfirst = siglast;
6521 siglast = signum;
6522 }
6523 }
6524 else
6525 {
6526 oursig = gdb_signal_from_name (*argv);
6527 if (oursig != GDB_SIGNAL_UNKNOWN)
6528 {
6529 sigfirst = siglast = (int) oursig;
6530 }
6531 else
6532 {
6533 /* Not a number and not a recognized flag word => complain. */
6534 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6535 }
6536 }
6537
6538 /* If any signal numbers or symbol names were found, set flags for
6539 which signals to apply actions to. */
6540
6541 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6542 {
6543 switch ((enum gdb_signal) signum)
6544 {
6545 case GDB_SIGNAL_TRAP:
6546 case GDB_SIGNAL_INT:
6547 if (!allsigs && !sigs[signum])
6548 {
6549 if (query (_("%s is used by the debugger.\n\
6550 Are you sure you want to change it? "),
6551 gdb_signal_to_name ((enum gdb_signal) signum)))
6552 {
6553 sigs[signum] = 1;
6554 }
6555 else
6556 {
6557 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6558 gdb_flush (gdb_stdout);
6559 }
6560 }
6561 break;
6562 case GDB_SIGNAL_0:
6563 case GDB_SIGNAL_DEFAULT:
6564 case GDB_SIGNAL_UNKNOWN:
6565 /* Make sure that "all" doesn't print these. */
6566 break;
6567 default:
6568 sigs[signum] = 1;
6569 break;
6570 }
6571 }
6572
6573 argv++;
6574 }
6575
6576 for (signum = 0; signum < nsigs; signum++)
6577 if (sigs[signum])
6578 {
6579 signal_cache_update (-1);
6580 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6581 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6582
6583 if (from_tty)
6584 {
6585 /* Show the results. */
6586 sig_print_header ();
6587 for (; signum < nsigs; signum++)
6588 if (sigs[signum])
6589 sig_print_info (signum);
6590 }
6591
6592 break;
6593 }
6594
6595 do_cleanups (old_chain);
6596 }
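
/* Example usage of the "handle" command (illustrative only; behavior
   depends on the current signal tables):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGINT pass

   The second command triggers the confirmation query above, because
   GDB_SIGNAL_INT is used by the debugger itself.  */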
6597
6598 /* Complete the "handle" command. */
6599
6600 static VEC (char_ptr) *
6601 handle_completer (struct cmd_list_element *ignore,
6602 const char *text, const char *word)
6603 {
6604 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6605 static const char * const keywords[] =
6606 {
6607 "all",
6608 "stop",
6609 "ignore",
6610 "print",
6611 "pass",
6612 "nostop",
6613 "noignore",
6614 "noprint",
6615 "nopass",
6616 NULL,
6617 };
6618
6619 vec_signals = signal_completer (ignore, text, word);
6620 vec_keywords = complete_on_enum (keywords, word, word);
6621
6622 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6623 VEC_free (char_ptr, vec_signals);
6624 VEC_free (char_ptr, vec_keywords);
6625 return return_val;
6626 }
6627
6628 static void
6629 xdb_handle_command (char *args, int from_tty)
6630 {
6631 char **argv;
6632 struct cleanup *old_chain;
6633
6634 if (args == NULL)
6635 error_no_arg (_("xdb command"));
6636
6637 /* Break the command line up into args. */
6638
6639 argv = gdb_buildargv (args);
6640 old_chain = make_cleanup_freeargv (argv);
6641 if (argv[1] != (char *) NULL)
6642 {
6643 char *argBuf;
6644 int bufLen;
6645
6646 bufLen = strlen (argv[0]) + 20;
6647 argBuf = (char *) xmalloc (bufLen);
6648 if (argBuf)
6649 {
6650 int validFlag = 1;
6651 enum gdb_signal oursig;
6652
6653 oursig = gdb_signal_from_name (argv[0]);
6654 memset (argBuf, 0, bufLen);
6655 if (strcmp (argv[1], "Q") == 0)
6656 sprintf (argBuf, "%s %s", argv[0], "noprint");
6657 else
6658 {
6659 if (strcmp (argv[1], "s") == 0)
6660 {
6661 if (!signal_stop[oursig])
6662 sprintf (argBuf, "%s %s", argv[0], "stop");
6663 else
6664 sprintf (argBuf, "%s %s", argv[0], "nostop");
6665 }
6666 else if (strcmp (argv[1], "i") == 0)
6667 {
6668 if (!signal_program[oursig])
6669 sprintf (argBuf, "%s %s", argv[0], "pass");
6670 else
6671 sprintf (argBuf, "%s %s", argv[0], "nopass");
6672 }
6673 else if (strcmp (argv[1], "r") == 0)
6674 {
6675 if (!signal_print[oursig])
6676 sprintf (argBuf, "%s %s", argv[0], "print");
6677 else
6678 sprintf (argBuf, "%s %s", argv[0], "noprint");
6679 }
6680 else
6681 validFlag = 0;
6682 }
6683 if (validFlag)
6684 handle_command (argBuf, from_tty);
6685 else
6686 printf_filtered (_("Invalid signal handling flag.\n"));
6687 if (argBuf)
6688 xfree (argBuf);
6689 }
6690 }
6691 do_cleanups (old_chain);
6692 }
6693
6694 enum gdb_signal
6695 gdb_signal_from_command (int num)
6696 {
6697 if (num >= 1 && num <= 15)
6698 return (enum gdb_signal) num;
6699 error (_("Only signals 1-15 are valid as numeric signals.\n\
6700 Use \"info signals\" for a list of symbolic signals."));
6701 }
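
/* For example (illustrative): "handle 3-5 nostop" makes handle_command
   above compute sigfirst = gdb_signal_from_command (3) and
   siglast = gdb_signal_from_command (5), while "handle 42 stop" is
   rejected by the error above, since only 1-15 are mapped onto GDB's
   internal signal enumeration.  */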
6702
6703 /* Print current contents of the tables set by the handle command.
6704 It is possible we should just be printing signals actually used
6705 by the current target (but for things to work right when switching
6706 targets, all signals should be in the signal tables). */
6707
6708 static void
6709 signals_info (char *signum_exp, int from_tty)
6710 {
6711 enum gdb_signal oursig;
6712
6713 sig_print_header ();
6714
6715 if (signum_exp)
6716 {
6717 /* First see if this is a symbol name. */
6718 oursig = gdb_signal_from_name (signum_exp);
6719 if (oursig == GDB_SIGNAL_UNKNOWN)
6720 {
6721 /* No, try numeric. */
6722 oursig =
6723 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6724 }
6725 sig_print_info (oursig);
6726 return;
6727 }
6728
6729 printf_filtered ("\n");
6730 /* These ugly casts brought to you by the native VAX compiler. */
6731 for (oursig = GDB_SIGNAL_FIRST;
6732 (int) oursig < (int) GDB_SIGNAL_LAST;
6733 oursig = (enum gdb_signal) ((int) oursig + 1))
6734 {
6735 QUIT;
6736
6737 if (oursig != GDB_SIGNAL_UNKNOWN
6738 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6739 sig_print_info (oursig);
6740 }
6741
6742 printf_filtered (_("\nUse the \"handle\" command "
6743 "to change these tables.\n"));
6744 }
6745
6746 /* Check if it makes sense to read $_siginfo from the current thread
6747 at this point. If not, throw an error. */
6748
6749 static void
6750 validate_siginfo_access (void)
6751 {
6752 /* No current inferior, no siginfo. */
6753 if (ptid_equal (inferior_ptid, null_ptid))
6754 error (_("No thread selected."));
6755
6756 /* Don't try to read from a dead thread. */
6757 if (is_exited (inferior_ptid))
6758 error (_("The current thread has terminated."));
6759
6760 /* ... or from a spinning thread. */
6761 if (is_running (inferior_ptid))
6762 error (_("Selected thread is running."));
6763 }
6764
6765 /* The $_siginfo convenience variable is a bit special. We don't know
6766 for sure the type of the value until we actually have a chance to
6767 fetch the data. The type can change depending on gdbarch, so it is
6768 also dependent on which thread you have selected.  This is handled by:
6769
6770 1. making $_siginfo be an internalvar that creates a new value on
6771 access.
6772
6773 2. making the value of $_siginfo be an lval_computed value. */
6774
6775 /* This function implements the lval_computed support for reading a
6776 $_siginfo value. */
6777
6778 static void
6779 siginfo_value_read (struct value *v)
6780 {
6781 LONGEST transferred;
6782
6783 validate_siginfo_access ();
6784
6785 transferred =
6786 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6787 NULL,
6788 value_contents_all_raw (v),
6789 value_offset (v),
6790 TYPE_LENGTH (value_type (v)));
6791
6792 if (transferred != TYPE_LENGTH (value_type (v)))
6793 error (_("Unable to read siginfo"));
6794 }
6795
6796 /* This function implements the lval_computed support for writing a
6797 $_siginfo value. */
6798
6799 static void
6800 siginfo_value_write (struct value *v, struct value *fromval)
6801 {
6802 LONGEST transferred;
6803
6804 validate_siginfo_access ();
6805
6806 transferred = target_write (&current_target,
6807 TARGET_OBJECT_SIGNAL_INFO,
6808 NULL,
6809 value_contents_all_raw (fromval),
6810 value_offset (v),
6811 TYPE_LENGTH (value_type (fromval)));
6812
6813 if (transferred != TYPE_LENGTH (value_type (fromval)))
6814 error (_("Unable to write siginfo"));
6815 }
6816
6817 static const struct lval_funcs siginfo_value_funcs =
6818 {
6819 siginfo_value_read,
6820 siginfo_value_write
6821 };
6822
6823 /* Return a new value with the correct type for the siginfo object of
6824 the current thread using architecture GDBARCH. Return a void value
6825 if there's no object available. */
6826
6827 static struct value *
6828 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6829 void *ignore)
6830 {
6831 if (target_has_stack
6832 && !ptid_equal (inferior_ptid, null_ptid)
6833 && gdbarch_get_siginfo_type_p (gdbarch))
6834 {
6835 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6836
6837 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6838 }
6839
6840 return allocate_value (builtin_type (gdbarch)->builtin_void);
6841 }
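
/* Example session (illustrative; the available fields depend on the
   siginfo type that gdbarch_get_siginfo_type returns for the current
   architecture -- the paths below assume a GNU/Linux layout):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   Reads go through siginfo_value_read above and writes through
   siginfo_value_write, both using TARGET_OBJECT_SIGNAL_INFO.  */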
6842
6843 \f
6844 /* infcall_suspend_state contains state about the program itself like its
6845 registers and any signal it received when it last stopped.
6846 This state must be restored regardless of how the inferior function call
6847 ends (either successfully, or after it hits a breakpoint or signal)
6848 if the program is to properly continue where it left off. */
6849
6850 struct infcall_suspend_state
6851 {
6852 struct thread_suspend_state thread_suspend;
6853 #if 0 /* Currently unused and empty structures are not valid C. */
6854 struct inferior_suspend_state inferior_suspend;
6855 #endif
6856
6857 /* Other fields: */
6858 CORE_ADDR stop_pc;
6859 struct regcache *registers;
6860
6861 /* Format of SIGINFO_DATA or NULL if it is not present. */
6862 struct gdbarch *siginfo_gdbarch;
6863
6864 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6865 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
6866 content would be invalid. */
6867 gdb_byte *siginfo_data;
6868 };
6869
6870 struct infcall_suspend_state *
6871 save_infcall_suspend_state (void)
6872 {
6873 struct infcall_suspend_state *inf_state;
6874 struct thread_info *tp = inferior_thread ();
6875 #if 0
6876 struct inferior *inf = current_inferior ();
6877 #endif
6878 struct regcache *regcache = get_current_regcache ();
6879 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6880 gdb_byte *siginfo_data = NULL;
6881
6882 if (gdbarch_get_siginfo_type_p (gdbarch))
6883 {
6884 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6885 size_t len = TYPE_LENGTH (type);
6886 struct cleanup *back_to;
6887
6888 siginfo_data = xmalloc (len);
6889 back_to = make_cleanup (xfree, siginfo_data);
6890
6891 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6892 siginfo_data, 0, len) == len)
6893 discard_cleanups (back_to);
6894 else
6895 {
6896 /* Errors ignored. */
6897 do_cleanups (back_to);
6898 siginfo_data = NULL;
6899 }
6900 }
6901
6902 inf_state = XCNEW (struct infcall_suspend_state);
6903
6904 if (siginfo_data)
6905 {
6906 inf_state->siginfo_gdbarch = gdbarch;
6907 inf_state->siginfo_data = siginfo_data;
6908 }
6909
6910 inf_state->thread_suspend = tp->suspend;
6911 #if 0 /* Currently unused and empty structures are not valid C. */
6912 inf_state->inferior_suspend = inf->suspend;
6913 #endif
6914
6915 /* run_inferior_call will not use the signal due to its `proceed' call with
6916 GDB_SIGNAL_0 anyway. */
6917 tp->suspend.stop_signal = GDB_SIGNAL_0;
6918
6919 inf_state->stop_pc = stop_pc;
6920
6921 inf_state->registers = regcache_dup (regcache);
6922
6923 return inf_state;
6924 }
6925
6926 /* Restore inferior session state to INF_STATE. */
6927
6928 void
6929 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6930 {
6931 struct thread_info *tp = inferior_thread ();
6932 #if 0
6933 struct inferior *inf = current_inferior ();
6934 #endif
6935 struct regcache *regcache = get_current_regcache ();
6936 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6937
6938 tp->suspend = inf_state->thread_suspend;
6939 #if 0 /* Currently unused and empty structures are not valid C. */
6940 inf->suspend = inf_state->inferior_suspend;
6941 #endif
6942
6943 stop_pc = inf_state->stop_pc;
6944
6945 if (inf_state->siginfo_gdbarch == gdbarch)
6946 {
6947 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6948
6949 /* Errors ignored. */
6950 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6951 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6952 }
6953
6954 /* The inferior can be gone if the user types "print exit(0)"
6955 (and perhaps other times). */
6956 if (target_has_execution)
6957 /* NB: The register write goes through to the target. */
6958 regcache_cpy (regcache, inf_state->registers);
6959
6960 discard_infcall_suspend_state (inf_state);
6961 }
6962
6963 static void
6964 do_restore_infcall_suspend_state_cleanup (void *state)
6965 {
6966 restore_infcall_suspend_state (state);
6967 }
6968
6969 struct cleanup *
6970 make_cleanup_restore_infcall_suspend_state
6971 (struct infcall_suspend_state *inf_state)
6972 {
6973 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6974 }
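
/* A minimal sketch (not part of GDB; the function name is hypothetical)
   of how a caller might use the save/restore pair above around an
   inferior function call.  */
#if 0
static void
example_call_with_saved_suspend_state (void)
{
  struct infcall_suspend_state *st = save_infcall_suspend_state ();
  struct cleanup *old_chain = make_cleanup_restore_infcall_suspend_state (st);

  /* ... set up registers and run the inferior call here ... */

  /* Running the cleanups restores the registers, stop_pc, the stop
     signal and, when available, the saved siginfo.  */
  do_cleanups (old_chain);
}
#endif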
6975
6976 void
6977 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6978 {
6979 regcache_xfree (inf_state->registers);
6980 xfree (inf_state->siginfo_data);
6981 xfree (inf_state);
6982 }
6983
6984 struct regcache *
6985 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6986 {
6987 return inf_state->registers;
6988 }
6989
6990 /* infcall_control_state contains state regarding gdb's control of the
6991 inferior itself like stepping control. It also contains session state like
6992 the user's currently selected frame. */
6993
6994 struct infcall_control_state
6995 {
6996 struct thread_control_state thread_control;
6997 struct inferior_control_state inferior_control;
6998
6999 /* Other fields: */
7000 enum stop_stack_kind stop_stack_dummy;
7001 int stopped_by_random_signal;
7002 int stop_after_trap;
7003
7004 /* ID of the selected frame when the inferior function call was made. */
7005 struct frame_id selected_frame_id;
7006 };
7007
7008 /* Save all of the information associated with the inferior<==>gdb
7009 connection. */
7010
7011 struct infcall_control_state *
7012 save_infcall_control_state (void)
7013 {
7014 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7015 struct thread_info *tp = inferior_thread ();
7016 struct inferior *inf = current_inferior ();
7017
7018 inf_status->thread_control = tp->control;
7019 inf_status->inferior_control = inf->control;
7020
7021 tp->control.step_resume_breakpoint = NULL;
7022 tp->control.exception_resume_breakpoint = NULL;
7023
7024 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7025 chain. If caller's caller is walking the chain, they'll be happier if we
7026 hand them back the original chain when restore_infcall_control_state is
7027 called. */
7028 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7029
7030 /* Other fields: */
7031 inf_status->stop_stack_dummy = stop_stack_dummy;
7032 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7033 inf_status->stop_after_trap = stop_after_trap;
7034
7035 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7036
7037 return inf_status;
7038 }
7039
7040 static int
7041 restore_selected_frame (void *args)
7042 {
7043 struct frame_id *fid = (struct frame_id *) args;
7044 struct frame_info *frame;
7045
7046 frame = frame_find_by_id (*fid);
7047
7048 /* If no frame with the saved ID can be found, the previously
7049 selected frame no longer exists. */
7050 if (frame == NULL)
7051 {
7052 warning (_("Unable to restore previously selected frame."));
7053 return 0;
7054 }
7055
7056 select_frame (frame);
7057
7058 return (1);
7059 }
7060
7061 /* Restore inferior session state to INF_STATUS. */
7062
7063 void
7064 restore_infcall_control_state (struct infcall_control_state *inf_status)
7065 {
7066 struct thread_info *tp = inferior_thread ();
7067 struct inferior *inf = current_inferior ();
7068
7069 if (tp->control.step_resume_breakpoint)
7070 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7071
7072 if (tp->control.exception_resume_breakpoint)
7073 tp->control.exception_resume_breakpoint->disposition
7074 = disp_del_at_next_stop;
7075
7076 /* Handle the bpstat_copy of the chain. */
7077 bpstat_clear (&tp->control.stop_bpstat);
7078
7079 tp->control = inf_status->thread_control;
7080 inf->control = inf_status->inferior_control;
7081
7082 /* Other fields: */
7083 stop_stack_dummy = inf_status->stop_stack_dummy;
7084 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7085 stop_after_trap = inf_status->stop_after_trap;
7086
7087 if (target_has_stack)
7088 {
7089 /* The point of catch_errors is that if the stack is clobbered,
7090 walking the stack might encounter a garbage pointer and
7091 error() trying to dereference it. */
7092 if (catch_errors
7093 (restore_selected_frame, &inf_status->selected_frame_id,
7094 "Unable to restore previously selected frame:\n",
7095 RETURN_MASK_ERROR) == 0)
7096 /* Error in restoring the selected frame. Select the innermost
7097 frame. */
7098 select_frame (get_current_frame ());
7099 }
7100
7101 xfree (inf_status);
7102 }
7103
7104 static void
7105 do_restore_infcall_control_state_cleanup (void *sts)
7106 {
7107 restore_infcall_control_state (sts);
7108 }
7109
7110 struct cleanup *
7111 make_cleanup_restore_infcall_control_state
7112 (struct infcall_control_state *inf_status)
7113 {
7114 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7115 }
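
/* A minimal sketch (not part of GDB; the function name is hypothetical)
   of one way to pair the save above with a cleanup: restore the control
   state if the call throws, discard the saved copy on success.  */
#if 0
static void
example_call_with_saved_control_state (void)
{
  struct infcall_control_state *status = save_infcall_control_state ();
  struct cleanup *old_chain
    = make_cleanup_restore_infcall_control_state (status);

  /* ... run the inferior call; an error unwinds through the cleanup,
     restoring the stepping state, bpstat chain and selected frame ... */

  /* On success, keep the new state and only free the saved copy.  */
  discard_cleanups (old_chain);
  discard_infcall_control_state (status);
}
#endif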
7116
7117 void
7118 discard_infcall_control_state (struct infcall_control_state *inf_status)
7119 {
7120 if (inf_status->thread_control.step_resume_breakpoint)
7121 inf_status->thread_control.step_resume_breakpoint->disposition
7122 = disp_del_at_next_stop;
7123
7124 if (inf_status->thread_control.exception_resume_breakpoint)
7125 inf_status->thread_control.exception_resume_breakpoint->disposition
7126 = disp_del_at_next_stop;
7127
7128 /* See save_infcall_control_state for info on stop_bpstat. */
7129 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7130
7131 xfree (inf_status);
7132 }
7133 \f
7134 /* restore_inferior_ptid() will be used by the cleanup machinery
7135 to restore the inferior_ptid value saved in a call to
7136 save_inferior_ptid(). */
7137
7138 static void
7139 restore_inferior_ptid (void *arg)
7140 {
7141 ptid_t *saved_ptid_ptr = arg;
7142
7143 inferior_ptid = *saved_ptid_ptr;
7144 xfree (arg);
7145 }
7146
7147 /* Save the value of inferior_ptid so that it may be restored by a
7148 later call to do_cleanups(). Returns the struct cleanup pointer
7149 needed for later doing the cleanup. */
7150
7151 struct cleanup *
7152 save_inferior_ptid (void)
7153 {
7154 ptid_t *saved_ptid_ptr;
7155
7156 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7157 *saved_ptid_ptr = inferior_ptid;
7158 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7159 }
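
/* A minimal sketch (not part of GDB; the function name is hypothetical)
   of the usual pattern: temporarily switch inferior_ptid and let the
   cleanup put the original value back.  */
#if 0
static void
example_with_temporary_ptid (ptid_t tmp_ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = tmp_ptid;
  /* ... operate on the temporarily selected thread ... */

  do_cleanups (old_chain);	/* Restores the original inferior_ptid.  */
}
#endif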
7160
7161 /* See inferior.h. */
7162
7163 void
7164 clear_exit_convenience_vars (void)
7165 {
7166 clear_internalvar (lookup_internalvar ("_exitsignal"));
7167 clear_internalvar (lookup_internalvar ("_exitcode"));
7168 }
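
/* Example (illustrative): after a normal exit, "print $_exitcode" shows
   the inferior's exit status and $_exitsignal is void; after a
   signal-induced exit the roles are reversed.  Clearing both above keeps
   stale values from a previous run from being reported.  */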
7169 \f
7170
7171 /* User interface for reverse debugging:
7172 Set exec-direction / show exec-direction commands
7173 (returns an error unless the target supports reverse execution). */
7174
7175 int execution_direction = EXEC_FORWARD;
7176 static const char exec_forward[] = "forward";
7177 static const char exec_reverse[] = "reverse";
7178 static const char *exec_direction = exec_forward;
7179 static const char *const exec_direction_names[] = {
7180 exec_forward,
7181 exec_reverse,
7182 NULL
7183 };
7184
7185 static void
7186 set_exec_direction_func (char *args, int from_tty,
7187 struct cmd_list_element *cmd)
7188 {
7189 if (target_can_execute_reverse)
7190 {
7191 if (!strcmp (exec_direction, exec_forward))
7192 execution_direction = EXEC_FORWARD;
7193 else if (!strcmp (exec_direction, exec_reverse))
7194 execution_direction = EXEC_REVERSE;
7195 }
7196 else
7197 {
7198 exec_direction = exec_forward;
7199 error (_("Target does not support this operation."));
7200 }
7201 }
7202
7203 static void
7204 show_exec_direction_func (struct ui_file *out, int from_tty,
7205 struct cmd_list_element *cmd, const char *value)
7206 {
7207 switch (execution_direction) {
7208 case EXEC_FORWARD:
7209 fprintf_filtered (out, _("Forward.\n"));
7210 break;
7211 case EXEC_REVERSE:
7212 fprintf_filtered (out, _("Reverse.\n"));
7213 break;
7214 default:
7215 internal_error (__FILE__, __LINE__,
7216 _("bogus execution_direction value: %d"),
7217 (int) execution_direction);
7218 }
7219 }
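
/* Example (illustrative): with a target that can run backwards, such as
   the process record target after "record",

     (gdb) set exec-direction reverse
     (gdb) step

   executes in reverse, and "set exec-direction forward" switches back.
   On other targets set_exec_direction_func above raises an error and
   resets the mode to "forward".  */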
7220
7221 static void
7222 show_schedule_multiple (struct ui_file *file, int from_tty,
7223 struct cmd_list_element *c, const char *value)
7224 {
7225 fprintf_filtered (file, _("Resuming the execution of threads "
7226 "of all processes is %s.\n"), value);
7227 }
7228
7229 /* Implementation of `siginfo' variable. */
7230
7231 static const struct internalvar_funcs siginfo_funcs =
7232 {
7233 siginfo_make_value,
7234 NULL,
7235 NULL
7236 };
7237
7238 void
7239 _initialize_infrun (void)
7240 {
7241 int i;
7242 int numsigs;
7243 struct cmd_list_element *c;
7244
7245 add_info ("signals", signals_info, _("\
7246 What debugger does when program gets various signals.\n\
7247 Specify a signal as argument to print info on that signal only."));
7248 add_info_alias ("handle", "signals", 0);
7249
7250 c = add_com ("handle", class_run, handle_command, _("\
7251 Specify how to handle signals.\n\
7252 Usage: handle SIGNAL [ACTIONS]\n\
7253 Args are signals and actions to apply to those signals.\n\
7254 If no actions are specified, the current settings for the specified signals\n\
7255 will be displayed instead.\n\
7256 \n\
7257 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7258 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7259 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7260 The special arg \"all\" is recognized to mean all signals except those\n\
7261 used by the debugger, typically SIGTRAP and SIGINT.\n\
7262 \n\
7263 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7264 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7265 Stop means reenter debugger if this signal happens (implies print).\n\
7266 Print means print a message if this signal happens.\n\
7267 Pass means let program see this signal; otherwise program doesn't know.\n\
7268 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7269 Pass and Stop may be combined.\n\
7270 \n\
7271 Multiple signals may be specified. Signal numbers and signal names\n\
7272 may be interspersed with actions, with the actions being performed for\n\
7273 all signals cumulatively specified."));
7274 set_cmd_completer (c, handle_completer);
7275
7276 if (xdb_commands)
7277 {
7278 add_com ("lz", class_info, signals_info, _("\
7279 What debugger does when program gets various signals.\n\
7280 Specify a signal as argument to print info on that signal only."));
7281 add_com ("z", class_run, xdb_handle_command, _("\
7282 Specify how to handle a signal.\n\
7283 Args are signals and actions to apply to those signals.\n\
7284 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7285 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7286 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7287 The special arg \"all\" is recognized to mean all signals except those\n\
7288 used by the debugger, typically SIGTRAP and SIGINT.\n\
7289 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7290 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7291 nopass), \"Q\" (noprint).\n\
7292 Stop means reenter debugger if this signal happens (implies print).\n\
7293 Print means print a message if this signal happens.\n\
7294 Pass means let program see this signal; otherwise program doesn't know.\n\
7295 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7296 Pass and Stop may be combined."));
7297 }
7298
7299 if (!dbx_commands)
7300 stop_command = add_cmd ("stop", class_obscure,
7301 not_just_help_class_command, _("\
7302 There is no `stop' command, but you can set a hook on `stop'.\n\
7303 This allows you to set a list of commands to be run each time execution\n\
7304 of the program stops."), &cmdlist);
7305
7306 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7307 Set inferior debugging."), _("\
7308 Show inferior debugging."), _("\
7309 When non-zero, inferior specific debugging is enabled."),
7310 NULL,
7311 show_debug_infrun,
7312 &setdebuglist, &showdebuglist);
7313
7314 add_setshow_boolean_cmd ("displaced", class_maintenance,
7315 &debug_displaced, _("\
7316 Set displaced stepping debugging."), _("\
7317 Show displaced stepping debugging."), _("\
7318 When non-zero, displaced stepping specific debugging is enabled."),
7319 NULL,
7320 show_debug_displaced,
7321 &setdebuglist, &showdebuglist);
7322
7323 add_setshow_boolean_cmd ("non-stop", no_class,
7324 &non_stop_1, _("\
7325 Set whether gdb controls the inferior in non-stop mode."), _("\
7326 Show whether gdb controls the inferior in non-stop mode."), _("\
7327 When debugging a multi-threaded program and this setting is\n\
7328 off (the default, also called all-stop mode), when one thread stops\n\
7329 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7330 all other threads in the program while you interact with the thread of\n\
7331 interest. When you continue or step a thread, you can allow the other\n\
7332 threads to run, or have them remain stopped, but while you inspect any\n\
7333 thread's state, all threads stop.\n\
7334 \n\
7335 In non-stop mode, when one thread stops, other threads can continue\n\
7336 to run freely. You'll be able to step each thread independently,\n\
7337 leave it stopped or free to run as needed."),
7338 set_non_stop,
7339 show_non_stop,
7340 &setlist,
7341 &showlist);
7342
7343 numsigs = (int) GDB_SIGNAL_LAST;
7344 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7345 signal_print = (unsigned char *)
7346 xmalloc (sizeof (signal_print[0]) * numsigs);
7347 signal_program = (unsigned char *)
7348 xmalloc (sizeof (signal_program[0]) * numsigs);
7349 signal_catch = (unsigned char *)
7350 xmalloc (sizeof (signal_catch[0]) * numsigs);
7351 signal_pass = (unsigned char *)
7352 xmalloc (sizeof (signal_program[0]) * numsigs);
7353 for (i = 0; i < numsigs; i++)
7354 {
7355 signal_stop[i] = 1;
7356 signal_print[i] = 1;
7357 signal_program[i] = 1;
7358 signal_catch[i] = 0;
7359 }
7360
7361 /* Signals caused by debugger's own actions
7362 should not be given to the program afterwards. */
7363 signal_program[GDB_SIGNAL_TRAP] = 0;
7364 signal_program[GDB_SIGNAL_INT] = 0;
7365
7366 /* Signals that are not errors should not normally enter the debugger. */
7367 signal_stop[GDB_SIGNAL_ALRM] = 0;
7368 signal_print[GDB_SIGNAL_ALRM] = 0;
7369 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7370 signal_print[GDB_SIGNAL_VTALRM] = 0;
7371 signal_stop[GDB_SIGNAL_PROF] = 0;
7372 signal_print[GDB_SIGNAL_PROF] = 0;
7373 signal_stop[GDB_SIGNAL_CHLD] = 0;
7374 signal_print[GDB_SIGNAL_CHLD] = 0;
7375 signal_stop[GDB_SIGNAL_IO] = 0;
7376 signal_print[GDB_SIGNAL_IO] = 0;
7377 signal_stop[GDB_SIGNAL_POLL] = 0;
7378 signal_print[GDB_SIGNAL_POLL] = 0;
7379 signal_stop[GDB_SIGNAL_URG] = 0;
7380 signal_print[GDB_SIGNAL_URG] = 0;
7381 signal_stop[GDB_SIGNAL_WINCH] = 0;
7382 signal_print[GDB_SIGNAL_WINCH] = 0;
7383 signal_stop[GDB_SIGNAL_PRIO] = 0;
7384 signal_print[GDB_SIGNAL_PRIO] = 0;
7385
7386 /* These signals are used internally by user-level thread
7387 implementations. (See signal(5) on Solaris.) Like the above
7388 signals, a healthy program receives and handles them as part of
7389 its normal operation. */
7390 signal_stop[GDB_SIGNAL_LWP] = 0;
7391 signal_print[GDB_SIGNAL_LWP] = 0;
7392 signal_stop[GDB_SIGNAL_WAITING] = 0;
7393 signal_print[GDB_SIGNAL_WAITING] = 0;
7394 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7395 signal_print[GDB_SIGNAL_CANCEL] = 0;
7396
7397 /* Update cached state. */
7398 signal_cache_update (-1);
7399
7400 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7401 &stop_on_solib_events, _("\
7402 Set stopping for shared library events."), _("\
7403 Show stopping for shared library events."), _("\
7404 If nonzero, gdb will give control to the user when the dynamic linker\n\
7405 notifies gdb of shared library events. The most common event of interest\n\
7406 to the user would be loading/unloading of a new library."),
7407 set_stop_on_solib_events,
7408 show_stop_on_solib_events,
7409 &setlist, &showlist);
7410
7411 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7412 follow_fork_mode_kind_names,
7413 &follow_fork_mode_string, _("\
7414 Set debugger response to a program call of fork or vfork."), _("\
7415 Show debugger response to a program call of fork or vfork."), _("\
7416 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7417 parent - the original process is debugged after a fork\n\
7418 child - the new process is debugged after a fork\n\
7419 The unfollowed process will continue to run.\n\
7420 By default, the debugger will follow the parent process."),
7421 NULL,
7422 show_follow_fork_mode_string,
7423 &setlist, &showlist);
7424
7425 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7426 follow_exec_mode_names,
7427 &follow_exec_mode_string, _("\
7428 Set debugger response to a program call of exec."), _("\
7429 Show debugger response to a program call of exec."), _("\
7430 An exec call replaces the program image of a process.\n\
7431 \n\
7432 follow-exec-mode can be:\n\
7433 \n\
7434 new - the debugger creates a new inferior and rebinds the process\n\
7435 to this new inferior. The program the process was running before\n\
7436 the exec call can be restarted afterwards by restarting the original\n\
7437 inferior.\n\
7438 \n\
7439 same - the debugger keeps the process bound to the same inferior.\n\
7440 The new executable image replaces the previous executable loaded in\n\
7441 the inferior. Restarting the inferior after the exec call restarts\n\
7442 the executable the process was running after the exec call.\n\
7443 \n\
7444 By default, the debugger will use the same inferior."),
7445 NULL,
7446 show_follow_exec_mode_string,
7447 &setlist, &showlist);
7448
7449 add_setshow_enum_cmd ("scheduler-locking", class_run,
7450 scheduler_enums, &scheduler_mode, _("\
7451 Set mode for locking scheduler during execution."), _("\
7452 Show mode for locking scheduler during execution."), _("\
7453 off == no locking (threads may preempt at any time)\n\
7454 on == full locking (no thread except the current thread may run)\n\
7455 step == scheduler locked during every single-step operation.\n\
7456 In this mode, no other thread may run during a step command.\n\
7457 Other threads may run while stepping over a function call ('next')."),
7458 set_schedlock_func, /* traps on target vector */
7459 show_scheduler_mode,
7460 &setlist, &showlist);
7461
7462 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7463 Set mode for resuming threads of all processes."), _("\
7464 Show mode for resuming threads of all processes."), _("\
7465 When on, execution commands (such as 'continue' or 'next') resume all\n\
7466 threads of all processes. When off (which is the default), execution\n\
7467 commands only resume the threads of the current process. The set of\n\
7468 threads that are resumed is further refined by the scheduler-locking\n\
7469 mode (see help set scheduler-locking)."),
7470 NULL,
7471 show_schedule_multiple,
7472 &setlist, &showlist);
7473
7474 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7475 Set mode of the step operation."), _("\
7476 Show mode of the step operation."), _("\
7477 When set, doing a step over a function without debug line information\n\
7478 will stop at the first instruction of that function. Otherwise, the\n\
7479 function is skipped and the step command stops at a different source line."),
7480 NULL,
7481 show_step_stop_if_no_debug,
7482 &setlist, &showlist);
7483
7484 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7485 &can_use_displaced_stepping, _("\
7486 Set debugger's willingness to use displaced stepping."), _("\
7487 Show debugger's willingness to use displaced stepping."), _("\
7488 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7489 supported by the target architecture. If off, gdb will not use displaced\n\
7490 stepping to step over breakpoints, even if such is supported by the target\n\
7491 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7492 if the target architecture supports it and non-stop mode is active, but will not\n\
7493 use it in all-stop mode (see help set non-stop)."),
7494 NULL,
7495 show_can_use_displaced_stepping,
7496 &setlist, &showlist);
7497
7498 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7499 &exec_direction, _("Set direction of execution.\n\
7500 Options are 'forward' or 'reverse'."),
7501 _("Show direction of execution (forward/reverse)."),
7502 _("Tells gdb whether to execute forward or backward."),
7503 set_exec_direction_func, show_exec_direction_func,
7504 &setlist, &showlist);
7505
7506 /* Set/show detach-on-fork: user-settable mode. */
7507
7508 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7509 Set whether gdb will detach the child of a fork."), _("\
7510 Show whether gdb will detach the child of a fork."), _("\
7511 Tells gdb whether to detach the child of a fork."),
7512 NULL, NULL, &setlist, &showlist);
7513
7514 /* Set/show disable address space randomization mode. */
7515
7516 add_setshow_boolean_cmd ("disable-randomization", class_support,
7517 &disable_randomization, _("\
7518 Set disabling of debuggee's virtual address space randomization."), _("\
7519 Show disabling of debuggee's virtual address space randomization."), _("\
7520 When this mode is on (which is the default), randomization of the virtual\n\
7521 address space is disabled. Standalone programs run with the randomization\n\
7522 enabled by default on some platforms."),
7523 &set_disable_randomization,
7524 &show_disable_randomization,
7525 &setlist, &showlist);
7526
7527 /* ptid initializations */
7528 inferior_ptid = null_ptid;
7529 target_last_wait_ptid = minus_one_ptid;
7530
7531 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7532 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7533 observer_attach_thread_exit (infrun_thread_thread_exit);
7534 observer_attach_inferior_exit (infrun_inferior_exit);
7535
7536 /* Explicitly create without lookup, since that tries to create a
7537 value with a void typed value, and when we get here, gdbarch
7538 isn't initialized yet. At this point, we're quite sure there
7539 isn't another convenience variable of the same name. */
7540 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7541
7542 add_setshow_boolean_cmd ("observer", no_class,
7543 &observer_mode_1, _("\
7544 Set whether gdb controls the inferior in observer mode."), _("\
7545 Show whether gdb controls the inferior in observer mode."), _("\
7546 In observer mode, GDB can get data from the inferior, but not\n\
7547 affect its execution. Registers and memory may not be changed,\n\
7548 breakpoints may not be set, and the program cannot be interrupted\n\
7549 or signalled."),
7550 set_observer_mode,
7551 show_observer_mode,
7552 &setlist,
7553 &showlist);
7554 }