binutils-gdb.git: gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include <string.h>
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "record-full.h"
53 #include "inline-frame.h"
54 #include "jit.h"
55 #include "tracepoint.h"
56 #include "continuations.h"
57 #include "interps.h"
58 #include "skip.h"
59 #include "probe.h"
60 #include "objfiles.h"
61 #include "completer.h"
62 #include "target-descriptions.h"
63 #include "target-dcache.h"
64
65 /* Prototypes for local functions */
66
67 static void signals_info (char *, int);
68
69 static void handle_command (char *, int);
70
71 static void sig_print_info (enum gdb_signal);
72
73 static void sig_print_header (void);
74
75 static void resume_cleanups (void *);
76
77 static int hook_stop_stub (void *);
78
79 static int restore_selected_frame (void *);
80
81 static int follow_fork (void);
82
83 static void set_schedlock_func (char *args, int from_tty,
84 struct cmd_list_element *c);
85
86 static int currently_stepping (struct thread_info *tp);
87
88 static void xdb_handle_command (char *args, int from_tty);
89
90 static void print_exited_reason (int exitstatus);
91
92 static void print_signal_exited_reason (enum gdb_signal siggnal);
93
94 static void print_no_history_reason (void);
95
96 static void print_signal_received_reason (enum gdb_signal siggnal);
97
98 static void print_end_stepping_range_reason (void);
99
100 void _initialize_infrun (void);
101
102 void nullify_last_target_wait_ptid (void);
103
104 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
105
106 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
107
108 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
109
110 /* When set, stop the 'step' command if we enter a function which has
111 no line number information. The normal behavior is that we step
112 over such a function. */
113 int step_stop_if_no_debug = 0;
114 static void
115 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
116 struct cmd_list_element *c, const char *value)
117 {
118 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
119 }
120
121 /* In asynchronous mode, but simulating synchronous execution. */
122
123 int sync_execution = 0;
124
125 /* proceed and normal_stop use this to notify the user when the
126 inferior stopped in a different thread than it had been running
127 in. */
128
129 static ptid_t previous_inferior_ptid;
130
131 /* If set (default for legacy reasons), when following a fork, GDB
132 will detach from one of the fork branches, child or parent.
133 Exactly which branch is detached depends on the 'set follow-fork-mode'
134 setting. */
135
136 static int detach_fork = 1;
137
138 int debug_displaced = 0;
139 static void
140 show_debug_displaced (struct ui_file *file, int from_tty,
141 struct cmd_list_element *c, const char *value)
142 {
143 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
144 }
145
146 unsigned int debug_infrun = 0;
147 static void
148 show_debug_infrun (struct ui_file *file, int from_tty,
149 struct cmd_list_element *c, const char *value)
150 {
151 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
152 }
153
154
155 /* Support for disabling address space randomization. */
156
157 int disable_randomization = 1;
158
159 static void
160 show_disable_randomization (struct ui_file *file, int from_tty,
161 struct cmd_list_element *c, const char *value)
162 {
163 if (target_supports_disable_randomization ())
164 fprintf_filtered (file,
165 _("Disabling randomization of debuggee's "
166 "virtual address space is %s.\n"),
167 value);
168 else
169 fputs_filtered (_("Disabling randomization of debuggee's "
170 "virtual address space is unsupported on\n"
171 "this platform.\n"), file);
172 }
173
174 static void
175 set_disable_randomization (char *args, int from_tty,
176 struct cmd_list_element *c)
177 {
178 if (!target_supports_disable_randomization ())
179 error (_("Disabling randomization of debuggee's "
180 "virtual address space is unsupported on\n"
181 "this platform."));
182 }
183
184 /* User interface for non-stop mode. */
185
186 int non_stop = 0;
187 static int non_stop_1 = 0;
188
189 static void
190 set_non_stop (char *args, int from_tty,
191 struct cmd_list_element *c)
192 {
193 if (target_has_execution)
194 {
195 non_stop_1 = non_stop;
196 error (_("Cannot change this setting while the inferior is running."));
197 }
198
199 non_stop = non_stop_1;
200 }
201
202 static void
203 show_non_stop (struct ui_file *file, int from_tty,
204 struct cmd_list_element *c, const char *value)
205 {
206 fprintf_filtered (file,
207 _("Controlling the inferior in non-stop mode is %s.\n"),
208 value);
209 }
210
211 /* "Observer mode" is somewhat like a more extreme version of
212 non-stop, in which all GDB operations that might affect the
213 target's execution have been disabled. */
214
215 int observer_mode = 0;
216 static int observer_mode_1 = 0;
217
218 static void
219 set_observer_mode (char *args, int from_tty,
220 struct cmd_list_element *c)
221 {
222 if (target_has_execution)
223 {
224 observer_mode_1 = observer_mode;
225 error (_("Cannot change this setting while the inferior is running."));
226 }
227
228 observer_mode = observer_mode_1;
229
230 may_write_registers = !observer_mode;
231 may_write_memory = !observer_mode;
232 may_insert_breakpoints = !observer_mode;
233 may_insert_tracepoints = !observer_mode;
234 /* We can insert fast tracepoints in or out of observer mode,
235 but enable them if we're going into this mode. */
236 if (observer_mode)
237 may_insert_fast_tracepoints = 1;
238 may_stop = !observer_mode;
239 update_target_permissions ();
240
241 /* Going *into* observer mode we must force non-stop, then
242 going out we leave it that way. */
243 if (observer_mode)
244 {
245 target_async_permitted = 1;
246 pagination_enabled = 0;
247 non_stop = non_stop_1 = 1;
248 }
249
250 if (from_tty)
251 printf_filtered (_("Observer mode is now %s.\n"),
252 (observer_mode ? "on" : "off"));
253 }
254
255 static void
256 show_observer_mode (struct ui_file *file, int from_tty,
257 struct cmd_list_element *c, const char *value)
258 {
259 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
260 }
261
262 /* This updates the value of observer mode based on changes in
263 permissions. Note that we are deliberately ignoring the values of
264 may-write-registers and may-write-memory, since the user may have
265 reason to enable these during a session, for instance to turn on a
266 debugging-related global. */
267
268 void
269 update_observer_mode (void)
270 {
271 int newval;
272
273 newval = (!may_insert_breakpoints
274 && !may_insert_tracepoints
275 && may_insert_fast_tracepoints
276 && !may_stop
277 && non_stop);
278
279 /* Let the user know if things change. */
280 if (newval != observer_mode)
281 printf_filtered (_("Observer mode is now %s.\n"),
282 (newval ? "on" : "off"));
283
284 observer_mode = observer_mode_1 = newval;
285 }
286
287 /* Tables of how to react to signals; the user sets them. */
288
289 static unsigned char *signal_stop;
290 static unsigned char *signal_print;
291 static unsigned char *signal_program;
292
293 /* Table of signals that are registered with "catch signal". A
294 non-zero entry indicates that the signal is caught by some "catch
295 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
296 signals. */
297 static unsigned char *signal_catch;
298
299 /* Table of signals that the target may silently handle.
300 This is automatically determined from the flags above,
301 and simply cached here. */
302 static unsigned char *signal_pass;
303
304 #define SET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 1; \
310 } while (0)
311
312 #define UNSET_SIGS(nsigs,sigs,flags) \
313 do { \
314 int signum = (nsigs); \
315 while (signum-- > 0) \
316 if ((sigs)[signum]) \
317 (flags)[signum] = 0; \
318 } while (0)
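
/* A minimal sketch (hypothetical helper, not part of this file) of how
   the SET_SIGS/UNSET_SIGS macros above are meant to be applied to the
   per-signal tables: SIGS is a caller-built array flagging the signal
   numbers a command named, and the flags argument selects which table
   to update.  */
#if 0
static void
example_mark_signals_for_stop (const unsigned char *sigs)
{
  /* Flag every named signal in both the "stop" and "print" tables;
     a signal that stops the program should also be announced.  */
  SET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_stop);
  SET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_print);
}
#endif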
319
320 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
321 this function is to avoid exporting `signal_program'. */
322
323 void
324 update_signals_program_target (void)
325 {
326 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
327 }
328
329 /* Value to pass to target_resume() to cause all threads to resume. */
330
331 #define RESUME_ALL minus_one_ptid
332
333 /* Command list pointer for the "stop" placeholder. */
334
335 static struct cmd_list_element *stop_command;
336
337 /* Function inferior was in as of last step command. */
338
339 static struct symbol *step_start_function;
340
341 /* Nonzero if we want to give control to the user when we're notified
342 of shared library events by the dynamic linker. */
343 int stop_on_solib_events;
344
345 /* Enable or disable optional shared library event breakpoints
346 as appropriate when the above flag is changed. */
347
348 static void
349 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
350 {
351 update_solib_breakpoints ();
352 }
353
354 static void
355 show_stop_on_solib_events (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
359 value);
360 }
361
362 /* Nonzero means we are expecting a trace trap,
363 and should stop the inferior and return silently when it happens. */
364
365 int stop_after_trap;
366
367 /* Save register contents here when executing a "finish" command or when we
368 are about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
369 Thus this contains the return value from the called function (assuming
370 values are returned in a register). */
371
372 struct regcache *stop_registers;
373
374 /* Nonzero after stop if current stack frame should be printed. */
375
376 static int stop_print_frame;
377
378 /* This is a cached copy of the pid/waitstatus of the last event
379 returned by target_wait()/deprecated_target_wait_hook(). This
380 information is returned by get_last_target_status(). */
381 static ptid_t target_last_wait_ptid;
382 static struct target_waitstatus target_last_waitstatus;
383
384 static void context_switch (ptid_t ptid);
385
386 void init_thread_stepping_state (struct thread_info *tss);
387
388 static void init_infwait_state (void);
389
390 static const char follow_fork_mode_child[] = "child";
391 static const char follow_fork_mode_parent[] = "parent";
392
393 static const char *const follow_fork_mode_kind_names[] = {
394 follow_fork_mode_child,
395 follow_fork_mode_parent,
396 NULL
397 };
398
399 static const char *follow_fork_mode_string = follow_fork_mode_parent;
400 static void
401 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
402 struct cmd_list_element *c, const char *value)
403 {
404 fprintf_filtered (file,
405 _("Debugger response to a program "
406 "call of fork or vfork is \"%s\".\n"),
407 value);
408 }
409 \f
410
411 /* Tell the target to follow the fork we're stopped at. Returns true
412 if the inferior should be resumed; false, if the target for some
413 reason decided it's best not to resume. */
414
415 static int
416 follow_fork (void)
417 {
418 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
419 int should_resume = 1;
420 struct thread_info *tp;
421
422 /* Copy user stepping state to the new inferior thread. FIXME: the
423 followed fork child thread should have a copy of most of the
424 parent thread structure's run control related fields, not just these.
425 Initialized to avoid "may be used uninitialized" warnings from gcc. */
426 struct breakpoint *step_resume_breakpoint = NULL;
427 struct breakpoint *exception_resume_breakpoint = NULL;
428 CORE_ADDR step_range_start = 0;
429 CORE_ADDR step_range_end = 0;
430 struct frame_id step_frame_id = { 0 };
431
432 if (!non_stop)
433 {
434 ptid_t wait_ptid;
435 struct target_waitstatus wait_status;
436
437 /* Get the last target status returned by target_wait(). */
438 get_last_target_status (&wait_ptid, &wait_status);
439
440 /* If not stopped at a fork event, then there's nothing else to
441 do. */
442 if (wait_status.kind != TARGET_WAITKIND_FORKED
443 && wait_status.kind != TARGET_WAITKIND_VFORKED)
444 return 1;
445
446 /* Check if we switched over from WAIT_PTID, since the event was
447 reported. */
448 if (!ptid_equal (wait_ptid, minus_one_ptid)
449 && !ptid_equal (inferior_ptid, wait_ptid))
450 {
451 /* We did. Switch back to WAIT_PTID thread, to tell the
452 target to follow it (in either direction). We'll
453 afterwards refuse to resume, and inform the user what
454 happened. */
455 switch_to_thread (wait_ptid);
456 should_resume = 0;
457 }
458 }
459
460 tp = inferior_thread ();
461
462 /* If there were any forks/vforks that were caught and are now to be
463 followed, then do so now. */
464 switch (tp->pending_follow.kind)
465 {
466 case TARGET_WAITKIND_FORKED:
467 case TARGET_WAITKIND_VFORKED:
468 {
469 ptid_t parent, child;
470
471 /* If the user did a next/step, etc, over a fork call,
472 preserve the stepping state in the fork child. */
473 if (follow_child && should_resume)
474 {
475 step_resume_breakpoint = clone_momentary_breakpoint
476 (tp->control.step_resume_breakpoint);
477 step_range_start = tp->control.step_range_start;
478 step_range_end = tp->control.step_range_end;
479 step_frame_id = tp->control.step_frame_id;
480 exception_resume_breakpoint
481 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
482
483 /* For now, delete the parent's sr breakpoint, otherwise,
484 parent/child sr breakpoints are considered duplicates,
485 and the child version will not be installed. Remove
486 this when the breakpoints module becomes aware of
487 inferiors and address spaces. */
488 delete_step_resume_breakpoint (tp);
489 tp->control.step_range_start = 0;
490 tp->control.step_range_end = 0;
491 tp->control.step_frame_id = null_frame_id;
492 delete_exception_resume_breakpoint (tp);
493 }
494
495 parent = inferior_ptid;
496 child = tp->pending_follow.value.related_pid;
497
498 /* Tell the target to do whatever is necessary to follow
499 either parent or child. */
500 if (target_follow_fork (follow_child, detach_fork))
501 {
502 /* Target refused to follow, or there's some other reason
503 we shouldn't resume. */
504 should_resume = 0;
505 }
506 else
507 {
508 /* This pending follow fork event is now handled, one way
509 or another. The previously selected thread may be gone
510 from the lists by now, but if it is still around, we need
511 to clear the pending follow request. */
512 tp = find_thread_ptid (parent);
513 if (tp)
514 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
515
516 /* This makes sure we don't try to apply the "Switched
517 over from WAIT_PID" logic above. */
518 nullify_last_target_wait_ptid ();
519
520 /* If we followed the child, switch to it... */
521 if (follow_child)
522 {
523 switch_to_thread (child);
524
525 /* ... and preserve the stepping state, in case the
526 user was stepping over the fork call. */
527 if (should_resume)
528 {
529 tp = inferior_thread ();
530 tp->control.step_resume_breakpoint
531 = step_resume_breakpoint;
532 tp->control.step_range_start = step_range_start;
533 tp->control.step_range_end = step_range_end;
534 tp->control.step_frame_id = step_frame_id;
535 tp->control.exception_resume_breakpoint
536 = exception_resume_breakpoint;
537 }
538 else
539 {
540 /* If we get here, it was because we're trying to
541 resume from a fork catchpoint, but, the user
542 has switched threads away from the thread that
543 forked. In that case, the resume command
544 issued is most likely not applicable to the
545 child, so just warn, and refuse to resume. */
546 warning (_("Not resuming: switched threads "
547 "before following fork child.\n"));
548 }
549
550 /* Reset breakpoints in the child as appropriate. */
551 follow_inferior_reset_breakpoints ();
552 }
553 else
554 switch_to_thread (parent);
555 }
556 }
557 break;
558 case TARGET_WAITKIND_SPURIOUS:
559 /* Nothing to follow. */
560 break;
561 default:
562 internal_error (__FILE__, __LINE__,
563 "Unexpected pending_follow.kind %d\n",
564 tp->pending_follow.kind);
565 break;
566 }
567
568 return should_resume;
569 }
570
571 void
572 follow_inferior_reset_breakpoints (void)
573 {
574 struct thread_info *tp = inferior_thread ();
575
576 /* Was there a step_resume breakpoint? (There was if the user
577 did a "next" at the fork() call.) If so, explicitly reset its
578 thread number.
579
580 step_resumes are a form of bp that are made to be per-thread.
581 Since we created the step_resume bp when the parent process
582 was being debugged, and now are switching to the child process,
583 from the breakpoint package's viewpoint, that's a switch of
584 "threads". We must update the bp's notion of which thread
585 it is for, or it'll be ignored when it triggers. */
586
587 if (tp->control.step_resume_breakpoint)
588 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
589
590 if (tp->control.exception_resume_breakpoint)
591 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
592
593 /* Reinsert all breakpoints in the child. The user may have set
594 breakpoints after catching the fork, in which case those
595 were never set in the child, but only in the parent. This makes
596 sure the inserted breakpoints match the breakpoint list. */
597
598 breakpoint_re_set ();
599 insert_breakpoints ();
600 }
601
602 /* The child has exited or execed: resume those threads of the parent
603 that the user wanted to be executing. */
604
605 static int
606 proceed_after_vfork_done (struct thread_info *thread,
607 void *arg)
608 {
609 int pid = * (int *) arg;
610
611 if (ptid_get_pid (thread->ptid) == pid
612 && is_running (thread->ptid)
613 && !is_executing (thread->ptid)
614 && !thread->stop_requested
615 && thread->suspend.stop_signal == GDB_SIGNAL_0)
616 {
617 if (debug_infrun)
618 fprintf_unfiltered (gdb_stdlog,
619 "infrun: resuming vfork parent thread %s\n",
620 target_pid_to_str (thread->ptid));
621
622 switch_to_thread (thread->ptid);
623 clear_proceed_status ();
624 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
625 }
626
627 return 0;
628 }
629
630 /* Called whenever we notice an exec or exit event, to handle
631 detaching or resuming a vfork parent. */
632
633 static void
634 handle_vfork_child_exec_or_exit (int exec)
635 {
636 struct inferior *inf = current_inferior ();
637
638 if (inf->vfork_parent)
639 {
640 int resume_parent = -1;
641
642 /* This exec or exit marks the end of the shared memory region
643 between the parent and the child. If the user wanted to
644 detach from the parent, now is the time. */
645
646 if (inf->vfork_parent->pending_detach)
647 {
648 struct thread_info *tp;
649 struct cleanup *old_chain;
650 struct program_space *pspace;
651 struct address_space *aspace;
652
653 /* follow-fork child, detach-on-fork on. */
654
655 inf->vfork_parent->pending_detach = 0;
656
657 if (!exec)
658 {
659 /* If we're handling a child exit, then inferior_ptid
660 points at the inferior's pid, not to a thread. */
661 old_chain = save_inferior_ptid ();
662 save_current_program_space ();
663 save_current_inferior ();
664 }
665 else
666 old_chain = save_current_space_and_thread ();
667
668 /* We're letting loose of the parent. */
669 tp = any_live_thread_of_process (inf->vfork_parent->pid);
670 switch_to_thread (tp->ptid);
671
672 /* We're about to detach from the parent, which implicitly
673 removes breakpoints from its address space. There's a
674 catch here: we want to reuse the spaces for the child,
675 but, parent/child are still sharing the pspace at this
676 point, although the exec in reality makes the kernel give
677 the child a fresh set of new pages. The problem here is
678 that the breakpoints module, being unaware of this, would
679 likely choose the child process to write to the parent
680 address space. Swapping the child temporarily away from
681 the spaces has the desired effect. Yes, this is "sort
682 of" a hack. */
683
684 pspace = inf->pspace;
685 aspace = inf->aspace;
686 inf->aspace = NULL;
687 inf->pspace = NULL;
688
689 if (debug_infrun || info_verbose)
690 {
691 target_terminal_ours ();
692
693 if (exec)
694 fprintf_filtered (gdb_stdlog,
695 "Detaching vfork parent process "
696 "%d after child exec.\n",
697 inf->vfork_parent->pid);
698 else
699 fprintf_filtered (gdb_stdlog,
700 "Detaching vfork parent process "
701 "%d after child exit.\n",
702 inf->vfork_parent->pid);
703 }
704
705 target_detach (NULL, 0);
706
707 /* Put it back. */
708 inf->pspace = pspace;
709 inf->aspace = aspace;
710
711 do_cleanups (old_chain);
712 }
713 else if (exec)
714 {
715 /* We're staying attached to the parent, so, really give the
716 child a new address space. */
717 inf->pspace = add_program_space (maybe_new_address_space ());
718 inf->aspace = inf->pspace->aspace;
719 inf->removable = 1;
720 set_current_program_space (inf->pspace);
721
722 resume_parent = inf->vfork_parent->pid;
723
724 /* Break the bonds. */
725 inf->vfork_parent->vfork_child = NULL;
726 }
727 else
728 {
729 struct cleanup *old_chain;
730 struct program_space *pspace;
731
732 /* If this is a vfork child exiting, then the pspace and
733 aspaces were shared with the parent. Since we're
734 reporting the process exit, we'll be mourning all that is
735 found in the address space, and switching to null_ptid,
736 preparing to start a new inferior. But, since we don't
737 want to clobber the parent's address/program spaces, we
738 go ahead and create a new one for this exiting
739 inferior. */
740
741 /* Switch to null_ptid, so that clone_program_space doesn't want
742 to read the selected frame of a dead process. */
743 old_chain = save_inferior_ptid ();
744 inferior_ptid = null_ptid;
745
746 /* This inferior is dead, so avoid giving the breakpoints
747 module the option to write through to it (cloning a
748 program space resets breakpoints). */
749 inf->aspace = NULL;
750 inf->pspace = NULL;
751 pspace = add_program_space (maybe_new_address_space ());
752 set_current_program_space (pspace);
753 inf->removable = 1;
754 inf->symfile_flags = SYMFILE_NO_READ;
755 clone_program_space (pspace, inf->vfork_parent->pspace);
756 inf->pspace = pspace;
757 inf->aspace = pspace->aspace;
758
759 /* Put back inferior_ptid. We'll continue mourning this
760 inferior. */
761 do_cleanups (old_chain);
762
763 resume_parent = inf->vfork_parent->pid;
764 /* Break the bonds. */
765 inf->vfork_parent->vfork_child = NULL;
766 }
767
768 inf->vfork_parent = NULL;
769
770 gdb_assert (current_program_space == inf->pspace);
771
772 if (non_stop && resume_parent != -1)
773 {
774 /* If the user wanted the parent to be running, let it go
775 free now. */
776 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
777
778 if (debug_infrun)
779 fprintf_unfiltered (gdb_stdlog,
780 "infrun: resuming vfork parent process %d\n",
781 resume_parent);
782
783 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
784
785 do_cleanups (old_chain);
786 }
787 }
788 }
789
790 /* Enum strings for "set|show follow-exec-mode". */
791
792 static const char follow_exec_mode_new[] = "new";
793 static const char follow_exec_mode_same[] = "same";
794 static const char *const follow_exec_mode_names[] =
795 {
796 follow_exec_mode_new,
797 follow_exec_mode_same,
798 NULL,
799 };
800
801 static const char *follow_exec_mode_string = follow_exec_mode_same;
802 static void
803 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
804 struct cmd_list_element *c, const char *value)
805 {
806 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
807 }
808
809 /* EXECD_PATHNAME is assumed to be non-NULL. */
810
811 static void
812 follow_exec (ptid_t pid, char *execd_pathname)
813 {
814 struct thread_info *th = inferior_thread ();
815 struct inferior *inf = current_inferior ();
816
817 /* This is an exec event that we actually wish to pay attention to.
818 Refresh our symbol table to the newly exec'd program, remove any
819 momentary bp's, etc.
820
821 If there are breakpoints, they aren't really inserted now,
822 since the exec() transformed our inferior into a fresh set
823 of instructions.
824
825 We want to preserve symbolic breakpoints on the list, since
826 we have hopes that they can be reset after the new a.out's
827 symbol table is read.
828
829 However, any "raw" breakpoints must be removed from the list
830 (e.g., the solib bp's), since their address is probably invalid
831 now.
832
833 And, we DON'T want to call delete_breakpoints() here, since
834 that may write the bp's "shadow contents" (the instruction
835 value that was overwritten with a TRAP instruction). Since
836 we now have a new a.out, those shadow contents aren't valid. */
837
838 mark_breakpoints_out ();
839
840 update_breakpoints_after_exec ();
841
842 /* If there was one, it's gone now. We cannot truly step-to-next
843 statement through an exec(). */
844 th->control.step_resume_breakpoint = NULL;
845 th->control.exception_resume_breakpoint = NULL;
846 th->control.step_range_start = 0;
847 th->control.step_range_end = 0;
848
849 /* The target reports the exec event to the main thread, even if
850 some other thread does the exec, and even if the main thread was
851 already stopped --- if debugging in non-stop mode, it's possible
852 the user had the main thread held stopped in the previous image
853 --- release it now. This is the same behavior as step-over-exec
854 with scheduler-locking on in all-stop mode. */
855 th->stop_requested = 0;
856
857 /* What is this a.out's name? */
858 printf_unfiltered (_("%s is executing new program: %s\n"),
859 target_pid_to_str (inferior_ptid),
860 execd_pathname);
861
862 /* We've followed the inferior through an exec. Therefore, the
863 inferior has essentially been killed & reborn. */
864
865 gdb_flush (gdb_stdout);
866
867 breakpoint_init_inferior (inf_execd);
868
869 if (gdb_sysroot && *gdb_sysroot)
870 {
871 char *name = alloca (strlen (gdb_sysroot)
872 + strlen (execd_pathname)
873 + 1);
874
875 strcpy (name, gdb_sysroot);
876 strcat (name, execd_pathname);
877 execd_pathname = name;
878 }
879
880 /* Reset the shared library package. This ensures that we get a
881 shlib event when the child reaches "_start", at which point the
882 dld will have had a chance to initialize the child. */
883 /* Also, loading a symbol file below may trigger symbol lookups, and
884 we don't want those to be satisfied by the libraries of the
885 previous incarnation of this process. */
886 no_shared_libraries (NULL, 0);
887
888 if (follow_exec_mode_string == follow_exec_mode_new)
889 {
890 struct program_space *pspace;
891
892 /* The user wants to keep the old inferior and program spaces
893 around. Create a new fresh one, and switch to it. */
894
895 inf = add_inferior (current_inferior ()->pid);
896 pspace = add_program_space (maybe_new_address_space ());
897 inf->pspace = pspace;
898 inf->aspace = pspace->aspace;
899
900 exit_inferior_num_silent (current_inferior ()->num);
901
902 set_current_inferior (inf);
903 set_current_program_space (pspace);
904 }
905 else
906 {
907 /* The old description may no longer be fit for the new image.
908 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
909 old description; we'll read a new one below. No need to do
910 this on "follow-exec-mode new", as the old inferior stays
911 around (its description is later cleared/refetched on
912 restart). */
913 target_clear_description ();
914 }
915
916 gdb_assert (current_program_space == inf->pspace);
917
918 /* That a.out is now the one to use. */
919 exec_file_attach (execd_pathname, 0);
920
921 /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a PIE
922 (Position Independent Executable) main symbol file will only get applied by
923 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
924 the breakpoints with the zero displacement. */
925
926 symbol_file_add (execd_pathname,
927 (inf->symfile_flags
928 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
929 NULL, 0);
930
931 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
932 set_initial_language ();
933
934 /* If the target can specify a description, read it. Must do this
935 after flipping to the new executable (because the target supplied
936 description must be compatible with the executable's
937 architecture, and the old executable may e.g., be 32-bit, while
938 the new one 64-bit), and before anything involving memory or
939 registers. */
940 target_find_description ();
941
942 solib_create_inferior_hook (0);
943
944 jit_inferior_created_hook ();
945
946 breakpoint_re_set ();
947
948 /* Reinsert all breakpoints. (Those which were symbolic have
949 been reset to the proper address in the new a.out, thanks
950 to symbol_file_command...). */
951 insert_breakpoints ();
952
953 /* The next resume of this inferior should bring it to the shlib
954 startup breakpoints. (If the user had also set bp's on
955 "main" from the old (parent) process, then they'll auto-
956 matically get reset there in the new process.). */
957 }
958
959 /* Non-zero if we are just simulating a single-step. This is needed
960 because we cannot remove the breakpoints in the inferior process
961 until after the `wait' in `wait_for_inferior'. */
962 static int singlestep_breakpoints_inserted_p = 0;
963
964 /* The thread we inserted single-step breakpoints for. */
965 static ptid_t singlestep_ptid;
966
967 /* PC when we started this single-step. */
968 static CORE_ADDR singlestep_pc;
969
970 /* Info about an instruction that is being stepped over. Invalid if
971 ASPACE is NULL. */
972
973 struct step_over_info
974 {
975 /* The instruction's address space. */
976 struct address_space *aspace;
977
978 /* The instruction's address. */
979 CORE_ADDR address;
980 };
981
982 /* The step-over info of the location that is being stepped over.
983
984 Note that with async/breakpoint always-inserted mode, a user might
985 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
986 being stepped over. As setting a new breakpoint inserts all
987 breakpoints, we need to make sure the breakpoint being stepped over
988 isn't inserted then. We do that by only clearing the step-over
989 info when the step-over is actually finished (or aborted).
990
991 Presently GDB can only step over one breakpoint at any given time.
992 Given that threads which can't run code in the same address space as the
993 breakpoint's can't really miss the breakpoint, GDB could be taught
994 to step-over at most one breakpoint per address space (so this info
995 could move to the address space object if/when GDB is extended).
996 The set of breakpoints being stepped over will normally be much
997 smaller than the set of all breakpoints, so a flag in the
998 breakpoint location structure would be wasteful. A separate list
999 also saves complexity and run-time, as otherwise we'd have to go
1000 through all breakpoint locations clearing their flag whenever we
1001 start a new sequence. Similar considerations weigh against storing
1002 this info in the thread object. Plus, not all step overs actually
1003 have breakpoint locations -- e.g., stepping past a single-step
1004 breakpoint, or stepping to complete a non-continuable
1005 watchpoint. */
1006 static struct step_over_info step_over_info;
1007
1008 /* Record the address of the breakpoint/instruction we're currently
1009 stepping over. */
1010
1011 static void
1012 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1013 {
1014 step_over_info.aspace = aspace;
1015 step_over_info.address = address;
1016 }
1017
1018 /* Called when we're no longer stepping over a breakpoint / an
1019 instruction, so all breakpoints are free to be (re)inserted. */
1020
1021 static void
1022 clear_step_over_info (void)
1023 {
1024 step_over_info.aspace = NULL;
1025 step_over_info.address = 0;
1026 }
1027
1028 /* See inferior.h. */
1029
1030 int
1031 stepping_past_instruction_at (struct address_space *aspace,
1032 CORE_ADDR address)
1033 {
1034 return (step_over_info.aspace != NULL
1035 && breakpoint_address_match (aspace, address,
1036 step_over_info.aspace,
1037 step_over_info.address));
1038 }
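
/* A minimal sketch (hypothetical names, not part of this file) of the
   intended consumer of the step-over bookkeeping above: code that
   inserts breakpoint locations can query stepping_past_instruction_at
   and skip the one location currently being stepped over, so that
   always-inserted breakpoints are not re-inserted mid-step.  */
#if 0
static int
example_should_insert_location (struct address_space *loc_aspace,
                                CORE_ADDR loc_address)
{
  /* Everything else is fair game; only the stepped-over address is
     held back until clear_step_over_info is called.  */
  return !stepping_past_instruction_at (loc_aspace, loc_address);
}
#endif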
1039
1040 \f
1041 /* Displaced stepping. */
1042
1043 /* In non-stop debugging mode, we must take special care to manage
1044 breakpoints properly; in particular, the traditional strategy for
1045 stepping a thread past a breakpoint it has hit is unsuitable.
1046 'Displaced stepping' is a tactic for stepping one thread past a
1047 breakpoint it has hit while ensuring that other threads running
1048 concurrently will hit the breakpoint as they should.
1049
1050 The traditional way to step a thread T off a breakpoint in a
1051 multi-threaded program in all-stop mode is as follows:
1052
1053 a0) Initially, all threads are stopped, and breakpoints are not
1054 inserted.
1055 a1) We single-step T, leaving breakpoints uninserted.
1056 a2) We insert breakpoints, and resume all threads.
1057
1058 In non-stop debugging, however, this strategy is unsuitable: we
1059 don't want to have to stop all threads in the system in order to
1060 continue or step T past a breakpoint. Instead, we use displaced
1061 stepping:
1062
1063 n0) Initially, T is stopped, other threads are running, and
1064 breakpoints are inserted.
1065 n1) We copy the instruction "under" the breakpoint to a separate
1066 location, outside the main code stream, making any adjustments
1067 to the instruction, register, and memory state as directed by
1068 T's architecture.
1069 n2) We single-step T over the instruction at its new location.
1070 n3) We adjust the resulting register and memory state as directed
1071 by T's architecture. This includes resetting T's PC to point
1072 back into the main instruction stream.
1073 n4) We resume T.
1074
1075 This approach depends on the following gdbarch methods:
1076
1077 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1078 indicate where to copy the instruction, and how much space must
1079 be reserved there. We use these in step n1.
1080
1081 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1082 address, and makes any necessary adjustments to the instruction,
1083 register contents, and memory. We use this in step n1.
1084
1085 - gdbarch_displaced_step_fixup adjusts registers and memory after
1086 we have successfully single-stepped the instruction, to yield the
1087 same effect the instruction would have had if we had executed it
1088 at its original address. We use this in step n3.
1089
1090 - gdbarch_displaced_step_free_closure provides cleanup.
1091
1092 The gdbarch_displaced_step_copy_insn and
1093 gdbarch_displaced_step_fixup functions must be written so that
1094 copying an instruction with gdbarch_displaced_step_copy_insn,
1095 single-stepping across the copied instruction, and then applying
1096 gdbarch_displaced_step_fixup should have the same effects on the
1097 thread's memory and registers as stepping the instruction in place
1098 would have. Exactly which responsibilities fall to the copy and
1099 which fall to the fixup is up to the author of those functions.
1100
1101 See the comments in gdbarch.sh for details.
1102
1103 Note that displaced stepping and software single-step cannot
1104 currently be used in combination, although with some care I think
1105 they could be made to. Software single-step works by placing
1106 breakpoints on all possible subsequent instructions; if the
1107 displaced instruction is a PC-relative jump, those breakpoints
1108 could fall in very strange places --- on pages that aren't
1109 executable, or at addresses that are not proper instruction
1110 boundaries. (We do generally let other threads run while we wait
1111 to hit the software single-step breakpoint, and they might
1112 encounter such a corrupted instruction.) One way to work around
1113 this would be to have gdbarch_displaced_step_copy_insn fully
1114 simulate the effect of PC-relative instructions (and return NULL)
1115 on architectures that use software single-stepping.
1116
1117 In non-stop mode, we can have independent and simultaneous step
1118 requests, so more than one thread may need to simultaneously step
1119 over a breakpoint. The current implementation assumes there is
1120 only one scratch space per process. In this case, we have to
1121 serialize access to the scratch space. If thread A wants to step
1122 over a breakpoint, but we are currently waiting for some other
1123 thread to complete a displaced step, we leave thread A stopped and
1124 place it in the displaced_step_request_queue. Whenever a displaced
1125 step finishes, we pick the next thread in the queue and start a new
1126 displaced step operation on it. See displaced_step_prepare and
1127 displaced_step_fixup for details. */
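
/* A condensed sketch (illustrative only; the real sequence lives in
   displaced_step_prepare and displaced_step_fixup below) of steps
   n1..n4 above, expressed in terms of the gdbarch hooks just named.
   Error handling, queueing, and saving/restoring the scratch buffer
   are omitted, and the function name is hypothetical.  */
#if 0
static void
example_displaced_step_sequence (struct gdbarch *gdbarch,
                                 struct regcache *regcache, ptid_t ptid)
{
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  struct displaced_step_closure *closure;

  /* n1: copy the instruction to the scratch location, letting the
     architecture adjust it as needed.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  target_resume (ptid, 1, GDB_SIGNAL_0);
  /* ... the caller then waits for the step to report back ...  */

  /* n3: fix up registers/memory as if the insn had run in place.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);

  /* n4: the thread is subsequently resumed at its fixed-up PC.  */
}
#endif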
1128
1129 struct displaced_step_request
1130 {
1131 ptid_t ptid;
1132 struct displaced_step_request *next;
1133 };
1134
1135 /* Per-inferior displaced stepping state. */
1136 struct displaced_step_inferior_state
1137 {
1138 /* Pointer to next in linked list. */
1139 struct displaced_step_inferior_state *next;
1140
1141 /* The process this displaced step state refers to. */
1142 int pid;
1143
1144 /* A queue of pending displaced stepping requests. One entry per
1145 thread that needs to do a displaced step. */
1146 struct displaced_step_request *step_request_queue;
1147
1148 /* If this is not null_ptid, this is the thread carrying out a
1149 displaced single-step in process PID. This thread's state will
1150 require fixing up once it has completed its step. */
1151 ptid_t step_ptid;
1152
1153 /* The architecture the thread had when we stepped it. */
1154 struct gdbarch *step_gdbarch;
1155
1156 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1157 for post-step cleanup. */
1158 struct displaced_step_closure *step_closure;
1159
1160 /* The address of the original instruction, and the copy we
1161 made. */
1162 CORE_ADDR step_original, step_copy;
1163
1164 /* Saved contents of copy area. */
1165 gdb_byte *step_saved_copy;
1166 };
1167
1168 /* The list of states of processes involved in displaced stepping
1169 presently. */
1170 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1171
1172 /* Get the displaced stepping state of process PID. */
1173
1174 static struct displaced_step_inferior_state *
1175 get_displaced_stepping_state (int pid)
1176 {
1177 struct displaced_step_inferior_state *state;
1178
1179 for (state = displaced_step_inferior_states;
1180 state != NULL;
1181 state = state->next)
1182 if (state->pid == pid)
1183 return state;
1184
1185 return NULL;
1186 }
1187
1188 /* Add a new displaced stepping state for process PID to the displaced
1189 stepping state list, or return a pointer to the existing entry if
1190 one already exists. Never returns NULL. */
1191
1192 static struct displaced_step_inferior_state *
1193 add_displaced_stepping_state (int pid)
1194 {
1195 struct displaced_step_inferior_state *state;
1196
1197 for (state = displaced_step_inferior_states;
1198 state != NULL;
1199 state = state->next)
1200 if (state->pid == pid)
1201 return state;
1202
1203 state = xcalloc (1, sizeof (*state));
1204 state->pid = pid;
1205 state->next = displaced_step_inferior_states;
1206 displaced_step_inferior_states = state;
1207
1208 return state;
1209 }
1210
1211 /* If the inferior is performing a displaced step, and ADDR equals the
1212 starting address of the copy area, return the corresponding
1213 displaced_step_closure. Otherwise, return NULL. */
1214
1215 struct displaced_step_closure*
1216 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1217 {
1218 struct displaced_step_inferior_state *displaced
1219 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1220
1221 /* Check whether ADDR is the copy area of an in-progress displaced step. */
1222 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1223 && (displaced->step_copy == addr))
1224 return displaced->step_closure;
1225
1226 return NULL;
1227 }
1228
1229 /* Remove the displaced stepping state of process PID. */
1230
1231 static void
1232 remove_displaced_stepping_state (int pid)
1233 {
1234 struct displaced_step_inferior_state *it, **prev_next_p;
1235
1236 gdb_assert (pid != 0);
1237
1238 it = displaced_step_inferior_states;
1239 prev_next_p = &displaced_step_inferior_states;
1240 while (it)
1241 {
1242 if (it->pid == pid)
1243 {
1244 *prev_next_p = it->next;
1245 xfree (it);
1246 return;
1247 }
1248
1249 prev_next_p = &it->next;
1250 it = *prev_next_p;
1251 }
1252 }
1253
1254 static void
1255 infrun_inferior_exit (struct inferior *inf)
1256 {
1257 remove_displaced_stepping_state (inf->pid);
1258 }
1259
1260 /* If ON, and the architecture supports it, GDB will use displaced
1261 stepping to step over breakpoints. If OFF, or if the architecture
1262 doesn't support it, GDB will instead use the traditional
1263 hold-and-step approach. If AUTO (which is the default), GDB will
1264 decide which technique to use to step over breakpoints depending on
1265 which of all-stop or non-stop mode is active --- displaced stepping
1266 in non-stop mode; hold-and-step in all-stop mode. */
1267
1268 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1269
1270 static void
1271 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1272 struct cmd_list_element *c,
1273 const char *value)
1274 {
1275 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1276 fprintf_filtered (file,
1277 _("Debugger's willingness to use displaced stepping "
1278 "to step over breakpoints is %s (currently %s).\n"),
1279 value, non_stop ? "on" : "off");
1280 else
1281 fprintf_filtered (file,
1282 _("Debugger's willingness to use displaced stepping "
1283 "to step over breakpoints is %s.\n"), value);
1284 }
1285
1286 /* Return non-zero if displaced stepping can/should be used to step
1287 over breakpoints. */
1288
1289 static int
1290 use_displaced_stepping (struct gdbarch *gdbarch)
1291 {
1292 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1293 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1294 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1295 && find_record_target () == NULL);
1296 }
1297
1298 /* Clean out any stray displaced stepping state. */
1299 static void
1300 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1301 {
1302 /* Indicate that there is no cleanup pending. */
1303 displaced->step_ptid = null_ptid;
1304
1305 if (displaced->step_closure)
1306 {
1307 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1308 displaced->step_closure);
1309 displaced->step_closure = NULL;
1310 }
1311 }
1312
1313 static void
1314 displaced_step_clear_cleanup (void *arg)
1315 {
1316 struct displaced_step_inferior_state *state = arg;
1317
1318 displaced_step_clear (state);
1319 }
1320
1321 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1322 void
1323 displaced_step_dump_bytes (struct ui_file *file,
1324 const gdb_byte *buf,
1325 size_t len)
1326 {
1327 int i;
1328
1329 for (i = 0; i < len; i++)
1330 fprintf_unfiltered (file, "%02x ", buf[i]);
1331 fputs_unfiltered ("\n", file);
1332 }
1333
1334 /* Prepare to single-step, using displaced stepping.
1335
1336 Note that we cannot use displaced stepping when we have a signal to
1337 deliver. If we have a signal to deliver and an instruction to step
1338 over, then after the step, there will be no indication from the
1339 target whether the thread entered a signal handler or ignored the
1340 signal and stepped over the instruction successfully --- both cases
1341 result in a simple SIGTRAP. In the first case we mustn't do a
1342 fixup, and in the second case we must --- but we can't tell which.
1343 Comments in the code for 'random signals' in handle_inferior_event
1344 explain how we handle this case instead.
1345
1346 Returns 1 if preparing was successful -- this thread is going to be
1347 stepped now; or 0 if displaced stepping this thread got queued. */
1348 static int
1349 displaced_step_prepare (ptid_t ptid)
1350 {
1351 struct cleanup *old_cleanups, *ignore_cleanups;
1352 struct thread_info *tp = find_thread_ptid (ptid);
1353 struct regcache *regcache = get_thread_regcache (ptid);
1354 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1355 CORE_ADDR original, copy;
1356 ULONGEST len;
1357 struct displaced_step_closure *closure;
1358 struct displaced_step_inferior_state *displaced;
1359 int status;
1360
1361 /* We should never reach this function if the architecture does not
1362 support displaced stepping. */
1363 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1364
1365 /* Disable range stepping while executing in the scratch pad. We
1366 want a single-step even if executing the displaced instruction in
1367 the scratch buffer lands within the stepping range (e.g., a
1368 jump/branch). */
1369 tp->control.may_range_step = 0;
1370
1371 /* We have to displaced step one thread at a time, as we only have
1372 access to a single scratch space per inferior. */
1373
1374 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1375
1376 if (!ptid_equal (displaced->step_ptid, null_ptid))
1377 {
1378 /* Already waiting for a displaced step to finish. Defer this
1379 request and place in queue. */
1380 struct displaced_step_request *req, *new_req;
1381
1382 if (debug_displaced)
1383 fprintf_unfiltered (gdb_stdlog,
1384 "displaced: defering step of %s\n",
1385 target_pid_to_str (ptid));
1386
1387 new_req = xmalloc (sizeof (*new_req));
1388 new_req->ptid = ptid;
1389 new_req->next = NULL;
1390
1391 if (displaced->step_request_queue)
1392 {
1393 for (req = displaced->step_request_queue;
1394 req && req->next;
1395 req = req->next)
1396 ;
1397 req->next = new_req;
1398 }
1399 else
1400 displaced->step_request_queue = new_req;
1401
1402 return 0;
1403 }
1404 else
1405 {
1406 if (debug_displaced)
1407 fprintf_unfiltered (gdb_stdlog,
1408 "displaced: stepping %s now\n",
1409 target_pid_to_str (ptid));
1410 }
1411
1412 displaced_step_clear (displaced);
1413
1414 old_cleanups = save_inferior_ptid ();
1415 inferior_ptid = ptid;
1416
1417 original = regcache_read_pc (regcache);
1418
1419 copy = gdbarch_displaced_step_location (gdbarch);
1420 len = gdbarch_max_insn_length (gdbarch);
1421
1422 /* Save the original contents of the copy area. */
1423 displaced->step_saved_copy = xmalloc (len);
1424 ignore_cleanups = make_cleanup (free_current_contents,
1425 &displaced->step_saved_copy);
1426 status = target_read_memory (copy, displaced->step_saved_copy, len);
1427 if (status != 0)
1428 throw_error (MEMORY_ERROR,
1429 _("Error accessing memory address %s (%s) for "
1430 "displaced-stepping scratch space."),
1431 paddress (gdbarch, copy), safe_strerror (status));
1432 if (debug_displaced)
1433 {
1434 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1435 paddress (gdbarch, copy));
1436 displaced_step_dump_bytes (gdb_stdlog,
1437 displaced->step_saved_copy,
1438 len);
1439 };
1440
1441 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1442 original, copy, regcache);
1443
1444 /* We don't support the fully-simulated case at present. */
1445 gdb_assert (closure);
1446
1447 /* Save the information we need to fix things up if the step
1448 succeeds. */
1449 displaced->step_ptid = ptid;
1450 displaced->step_gdbarch = gdbarch;
1451 displaced->step_closure = closure;
1452 displaced->step_original = original;
1453 displaced->step_copy = copy;
1454
1455 make_cleanup (displaced_step_clear_cleanup, displaced);
1456
1457 /* Resume execution at the copy. */
1458 regcache_write_pc (regcache, copy);
1459
1460 discard_cleanups (ignore_cleanups);
1461
1462 do_cleanups (old_cleanups);
1463
1464 if (debug_displaced)
1465 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1466 paddress (gdbarch, copy));
1467
1468 return 1;
1469 }
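
/* A hypothetical caller-side sketch (the real logic is in resume,
   further below) showing how the return value of displaced_step_prepare
   is meant to be consumed together with use_displaced_stepping: a zero
   return means the request was queued behind another thread's step, so
   the caller must not resume the thread yet.  The function name is
   illustrative only.  */
#if 0
static int
example_try_displaced_step (struct gdbarch *gdbarch, ptid_t ptid)
{
  if (!use_displaced_stepping (gdbarch))
    return 0;   /* Fall back to hold-and-step.  */

  if (!displaced_step_prepare (ptid))
    {
      /* Another thread owns the scratch pad; this request has been
         queued and will be started from displaced_step_fixup later.  */
      return 0;
    }

  /* The thread's PC now points at the scratch copy; single-step it.  */
  target_resume (ptid, 1, GDB_SIGNAL_0);
  return 1;
}
#endif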
1470
1471 static void
1472 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1473 const gdb_byte *myaddr, int len)
1474 {
1475 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1476
1477 inferior_ptid = ptid;
1478 write_memory (memaddr, myaddr, len);
1479 do_cleanups (ptid_cleanup);
1480 }
1481
1482 /* Restore the contents of the copy area for thread PTID. */
1483
1484 static void
1485 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1486 ptid_t ptid)
1487 {
1488 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1489
1490 write_memory_ptid (ptid, displaced->step_copy,
1491 displaced->step_saved_copy, len);
1492 if (debug_displaced)
1493 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1494 target_pid_to_str (ptid),
1495 paddress (displaced->step_gdbarch,
1496 displaced->step_copy));
1497 }
1498
1499 static void
1500 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1501 {
1502 struct cleanup *old_cleanups;
1503 struct displaced_step_inferior_state *displaced
1504 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1505
1506 /* Was any thread of this process doing a displaced step? */
1507 if (displaced == NULL)
1508 return;
1509
1510 /* Was this event for the pid we displaced? */
1511 if (ptid_equal (displaced->step_ptid, null_ptid)
1512 || ! ptid_equal (displaced->step_ptid, event_ptid))
1513 return;
1514
1515 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1516
1517 displaced_step_restore (displaced, displaced->step_ptid);
1518
1519 /* Did the instruction complete successfully? */
1520 if (signal == GDB_SIGNAL_TRAP)
1521 {
1522 /* Fix up the resulting state. */
1523 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1524 displaced->step_closure,
1525 displaced->step_original,
1526 displaced->step_copy,
1527 get_thread_regcache (displaced->step_ptid));
1528 }
1529 else
1530 {
1531 /* Since the instruction didn't complete, all we can do is
1532 relocate the PC. */
1533 struct regcache *regcache = get_thread_regcache (event_ptid);
1534 CORE_ADDR pc = regcache_read_pc (regcache);
1535
1536 pc = displaced->step_original + (pc - displaced->step_copy);
1537 regcache_write_pc (regcache, pc);
1538 }
1539
1540 do_cleanups (old_cleanups);
1541
1542 displaced->step_ptid = null_ptid;
1543
1544 /* Are there any pending displaced stepping requests? If so, run
1545 one now. Leave the state object around, since we're likely to
1546 need it again soon. */
1547 while (displaced->step_request_queue)
1548 {
1549 struct displaced_step_request *head;
1550 ptid_t ptid;
1551 struct regcache *regcache;
1552 struct gdbarch *gdbarch;
1553 CORE_ADDR actual_pc;
1554 struct address_space *aspace;
1555
1556 head = displaced->step_request_queue;
1557 ptid = head->ptid;
1558 displaced->step_request_queue = head->next;
1559 xfree (head);
1560
1561 context_switch (ptid);
1562
1563 regcache = get_thread_regcache (ptid);
1564 actual_pc = regcache_read_pc (regcache);
1565 aspace = get_regcache_aspace (regcache);
1566
1567 if (breakpoint_here_p (aspace, actual_pc))
1568 {
1569 if (debug_displaced)
1570 fprintf_unfiltered (gdb_stdlog,
1571 "displaced: stepping queued %s now\n",
1572 target_pid_to_str (ptid));
1573
1574 displaced_step_prepare (ptid);
1575
1576 gdbarch = get_regcache_arch (regcache);
1577
1578 if (debug_displaced)
1579 {
1580 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1581 gdb_byte buf[4];
1582
1583 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1584 paddress (gdbarch, actual_pc));
1585 read_memory (actual_pc, buf, sizeof (buf));
1586 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1587 }
1588
1589 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1590 displaced->step_closure))
1591 target_resume (ptid, 1, GDB_SIGNAL_0);
1592 else
1593 target_resume (ptid, 0, GDB_SIGNAL_0);
1594
1595 /* Done, we're stepping a thread. */
1596 break;
1597 }
1598 else
1599 {
1600 int step;
1601 struct thread_info *tp = inferior_thread ();
1602
1603 /* The breakpoint we were sitting under has since been
1604 removed. */
1605 tp->control.trap_expected = 0;
1606
1607 /* Go back to what we were trying to do. */
1608 step = currently_stepping (tp);
1609
1610 if (debug_displaced)
1611 fprintf_unfiltered (gdb_stdlog,
1612 "displaced: breakpoint is gone: %s, step(%d)\n",
1613 target_pid_to_str (tp->ptid), step);
1614
1615 target_resume (ptid, step, GDB_SIGNAL_0);
1616 tp->suspend.stop_signal = GDB_SIGNAL_0;
1617
1618 /* This request was discarded. See if there's any other
1619 thread waiting for its turn. */
1620 }
1621 }
1622 }
1623
1624 /* Update global variables holding ptids to hold NEW_PTID if they were
1625 holding OLD_PTID. */
1626 static void
1627 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1628 {
1629 struct displaced_step_request *it;
1630 struct displaced_step_inferior_state *displaced;
1631
1632 if (ptid_equal (inferior_ptid, old_ptid))
1633 inferior_ptid = new_ptid;
1634
1635 if (ptid_equal (singlestep_ptid, old_ptid))
1636 singlestep_ptid = new_ptid;
1637
1638 for (displaced = displaced_step_inferior_states;
1639 displaced;
1640 displaced = displaced->next)
1641 {
1642 if (ptid_equal (displaced->step_ptid, old_ptid))
1643 displaced->step_ptid = new_ptid;
1644
1645 for (it = displaced->step_request_queue; it; it = it->next)
1646 if (ptid_equal (it->ptid, old_ptid))
1647 it->ptid = new_ptid;
1648 }
1649 }
1650
1651 \f
1652 /* Resuming. */
1653
1654 /* Things to clean up if we QUIT out of resume (). */
1655 static void
1656 resume_cleanups (void *ignore)
1657 {
1658 normal_stop ();
1659 }
1660
1661 static const char schedlock_off[] = "off";
1662 static const char schedlock_on[] = "on";
1663 static const char schedlock_step[] = "step";
1664 static const char *const scheduler_enums[] = {
1665 schedlock_off,
1666 schedlock_on,
1667 schedlock_step,
1668 NULL
1669 };
1670 static const char *scheduler_mode = schedlock_off;
1671 static void
1672 show_scheduler_mode (struct ui_file *file, int from_tty,
1673 struct cmd_list_element *c, const char *value)
1674 {
1675 fprintf_filtered (file,
1676 _("Mode for locking scheduler "
1677 "during execution is \"%s\".\n"),
1678 value);
1679 }
1680
1681 static void
1682 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1683 {
1684 if (!target_can_lock_scheduler)
1685 {
1686 scheduler_mode = schedlock_off;
1687 error (_("Target '%s' cannot support this command."), target_shortname);
1688 }
1689 }
1690
1691 /* True if execution commands resume all threads of all processes by
1692 default; otherwise, resume only threads of the current inferior
1693 process. */
1694 int sched_multi = 0;
1695
1696 /* Try to set up software single stepping over the specified location.
1697 Return 1 if target_resume() should use hardware single step.
1698
1699 GDBARCH the current gdbarch.
1700 PC the location to step over. */
1701
1702 static int
1703 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1704 {
1705 int hw_step = 1;
1706
1707 if (execution_direction == EXEC_FORWARD
1708 && gdbarch_software_single_step_p (gdbarch)
1709 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1710 {
1711 hw_step = 0;
1712 /* Do not pull these breakpoints until after a `wait' in
1713 `wait_for_inferior'. */
1714 singlestep_breakpoints_inserted_p = 1;
1715 singlestep_ptid = inferior_ptid;
1716 singlestep_pc = pc;
1717 }
1718 return hw_step;
1719 }
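
/* Note on the mechanism above: on targets with no hardware single-step
   facility, the gdbarch_software_single_step hook plants temporary
   breakpoints at the possible next PCs and the thread is then resumed
   normally (hw_step == 0).  The singlestep_* globals record that those
   breakpoints are in place so that the wait code can recognize the
   resulting SIGTRAP and remove them again.  */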
1720
1721 /* Return a ptid representing the set of threads that we will proceed,
1722 from the perspective of the user/frontend. We may actually resume
1723 fewer threads at first, e.g., if a thread is stopped at a
1724 breakpoint that needs stepping-off, but that should not be visible
1725 to the user/frontend, and neither should the frontend/user be
1726 allowed to proceed any of the threads that happen to be stopped for
1727 internal run control handling, if a previous command wanted them
1728 resumed. */
1729
1730 ptid_t
1731 user_visible_resume_ptid (int step)
1732 {
1733 /* By default, resume all threads of all processes. */
1734 ptid_t resume_ptid = RESUME_ALL;
1735
1736 /* Maybe resume only all threads of the current process. */
1737 if (!sched_multi && target_supports_multi_process ())
1738 {
1739 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1740 }
1741
1742 /* Maybe resume a single thread after all. */
1743 if (non_stop)
1744 {
1745 /* With non-stop mode on, threads are always handled
1746 individually. */
1747 resume_ptid = inferior_ptid;
1748 }
1749 else if ((scheduler_mode == schedlock_on)
1750 || (scheduler_mode == schedlock_step
1751 && (step || singlestep_breakpoints_inserted_p)))
1752 {
1753 /* User-settable 'scheduler' mode requires solo thread resume. */
1754 resume_ptid = inferior_ptid;
1755 }
1756
1757 return resume_ptid;
1758 }
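
/* For example (a sketch, assuming a multi-process-capable target and
   thread 2 of process 100 being current):

     - defaults                     -> RESUME_ALL, i.e. everything
     - "set schedule-multiple off"  -> all threads of process 100 only
     - non-stop mode on             -> just the current thread
     - "set scheduler-locking on"   -> just the current thread
     - "set scheduler-locking step" -> just the current thread, but only
                                       when STEP is set (or software
                                       single-step breakpoints are in).  */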
1759
1760 /* Resume the inferior, but allow a QUIT. This is useful if the user
1761 wants to interrupt some lengthy single-stepping operation
1762 (for child processes, the SIGINT goes to the inferior, and so
1763 we get a SIGINT random_signal, but for remote debugging and perhaps
1764 other targets, that's not true).
1765
1766 STEP nonzero if we should step (zero to continue instead).
1767 SIG is the signal to give the inferior (zero for none). */
1768 void
1769 resume (int step, enum gdb_signal sig)
1770 {
1771 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1772 struct regcache *regcache = get_current_regcache ();
1773 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1774 struct thread_info *tp = inferior_thread ();
1775 CORE_ADDR pc = regcache_read_pc (regcache);
1776 struct address_space *aspace = get_regcache_aspace (regcache);
1777 ptid_t resume_ptid;
1778
1779 QUIT;
1780
1781 if (current_inferior ()->waiting_for_vfork_done)
1782 {
1783 /* Don't try to single-step a vfork parent that is waiting for
1784 the child to get out of the shared memory region (by exec'ing
1785 or exiting). This is particularly important on software
1786 single-step archs, as the child process would trip on the
1787 software single step breakpoint inserted for the parent
1788 process. Since the parent will not actually execute any
1789 instruction until the child is out of the shared region (such
1790 are vfork's semantics), it is safe to simply continue it.
1791 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1792 the parent, and tell it to `keep_going', which automatically
1793 re-sets its stepping. */
1794 if (debug_infrun)
1795 fprintf_unfiltered (gdb_stdlog,
1796 "infrun: resume : clear step\n");
1797 step = 0;
1798 }
1799
1800 if (debug_infrun)
1801 fprintf_unfiltered (gdb_stdlog,
1802 "infrun: resume (step=%d, signal=%s), "
1803 "trap_expected=%d, current thread [%s] at %s\n",
1804 step, gdb_signal_to_symbol_string (sig),
1805 tp->control.trap_expected,
1806 target_pid_to_str (inferior_ptid),
1807 paddress (gdbarch, pc));
1808
1809 /* Normally, by the time we reach `resume', the breakpoints are either
1810 removed or inserted, as appropriate. The exception is if we're sitting
1811 at a permanent breakpoint; we need to step over it, but permanent
1812 breakpoints can't be removed. So we have to test for it here. */
1813 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1814 {
1815 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1816 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1817 else
1818 error (_("\
1819 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1820 how to step past a permanent breakpoint on this architecture. Try using\n\
1821 a command like `return' or `jump' to continue execution."));
1822 }
1823
1824 /* If we have a breakpoint to step over, make sure to do a single
1825 step only. Same if we have software watchpoints. */
1826 if (tp->control.trap_expected || bpstat_should_step ())
1827 tp->control.may_range_step = 0;
1828
1829 /* If enabled, step over breakpoints by executing a copy of the
1830 instruction at a different address.
1831
1832 We can't use displaced stepping when we have a signal to deliver;
1833 the comments for displaced_step_prepare explain why. The
1834 comments in handle_inferior_event for dealing with 'random
1835 signals' explain what we do instead.
1836
1837 We can't use displaced stepping while we are waiting for a vfork_done
1838 event either; displaced stepping would break the vfork child in the same
1839 way a software single-step breakpoint would. */
1840 if (use_displaced_stepping (gdbarch)
1841 && (tp->control.trap_expected
1842 || (step && gdbarch_software_single_step_p (gdbarch)))
1843 && sig == GDB_SIGNAL_0
1844 && !current_inferior ()->waiting_for_vfork_done)
1845 {
1846 struct displaced_step_inferior_state *displaced;
1847
1848 if (!displaced_step_prepare (inferior_ptid))
1849 {
1850 /* Got placed in displaced stepping queue. Will be resumed
1851 later when all the currently queued displaced stepping
1852 requests finish. The thread is not executing at this point,
1853 and the call to set_executing will be made later. But we
1854 need to call set_running here, since from the frontend's point of view,
1855 the thread is running. */
1856 set_running (inferior_ptid, 1);
1857 discard_cleanups (old_cleanups);
1858 return;
1859 }
1860
1861 /* Update pc to reflect the new address from which we will execute
1862 instructions due to displaced stepping. */
1863 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1864
1865 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1866 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1867 displaced->step_closure);
1868 }
1869
1870 /* Do we need to do it the hard way, w/temp breakpoints? */
1871 else if (step)
1872 step = maybe_software_singlestep (gdbarch, pc);
1873
1874 /* Currently, our software single-step implementation leads to different
1875 results than hardware single-stepping in one situation: when stepping
1876 into delivering a signal which has an associated signal handler,
1877 hardware single-step will stop at the first instruction of the handler,
1878 while software single-step will simply skip execution of the handler.
1879
1880 For now, this difference in behavior is accepted since there is no
1881 easy way to actually implement single-stepping into a signal handler
1882 without kernel support.
1883
1884 However, there is one scenario where this difference leads to follow-on
1885 problems: if we're stepping off a breakpoint by removing all breakpoints
1886 and then single-stepping. In this case, the software single-step
1887 behavior means that even if there is a *breakpoint* in the signal
1888 handler, GDB still would not stop.
1889
1890 Fortunately, we can at least fix this particular issue. We detect
1891 here the case where we are about to deliver a signal while software
1892 single-stepping with breakpoints removed. In this situation, we
1893 revert the decisions to remove all breakpoints and insert single-
1894 step breakpoints, and instead we install a step-resume breakpoint
1895 at the current address, deliver the signal without stepping, and
1896 once we arrive back at the step-resume breakpoint, actually step
1897 over the breakpoint we originally wanted to step over. */
1898 if (singlestep_breakpoints_inserted_p
1899 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1900 {
1901 /* If we have nested signals or a pending signal is delivered
1902 immediately after a handler returns, we might already have
1903 a step-resume breakpoint set on the earlier handler. We cannot
1904 set another step-resume breakpoint; just continue on until the
1905 original breakpoint is hit. */
1906 if (tp->control.step_resume_breakpoint == NULL)
1907 {
1908 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1909 tp->step_after_step_resume_breakpoint = 1;
1910 }
1911
1912 remove_single_step_breakpoints ();
1913 singlestep_breakpoints_inserted_p = 0;
1914
1915 clear_step_over_info ();
1916 tp->control.trap_expected = 0;
1917
1918 insert_breakpoints ();
1919 }
1920
1921 /* If STEP is set, it's a request to use hardware stepping
1922 facilities. But in that case, we should never
1923 use software single-step breakpoints. */
1924 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1925
1926 /* Decide the set of threads to ask the target to resume. Start
1927 by assuming everything will be resumed, then narrow the set
1928 by applying increasingly restricting conditions. */
1929 resume_ptid = user_visible_resume_ptid (step);
1930
1931 /* Maybe resume a single thread after all. */
1932 if ((step || singlestep_breakpoints_inserted_p)
1933 && tp->control.trap_expected)
1934 {
1935 /* We're allowing a thread to run past a breakpoint it has
1936 hit, by single-stepping the thread with the breakpoint
1937 removed. In which case, we need to single-step only this
1938 thread, and keep others stopped, as they can miss this
1939 breakpoint if allowed to run. */
1940 resume_ptid = inferior_ptid;
1941 }
1942
1943 if (gdbarch_cannot_step_breakpoint (gdbarch))
1944 {
1945 /* Most targets can step a breakpoint instruction, thus
1946 executing it normally. But if this one cannot, just
1947 continue and we will hit it anyway. */
1948 if (step && breakpoint_inserted_here_p (aspace, pc))
1949 step = 0;
1950 }
1951
1952 if (debug_displaced
1953 && use_displaced_stepping (gdbarch)
1954 && tp->control.trap_expected)
1955 {
1956 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1957 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1958 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1959 gdb_byte buf[4];
1960
1961 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1962 paddress (resume_gdbarch, actual_pc));
1963 read_memory (actual_pc, buf, sizeof (buf));
1964 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1965 }
1966
1967 if (tp->control.may_range_step)
1968 {
1969 /* If we're resuming a thread with the PC out of the step
1970 range, then we're doing some nested/finer run control
1971 operation, like stepping the thread out of the dynamic
1972 linker or the displaced stepping scratch pad. We
1973 shouldn't have allowed a range step then. */
1974 gdb_assert (pc_in_thread_step_range (pc, tp));
1975 }
1976
1977 /* Install inferior's terminal modes. */
1978 target_terminal_inferior ();
1979
1980 /* Avoid confusing the next resume, if the next stop/resume
1981 happens to apply to another thread. */
1982 tp->suspend.stop_signal = GDB_SIGNAL_0;
1983
1984 /* Advise target which signals may be handled silently. If we have
1985 removed breakpoints because we are stepping over one (which can
1986 happen only if we are not using displaced stepping), we need to
1987 receive all signals to avoid accidentally skipping a breakpoint
1988 during execution of a signal handler. */
1989 if ((step || singlestep_breakpoints_inserted_p)
1990 && tp->control.trap_expected
1991 && !use_displaced_stepping (gdbarch))
1992 target_pass_signals (0, NULL);
1993 else
1994 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
1995
1996 target_resume (resume_ptid, step, sig);
1997
1998 discard_cleanups (old_cleanups);
1999 }
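
/* resume is the low-level primitive; execution commands do not normally
   call it directly.  The usual path is through proceed below (and
   keep_going in the event handling code), which compute the STEP
   argument from the thread's run control state, e.g. the call made at
   the end of proceed:

     resume (tp->control.trap_expected || step || bpstat_should_step (),
	     tp->suspend.stop_signal);  */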
2000 \f
2001 /* Proceeding. */
2002
2003 /* Clear out all variables saying what to do when inferior is continued.
2004 First do this, then set the ones you want, then call `proceed'. */
2005
2006 static void
2007 clear_proceed_status_thread (struct thread_info *tp)
2008 {
2009 if (debug_infrun)
2010 fprintf_unfiltered (gdb_stdlog,
2011 "infrun: clear_proceed_status_thread (%s)\n",
2012 target_pid_to_str (tp->ptid));
2013
2014 tp->control.trap_expected = 0;
2015 tp->control.step_range_start = 0;
2016 tp->control.step_range_end = 0;
2017 tp->control.may_range_step = 0;
2018 tp->control.step_frame_id = null_frame_id;
2019 tp->control.step_stack_frame_id = null_frame_id;
2020 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2021 tp->stop_requested = 0;
2022
2023 tp->control.stop_step = 0;
2024
2025 tp->control.proceed_to_finish = 0;
2026
2027 /* Discard any remaining commands or status from previous stop. */
2028 bpstat_clear (&tp->control.stop_bpstat);
2029 }
2030
2031 static int
2032 clear_proceed_status_callback (struct thread_info *tp, void *data)
2033 {
2034 if (is_exited (tp->ptid))
2035 return 0;
2036
2037 clear_proceed_status_thread (tp);
2038 return 0;
2039 }
2040
2041 void
2042 clear_proceed_status (void)
2043 {
2044 if (!non_stop)
2045 {
2046 /* In all-stop mode, delete the per-thread status of all
2047 threads; even if inferior_ptid is null_ptid, there may be
2048 threads on the list. E.g., we may be launching a new
2049 process, while selecting the executable. */
2050 iterate_over_threads (clear_proceed_status_callback, NULL);
2051 }
2052
2053 if (!ptid_equal (inferior_ptid, null_ptid))
2054 {
2055 struct inferior *inferior;
2056
2057 if (non_stop)
2058 {
2059 /* If in non-stop mode, only delete the per-thread status of
2060 the current thread. */
2061 clear_proceed_status_thread (inferior_thread ());
2062 }
2063
2064 inferior = current_inferior ();
2065 inferior->control.stop_soon = NO_STOP_QUIETLY;
2066 }
2067
2068 stop_after_trap = 0;
2069
2070 clear_step_over_info ();
2071
2072 observer_notify_about_to_proceed ();
2073
2074 if (stop_registers)
2075 {
2076 regcache_xfree (stop_registers);
2077 stop_registers = NULL;
2078 }
2079 }
2080
2081 /* Returns true if TP is still stopped at a breakpoint that needs
2082 stepping-over in order to make progress. If the breakpoint is gone
2083 meanwhile, we can skip the whole step-over dance. */
2084
2085 static int
2086 thread_still_needs_step_over (struct thread_info *tp)
2087 {
2088 if (tp->stepping_over_breakpoint)
2089 {
2090 struct regcache *regcache = get_thread_regcache (tp->ptid);
2091
2092 if (breakpoint_here_p (get_regcache_aspace (regcache),
2093 regcache_read_pc (regcache)))
2094 return 1;
2095
2096 tp->stepping_over_breakpoint = 0;
2097 }
2098
2099 return 0;
2100 }
2101
2102 /* Returns true if scheduler locking applies. STEP indicates whether
2103 we're about to do a step/next-like command to a thread. */
2104
2105 static int
2106 schedlock_applies (int step)
2107 {
2108 return (scheduler_mode == schedlock_on
2109 || (scheduler_mode == schedlock_step
2110 && step));
2111 }
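
/* In other words: schedlock_applies (1) is true for both "on" and
   "step", schedlock_applies (0) is true only for "on", and with the
   default "off" it is never true.  */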
2112
2113 /* Look for a thread other than EXCEPT that has previously reported a
2114 breakpoint event, and thus needs a step-over in order to make
2115 progress. Returns NULL if none is found. STEP indicates whether
2116 we're about to step the current thread, in order to decide whether
2117 "set scheduler-locking step" applies. */
2118
2119 static struct thread_info *
2120 find_thread_needs_step_over (int step, struct thread_info *except)
2121 {
2122 struct thread_info *tp, *current;
2123
2124 /* With non-stop mode on, threads are always handled individually. */
2125 gdb_assert (! non_stop);
2126
2127 current = inferior_thread ();
2128
2129 /* If scheduler locking applies, we can avoid iterating over all
2130 threads. */
2131 if (schedlock_applies (step))
2132 {
2133 if (except != current
2134 && thread_still_needs_step_over (current))
2135 return current;
2136
2137 return NULL;
2138 }
2139
2140 ALL_THREADS (tp)
2141 {
2142 /* Ignore the EXCEPT thread. */
2143 if (tp == except)
2144 continue;
2145 /* Ignore threads of processes we're not resuming. */
2146 if (!sched_multi
2147 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2148 continue;
2149
2150 if (thread_still_needs_step_over (tp))
2151 return tp;
2152 }
2153
2154 return NULL;
2155 }
2156
2157 /* Basic routine for continuing the program in various fashions.
2158
2159 ADDR is the address to resume at, or -1 for resume where stopped.
2160 SIGGNAL is the signal to give it, or 0 for none,
2161 or -1 to act according to how it stopped.
2162 STEP is nonzero if we should trap after one instruction.
2163 -1 means return after that and print nothing.
2164 You should probably set various step_... variables
2165 before calling here, if you are stepping.
2166
2167 You should call clear_proceed_status before calling proceed. */
2168
2169 void
2170 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2171 {
2172 struct regcache *regcache;
2173 struct gdbarch *gdbarch;
2174 struct thread_info *tp;
2175 CORE_ADDR pc;
2176 struct address_space *aspace;
2177
2178 /* If we're stopped at a fork/vfork, follow the branch set by the
2179 "set follow-fork-mode" command; otherwise, we'll just proceed
2180 resuming the current thread. */
2181 if (!follow_fork ())
2182 {
2183 /* The target for some reason decided not to resume. */
2184 normal_stop ();
2185 if (target_can_async_p ())
2186 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2187 return;
2188 }
2189
2190 /* We'll update this if & when we switch to a new thread. */
2191 previous_inferior_ptid = inferior_ptid;
2192
2193 regcache = get_current_regcache ();
2194 gdbarch = get_regcache_arch (regcache);
2195 aspace = get_regcache_aspace (regcache);
2196 pc = regcache_read_pc (regcache);
2197 tp = inferior_thread ();
2198
2199 if (step > 0)
2200 step_start_function = find_pc_function (pc);
2201 if (step < 0)
2202 stop_after_trap = 1;
2203
2204 /* Fill in with reasonable starting values. */
2205 init_thread_stepping_state (tp);
2206
2207 if (addr == (CORE_ADDR) -1)
2208 {
2209 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2210 && execution_direction != EXEC_REVERSE)
2211 /* There is a breakpoint at the address we will resume at,
2212 step one instruction before inserting breakpoints so that
2213 we do not stop right away (and report a second hit at this
2214 breakpoint).
2215
2216 Note, we don't do this in reverse, because we won't
2217 actually be executing the breakpoint insn anyway.
2218 We'll be (un-)executing the previous instruction. */
2219 tp->stepping_over_breakpoint = 1;
2220 else if (gdbarch_single_step_through_delay_p (gdbarch)
2221 && gdbarch_single_step_through_delay (gdbarch,
2222 get_current_frame ()))
2223 /* We stepped onto an instruction that needs to be stepped
2224 again before re-inserting the breakpoint, do so. */
2225 tp->stepping_over_breakpoint = 1;
2226 }
2227 else
2228 {
2229 regcache_write_pc (regcache, addr);
2230 }
2231
2232 if (debug_infrun)
2233 fprintf_unfiltered (gdb_stdlog,
2234 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2235 paddress (gdbarch, addr),
2236 gdb_signal_to_symbol_string (siggnal), step);
2237
2238 if (non_stop)
2239 /* In non-stop, each thread is handled individually. The context
2240 must already be set to the right thread here. */
2241 ;
2242 else
2243 {
2244 struct thread_info *step_over;
2245
2246 /* In a multi-threaded task we may select another thread and
2247 then continue or step.
2248
2249 But if the old thread was stopped at a breakpoint, it will
2250 immediately cause another breakpoint stop without any
2251 execution (i.e. it will report a breakpoint hit incorrectly).
2252 So we must step over it first.
2253
2254 Look for a thread other than the current (TP) that reported a
2255 breakpoint hit and hasn't been resumed yet since. */
2256 step_over = find_thread_needs_step_over (step, tp);
2257 if (step_over != NULL)
2258 {
2259 if (debug_infrun)
2260 fprintf_unfiltered (gdb_stdlog,
2261 "infrun: need to step-over [%s] first\n",
2262 target_pid_to_str (step_over->ptid));
2263
2264 /* Store the prev_pc for the stepping thread too, needed by
2265 switch_back_to_stepped_thread. */
2266 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2267 switch_to_thread (step_over->ptid);
2268 tp = step_over;
2269 }
2270 }
2271
2272 /* If we need to step over a breakpoint, and we're not using
2273 displaced stepping to do so, insert all breakpoints (watchpoints,
2274 etc.) but the one we're stepping over, step one instruction, and
2275 then re-insert the breakpoint when that step is finished. */
2276 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2277 {
2278 struct regcache *regcache = get_current_regcache ();
2279
2280 set_step_over_info (get_regcache_aspace (regcache),
2281 regcache_read_pc (regcache));
2282 }
2283 else
2284 clear_step_over_info ();
2285
2286 insert_breakpoints ();
2287
2288 tp->control.trap_expected = tp->stepping_over_breakpoint;
2289
2290 if (!non_stop)
2291 {
2292 /* Pass the last stop signal to the thread we're resuming,
2293 irrespective of whether the current thread is the thread that
2294 got the last event or not. This was historically GDB's
2295 behaviour before keeping a stop_signal per thread. */
2296
2297 struct thread_info *last_thread;
2298 ptid_t last_ptid;
2299 struct target_waitstatus last_status;
2300
2301 get_last_target_status (&last_ptid, &last_status);
2302 if (!ptid_equal (inferior_ptid, last_ptid)
2303 && !ptid_equal (last_ptid, null_ptid)
2304 && !ptid_equal (last_ptid, minus_one_ptid))
2305 {
2306 last_thread = find_thread_ptid (last_ptid);
2307 if (last_thread)
2308 {
2309 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2310 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2311 }
2312 }
2313 }
2314
2315 if (siggnal != GDB_SIGNAL_DEFAULT)
2316 tp->suspend.stop_signal = siggnal;
2317 /* If this signal should not be seen by the program,
2318 give it zero. Used for debugging signals. */
2319 else if (!signal_program[tp->suspend.stop_signal])
2320 tp->suspend.stop_signal = GDB_SIGNAL_0;
2321
2322 annotate_starting ();
2323
2324 /* Make sure that output from GDB appears before output from the
2325 inferior. */
2326 gdb_flush (gdb_stdout);
2327
2328 /* Refresh prev_pc value just prior to resuming. This used to be
2329 done in stop_stepping, however, setting prev_pc there did not handle
2330 scenarios such as inferior function calls or returning from
2331 a function via the return command. In those cases, the prev_pc
2332 value was not set properly for subsequent commands. The prev_pc value
2333 is used to initialize the starting line number in the ecs. With an
2334 invalid value, the gdb next command ends up stopping at the position
2335 represented by the next line table entry past our start position.
2336 On platforms that generate one line table entry per line, this
2337 is not a problem. However, on the ia64, the compiler generates
2338 extraneous line table entries that do not increase the line number.
2339 When we issue the gdb next command on the ia64 after an inferior call
2340 or a return command, we often end up a few instructions forward, still
2341 within the original line we started in.
2342
2343 An attempt was made to refresh the prev_pc at the same time the
2344 execution_control_state is initialized (for instance, just before
2345 waiting for an inferior event). But this approach did not work
2346 because of platforms that use ptrace, where the pc register cannot
2347 be read unless the inferior is stopped. At that point, we are not
2348 guaranteed the inferior is stopped and so the regcache_read_pc() call
2349 can fail. Setting the prev_pc value here ensures the value is updated
2350 correctly when the inferior is stopped. */
2351 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2352
2353 /* Reset to normal state. */
2354 init_infwait_state ();
2355
2356 /* Resume inferior. */
2357 resume (tp->control.trap_expected || step || bpstat_should_step (),
2358 tp->suspend.stop_signal);
2359
2360 /* Wait for it to stop (if not standalone)
2361 and in any case decode why it stopped, and act accordingly. */
2362 /* Do this only if we are not using the event loop, or if the target
2363 does not support asynchronous execution. */
2364 if (!target_can_async_p ())
2365 {
2366 wait_for_inferior ();
2367 normal_stop ();
2368 }
2369 }
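
/* A minimal sketch of the caller side (the real execution commands in
   infcmd.c add more bookkeeping, so treat this as illustrative only):

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);

   i.e. clear the per-thread run control state first, then resume from
   wherever the inferior stopped, keeping whatever signal it last
   received (subject to the "handle" settings).  */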
2370 \f
2371
2372 /* Start remote-debugging of a machine over a serial link. */
2373
2374 void
2375 start_remote (int from_tty)
2376 {
2377 struct inferior *inferior;
2378
2379 inferior = current_inferior ();
2380 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2381
2382 /* Always go on waiting for the target, regardless of the mode. */
2383 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2384 indicate to wait_for_inferior that a target should timeout if
2385 nothing is returned (instead of just blocking). Because of this,
2386 targets expecting an immediate response need to, internally, set
2387 things up so that the target_wait() is forced to eventually
2388 timeout. */
2389 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2390 differentiate to its caller what the state of the target is after
2391 the initial open has been performed. Here we're assuming that
2392 the target has stopped. It should be possible to eventually have
2393 target_open() return to the caller an indication that the target
2394 is currently running and GDB state should be set to the same as
2395 for an async run. */
2396 wait_for_inferior ();
2397
2398 /* Now that the inferior has stopped, do any bookkeeping like
2399 loading shared libraries. We want to do this before normal_stop,
2400 so that the displayed frame is up to date. */
2401 post_create_inferior (&current_target, from_tty);
2402
2403 normal_stop ();
2404 }
2405
2406 /* Initialize static vars when a new inferior begins. */
2407
2408 void
2409 init_wait_for_inferior (void)
2410 {
2411 /* These are meaningless until the first time through wait_for_inferior. */
2412
2413 breakpoint_init_inferior (inf_starting);
2414
2415 clear_proceed_status ();
2416
2417 target_last_wait_ptid = minus_one_ptid;
2418
2419 previous_inferior_ptid = inferior_ptid;
2420 init_infwait_state ();
2421
2422 /* Discard any skipped inlined frames. */
2423 clear_inline_frame_state (minus_one_ptid);
2424
2425 singlestep_ptid = null_ptid;
2426 singlestep_pc = 0;
2427 }
2428
2429 \f
2430 /* This enum encodes possible reasons for doing a target_wait, so that
2431 wfi can call target_wait in one place. (Ultimately the call will be
2432 moved out of the infinite loop entirely.) */
2433
2434 enum infwait_states
2435 {
2436 infwait_normal_state,
2437 infwait_step_watch_state,
2438 infwait_nonstep_watch_state
2439 };
2440
2441 /* The PTID we'll do a target_wait on. */
2442 ptid_t waiton_ptid;
2443
2444 /* Current inferior wait state. */
2445 static enum infwait_states infwait_state;
2446
2447 /* Data to be passed around while handling an event. This data is
2448 discarded between events. */
2449 struct execution_control_state
2450 {
2451 ptid_t ptid;
2452 /* The thread that got the event, if this was a thread event; NULL
2453 otherwise. */
2454 struct thread_info *event_thread;
2455
2456 struct target_waitstatus ws;
2457 int stop_func_filled_in;
2458 CORE_ADDR stop_func_start;
2459 CORE_ADDR stop_func_end;
2460 const char *stop_func_name;
2461 int wait_some_more;
2462
2463 /* We were in infwait_step_watch_state or
2464 infwait_nonstep_watch_state state, and the thread reported an
2465 event. */
2466 int stepped_after_stopped_by_watchpoint;
2467
2468 /* True if the event thread hit the single-step breakpoint of
2469 another thread. Thus the event doesn't cause a stop, the thread
2470 needs to be single-stepped past the single-step breakpoint before
2471 we can switch back to the original stepping thread. */
2472 int hit_singlestep_breakpoint;
2473 };
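
/* Rough usage pattern (see wait_for_inferior and fetch_inferior_event
   below for the real thing):

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       normal_stop ();

   The structure only lives for the duration of handling one event.  */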
2474
2475 static void handle_inferior_event (struct execution_control_state *ecs);
2476
2477 static void handle_step_into_function (struct gdbarch *gdbarch,
2478 struct execution_control_state *ecs);
2479 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2480 struct execution_control_state *ecs);
2481 static void handle_signal_stop (struct execution_control_state *ecs);
2482 static void check_exception_resume (struct execution_control_state *,
2483 struct frame_info *);
2484
2485 static void stop_stepping (struct execution_control_state *ecs);
2486 static void prepare_to_wait (struct execution_control_state *ecs);
2487 static void keep_going (struct execution_control_state *ecs);
2488 static void process_event_stop_test (struct execution_control_state *ecs);
2489 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2490
2491 /* Callback for iterate over threads. If the thread is stopped, but
2492 the user/frontend doesn't know about that yet, go through
2493 normal_stop, as if the thread had just stopped now. ARG points at
2494 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2495 ptid_is_pid(PTID) is true, applies to all threads of the process
2496 pointed at by PTID. Otherwise, applies only to the thread pointed at by
2497 PTID. */
2498
2499 static int
2500 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2501 {
2502 ptid_t ptid = * (ptid_t *) arg;
2503
2504 if ((ptid_equal (info->ptid, ptid)
2505 || ptid_equal (minus_one_ptid, ptid)
2506 || (ptid_is_pid (ptid)
2507 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2508 && is_running (info->ptid)
2509 && !is_executing (info->ptid))
2510 {
2511 struct cleanup *old_chain;
2512 struct execution_control_state ecss;
2513 struct execution_control_state *ecs = &ecss;
2514
2515 memset (ecs, 0, sizeof (*ecs));
2516
2517 old_chain = make_cleanup_restore_current_thread ();
2518
2519 overlay_cache_invalid = 1;
2520 /* Flush target cache before starting to handle each event.
2521 Target was running and cache could be stale. This is just a
2522 heuristic. Running threads may modify target memory, but we
2523 don't get any event. */
2524 target_dcache_invalidate ();
2525
2526 /* Go through handle_inferior_event/normal_stop, so we always
2527 have consistent output as if the stop event had been
2528 reported. */
2529 ecs->ptid = info->ptid;
2530 ecs->event_thread = find_thread_ptid (info->ptid);
2531 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2532 ecs->ws.value.sig = GDB_SIGNAL_0;
2533
2534 handle_inferior_event (ecs);
2535
2536 if (!ecs->wait_some_more)
2537 {
2538 struct thread_info *tp;
2539
2540 normal_stop ();
2541
2542 /* Finish off the continuations. */
2543 tp = inferior_thread ();
2544 do_all_intermediate_continuations_thread (tp, 1);
2545 do_all_continuations_thread (tp, 1);
2546 }
2547
2548 do_cleanups (old_chain);
2549 }
2550
2551 return 0;
2552 }
2553
2554 /* This function is attached as a "thread_stop_requested" observer.
2555 Clean up local state that assumed the PTID was to be resumed, and
2556 report the stop to the frontend. */
2557
2558 static void
2559 infrun_thread_stop_requested (ptid_t ptid)
2560 {
2561 struct displaced_step_inferior_state *displaced;
2562
2563 /* PTID was requested to stop. Remove it from the displaced
2564 stepping queue, so we don't try to resume it automatically. */
2565
2566 for (displaced = displaced_step_inferior_states;
2567 displaced;
2568 displaced = displaced->next)
2569 {
2570 struct displaced_step_request *it, **prev_next_p;
2571
2572 it = displaced->step_request_queue;
2573 prev_next_p = &displaced->step_request_queue;
2574 while (it)
2575 {
2576 if (ptid_match (it->ptid, ptid))
2577 {
2578 *prev_next_p = it->next;
2579 it->next = NULL;
2580 xfree (it);
2581 }
2582 else
2583 {
2584 prev_next_p = &it->next;
2585 }
2586
2587 it = *prev_next_p;
2588 }
2589 }
2590
2591 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2592 }
2593
2594 static void
2595 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2596 {
2597 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2598 nullify_last_target_wait_ptid ();
2599 }
2600
2601 /* Callback for iterate_over_threads. */
2602
2603 static int
2604 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2605 {
2606 if (is_exited (info->ptid))
2607 return 0;
2608
2609 delete_step_resume_breakpoint (info);
2610 delete_exception_resume_breakpoint (info);
2611 return 0;
2612 }
2613
2614 /* In all-stop, delete the step resume breakpoint of any thread that
2615 had one. In non-stop, delete the step resume breakpoint of the
2616 thread that just stopped. */
2617
2618 static void
2619 delete_step_thread_step_resume_breakpoint (void)
2620 {
2621 if (!target_has_execution
2622 || ptid_equal (inferior_ptid, null_ptid))
2623 /* If the inferior has exited, we have already deleted the step
2624 resume breakpoints out of GDB's lists. */
2625 return;
2626
2627 if (non_stop)
2628 {
2629 /* If in non-stop mode, only delete the step-resume or
2630 longjmp-resume breakpoint of the thread that just stopped
2631 stepping. */
2632 struct thread_info *tp = inferior_thread ();
2633
2634 delete_step_resume_breakpoint (tp);
2635 delete_exception_resume_breakpoint (tp);
2636 }
2637 else
2638 /* In all-stop mode, delete all step-resume and longjmp-resume
2639 breakpoints of any thread that had them. */
2640 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2641 }
2642
2643 /* A cleanup wrapper. */
2644
2645 static void
2646 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2647 {
2648 delete_step_thread_step_resume_breakpoint ();
2649 }
2650
2651 /* Pretty print the results of target_wait, for debugging purposes. */
2652
2653 static void
2654 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2655 const struct target_waitstatus *ws)
2656 {
2657 char *status_string = target_waitstatus_to_string (ws);
2658 struct ui_file *tmp_stream = mem_fileopen ();
2659 char *text;
2660
2661 /* The text is split over several lines because it was getting too long.
2662 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2663 output as a unit; we want only one timestamp printed if debug_timestamp
2664 is set. */
2665
2666 fprintf_unfiltered (tmp_stream,
2667 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2668 if (ptid_get_pid (waiton_ptid) != -1)
2669 fprintf_unfiltered (tmp_stream,
2670 " [%s]", target_pid_to_str (waiton_ptid));
2671 fprintf_unfiltered (tmp_stream, ", status) =\n");
2672 fprintf_unfiltered (tmp_stream,
2673 "infrun: %d [%s],\n",
2674 ptid_get_pid (result_ptid),
2675 target_pid_to_str (result_ptid));
2676 fprintf_unfiltered (tmp_stream,
2677 "infrun: %s\n",
2678 status_string);
2679
2680 text = ui_file_xstrdup (tmp_stream, NULL);
2681
2682 /* This uses %s in part to handle %'s in the text, but also to avoid
2683 a gcc error: the format attribute requires a string literal. */
2684 fprintf_unfiltered (gdb_stdlog, "%s", text);
2685
2686 xfree (status_string);
2687 xfree (text);
2688 ui_file_delete (tmp_stream);
2689 }
2690
2691 /* Prepare and stabilize the inferior for detaching it. E.g.,
2692 detaching while a thread is displaced stepping is a recipe for
2693 crashing it, as nothing would readjust the PC out of the scratch
2694 pad. */
2695
2696 void
2697 prepare_for_detach (void)
2698 {
2699 struct inferior *inf = current_inferior ();
2700 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2701 struct cleanup *old_chain_1;
2702 struct displaced_step_inferior_state *displaced;
2703
2704 displaced = get_displaced_stepping_state (inf->pid);
2705
2706 /* Is any thread of this process displaced stepping? If not,
2707 there's nothing else to do. */
2708 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2709 return;
2710
2711 if (debug_infrun)
2712 fprintf_unfiltered (gdb_stdlog,
2713 "displaced-stepping in-process while detaching");
2714
2715 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2716 inf->detaching = 1;
2717
2718 while (!ptid_equal (displaced->step_ptid, null_ptid))
2719 {
2720 struct cleanup *old_chain_2;
2721 struct execution_control_state ecss;
2722 struct execution_control_state *ecs;
2723
2724 ecs = &ecss;
2725 memset (ecs, 0, sizeof (*ecs));
2726
2727 overlay_cache_invalid = 1;
2728 /* Flush target cache before starting to handle each event.
2729 Target was running and cache could be stale. This is just a
2730 heuristic. Running threads may modify target memory, but we
2731 don't get any event. */
2732 target_dcache_invalidate ();
2733
2734 if (deprecated_target_wait_hook)
2735 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2736 else
2737 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2738
2739 if (debug_infrun)
2740 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2741
2742 /* If an error happens while handling the event, propagate GDB's
2743 knowledge of the executing state to the frontend/user running
2744 state. */
2745 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2746 &minus_one_ptid);
2747
2748 /* Now figure out what to do with the result of the wait. */
2749 handle_inferior_event (ecs);
2750
2751 /* No error, don't finish the state yet. */
2752 discard_cleanups (old_chain_2);
2753
2754 /* Breakpoints and watchpoints are not installed on the target
2755 at this point, and signals are passed directly to the
2756 inferior, so this must mean the process is gone. */
2757 if (!ecs->wait_some_more)
2758 {
2759 discard_cleanups (old_chain_1);
2760 error (_("Program exited while detaching"));
2761 }
2762 }
2763
2764 discard_cleanups (old_chain_1);
2765 }
2766
2767 /* Wait for control to return from inferior to debugger.
2768
2769 If inferior gets a signal, we may decide to start it up again
2770 instead of returning. That is why there is a loop in this function.
2771 When this function actually returns it means the inferior
2772 should be left stopped and GDB should read more commands. */
2773
2774 void
2775 wait_for_inferior (void)
2776 {
2777 struct cleanup *old_cleanups;
2778
2779 if (debug_infrun)
2780 fprintf_unfiltered
2781 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2782
2783 old_cleanups =
2784 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2785
2786 while (1)
2787 {
2788 struct execution_control_state ecss;
2789 struct execution_control_state *ecs = &ecss;
2790 struct cleanup *old_chain;
2791
2792 memset (ecs, 0, sizeof (*ecs));
2793
2794 overlay_cache_invalid = 1;
2795
2796 /* Flush target cache before starting to handle each event.
2797 Target was running and cache could be stale. This is just a
2798 heuristic. Running threads may modify target memory, but we
2799 don't get any event. */
2800 target_dcache_invalidate ();
2801
2802 if (deprecated_target_wait_hook)
2803 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2804 else
2805 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2806
2807 if (debug_infrun)
2808 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2809
2810 /* If an error happens while handling the event, propagate GDB's
2811 knowledge of the executing state to the frontend/user running
2812 state. */
2813 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2814
2815 /* Now figure out what to do with the result of the wait. */
2816 handle_inferior_event (ecs);
2817
2818 /* No error, don't finish the state yet. */
2819 discard_cleanups (old_chain);
2820
2821 if (!ecs->wait_some_more)
2822 break;
2823 }
2824
2825 do_cleanups (old_cleanups);
2826 }
2827
2828 /* Asynchronous version of wait_for_inferior. It is called by the
2829 event loop whenever a change of state is detected on the file
2830 descriptor corresponding to the target. It can be called more than
2831 once to complete a single execution command. In such cases we need
2832 to keep the state in a global variable ECSS. If it is the last time
2833 that this function is called for a single execution command, then
2834 report to the user that the inferior has stopped, and do the
2835 necessary cleanups. */
2836
2837 void
2838 fetch_inferior_event (void *client_data)
2839 {
2840 struct execution_control_state ecss;
2841 struct execution_control_state *ecs = &ecss;
2842 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2843 struct cleanup *ts_old_chain;
2844 int was_sync = sync_execution;
2845 int cmd_done = 0;
2846
2847 memset (ecs, 0, sizeof (*ecs));
2848
2849 /* We're handling a live event, so make sure we're doing live
2850 debugging. If we're looking at traceframes while the target is
2851 running, we're going to need to get back to that mode after
2852 handling the event. */
2853 if (non_stop)
2854 {
2855 make_cleanup_restore_current_traceframe ();
2856 set_current_traceframe (-1);
2857 }
2858
2859 if (non_stop)
2860 /* In non-stop mode, the user/frontend should not notice a thread
2861 switch due to internal events. Make sure we revert to the
2862 user selected thread and frame after handling the event and
2863 running any breakpoint commands. */
2864 make_cleanup_restore_current_thread ();
2865
2866 overlay_cache_invalid = 1;
2867 /* Flush target cache before starting to handle each event. Target
2868 was running and cache could be stale. This is just a heuristic.
2869 Running threads may modify target memory, but we don't get any
2870 event. */
2871 target_dcache_invalidate ();
2872
2873 make_cleanup_restore_integer (&execution_direction);
2874 execution_direction = target_execution_direction ();
2875
2876 if (deprecated_target_wait_hook)
2877 ecs->ptid =
2878 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2879 else
2880 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2881
2882 if (debug_infrun)
2883 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2884
2885 /* If an error happens while handling the event, propagate GDB's
2886 knowledge of the executing state to the frontend/user running
2887 state. */
2888 if (!non_stop)
2889 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2890 else
2891 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2892
2893 /* This runs before the make_cleanup_restore_current_thread above, so it
2894 still applies to the thread which has thrown the exception. */
2895 make_bpstat_clear_actions_cleanup ();
2896
2897 /* Now figure out what to do with the result of the wait. */
2898 handle_inferior_event (ecs);
2899
2900 if (!ecs->wait_some_more)
2901 {
2902 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2903
2904 delete_step_thread_step_resume_breakpoint ();
2905
2906 /* We may not find an inferior if this was a process exit. */
2907 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2908 normal_stop ();
2909
2910 if (target_has_execution
2911 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2912 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2913 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2914 && ecs->event_thread->step_multi
2915 && ecs->event_thread->control.stop_step)
2916 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2917 else
2918 {
2919 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2920 cmd_done = 1;
2921 }
2922 }
2923
2924 /* No error, don't finish the thread states yet. */
2925 discard_cleanups (ts_old_chain);
2926
2927 /* Revert thread and frame. */
2928 do_cleanups (old_chain);
2929
2930 /* If the inferior was in sync execution mode, and now isn't,
2931 restore the prompt (a synchronous execution command has finished,
2932 and we're ready for input). */
2933 if (interpreter_async && was_sync && !sync_execution)
2934 display_gdb_prompt (0);
2935
2936 if (cmd_done
2937 && !was_sync
2938 && exec_done_display_p
2939 && (ptid_equal (inferior_ptid, null_ptid)
2940 || !is_running (inferior_ptid)))
2941 printf_unfiltered (_("completed.\n"));
2942 }
2943
2944 /* Record the frame and location we're currently stepping through. */
2945 void
2946 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2947 {
2948 struct thread_info *tp = inferior_thread ();
2949
2950 tp->control.step_frame_id = get_frame_id (frame);
2951 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2952
2953 tp->current_symtab = sal.symtab;
2954 tp->current_line = sal.line;
2955 }
2956
2957 /* Clear context switchable stepping state. */
2958
2959 void
2960 init_thread_stepping_state (struct thread_info *tss)
2961 {
2962 tss->stepping_over_breakpoint = 0;
2963 tss->step_after_step_resume_breakpoint = 0;
2964 }
2965
2966 /* Set the cached copy of the last ptid/waitstatus. */
2967
2968 static void
2969 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2970 {
2971 target_last_wait_ptid = ptid;
2972 target_last_waitstatus = status;
2973 }
2974
2975 /* Return the cached copy of the last pid/waitstatus returned by
2976 target_wait()/deprecated_target_wait_hook(). The data is actually
2977 cached by handle_inferior_event(), which gets called immediately
2978 after target_wait()/deprecated_target_wait_hook(). */
2979
2980 void
2981 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2982 {
2983 *ptidp = target_last_wait_ptid;
2984 *status = target_last_waitstatus;
2985 }
2986
2987 void
2988 nullify_last_target_wait_ptid (void)
2989 {
2990 target_last_wait_ptid = minus_one_ptid;
2991 }
2992
2993 /* Switch thread contexts. */
2994
2995 static void
2996 context_switch (ptid_t ptid)
2997 {
2998 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2999 {
3000 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3001 target_pid_to_str (inferior_ptid));
3002 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3003 target_pid_to_str (ptid));
3004 }
3005
3006 switch_to_thread (ptid);
3007 }
3008
3009 static void
3010 adjust_pc_after_break (struct execution_control_state *ecs)
3011 {
3012 struct regcache *regcache;
3013 struct gdbarch *gdbarch;
3014 struct address_space *aspace;
3015 CORE_ADDR breakpoint_pc, decr_pc;
3016
3017 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3018 we aren't, just return.
3019
3020 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3021 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3022 implemented by software breakpoints should be handled through the normal
3023 breakpoint layer.
3024
3025 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3026 different signals (SIGILL or SIGEMT for instance), but it is less
3027 clear where the PC is pointing afterwards. It may not match
3028 gdbarch_decr_pc_after_break. I don't know any specific target that
3029 generates these signals at breakpoints (the code has been in GDB since at
3030 least 1992) so I can not guess how to handle them here.
3031
3032 In earlier versions of GDB, a target with
3033 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3034 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3035 target with both of these set in GDB history, and it seems unlikely to be
3036 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3037
3038 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3039 return;
3040
3041 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3042 return;
3043
3044 /* In reverse execution, when a breakpoint is hit, the instruction
3045 under it has already been de-executed. The reported PC always
3046 points at the breakpoint address, so adjusting it further would
3047 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3048 architecture:
3049
3050 B1 0x08000000 : INSN1
3051 B2 0x08000001 : INSN2
3052 0x08000002 : INSN3
3053 PC -> 0x08000003 : INSN4
3054
3055 Say you're stopped at 0x08000003 as above. Reverse continuing
3056 from that point should hit B2 as below. Reading the PC when the
3057 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3058 been de-executed already.
3059
3060 B1 0x08000000 : INSN1
3061 B2 PC -> 0x08000001 : INSN2
3062 0x08000002 : INSN3
3063 0x08000003 : INSN4
3064
3065 We can't apply the same logic as for forward execution, because
3066 we would wrongly adjust the PC to 0x08000000, since there's a
3067 breakpoint at PC - 1. We'd then report a hit on B1, although
3068 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3069 behaviour. */
3070 if (execution_direction == EXEC_REVERSE)
3071 return;
3072
3073 /* If this target does not decrement the PC after breakpoints, then
3074 we have nothing to do. */
3075 regcache = get_thread_regcache (ecs->ptid);
3076 gdbarch = get_regcache_arch (regcache);
3077
3078 decr_pc = target_decr_pc_after_break (gdbarch);
3079 if (decr_pc == 0)
3080 return;
3081
3082 aspace = get_regcache_aspace (regcache);
3083
3084 /* Find the location where (if we've hit a breakpoint) the
3085 breakpoint would be. */
3086 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3087
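  /* On x86, for instance, gdbarch_decr_pc_after_break is 1: the int3
     breakpoint instruction is a single byte and the trap is reported
     with the PC already past it, so a breakpoint planted at 0x1000 is
     reported with PC == 0x1001 and BREAKPOINT_PC computes back to
     0x1000.  */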
3088 /* Check whether there actually is a software breakpoint inserted at
3089 that location.
3090
3091 If in non-stop mode, a race condition is possible where we've
3092 removed a breakpoint, but stop events for that breakpoint were
3093 already queued and arrive later. To suppress those spurious
3094 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3095 and retire them after a number of stop events are reported. */
3096 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3097 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3098 {
3099 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3100
3101 if (record_full_is_used ())
3102 record_full_gdb_operation_disable_set ();
3103
3104 /* When using hardware single-step, a SIGTRAP is reported for both
3105 a completed single-step and a software breakpoint. Need to
3106 differentiate between the two, as the latter needs adjusting
3107 but the former does not.
3108
3109 The SIGTRAP can be due to a completed hardware single-step only if
3110 - we didn't insert software single-step breakpoints
3111 - the thread to be examined is still the current thread
3112 - this thread is currently being stepped
3113
3114 If any of these events did not occur, we must have stopped due
3115 to hitting a software breakpoint, and have to back up to the
3116 breakpoint address.
3117
3118 As a special case, we could have hardware single-stepped a
3119 software breakpoint. In this case (prev_pc == breakpoint_pc),
3120 we also need to back up to the breakpoint address. */
3121
3122 if (singlestep_breakpoints_inserted_p
3123 || !ptid_equal (ecs->ptid, inferior_ptid)
3124 || !currently_stepping (ecs->event_thread)
3125 || ecs->event_thread->prev_pc == breakpoint_pc)
3126 regcache_write_pc (regcache, breakpoint_pc);
3127
3128 do_cleanups (old_cleanups);
3129 }
3130 }
3131
3132 static void
3133 init_infwait_state (void)
3134 {
3135 waiton_ptid = pid_to_ptid (-1);
3136 infwait_state = infwait_normal_state;
3137 }
3138
3139 static int
3140 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3141 {
3142 for (frame = get_prev_frame (frame);
3143 frame != NULL;
3144 frame = get_prev_frame (frame))
3145 {
3146 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3147 return 1;
3148 if (get_frame_type (frame) != INLINE_FRAME)
3149 break;
3150 }
3151
3152 return 0;
3153 }
3154
3155 /* Auxiliary function that handles syscall entry/return events.
3156 It returns 1 if the inferior should keep going (and GDB
3157 should ignore the event), or 0 if the event deserves to be
3158 processed. */
3159
3160 static int
3161 handle_syscall_event (struct execution_control_state *ecs)
3162 {
3163 struct regcache *regcache;
3164 int syscall_number;
3165
3166 if (!ptid_equal (ecs->ptid, inferior_ptid))
3167 context_switch (ecs->ptid);
3168
3169 regcache = get_thread_regcache (ecs->ptid);
3170 syscall_number = ecs->ws.value.syscall_number;
3171 stop_pc = regcache_read_pc (regcache);
3172
3173 if (catch_syscall_enabled () > 0
3174 && catching_syscall_number (syscall_number) > 0)
3175 {
3176 if (debug_infrun)
3177 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3178 syscall_number);
3179
3180 ecs->event_thread->control.stop_bpstat
3181 = bpstat_stop_status (get_regcache_aspace (regcache),
3182 stop_pc, ecs->ptid, &ecs->ws);
3183
3184 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3185 {
3186 /* Catchpoint hit. */
3187 return 0;
3188 }
3189 }
3190
3191 /* If no catchpoint triggered for this, then keep going. */
3192 keep_going (ecs);
3193 return 1;
3194 }
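
/* handle_syscall_event is what backs the "catch syscall" command: the
   bpstat machinery above decides whether one of the user's syscall
   catchpoints matched, and only then is a stop reported; otherwise the
   event is swallowed and the inferior keeps going.  */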
3195
3196 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3197
3198 static void
3199 fill_in_stop_func (struct gdbarch *gdbarch,
3200 struct execution_control_state *ecs)
3201 {
3202 if (!ecs->stop_func_filled_in)
3203 {
3204 /* Don't care about return value; stop_func_start and stop_func_name
3205 will both be 0 if it doesn't work. */
3206 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3207 &ecs->stop_func_start, &ecs->stop_func_end);
3208 ecs->stop_func_start
3209 += gdbarch_deprecated_function_start_offset (gdbarch);
3210
3211 if (gdbarch_skip_entrypoint_p (gdbarch))
3212 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3213 ecs->stop_func_start);
3214
3215 ecs->stop_func_filled_in = 1;
3216 }
3217 }
3218
3219
3220 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3221
3222 static enum stop_kind
3223 get_inferior_stop_soon (ptid_t ptid)
3224 {
3225 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3226
3227 gdb_assert (inf != NULL);
3228 return inf->control.stop_soon;
3229 }
3230
3231 /* Given an execution control state that has been freshly filled in by
3232 an event from the inferior, figure out what it means and take
3233 appropriate action.
3234
3235 The alternatives are:
3236
3237 1) stop_stepping and return; to really stop and return to the
3238 debugger.
3239
3240 2) keep_going and return; to wait for the next event (set
3241 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3242 once). */
3243
3244 static void
3245 handle_inferior_event (struct execution_control_state *ecs)
3246 {
3247 enum stop_kind stop_soon;
3248
3249 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3250 {
3251 /* We had an event in the inferior, but we are not interested in
3252 handling it at this level. The lower layers have already
3253 done what needs to be done, if anything.
3254
3255 One of the possible circumstances for this is when the
3256 inferior produces output for the console. The inferior has
3257 not stopped, and we are ignoring the event. Another possible
3258 circumstance is any event which the lower level knows will be
3259 reported multiple times without an intervening resume. */
3260 if (debug_infrun)
3261 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3262 prepare_to_wait (ecs);
3263 return;
3264 }
3265
3266 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3267 && target_can_async_p () && !sync_execution)
3268 {
3269 /* There were no unwaited-for children left in the target, but
3270 we're not synchronously waiting for events either. Just
3271 ignore. Otherwise, if we were running a synchronous
3272 execution command, we need to cancel it and give the user
3273 back the terminal. */
3274 if (debug_infrun)
3275 fprintf_unfiltered (gdb_stdlog,
3276 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3277 prepare_to_wait (ecs);
3278 return;
3279 }
3280
3281 /* Cache the last pid/waitstatus. */
3282 set_last_target_status (ecs->ptid, ecs->ws);
3283
3284 /* Always clear state belonging to the previous time we stopped. */
3285 stop_stack_dummy = STOP_NONE;
3286
3287 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3288 {
3289 /* No unwaited-for children left. IOW, all resumed children
3290 have exited. */
3291 if (debug_infrun)
3292 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3293
3294 stop_print_frame = 0;
3295 stop_stepping (ecs);
3296 return;
3297 }
3298
3299 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3300 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3301 {
3302 ecs->event_thread = find_thread_ptid (ecs->ptid);
3303 /* If it's a new thread, add it to the thread database. */
3304 if (ecs->event_thread == NULL)
3305 ecs->event_thread = add_thread (ecs->ptid);
3306
3307 /* Disable range stepping. If the next step request could use a
3308 range, this will end up re-enabled then. */
3309 ecs->event_thread->control.may_range_step = 0;
3310 }
3311
3312 /* Dependent on valid ECS->EVENT_THREAD. */
3313 adjust_pc_after_break (ecs);
3314
3315 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3316 reinit_frame_cache ();
3317
3318 breakpoint_retire_moribund ();
3319
3320 /* First, distinguish signals caused by the debugger from signals
3321 that have to do with the program's own actions. Note that
3322 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3323 on the operating system version. Here we detect when a SIGILL or
3324 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3325 something similar for SIGSEGV, since a SIGSEGV will be generated
3326 when we're trying to execute a breakpoint instruction on a
3327 non-executable stack. This happens for call dummy breakpoints
3328 for architectures like SPARC that place call dummies on the
3329 stack. */
3330 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3331 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3332 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3333 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3334 {
3335 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3336
3337 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3338 regcache_read_pc (regcache)))
3339 {
3340 if (debug_infrun)
3341 fprintf_unfiltered (gdb_stdlog,
3342 "infrun: Treating signal as SIGTRAP\n");
3343 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3344 }
3345 }
3346
3347 /* Mark the non-executing threads accordingly. In all-stop, all
3348 threads of all processes are stopped when we get any event
3349 reported. In non-stop mode, only the event thread stops. If
3350 we're handling a process exit in non-stop mode, there's nothing
3351 to do, as threads of the dead process are gone, and threads of
3352 any other process were left running. */
3353 if (!non_stop)
3354 set_executing (minus_one_ptid, 0);
3355 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3356 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3357 set_executing (ecs->ptid, 0);
3358
3359 switch (infwait_state)
3360 {
3361 case infwait_normal_state:
3362 if (debug_infrun)
3363 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3364 break;
3365
3366 case infwait_step_watch_state:
3367 if (debug_infrun)
3368 fprintf_unfiltered (gdb_stdlog,
3369 "infrun: infwait_step_watch_state\n");
3370
3371 ecs->stepped_after_stopped_by_watchpoint = 1;
3372 break;
3373
3374 case infwait_nonstep_watch_state:
3375 if (debug_infrun)
3376 fprintf_unfiltered (gdb_stdlog,
3377 "infrun: infwait_nonstep_watch_state\n");
3378 insert_breakpoints ();
3379
3380 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3381 handle things like signals arriving and other things happening
3382 in combination correctly? */
3383 ecs->stepped_after_stopped_by_watchpoint = 1;
3384 break;
3385
3386 default:
3387 internal_error (__FILE__, __LINE__, _("bad switch"));
3388 }
3389
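/* The special wait states above apply to a single event only; go back
   to the normal state and stop waiting on any particular thread.  */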
3390 infwait_state = infwait_normal_state;
3391 waiton_ptid = pid_to_ptid (-1);
3392
3393 switch (ecs->ws.kind)
3394 {
3395 case TARGET_WAITKIND_LOADED:
3396 if (debug_infrun)
3397 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3398 if (!ptid_equal (ecs->ptid, inferior_ptid))
3399 context_switch (ecs->ptid);
3400 /* Ignore gracefully during startup of the inferior, as it might
3401 be the shell which has just loaded some objects; otherwise
3402 add the symbols for the newly loaded objects. Also ignore at
3403 the beginning of an attach or remote session; we will query
3404 the full list of libraries once the connection is
3405 established. */
3406
3407 stop_soon = get_inferior_stop_soon (ecs->ptid);
3408 if (stop_soon == NO_STOP_QUIETLY)
3409 {
3410 struct regcache *regcache;
3411
3412 regcache = get_thread_regcache (ecs->ptid);
3413
3414 handle_solib_event ();
3415
3416 ecs->event_thread->control.stop_bpstat
3417 = bpstat_stop_status (get_regcache_aspace (regcache),
3418 stop_pc, ecs->ptid, &ecs->ws);
3419
3420 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3421 {
3422 /* A catchpoint triggered. */
3423 process_event_stop_test (ecs);
3424 return;
3425 }
3426
3427 /* If requested, stop when the dynamic linker notifies
3428 gdb of events. This allows the user to get control
3429 and place breakpoints in initializer routines for
3430 dynamically loaded objects (among other things). */
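/* (The user requests this behavior with "set stop-on-solib-events 1".)  */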
3431 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3432 if (stop_on_solib_events)
3433 {
3434 /* Make sure we print "Stopped due to solib-event" in
3435 normal_stop. */
3436 stop_print_frame = 1;
3437
3438 stop_stepping (ecs);
3439 return;
3440 }
3441 }
3442
3443 /* If we are skipping through a shell, or through shared library
3444 loading that we aren't interested in, resume the program. If
3445 we're running the program normally, also resume. */
3446 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3447 {
3448 /* Loading of shared libraries might have changed breakpoint
3449 addresses. Make sure new breakpoints are inserted. */
3450 if (stop_soon == NO_STOP_QUIETLY
3451 && !breakpoints_always_inserted_mode ())
3452 insert_breakpoints ();
3453 resume (0, GDB_SIGNAL_0);
3454 prepare_to_wait (ecs);
3455 return;
3456 }
3457
3458 /* But stop if we're attaching or setting up a remote
3459 connection. */
3460 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3461 || stop_soon == STOP_QUIETLY_REMOTE)
3462 {
3463 if (debug_infrun)
3464 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3465 stop_stepping (ecs);
3466 return;
3467 }
3468
3469 internal_error (__FILE__, __LINE__,
3470 _("unhandled stop_soon: %d"), (int) stop_soon);
3471
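/* The target reported a stop for no reason of interest to GDB; treat
   it as if nothing had happened and simply resume.  */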
3472 case TARGET_WAITKIND_SPURIOUS:
3473 if (debug_infrun)
3474 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3475 if (!ptid_equal (ecs->ptid, inferior_ptid))
3476 context_switch (ecs->ptid);
3477 resume (0, GDB_SIGNAL_0);
3478 prepare_to_wait (ecs);
3479 return;
3480
3481 case TARGET_WAITKIND_EXITED:
3482 case TARGET_WAITKIND_SIGNALLED:
3483 if (debug_infrun)
3484 {
3485 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3486 fprintf_unfiltered (gdb_stdlog,
3487 "infrun: TARGET_WAITKIND_EXITED\n");
3488 else
3489 fprintf_unfiltered (gdb_stdlog,
3490 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3491 }
3492
3493 inferior_ptid = ecs->ptid;
3494 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3495 set_current_program_space (current_inferior ()->pspace);
3496 handle_vfork_child_exec_or_exit (0);
3497 target_terminal_ours (); /* Must do this before mourn anyway. */
3498
3499 /* Clear any previous state of the exit convenience variables. */
3500 clear_exit_convenience_vars ();
3501
3502 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3503 {
3504 /* Record the exit code in the convenience variable $_exitcode, so
3505 that the user can inspect this again later. */
3506 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3507 (LONGEST) ecs->ws.value.integer);
3508
3509 /* Also record this in the inferior itself. */
3510 current_inferior ()->has_exit_code = 1;
3511 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3512
3513 print_exited_reason (ecs->ws.value.integer);
3514 }
3515 else
3516 {
3517 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3518 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3519
3520 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3521 {
3522 /* Set the value of the internal variable $_exitsignal,
3523 which holds the signal uncaught by the inferior. */
3524 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3525 gdbarch_gdb_signal_to_target (gdbarch,
3526 ecs->ws.value.sig));
3527 }
3528 else
3529 {
3530 /* We don't have access to the target's method used for
3531 converting between signal numbers (GDB's internal
3532 representation <-> target's representation).
3533 Therefore, we cannot do a good job at displaying this
3534 information to the user. It's better to just warn
3535 her about it (if infrun debugging is enabled), and
3536 give up. */
3537 if (debug_infrun)
3538 fprintf_unfiltered (gdb_stdlog, _("\
3539 Cannot fill $_exitsignal with the correct signal number.\n"));
3540 }
3541
3542 print_signal_exited_reason (ecs->ws.value.sig);
3543 }
3544
3545 gdb_flush (gdb_stdout);
3546 target_mourn_inferior ();
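/* The inferior is gone, and any single-step breakpoints that were
   inserted in it went with it; just drop our record of them.  */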
3547 singlestep_breakpoints_inserted_p = 0;
3548 cancel_single_step_breakpoints ();
3549 stop_print_frame = 0;
3550 stop_stepping (ecs);
3551 return;
3552
3553 /* The following are the only cases in which we keep going;
3554 the above cases all end in a return. */
3555 case TARGET_WAITKIND_FORKED:
3556 case TARGET_WAITKIND_VFORKED:
3557 if (debug_infrun)
3558 {
3559 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3560 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3561 else
3562 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3563 }
3564
3565 /* Check whether the inferior is displaced stepping. */
3566 {
3567 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3568 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3569 struct displaced_step_inferior_state *displaced
3570 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3571
3572 /* If displaced stepping is in use, and thread ecs->ptid is the
3573 one that was displaced stepping. */
3574 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3575 {
3576 struct inferior *parent_inf
3577 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3578 struct regcache *child_regcache;
3579 CORE_ADDR parent_pc;
3580
3581 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3582 indicating that the displaced stepping of the syscall instruction
3583 has been done. Perform cleanup for parent process here. Note
3584 that this operation also cleans up the child process for vfork,
3585 because their pages are shared. */
3586 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3587
3588 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3589 {
3590 /* Restore scratch pad for child process. */
3591 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3592 }
3593
3594 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3595 the child's PC is also within the scratchpad. Set the child's PC
3596 to the parent's PC value, which has already been fixed up.
3597 FIXME: we use the parent's aspace here, although we're touching
3598 the child, because the child hasn't been added to the inferior
3599 list yet at this point. */
3600
3601 child_regcache
3602 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3603 gdbarch,
3604 parent_inf->aspace);
3605 /* Read PC value of parent process. */
3606 parent_pc = regcache_read_pc (regcache);
3607
3608 if (debug_displaced)
3609 fprintf_unfiltered (gdb_stdlog,
3610 "displaced: write child pc from %s to %s\n",
3611 paddress (gdbarch,
3612 regcache_read_pc (child_regcache)),
3613 paddress (gdbarch, parent_pc));
3614
3615 regcache_write_pc (child_regcache, parent_pc);
3616 }
3617 }
3618
3619 if (!ptid_equal (ecs->ptid, inferior_ptid))
3620 context_switch (ecs->ptid);
3621
3622 /* Immediately detach breakpoints from the child before there's
3623 any chance of letting the user delete breakpoints from the
3624 breakpoint lists. If we don't do this early, it's easy to
3625 leave leftover traps in the child, viz: "break foo; catch
3626 fork; c; <fork>; del; c; <child calls foo>". We only follow
3627 the fork on the last `continue', and by that time the
3628 breakpoint at "foo" is long gone from the breakpoint table.
3629 If we vforked, then we don't need to unpatch here, since both
3630 parent and child are sharing the same memory pages; we'll
3631 need to unpatch at follow/detach time instead to be certain
3632 that new breakpoints added between catchpoint hit time and
3633 vfork follow are detached. */
3634 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3635 {
3636 /* This won't actually modify the breakpoint list, but will
3637 physically remove the breakpoints from the child. */
3638 detach_breakpoints (ecs->ws.value.related_pid);
3639 }
3640
3641 if (singlestep_breakpoints_inserted_p)
3642 {
3643 /* Pull the single step breakpoints out of the target. */
3644 remove_single_step_breakpoints ();
3645 singlestep_breakpoints_inserted_p = 0;
3646 }
3647
3648 /* In case the event is caught by a catchpoint, remember that
3649 the event is to be followed at the next resume of the thread,
3650 and not immediately. */
3651 ecs->event_thread->pending_follow = ecs->ws;
3652
3653 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3654
3655 ecs->event_thread->control.stop_bpstat
3656 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3657 stop_pc, ecs->ptid, &ecs->ws);
3658
3659 /* If no catchpoint triggered for this, then keep going. Note
3660 that we're interested in knowing the bpstat actually causes a
3661 stop, not just if it may explain the signal. Software
3662 watchpoints, for example, always appear in the bpstat. */
3663 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3664 {
3665 ptid_t parent;
3666 ptid_t child;
3667 int should_resume;
3668 int follow_child
3669 = (follow_fork_mode_string == follow_fork_mode_child);
3670
3671 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3672
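/* follow_fork attaches to or detaches from the parent and child as
   dictated by the user's follow-fork-mode and detach-on-fork settings,
   and returns nonzero if the current thread should then be resumed.  */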
3673 should_resume = follow_fork ();
3674
3675 parent = ecs->ptid;
3676 child = ecs->ws.value.related_pid;
3677
3678 /* In non-stop mode, also resume the other branch. */
3679 if (non_stop && !detach_fork)
3680 {
3681 if (follow_child)
3682 switch_to_thread (parent);
3683 else
3684 switch_to_thread (child);
3685
3686 ecs->event_thread = inferior_thread ();
3687 ecs->ptid = inferior_ptid;
3688 keep_going (ecs);
3689 }
3690
3691 if (follow_child)
3692 switch_to_thread (child);
3693 else
3694 switch_to_thread (parent);
3695
3696 ecs->event_thread = inferior_thread ();
3697 ecs->ptid = inferior_ptid;
3698
3699 if (should_resume)
3700 keep_going (ecs);
3701 else
3702 stop_stepping (ecs);
3703 return;
3704 }
3705 process_event_stop_test (ecs);
3706 return;
3707
3708 case TARGET_WAITKIND_VFORK_DONE:
3709 /* Done with the shared memory region. Re-insert breakpoints in
3710 the parent, and keep going. */
3711
3712 if (debug_infrun)
3713 fprintf_unfiltered (gdb_stdlog,
3714 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3715
3716 if (!ptid_equal (ecs->ptid, inferior_ptid))
3717 context_switch (ecs->ptid);
3718
3719 current_inferior ()->waiting_for_vfork_done = 0;
3720 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3721 /* This also takes care of reinserting breakpoints in the
3722 previously locked inferior. */
3723 keep_going (ecs);
3724 return;
3725
3726 case TARGET_WAITKIND_EXECD:
3727 if (debug_infrun)
3728 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3729
3730 if (!ptid_equal (ecs->ptid, inferior_ptid))
3731 context_switch (ecs->ptid);
3732
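/* The exec replaced the old program image, so any single-step
   breakpoints set in it are no longer meaningful; drop our record of
   them.  */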
3733 singlestep_breakpoints_inserted_p = 0;
3734 cancel_single_step_breakpoints ();
3735
3736 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3737
3738 /* Do whatever is necessary to the parent branch of the vfork. */
3739 handle_vfork_child_exec_or_exit (1);
3740
3741 /* This causes the eventpoints and symbol table to be reset.
3742 Must do this now, before trying to determine whether to
3743 stop. */
3744 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3745
3746 ecs->event_thread->control.stop_bpstat
3747 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3748 stop_pc, ecs->ptid, &ecs->ws);
3749
3750 /* Note that this may be referenced from inside
3751 bpstat_stop_status above, through inferior_has_execd. */
3752 xfree (ecs->ws.value.execd_pathname);
3753 ecs->ws.value.execd_pathname = NULL;
3754
3755 /* If no catchpoint triggered for this, then keep going. */
3756 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3757 {
3758 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3759 keep_going (ecs);
3760 return;
3761 }
3762 process_event_stop_test (ecs);
3763 return;
3764
3765 /* Be careful not to try to gather much state about a thread
3766 that's in a syscall. It's frequently a losing proposition. */
3767 case TARGET_WAITKIND_SYSCALL_ENTRY:
3768 if (debug_infrun)
3769 fprintf_unfiltered (gdb_stdlog,
3770 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3771 /* Getting the current syscall number. */
3772 if (handle_syscall_event (ecs) == 0)
3773 process_event_stop_test (ecs);
3774 return;
3775
3776 /* Before examining the threads further, step this thread to
3777 get it entirely out of the syscall. (We get notice of the
3778 event when the thread is just on the verge of exiting a
3779 syscall. Stepping one instruction seems to get it back
3780 into user code.) */
3781 case TARGET_WAITKIND_SYSCALL_RETURN:
3782 if (debug_infrun)
3783 fprintf_unfiltered (gdb_stdlog,
3784 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3785 if (handle_syscall_event (ecs) == 0)
3786 process_event_stop_test (ecs);
3787 return;
3788
3789 case TARGET_WAITKIND_STOPPED:
3790 if (debug_infrun)
3791 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3792 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3793 handle_signal_stop (ecs);
3794 return;
3795
3796 case TARGET_WAITKIND_NO_HISTORY:
3797 if (debug_infrun)
3798 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3799 /* Reverse execution: target ran out of history info. */
3800
3801 /* Pull the single step breakpoints out of the target. */
3802 if (singlestep_breakpoints_inserted_p)
3803 {
3804 if (!ptid_equal (ecs->ptid, inferior_ptid))
3805 context_switch (ecs->ptid);
3806 remove_single_step_breakpoints ();
3807 singlestep_breakpoints_inserted_p = 0;
3808 }
3809 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3810 print_no_history_reason ();
3811 stop_stepping (ecs);
3812 return;
3813 }
3814 }
3815
3816 /* Come here when the program has stopped with a signal. */
3817
3818 static void
3819 handle_signal_stop (struct execution_control_state *ecs)
3820 {
3821 struct frame_info *frame;
3822 struct gdbarch *gdbarch;
3823 int stopped_by_watchpoint;
3824 enum stop_kind stop_soon;
3825 int random_signal;
3826
3827 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3828
3829 /* Do we need to clean up the state of a thread that has
3830 completed a displaced single-step? (Doing so usually affects
3831 the PC, so do it here, before we set stop_pc.) */
3832 displaced_step_fixup (ecs->ptid,
3833 ecs->event_thread->suspend.stop_signal);
3834
3835 /* If we either finished a single-step or hit a breakpoint, but
3836 the user wanted this thread to be stopped, pretend we got a
3837 SIG0 (generic unsignaled stop). */
3838 if (ecs->event_thread->stop_requested
3839 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3840 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3841
3842 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3843
3844 if (debug_infrun)
3845 {
3846 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3847 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3848 struct cleanup *old_chain = save_inferior_ptid ();
3849
3850 inferior_ptid = ecs->ptid;
3851
3852 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3853 paddress (gdbarch, stop_pc));
3854 if (target_stopped_by_watchpoint ())
3855 {
3856 CORE_ADDR addr;
3857
3858 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3859
3860 if (target_stopped_data_address (&current_target, &addr))
3861 fprintf_unfiltered (gdb_stdlog,
3862 "infrun: stopped data address = %s\n",
3863 paddress (gdbarch, addr));
3864 else
3865 fprintf_unfiltered (gdb_stdlog,
3866 "infrun: (no data address available)\n");
3867 }
3868
3869 do_cleanups (old_chain);
3870 }
3871
3872 /* This stop_soon state originates from start_remote(), start_inferior()
3873 and the shared library hook functions. */
3874 stop_soon = get_inferior_stop_soon (ecs->ptid);
3875 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3876 {
3877 if (!ptid_equal (ecs->ptid, inferior_ptid))
3878 context_switch (ecs->ptid);
3879 if (debug_infrun)
3880 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3881 stop_print_frame = 1;
3882 stop_stepping (ecs);
3883 return;
3884 }
3885
3886 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3887 && stop_after_trap)
3888 {
3889 if (!ptid_equal (ecs->ptid, inferior_ptid))
3890 context_switch (ecs->ptid);
3891 if (debug_infrun)
3892 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3893 stop_print_frame = 0;
3894 stop_stepping (ecs);
3895 return;
3896 }
3897
3898 /* This originates from attach_command(). We need to overwrite
3899 the stop_signal here, because some kernels don't ignore a
3900 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3901 See more comments in inferior.h. On the other hand, if we
3902 get a non-SIGSTOP, report it to the user - assume the backend
3903 will handle the SIGSTOP if it should show up later.
3904
3905 Also consider that the attach is complete when we see a
3906 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3907 target extended-remote report it instead of a SIGSTOP
3908 (e.g. gdbserver). We already rely on SIGTRAP being our
3909 signal, so this is no exception.
3910
3911 Also consider that the attach is complete when we see a
3912 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3913 the target to stop all threads of the inferior, in case the
3914 low level attach operation doesn't stop them implicitly. If
3915 they weren't stopped implicitly, then the stub will report a
3916 GDB_SIGNAL_0, meaning: stopped for no particular reason
3917 other than GDB's request. */
3918 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3919 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3920 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3921 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3922 {
3923 stop_print_frame = 1;
3924 stop_stepping (ecs);
3925 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3926 return;
3927 }
3928
3929 /* See if something interesting happened to the non-current thread. If
3930 so, then switch to that thread. */
3931 if (!ptid_equal (ecs->ptid, inferior_ptid))
3932 {
3933 if (debug_infrun)
3934 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3935
3936 context_switch (ecs->ptid);
3937
3938 if (deprecated_context_hook)
3939 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3940 }
3941
3942 /* At this point, get hold of the now-current thread's frame. */
3943 frame = get_current_frame ();
3944 gdbarch = get_frame_arch (frame);
3945
3946 /* Pull the single step breakpoints out of the target. */
3947 if (singlestep_breakpoints_inserted_p)
3948 {
3949 /* However, before doing so, if this single-step breakpoint was
3950 actually for another thread, set this thread up for moving
3951 past it. */
3952 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3953 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3954 {
3955 struct regcache *regcache;
3956 struct address_space *aspace;
3957 CORE_ADDR pc;
3958
3959 regcache = get_thread_regcache (ecs->ptid);
3960 aspace = get_regcache_aspace (regcache);
3961 pc = regcache_read_pc (regcache);
3962 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3963 {
3964 if (debug_infrun)
3965 {
3966 fprintf_unfiltered (gdb_stdlog,
3967 "infrun: [%s] hit step over single-step"
3968 " breakpoint of [%s]\n",
3969 target_pid_to_str (ecs->ptid),
3970 target_pid_to_str (singlestep_ptid));
3971 }
3972 ecs->hit_singlestep_breakpoint = 1;
3973 }
3974 }
3975
3976 remove_single_step_breakpoints ();
3977 singlestep_breakpoints_inserted_p = 0;
3978 }
3979
3980 if (ecs->stepped_after_stopped_by_watchpoint)
3981 stopped_by_watchpoint = 0;
3982 else
3983 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3984
3985 /* If necessary, step over this watchpoint. We'll be back to display
3986 it in a moment. */
3987 if (stopped_by_watchpoint
3988 && (target_have_steppable_watchpoint
3989 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3990 {
3991 /* At this point, we are stopped at an instruction which has
3992 attempted to write to a piece of memory under control of
3993 a watchpoint. The instruction hasn't actually executed
3994 yet. If we were to evaluate the watchpoint expression
3995 now, we would get the old value, and therefore no change
3996 would seem to have occurred.
3997
3998 In order to make watchpoints work `right', we really need
3999 to complete the memory write, and then evaluate the
4000 watchpoint expression. We do this by single-stepping the
4001 target.
4002
4003 It may not be necessary to disable the watchpoint to step over
4004 it. For example, the PA can (with some kernel cooperation)
4005 single step over a watchpoint without disabling the watchpoint.
4006
4007 It is far more common to need to disable a watchpoint to step
4008 the inferior over it. If we have non-steppable watchpoints,
4009 we must disable the current watchpoint; it's simplest to
4010 disable all watchpoints and breakpoints. */
4011 int hw_step = 1;
4012
4013 if (!target_have_steppable_watchpoint)
4014 {
4015 remove_breakpoints ();
4016 /* See comment in resume why we need to stop bypassing signals
4017 while breakpoints have been removed. */
4018 target_pass_signals (0, NULL);
4019 }
4020 /* Single-step past the triggering instruction (software single-step if needed). */
4021 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4022 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4023 waiton_ptid = ecs->ptid;
4024 if (target_have_steppable_watchpoint)
4025 infwait_state = infwait_step_watch_state;
4026 else
4027 infwait_state = infwait_nonstep_watch_state;
4028 prepare_to_wait (ecs);
4029 return;
4030 }
4031
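/* Reset the per-stop state; the checks below fill it in as appropriate
   for this event.  */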
4032 ecs->event_thread->stepping_over_breakpoint = 0;
4033 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4034 ecs->event_thread->control.stop_step = 0;
4035 stop_print_frame = 1;
4036 stopped_by_random_signal = 0;
4037
4038 /* Hide inlined functions starting here, unless we just performed stepi or
4039 nexti. After stepi and nexti, always show the innermost frame (not any
4040 inline function call sites). */
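/* (A step_range_end of 1 is how a stepi/nexti request is encoded; see
   the check near the end of process_event_stop_test.)  */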
4041 if (ecs->event_thread->control.step_range_end != 1)
4042 {
4043 struct address_space *aspace =
4044 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4045
4046 /* skip_inline_frames is expensive, so we avoid it if we can
4047 determine that the address is one where functions cannot have
4048 been inlined. This improves performance with inferiors that
4049 load a lot of shared libraries, because the solib event
4050 breakpoint is defined as the address of a function (i.e. not
4051 inline). Note that we have to check the previous PC as well
4052 as the current one to catch cases when we have just
4053 single-stepped off a breakpoint prior to reinstating it.
4054 Note that we're assuming that the code we single-step to is
4055 not inline, but that's not definitive: there's nothing
4056 preventing the event breakpoint function from containing
4057 inlined code, and the single-step ending up there. If the
4058 user had set a breakpoint on that inlined code, the missing
4059 skip_inline_frames call would break things. Fortunately
4060 that's an extremely unlikely scenario. */
4061 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4062 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4063 && ecs->event_thread->control.trap_expected
4064 && pc_at_non_inline_function (aspace,
4065 ecs->event_thread->prev_pc,
4066 &ecs->ws)))
4067 {
4068 skip_inline_frames (ecs->ptid);
4069
4070 /* Re-fetch current thread's frame in case that invalidated
4071 the frame cache. */
4072 frame = get_current_frame ();
4073 gdbarch = get_frame_arch (frame);
4074 }
4075 }
4076
4077 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4078 && ecs->event_thread->control.trap_expected
4079 && gdbarch_single_step_through_delay_p (gdbarch)
4080 && currently_stepping (ecs->event_thread))
4081 {
4082 /* We're trying to step off a breakpoint. Turns out that we're
4083 also on an instruction that needs to be stepped multiple
4084 times before it has fully executed. E.g., architectures
4085 with a delay slot. It needs to be stepped twice, once for
4086 the instruction and once for the delay slot. */
4087 int step_through_delay
4088 = gdbarch_single_step_through_delay (gdbarch, frame);
4089
4090 if (debug_infrun && step_through_delay)
4091 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4092 if (ecs->event_thread->control.step_range_end == 0
4093 && step_through_delay)
4094 {
4095 /* The user issued a continue when stopped at a breakpoint.
4096 Set up for another trap and get out of here. */
4097 ecs->event_thread->stepping_over_breakpoint = 1;
4098 keep_going (ecs);
4099 return;
4100 }
4101 else if (step_through_delay)
4102 {
4103 /* The user issued a step when stopped at a breakpoint.
4104 Maybe we should stop, maybe we should not - the delay
4105 slot *might* correspond to a line of source. In any
4106 case, don't decide that here, just set
4107 ecs->stepping_over_breakpoint, making sure we
4108 single-step again before breakpoints are re-inserted. */
4109 ecs->event_thread->stepping_over_breakpoint = 1;
4110 }
4111 }
4112
4113 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4114 handles this event. */
4115 ecs->event_thread->control.stop_bpstat
4116 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4117 stop_pc, ecs->ptid, &ecs->ws);
4118
4119 /* The following is in case a breakpoint condition called a
4120 function. */
4121 stop_print_frame = 1;
4122
4123 /* This is where we handle "moribund" watchpoints. Unlike
4124 software breakpoint traps, hardware watchpoint traps are
4125 always distinguishable from random traps. If no high-level
4126 watchpoint is associated with the reported stop data address
4127 anymore, then the bpstat does not explain the signal ---
4128 simply make sure to ignore it if `stopped_by_watchpoint' is
4129 set. */
4130
4131 if (debug_infrun
4132 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4133 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4134 GDB_SIGNAL_TRAP)
4135 && stopped_by_watchpoint)
4136 fprintf_unfiltered (gdb_stdlog,
4137 "infrun: no user watchpoint explains "
4138 "watchpoint SIGTRAP, ignoring\n");
4139
4140 /* NOTE: cagney/2003-03-29: These checks for a random signal
4141 at one stage in the past included checks for an inferior
4142 function call's call dummy's return breakpoint. The original
4143 comment, that went with the test, read:
4144
4145 ``End of a stack dummy. Some systems (e.g. Sony news) give
4146 another signal besides SIGTRAP, so check here as well as
4147 above.''
4148
4149 If someone ever tries to get call dummies on a
4150 non-executable stack to work (where the target would stop
4151 with something like a SIGSEGV), then those tests might need
4152 to be re-instated. Given, however, that the tests were only
4153 enabled when momentary breakpoints were not being used, I
4154 suspect that it won't be the case.
4155
4156 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4157 be necessary for call dummies on a non-executable stack on
4158 SPARC. */
4159
4160 /* See if the breakpoints module can explain the signal. */
4161 random_signal
4162 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4163 ecs->event_thread->suspend.stop_signal);
4164
4165 /* If not, perhaps stepping/nexting can. */
4166 if (random_signal)
4167 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4168 && currently_stepping (ecs->event_thread));
4169
4170 /* Perhaps the thread hit a single-step breakpoint of _another_
4171 thread. Single-step breakpoints are transparent to the
4172 breakpoints module. */
4173 if (random_signal)
4174 random_signal = !ecs->hit_singlestep_breakpoint;
4175
4176 /* No? Perhaps we got a moribund watchpoint. */
4177 if (random_signal)
4178 random_signal = !stopped_by_watchpoint;
4179
4180 /* For the program's own signals, act according to
4181 the signal handling tables. */
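/* (These tables are what the user adjusts with the "handle" command:
   stop/nostop, print/noprint, pass/nopass.)  */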
4182
4183 if (random_signal)
4184 {
4185 /* Signal not for debugging purposes. */
4186 int printed = 0;
4187 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4188 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4189
4190 if (debug_infrun)
4191 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4192 gdb_signal_to_symbol_string (stop_signal));
4193
4194 stopped_by_random_signal = 1;
4195
4196 if (signal_print[ecs->event_thread->suspend.stop_signal])
4197 {
4198 printed = 1;
4199 target_terminal_ours_for_output ();
4200 print_signal_received_reason
4201 (ecs->event_thread->suspend.stop_signal);
4202 }
4203 /* Always stop on signals if we're either just gaining control
4204 of the program, or the user explicitly requested this thread
4205 to remain stopped. */
4206 if (stop_soon != NO_STOP_QUIETLY
4207 || ecs->event_thread->stop_requested
4208 || (!inf->detaching
4209 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4210 {
4211 stop_stepping (ecs);
4212 return;
4213 }
4214 /* If not going to stop, give terminal back
4215 if we took it away. */
4216 else if (printed)
4217 target_terminal_inferior ();
4218
4219 /* Clear the signal if it should not be passed. */
4220 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4221 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4222
4223 if (ecs->event_thread->prev_pc == stop_pc
4224 && ecs->event_thread->control.trap_expected
4225 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4226 {
4227 /* We were just starting a new sequence, attempting to
4228 single-step off of a breakpoint and expecting a SIGTRAP.
4229 Instead this signal arrives. This signal will take us out
4230 of the stepping range, so GDB needs to remember, when the
4231 signal handler returns, to resume stepping off that
4232 breakpoint. */
4233 /* To simplify things, "continue" is forced to use the same
4234 code paths as single-step - set a breakpoint at the
4235 signal return address and then, once hit, step off that
4236 breakpoint. */
4237 if (debug_infrun)
4238 fprintf_unfiltered (gdb_stdlog,
4239 "infrun: signal arrived while stepping over "
4240 "breakpoint\n");
4241
4242 insert_hp_step_resume_breakpoint_at_frame (frame);
4243 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4244 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4245 ecs->event_thread->control.trap_expected = 0;
4246
4247 /* If we were nexting/stepping some other thread, switch to
4248 it, so that we don't continue it, losing control. */
4249 if (!switch_back_to_stepped_thread (ecs))
4250 keep_going (ecs);
4251 return;
4252 }
4253
4254 if (ecs->event_thread->control.step_range_end != 0
4255 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4256 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4257 && frame_id_eq (get_stack_frame_id (frame),
4258 ecs->event_thread->control.step_stack_frame_id)
4259 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4260 {
4261 /* The inferior is about to take a signal that will take it
4262 out of the single step range. Set a breakpoint at the
4263 current PC (which is presumably where the signal handler
4264 will eventually return) and then allow the inferior to
4265 run free.
4266
4267 Note that this is only needed for a signal delivered
4268 while in the single-step range. Nested signals aren't a
4269 problem as they eventually all return. */
4270 if (debug_infrun)
4271 fprintf_unfiltered (gdb_stdlog,
4272 "infrun: signal may take us out of "
4273 "single-step range\n");
4274
4275 insert_hp_step_resume_breakpoint_at_frame (frame);
4276 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4277 ecs->event_thread->control.trap_expected = 0;
4278 keep_going (ecs);
4279 return;
4280 }
4281
4282 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4283 when either there's a nested signal, or when there's a
4284 pending signal enabled just as the signal handler returns
4285 (leaving the inferior at the step-resume-breakpoint without
4286 actually executing it). Either way continue until the
4287 breakpoint is really hit. */
4288
4289 if (!switch_back_to_stepped_thread (ecs))
4290 {
4291 if (debug_infrun)
4292 fprintf_unfiltered (gdb_stdlog,
4293 "infrun: random signal, keep going\n");
4294
4295 keep_going (ecs);
4296 }
4297 return;
4298 }
4299
4300 process_event_stop_test (ecs);
4301 }
4302
4303 /* Come here when we've got some debug event / signal we can explain
4304 (IOW, not a random signal), and test whether it should cause a
4305 stop, or whether we should resume the inferior (transparently).
4306 E.g., could be a breakpoint whose condition evaluates false; we
4307 could be still stepping within the line; etc. */
4308
4309 static void
4310 process_event_stop_test (struct execution_control_state *ecs)
4311 {
4312 struct symtab_and_line stop_pc_sal;
4313 struct frame_info *frame;
4314 struct gdbarch *gdbarch;
4315 CORE_ADDR jmp_buf_pc;
4316 struct bpstat_what what;
4317
4318 /* Handle cases caused by hitting a breakpoint. */
4319
4320 frame = get_current_frame ();
4321 gdbarch = get_frame_arch (frame);
4322
4323 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4324
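/* If the stop was at the return breakpoint of an inferior function
   call (a "call dummy"), record that so the stop machinery can pop
   the dummy frame.  */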
4325 if (what.call_dummy)
4326 {
4327 stop_stack_dummy = what.call_dummy;
4328 }
4329
4330 /* If we hit an internal event that triggers symbol changes, the
4331 current frame will be invalidated within bpstat_what (e.g., if we
4332 hit an internal solib event). Re-fetch it. */
4333 frame = get_current_frame ();
4334 gdbarch = get_frame_arch (frame);
4335
4336 switch (what.main_action)
4337 {
4338 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4339 /* If we hit the breakpoint at longjmp while stepping, we
4340 install a momentary breakpoint at the target of the
4341 jmp_buf. */
4342
4343 if (debug_infrun)
4344 fprintf_unfiltered (gdb_stdlog,
4345 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4346
4347 ecs->event_thread->stepping_over_breakpoint = 1;
4348
4349 if (what.is_longjmp)
4350 {
4351 struct value *arg_value;
4352
4353 /* If we set the longjmp breakpoint via a SystemTap probe,
4354 then use it to extract the arguments. The destination PC
4355 is the third argument to the probe. */
4356 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4357 if (arg_value)
4358 jmp_buf_pc = value_as_address (arg_value);
4359 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4360 || !gdbarch_get_longjmp_target (gdbarch,
4361 frame, &jmp_buf_pc))
4362 {
4363 if (debug_infrun)
4364 fprintf_unfiltered (gdb_stdlog,
4365 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4366 "(!gdbarch_get_longjmp_target)\n");
4367 keep_going (ecs);
4368 return;
4369 }
4370
4371 /* Insert a breakpoint at resume address. */
4372 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4373 }
4374 else
4375 check_exception_resume (ecs, frame);
4376 keep_going (ecs);
4377 return;
4378
4379 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4380 {
4381 struct frame_info *init_frame;
4382
4383 /* There are several cases to consider.
4384
4385 1. The initiating frame no longer exists. In this case we
4386 must stop, because the exception or longjmp has gone too
4387 far.
4388
4389 2. The initiating frame exists, and is the same as the
4390 current frame. We stop, because the exception or longjmp
4391 has been caught.
4392
4393 3. The initiating frame exists and is different from the
4394 current frame. This means the exception or longjmp has
4395 been caught beneath the initiating frame, so keep going.
4396
4397 4. longjmp breakpoint has been placed just to protect
4398 against stale dummy frames and user is not interested in
4399 stopping around longjmps. */
4400
4401 if (debug_infrun)
4402 fprintf_unfiltered (gdb_stdlog,
4403 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4404
4405 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4406 != NULL);
4407 delete_exception_resume_breakpoint (ecs->event_thread);
4408
4409 if (what.is_longjmp)
4410 {
4411 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4412
4413 if (!frame_id_p (ecs->event_thread->initiating_frame))
4414 {
4415 /* Case 4. */
4416 keep_going (ecs);
4417 return;
4418 }
4419 }
4420
4421 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4422
4423 if (init_frame)
4424 {
4425 struct frame_id current_id
4426 = get_frame_id (get_current_frame ());
4427 if (frame_id_eq (current_id,
4428 ecs->event_thread->initiating_frame))
4429 {
4430 /* Case 2. Fall through. */
4431 }
4432 else
4433 {
4434 /* Case 3. */
4435 keep_going (ecs);
4436 return;
4437 }
4438 }
4439
4440 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4441 exists. */
4442 delete_step_resume_breakpoint (ecs->event_thread);
4443
4444 ecs->event_thread->control.stop_step = 1;
4445 print_end_stepping_range_reason ();
4446 stop_stepping (ecs);
4447 }
4448 return;
4449
4450 case BPSTAT_WHAT_SINGLE:
4451 if (debug_infrun)
4452 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4453 ecs->event_thread->stepping_over_breakpoint = 1;
4454 /* Still need to check other stuff, at least the case where we
4455 are stepping and step out of the right range. */
4456 break;
4457
4458 case BPSTAT_WHAT_STEP_RESUME:
4459 if (debug_infrun)
4460 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4461
4462 delete_step_resume_breakpoint (ecs->event_thread);
4463 if (ecs->event_thread->control.proceed_to_finish
4464 && execution_direction == EXEC_REVERSE)
4465 {
4466 struct thread_info *tp = ecs->event_thread;
4467
4468 /* We are finishing a function in reverse, and just hit the
4469 step-resume breakpoint at the start address of the
4470 function, and we're almost there -- just need to back up
4471 by one more single-step, which should take us back to the
4472 function call. */
4473 tp->control.step_range_start = tp->control.step_range_end = 1;
4474 keep_going (ecs);
4475 return;
4476 }
4477 fill_in_stop_func (gdbarch, ecs);
4478 if (stop_pc == ecs->stop_func_start
4479 && execution_direction == EXEC_REVERSE)
4480 {
4481 /* We are stepping over a function call in reverse, and just
4482 hit the step-resume breakpoint at the start address of
4483 the function. Go back to single-stepping, which should
4484 take us back to the function call. */
4485 ecs->event_thread->stepping_over_breakpoint = 1;
4486 keep_going (ecs);
4487 return;
4488 }
4489 break;
4490
4491 case BPSTAT_WHAT_STOP_NOISY:
4492 if (debug_infrun)
4493 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4494 stop_print_frame = 1;
4495
4496 /* Assume the thread stopped for a breakpoint. We'll still check
4497 whether a/the breakpoint is there when the thread is next
4498 resumed. */
4499 ecs->event_thread->stepping_over_breakpoint = 1;
4500
4501 stop_stepping (ecs);
4502 return;
4503
4504 case BPSTAT_WHAT_STOP_SILENT:
4505 if (debug_infrun)
4506 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4507 stop_print_frame = 0;
4508
4509 /* Assume the thread stopped for a breakpoint. We'll still check
4510 whether a/the breakpoint is there when the thread is next
4511 resumed. */
4512 ecs->event_thread->stepping_over_breakpoint = 1;
4513 stop_stepping (ecs);
4514 return;
4515
4516 case BPSTAT_WHAT_HP_STEP_RESUME:
4517 if (debug_infrun)
4518 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4519
4520 delete_step_resume_breakpoint (ecs->event_thread);
4521 if (ecs->event_thread->step_after_step_resume_breakpoint)
4522 {
4523 /* Back when the step-resume breakpoint was inserted, we
4524 were trying to single-step off a breakpoint. Go back to
4525 doing that. */
4526 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4527 ecs->event_thread->stepping_over_breakpoint = 1;
4528 keep_going (ecs);
4529 return;
4530 }
4531 break;
4532
4533 case BPSTAT_WHAT_KEEP_CHECKING:
4534 break;
4535 }
4536
4537 /* We come here if we hit a breakpoint but should not stop for it.
4538 Possibly we also were stepping and should stop for that. So fall
4539 through and test for stepping. But, if not stepping, do not
4540 stop. */
4541
4542 /* In all-stop mode, if we're currently stepping but have stopped in
4543 some other thread, we need to switch back to the stepped thread. */
4544 if (switch_back_to_stepped_thread (ecs))
4545 return;
4546
4547 if (ecs->event_thread->control.step_resume_breakpoint)
4548 {
4549 if (debug_infrun)
4550 fprintf_unfiltered (gdb_stdlog,
4551 "infrun: step-resume breakpoint is inserted\n");
4552
4553 /* Having a step-resume breakpoint overrides anything
4554 else having to do with stepping commands until
4555 that breakpoint is reached. */
4556 keep_going (ecs);
4557 return;
4558 }
4559
4560 if (ecs->event_thread->control.step_range_end == 0)
4561 {
4562 if (debug_infrun)
4563 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4564 /* Likewise if we aren't even stepping. */
4565 keep_going (ecs);
4566 return;
4567 }
4568
4569 /* Re-fetch current thread's frame in case the code above caused
4570 the frame cache to be re-initialized, making our FRAME variable
4571 a dangling pointer. */
4572 frame = get_current_frame ();
4573 gdbarch = get_frame_arch (frame);
4574 fill_in_stop_func (gdbarch, ecs);
4575
4576 /* If stepping through a line, keep going if still within it.
4577
4578 Note that step_range_end is the address of the first instruction
4579 beyond the step range, and NOT the address of the last instruction
4580 within it!
4581
4582 Note also that during reverse execution, we may be stepping
4583 through a function epilogue and therefore must detect when
4584 the current-frame changes in the middle of a line. */
4585
4586 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4587 && (execution_direction != EXEC_REVERSE
4588 || frame_id_eq (get_frame_id (frame),
4589 ecs->event_thread->control.step_frame_id)))
4590 {
4591 if (debug_infrun)
4592 fprintf_unfiltered
4593 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4594 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4595 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4596
4597 /* Tentatively re-enable range stepping; `resume' disables it if
4598 necessary (e.g., if we're stepping over a breakpoint or we
4599 have software watchpoints). */
4600 ecs->event_thread->control.may_range_step = 1;
4601
4602 /* When stepping backward, stop at beginning of line range
4603 (unless it's the function entry point, in which case
4604 keep going back to the call point). */
4605 if (stop_pc == ecs->event_thread->control.step_range_start
4606 && stop_pc != ecs->stop_func_start
4607 && execution_direction == EXEC_REVERSE)
4608 {
4609 ecs->event_thread->control.stop_step = 1;
4610 print_end_stepping_range_reason ();
4611 stop_stepping (ecs);
4612 }
4613 else
4614 keep_going (ecs);
4615
4616 return;
4617 }
4618
4619 /* We stepped out of the stepping range. */
4620
4621 /* If we are stepping at the source level and entered the runtime
4622 loader dynamic symbol resolution code...
4623
4624 EXEC_FORWARD: we keep on single stepping until we exit the run
4625 time loader code and reach the callee's address.
4626
4627 EXEC_REVERSE: we've already executed the callee (backward), and
4628 the runtime loader code is handled just like any other
4629 undebuggable function call. Now we need only keep stepping
4630 backward through the trampoline code, and that's handled further
4631 down, so there is nothing for us to do here. */
4632
4633 if (execution_direction != EXEC_REVERSE
4634 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4635 && in_solib_dynsym_resolve_code (stop_pc))
4636 {
4637 CORE_ADDR pc_after_resolver =
4638 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4639
4640 if (debug_infrun)
4641 fprintf_unfiltered (gdb_stdlog,
4642 "infrun: stepped into dynsym resolve code\n");
4643
4644 if (pc_after_resolver)
4645 {
4646 /* Set up a step-resume breakpoint at the address
4647 indicated by SKIP_SOLIB_RESOLVER. */
4648 struct symtab_and_line sr_sal;
4649
4650 init_sal (&sr_sal);
4651 sr_sal.pc = pc_after_resolver;
4652 sr_sal.pspace = get_frame_program_space (frame);
4653
4654 insert_step_resume_breakpoint_at_sal (gdbarch,
4655 sr_sal, null_frame_id);
4656 }
4657
4658 keep_going (ecs);
4659 return;
4660 }
4661
4662 if (ecs->event_thread->control.step_range_end != 1
4663 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4664 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4665 && get_frame_type (frame) == SIGTRAMP_FRAME)
4666 {
4667 if (debug_infrun)
4668 fprintf_unfiltered (gdb_stdlog,
4669 "infrun: stepped into signal trampoline\n");
4670 /* The inferior, while doing a "step" or "next", has ended up in
4671 a signal trampoline (either by a signal being delivered or by
4672 the signal handler returning). Just single-step until the
4673 inferior leaves the trampoline (either by calling the handler
4674 or returning). */
4675 keep_going (ecs);
4676 return;
4677 }
4678
4679 /* If we're in the return path from a shared library trampoline,
4680 we want to proceed through the trampoline when stepping. */
4681 /* macro/2012-04-25: This needs to come before the subroutine
4682 call check below as on some targets return trampolines look
4683 like subroutine calls (MIPS16 return thunks). */
4684 if (gdbarch_in_solib_return_trampoline (gdbarch,
4685 stop_pc, ecs->stop_func_name)
4686 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4687 {
4688 /* Determine where this trampoline returns. */
4689 CORE_ADDR real_stop_pc;
4690
4691 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4692
4693 if (debug_infrun)
4694 fprintf_unfiltered (gdb_stdlog,
4695 "infrun: stepped into solib return tramp\n");
4696
4697 /* Only proceed through if we know where it's going. */
4698 if (real_stop_pc)
4699 {
4700 /* And put the step-breakpoint there and go until there. */
4701 struct symtab_and_line sr_sal;
4702
4703 init_sal (&sr_sal); /* initialize to zeroes */
4704 sr_sal.pc = real_stop_pc;
4705 sr_sal.section = find_pc_overlay (sr_sal.pc);
4706 sr_sal.pspace = get_frame_program_space (frame);
4707
4708 /* Do not specify what the fp should be when we stop since
4709 on some machines the prologue is where the new fp value
4710 is established. */
4711 insert_step_resume_breakpoint_at_sal (gdbarch,
4712 sr_sal, null_frame_id);
4713
4714 /* Restart without fiddling with the step ranges or
4715 other state. */
4716 keep_going (ecs);
4717 return;
4718 }
4719 }
4720
4721 /* Check for subroutine calls. The check for the current frame
4722 equalling the step ID is not necessary - the check of the
4723 previous frame's ID is sufficient - but it is a common case and
4724 cheaper than checking the previous frame's ID.
4725
4726 NOTE: frame_id_eq will never report two invalid frame IDs as
4727 being equal, so to get into this block, both the current and
4728 previous frame must have valid frame IDs. */
4729 /* The outer_frame_id check is a heuristic to detect stepping
4730 through startup code. If we step over an instruction which
4731 sets the stack pointer from an invalid value to a valid value,
4732 we may detect that as a subroutine call from the mythical
4733 "outermost" function. This could be fixed by marking
4734 outermost frames as !stack_p,code_p,special_p. Then the
4735 initial outermost frame, before sp was valid, would
4736 have code_addr == &_start. See the comment in frame_id_eq
4737 for more. */
4738 if (!frame_id_eq (get_stack_frame_id (frame),
4739 ecs->event_thread->control.step_stack_frame_id)
4740 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4741 ecs->event_thread->control.step_stack_frame_id)
4742 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4743 outer_frame_id)
4744 || step_start_function != find_pc_function (stop_pc))))
4745 {
4746 CORE_ADDR real_stop_pc;
4747
4748 if (debug_infrun)
4749 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4750
4751 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4752 || ((ecs->event_thread->control.step_range_end == 1)
4753 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4754 ecs->stop_func_start)))
4755 {
4756 /* I presume that step_over_calls is only 0 when we're
4757 supposed to be stepping at the assembly language level
4758 ("stepi"). Just stop. */
4759 /* Also, maybe we just did a "nexti" inside a prologue, so we
4760 thought it was a subroutine call but it was not. Stop as
4761 well. FENN */
4762 /* And this works the same backward as frontward. MVS */
4763 ecs->event_thread->control.stop_step = 1;
4764 print_end_stepping_range_reason ();
4765 stop_stepping (ecs);
4766 return;
4767 }
4768
4769 /* Reverse stepping through solib trampolines. */
4770
4771 if (execution_direction == EXEC_REVERSE
4772 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4773 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4774 || (ecs->stop_func_start == 0
4775 && in_solib_dynsym_resolve_code (stop_pc))))
4776 {
4777 /* Any solib trampoline code can be handled in reverse
4778 by simply continuing to single-step. We have already
4779 executed the solib function (backwards), and a few
4780 steps will take us back through the trampoline to the
4781 caller. */
4782 keep_going (ecs);
4783 return;
4784 }
4785
4786 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4787 {
4788 /* We're doing a "next".
4789
4790 Normal (forward) execution: set a breakpoint at the
4791 callee's return address (the address at which the caller
4792 will resume).
4793
4794 Reverse (backward) execution: set the step-resume
4795 breakpoint at the start of the function that we just
4796 stepped into (backwards), and continue to there. When we
4797 get there, we'll need to single-step back to the caller. */
4798
4799 if (execution_direction == EXEC_REVERSE)
4800 {
4801 /* If we're already at the start of the function, we've either
4802 just stepped backward into a single instruction function,
4803 or stepped back out of a signal handler to the first instruction
4804 of the function. Just keep going, which will single-step back
4805 to the caller. */
4806 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4807 {
4808 struct symtab_and_line sr_sal;
4809
4810 /* Normal function call return (static or dynamic). */
4811 init_sal (&sr_sal);
4812 sr_sal.pc = ecs->stop_func_start;
4813 sr_sal.pspace = get_frame_program_space (frame);
4814 insert_step_resume_breakpoint_at_sal (gdbarch,
4815 sr_sal, null_frame_id);
4816 }
4817 }
4818 else
4819 insert_step_resume_breakpoint_at_caller (frame);
4820
4821 keep_going (ecs);
4822 return;
4823 }
4824
4825 /* If we are in a function call trampoline (a stub between the
4826 calling routine and the real function), locate the real
4827 function. That's what tells us (a) whether we want to step
4828 into it at all, and (b) what prologue we want to run to the
4829 end of, if we do step into it. */
4830 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4831 if (real_stop_pc == 0)
4832 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4833 if (real_stop_pc != 0)
4834 ecs->stop_func_start = real_stop_pc;
4835
4836 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4837 {
4838 struct symtab_and_line sr_sal;
4839
4840 init_sal (&sr_sal);
4841 sr_sal.pc = ecs->stop_func_start;
4842 sr_sal.pspace = get_frame_program_space (frame);
4843
4844 insert_step_resume_breakpoint_at_sal (gdbarch,
4845 sr_sal, null_frame_id);
4846 keep_going (ecs);
4847 return;
4848 }
4849
4850 /* If we have line number information for the function we are
4851 thinking of stepping into and the function isn't on the skip
4852 list, step into it.
4853
4854 If there are several symtabs at that PC (e.g. with include
4855 files), just want to know whether *any* of them have line
4856 numbers. find_pc_line handles this. */
4857 {
4858 struct symtab_and_line tmp_sal;
4859
4860 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4861 if (tmp_sal.line != 0
4862 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4863 &tmp_sal))
4864 {
4865 if (execution_direction == EXEC_REVERSE)
4866 handle_step_into_function_backward (gdbarch, ecs);
4867 else
4868 handle_step_into_function (gdbarch, ecs);
4869 return;
4870 }
4871 }
4872
4873 /* If we have no line number and the step-stop-if-no-debug is
4874 set, we stop the step so that the user has a chance to switch
4875 to assembly mode. */
4876 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4877 && step_stop_if_no_debug)
4878 {
4879 ecs->event_thread->control.stop_step = 1;
4880 print_end_stepping_range_reason ();
4881 stop_stepping (ecs);
4882 return;
4883 }
4884
4885 if (execution_direction == EXEC_REVERSE)
4886 {
4887 /* If we're already at the start of the function, we've either just
4888 stepped backward into a single instruction function without line
4889 number info, or stepped back out of a signal handler to the first
4890 instruction of the function without line number info. Just keep
4891 going, which will single-step back to the caller. */
4892 if (ecs->stop_func_start != stop_pc)
4893 {
4894 /* Set a breakpoint at callee's start address.
4895 From there we can step once and be back in the caller. */
4896 struct symtab_and_line sr_sal;
4897
4898 init_sal (&sr_sal);
4899 sr_sal.pc = ecs->stop_func_start;
4900 sr_sal.pspace = get_frame_program_space (frame);
4901 insert_step_resume_breakpoint_at_sal (gdbarch,
4902 sr_sal, null_frame_id);
4903 }
4904 }
4905 else
4906 /* Set a breakpoint at callee's return address (the address
4907 at which the caller will resume). */
4908 insert_step_resume_breakpoint_at_caller (frame);
4909
4910 keep_going (ecs);
4911 return;
4912 }
4913
4914 /* Reverse stepping through solib trampolines. */
4915
4916 if (execution_direction == EXEC_REVERSE
4917 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4918 {
4919 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4920 || (ecs->stop_func_start == 0
4921 && in_solib_dynsym_resolve_code (stop_pc)))
4922 {
4923 /* Any solib trampoline code can be handled in reverse
4924 by simply continuing to single-step. We have already
4925 executed the solib function (backwards), and a few
4926 steps will take us back through the trampoline to the
4927 caller. */
4928 keep_going (ecs);
4929 return;
4930 }
4931 else if (in_solib_dynsym_resolve_code (stop_pc))
4932 {
4933 /* Stepped backward into the solib dynsym resolver.
4934 Set a breakpoint at its start and continue, then
4935 one more step will take us out. */
4936 struct symtab_and_line sr_sal;
4937
4938 init_sal (&sr_sal);
4939 sr_sal.pc = ecs->stop_func_start;
4940 sr_sal.pspace = get_frame_program_space (frame);
4941 insert_step_resume_breakpoint_at_sal (gdbarch,
4942 sr_sal, null_frame_id);
4943 keep_going (ecs);
4944 return;
4945 }
4946 }
4947
4948 stop_pc_sal = find_pc_line (stop_pc, 0);
4949
4950 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4951 the trampoline processing logic; however, there are some trampolines
4952 that have no names, so we should do trampoline handling first. */
4953 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4954 && ecs->stop_func_name == NULL
4955 && stop_pc_sal.line == 0)
4956 {
4957 if (debug_infrun)
4958 fprintf_unfiltered (gdb_stdlog,
4959 "infrun: stepped into undebuggable function\n");
4960
4961 /* The inferior just stepped into, or returned to, an
4962 undebuggable function (where there is no debugging information
4963 and no line number corresponding to the address where the
4964 inferior stopped). Since we want to skip this kind of code,
4965 we keep going until the inferior returns from this
4966 function - unless the user has asked us not to (via
4967 set step-mode) or we no longer know how to get back
4968 to the call site. */
4969 if (step_stop_if_no_debug
4970 || !frame_id_p (frame_unwind_caller_id (frame)))
4971 {
4972 /* If we have no line number and the step-stop-if-no-debug
4973 is set, we stop the step so that the user has a chance to
4974 switch to assembly mode. */
4975 ecs->event_thread->control.stop_step = 1;
4976 print_end_stepping_range_reason ();
4977 stop_stepping (ecs);
4978 return;
4979 }
4980 else
4981 {
4982 /* Set a breakpoint at callee's return address (the address
4983 at which the caller will resume). */
4984 insert_step_resume_breakpoint_at_caller (frame);
4985 keep_going (ecs);
4986 return;
4987 }
4988 }
4989
4990 if (ecs->event_thread->control.step_range_end == 1)
4991 {
4992 /* It is stepi or nexti. We always want to stop stepping after
4993 one instruction. */
4994 if (debug_infrun)
4995 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4996 ecs->event_thread->control.stop_step = 1;
4997 print_end_stepping_range_reason ();
4998 stop_stepping (ecs);
4999 return;
5000 }
5001
5002 if (stop_pc_sal.line == 0)
5003 {
5004 /* We have no line number information. That means to stop
5005 stepping (does this always happen right after one instruction,
5006 when we do "s" in a function with no line numbers,
5007 or can this happen as a result of a return or longjmp?). */
5008 if (debug_infrun)
5009 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5010 ecs->event_thread->control.stop_step = 1;
5011 print_end_stepping_range_reason ();
5012 stop_stepping (ecs);
5013 return;
5014 }
5015
5016 /* Look for "calls" to inlined functions, part one. If the inline
5017 frame machinery detected some skipped call sites, we have entered
5018 a new inline function. */
5019
5020 if (frame_id_eq (get_frame_id (get_current_frame ()),
5021 ecs->event_thread->control.step_frame_id)
5022 && inline_skipped_frames (ecs->ptid))
5023 {
5024 struct symtab_and_line call_sal;
5025
5026 if (debug_infrun)
5027 fprintf_unfiltered (gdb_stdlog,
5028 "infrun: stepped into inlined function\n");
5029
5030 find_frame_sal (get_current_frame (), &call_sal);
5031
5032 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5033 {
5034 /* For "step", we're going to stop. But if the call site
5035 for this inlined function is on the same source line as
5036 we were previously stepping, go down into the function
5037 first. Otherwise stop at the call site. */
5038
5039 if (call_sal.line == ecs->event_thread->current_line
5040 && call_sal.symtab == ecs->event_thread->current_symtab)
5041 step_into_inline_frame (ecs->ptid);
5042
5043 ecs->event_thread->control.stop_step = 1;
5044 print_end_stepping_range_reason ();
5045 stop_stepping (ecs);
5046 return;
5047 }
5048 else
5049 {
5050 /* For "next", we should stop at the call site if it is on a
5051 different source line. Otherwise continue through the
5052 inlined function. */
5053 if (call_sal.line == ecs->event_thread->current_line
5054 && call_sal.symtab == ecs->event_thread->current_symtab)
5055 keep_going (ecs);
5056 else
5057 {
5058 ecs->event_thread->control.stop_step = 1;
5059 print_end_stepping_range_reason ();
5060 stop_stepping (ecs);
5061 }
5062 return;
5063 }
5064 }
5065
5066 /* Look for "calls" to inlined functions, part two. If we are still
5067 in the same real function we were stepping through, but we have
5068 to go further up to find the exact frame ID, we are stepping
5069 through a more inlined call beyond its call site. */
5070
5071 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5072 && !frame_id_eq (get_frame_id (get_current_frame ()),
5073 ecs->event_thread->control.step_frame_id)
5074 && stepped_in_from (get_current_frame (),
5075 ecs->event_thread->control.step_frame_id))
5076 {
5077 if (debug_infrun)
5078 fprintf_unfiltered (gdb_stdlog,
5079 "infrun: stepping through inlined function\n");
5080
5081 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5082 keep_going (ecs);
5083 else
5084 {
5085 ecs->event_thread->control.stop_step = 1;
5086 print_end_stepping_range_reason ();
5087 stop_stepping (ecs);
5088 }
5089 return;
5090 }
5091
5092 if ((stop_pc == stop_pc_sal.pc)
5093 && (ecs->event_thread->current_line != stop_pc_sal.line
5094 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5095 {
5096 /* We are at the start of a different line. So stop. Note that
5097 we don't stop if we step into the middle of a different line.
5098 That is said to make things like for (;;) statements work
5099 better. */
5100 if (debug_infrun)
5101 fprintf_unfiltered (gdb_stdlog,
5102 "infrun: stepped to a different line\n");
5103 ecs->event_thread->control.stop_step = 1;
5104 print_end_stepping_range_reason ();
5105 stop_stepping (ecs);
5106 return;
5107 }
5108
5109 /* We aren't done stepping.
5110
5111 Optimize by setting the stepping range to the line.
5112 (We might not be in the original line, but if we entered a
5113 new line in mid-statement, we continue stepping. This makes
5114 things like for(;;) statements work better.) */
5115
5116 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5117 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5118 ecs->event_thread->control.may_range_step = 1;
5119 set_step_info (frame, stop_pc_sal);
5120
5121 if (debug_infrun)
5122 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5123 keep_going (ecs);
5124 }
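
/* Illustrative: with may_range_step set, a target that supports range
   stepping can be asked to keep stepping for as long as the PC stays
   within [step_range_start, step_range_end) without reporting each
   instruction; the remote target, for example, maps this onto a
   "vCont;r<start>,<end>" packet.  Targets without such support simply
   fall back to single-stepping one instruction at a time.  */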
5125
5126 /* In all-stop mode, if we're currently stepping but have stopped in
5127 some other thread, we may need to switch back to the stepped
5128 thread. Returns true if we set the inferior running, false if we left
5129 it stopped (and the event needs further processing). */
5130
5131 static int
5132 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5133 {
5134 if (!non_stop)
5135 {
5136 struct thread_info *tp;
5137 struct thread_info *stepping_thread;
5138 struct thread_info *step_over;
5139
5140 /* If any thread is blocked on some internal breakpoint, and we
5141 simply need to step over that breakpoint to get it going
5142 again, do that first. */
5143
5144 /* However, if we see an event for the stepping thread, then we
5145 know all other threads have been moved past their breakpoints
5146 already. Let the caller check whether the step is finished,
5147 etc., before deciding to move it past a breakpoint. */
5148 if (ecs->event_thread->control.step_range_end != 0)
5149 return 0;
5150
5151 /* Check if the current thread is blocked on an incomplete
5152 step-over, interrupted by a random signal. */
5153 if (ecs->event_thread->control.trap_expected
5154 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5155 {
5156 if (debug_infrun)
5157 {
5158 fprintf_unfiltered (gdb_stdlog,
5159 "infrun: need to finish step-over of [%s]\n",
5160 target_pid_to_str (ecs->event_thread->ptid));
5161 }
5162 keep_going (ecs);
5163 return 1;
5164 }
5165
5166 /* Check if the current thread is blocked by a single-step
5167 breakpoint of another thread. */
5168 if (ecs->hit_singlestep_breakpoint)
5169 {
5170 if (debug_infrun)
5171 {
5172 fprintf_unfiltered (gdb_stdlog,
5173 "infrun: need to step [%s] over single-step "
5174 "breakpoint\n",
5175 target_pid_to_str (ecs->ptid));
5176 }
5177 keep_going (ecs);
5178 return 1;
5179 }
5180
5181 /* Otherwise, we no longer expect a trap in the current thread.
5182 Clear the trap_expected flag before switching back -- this is
5183 what keep_going does as well, if we call it. */
5184 ecs->event_thread->control.trap_expected = 0;
5185
5186 /* If scheduler locking applies even if not stepping, there's no
5187 need to walk over threads. Above we've checked whether the
5188 current thread is stepping. If some other thread not the
5189 event thread is stepping, then it must be that scheduler
5190 locking is not in effect. */
5191 if (schedlock_applies (0))
5192 return 0;
5193
5194 /* Look for the stepping/nexting thread, and check if any thread
5195 other than the stepping thread needs to start a
5196 step-over. Do all step-overs before actually proceeding with
5197 step/next/etc. */
5198 stepping_thread = NULL;
5199 step_over = NULL;
5200 ALL_THREADS (tp)
5201 {
5202 /* Ignore threads of processes we're not resuming. */
5203 if (!sched_multi
5204 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5205 continue;
5206
5207 /* When stepping over a breakpoint, we lock all threads
5208 except the one that needs to move past the breakpoint.
5209 If a non-event thread has this set, the "incomplete
5210 step-over" check above should have caught it earlier. */
5211 gdb_assert (!tp->control.trap_expected);
5212
5213 /* Did we find the stepping thread? */
5214 if (tp->control.step_range_end)
5215 {
5216 /* Yep. There should only be one, though. */
5217 gdb_assert (stepping_thread == NULL);
5218
5219 /* The event thread is handled at the top, before we
5220 enter this loop. */
5221 gdb_assert (tp != ecs->event_thread);
5222
5223 /* If some thread other than the event thread is
5224 stepping, then scheduler locking can't be in effect,
5225 otherwise we wouldn't have resumed the current event
5226 thread in the first place. */
5227 gdb_assert (!schedlock_applies (1));
5228
5229 stepping_thread = tp;
5230 }
5231 else if (thread_still_needs_step_over (tp))
5232 {
5233 step_over = tp;
5234
5235 /* At the top we've returned early if the event thread
5236 is stepping. If some other thread not the event
5237 thread is stepping, then scheduler locking can't be
5238 in effect, and we can resume this thread. No need to
5239 keep looking for the stepping thread then. */
5240 break;
5241 }
5242 }
5243
5244 if (step_over != NULL)
5245 {
5246 tp = step_over;
5247 if (debug_infrun)
5248 {
5249 fprintf_unfiltered (gdb_stdlog,
5250 "infrun: need to step-over [%s]\n",
5251 target_pid_to_str (tp->ptid));
5252 }
5253
5254 /* Only the stepping thread should have this set. */
5255 gdb_assert (tp->control.step_range_end == 0);
5256
5257 ecs->ptid = tp->ptid;
5258 ecs->event_thread = tp;
5259 switch_to_thread (ecs->ptid);
5260 keep_going (ecs);
5261 return 1;
5262 }
5263
5264 if (stepping_thread != NULL)
5265 {
5266 struct frame_info *frame;
5267 struct gdbarch *gdbarch;
5268
5269 tp = stepping_thread;
5270
5271 /* If the stepping thread exited, then don't try to switch
5272 back and resume it, which could fail in several different
5273 ways depending on the target. Instead, just keep going.
5274
5275 We can find a stepping dead thread in the thread list in
5276 two cases:
5277
5278 - The target supports thread exit events, and when the
5279 target tries to delete the thread from the thread list,
5280 inferior_ptid pointed at the exiting thread. In such a
5281 case, calling delete_thread does not really remove the
5282 thread from the list; instead, the thread is left listed,
5283 with 'exited' state.
5284
5285 - The target's debug interface does not support thread
5286 exit events, and so we have no idea whatsoever if the
5287 previously stepping thread is still alive. For that
5288 reason, we need to synchronously query the target
5289 now. */
5290 if (is_exited (tp->ptid)
5291 || !target_thread_alive (tp->ptid))
5292 {
5293 if (debug_infrun)
5294 fprintf_unfiltered (gdb_stdlog,
5295 "infrun: not switching back to "
5296 "stepped thread, it has vanished\n");
5297
5298 delete_thread (tp->ptid);
5299 keep_going (ecs);
5300 return 1;
5301 }
5302
5303 if (debug_infrun)
5304 fprintf_unfiltered (gdb_stdlog,
5305 "infrun: switching back to stepped thread\n");
5306
5307 ecs->event_thread = tp;
5308 ecs->ptid = tp->ptid;
5309 context_switch (ecs->ptid);
5310
5311 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5312 frame = get_current_frame ();
5313 gdbarch = get_frame_arch (frame);
5314
5315 /* If the PC of the thread we were trying to single-step has
5316 changed, then that thread has trapped or been signaled,
5317 but the event has not been reported to GDB yet. Re-poll
5318 the target looking for this particular thread's event
5319 (i.e. temporarily enable schedlock) by:
5320
5321 - setting a break at the current PC
5322 - resuming that particular thread, only (by setting
5323 trap expected)
5324
5325 This prevents us from continuously moving the single-step
5326 breakpoint forward, one instruction at a time, and
5327 overstepping. */
5328
5329 if (gdbarch_software_single_step_p (gdbarch)
5330 && stop_pc != tp->prev_pc)
5331 {
5332 if (debug_infrun)
5333 fprintf_unfiltered (gdb_stdlog,
5334 "infrun: expected thread advanced also\n");
5335
5336 insert_single_step_breakpoint (get_frame_arch (frame),
5337 get_frame_address_space (frame),
5338 stop_pc);
5339 singlestep_breakpoints_inserted_p = 1;
5340 ecs->event_thread->control.trap_expected = 1;
5341 singlestep_ptid = inferior_ptid;
5342 singlestep_pc = stop_pc;
5343
5344 resume (0, GDB_SIGNAL_0);
5345 prepare_to_wait (ecs);
5346 }
5347 else
5348 {
5349 if (debug_infrun)
5350 fprintf_unfiltered (gdb_stdlog,
5351 "infrun: expected thread still "
5352 "hasn't advanced\n");
5353 keep_going (ecs);
5354 }
5355
5356 return 1;
5357 }
5358 }
5359 return 0;
5360 }
5361
5362 /* Is thread TP in the middle of single-stepping? */
5363
5364 static int
5365 currently_stepping (struct thread_info *tp)
5366 {
5367 return ((tp->control.step_range_end
5368 && tp->control.step_resume_breakpoint == NULL)
5369 || tp->control.trap_expected
5370 || bpstat_should_step ());
5371 }
5372
5373 /* Inferior has stepped into a subroutine call with source code that
5374 we should not step over. Step to the first line of code in
5375 it. */
5376
5377 static void
5378 handle_step_into_function (struct gdbarch *gdbarch,
5379 struct execution_control_state *ecs)
5380 {
5381 struct symtab *s;
5382 struct symtab_and_line stop_func_sal, sr_sal;
5383
5384 fill_in_stop_func (gdbarch, ecs);
5385
5386 s = find_pc_symtab (stop_pc);
5387 if (s && s->language != language_asm)
5388 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5389 ecs->stop_func_start);
5390
5391 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5392 /* Use the step_resume_break to step until the end of the prologue,
5393 even if that involves jumps (as it seems to on the vax under
5394 4.2). */
5395 /* If the prologue ends in the middle of a source line, continue to
5396 the end of that source line (if it is still within the function).
5397 Otherwise, just go to end of prologue. */
5398 if (stop_func_sal.end
5399 && stop_func_sal.pc != ecs->stop_func_start
5400 && stop_func_sal.end < ecs->stop_func_end)
5401 ecs->stop_func_start = stop_func_sal.end;
5402
5403 /* Architectures which require breakpoint adjustment might not be able
5404 to place a breakpoint at the computed address. If so, the test
5405 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5406 ecs->stop_func_start to an address at which a breakpoint may be
5407 legitimately placed.
5408
5409 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5410 made, GDB will enter an infinite loop when stepping through
5411 optimized code consisting of VLIW instructions which contain
5412 subinstructions corresponding to different source lines. On
5413 FR-V, it's not permitted to place a breakpoint on any but the
5414 first subinstruction of a VLIW instruction. When a breakpoint is
5415 set, GDB will adjust the breakpoint address to the beginning of
5416 the VLIW instruction. Thus, we need to make the corresponding
5417 adjustment here when computing the stop address. */
5418
5419 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5420 {
5421 ecs->stop_func_start
5422 = gdbarch_adjust_breakpoint_address (gdbarch,
5423 ecs->stop_func_start);
5424 }
5425
5426 if (ecs->stop_func_start == stop_pc)
5427 {
5428 /* We are already there: stop now. */
5429 ecs->event_thread->control.stop_step = 1;
5430 print_end_stepping_range_reason ();
5431 stop_stepping (ecs);
5432 return;
5433 }
5434 else
5435 {
5436 /* Put the step-breakpoint there and go until there. */
5437 init_sal (&sr_sal); /* initialize to zeroes */
5438 sr_sal.pc = ecs->stop_func_start;
5439 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5440 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5441
5442 /* Do not specify what the fp should be when we stop since on
5443 some machines the prologue is where the new fp value is
5444 established. */
5445 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5446
5447 /* And make sure stepping stops right away then. */
5448 ecs->event_thread->control.step_range_end
5449 = ecs->event_thread->control.step_range_start;
5450 }
5451 keep_going (ecs);
5452 }
5453
5454 /* Inferior has stepped backward into a subroutine call with source
5455 code that we should not step over. Step to the beginning of the
5456 last line of code in it. */
5457
5458 static void
5459 handle_step_into_function_backward (struct gdbarch *gdbarch,
5460 struct execution_control_state *ecs)
5461 {
5462 struct symtab *s;
5463 struct symtab_and_line stop_func_sal;
5464
5465 fill_in_stop_func (gdbarch, ecs);
5466
5467 s = find_pc_symtab (stop_pc);
5468 if (s && s->language != language_asm)
5469 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5470 ecs->stop_func_start);
5471
5472 stop_func_sal = find_pc_line (stop_pc, 0);
5473
5474 /* OK, we're just going to keep stepping here. */
5475 if (stop_func_sal.pc == stop_pc)
5476 {
5477 /* We're there already. Just stop stepping now. */
5478 ecs->event_thread->control.stop_step = 1;
5479 print_end_stepping_range_reason ();
5480 stop_stepping (ecs);
5481 }
5482 else
5483 {
5484 /* Else just reset the step range and keep going.
5485 No step-resume breakpoint, they don't work for
5486 epilogues, which can have multiple entry paths. */
5487 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5488 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5489 keep_going (ecs);
5490 }
5491 return;
5492 }
5493
5494 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5495 This is used both to step through functions and to skip over code. */
5496
5497 static void
5498 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5499 struct symtab_and_line sr_sal,
5500 struct frame_id sr_id,
5501 enum bptype sr_type)
5502 {
5503 /* There should never be more than one step-resume or longjmp-resume
5504 breakpoint per thread, so we should never be setting a new
5505 step_resume_breakpoint when one is already active. */
5506 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5507 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5508
5509 if (debug_infrun)
5510 fprintf_unfiltered (gdb_stdlog,
5511 "infrun: inserting step-resume breakpoint at %s\n",
5512 paddress (gdbarch, sr_sal.pc));
5513
5514 inferior_thread ()->control.step_resume_breakpoint
5515 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5516 }
5517
5518 void
5519 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5520 struct symtab_and_line sr_sal,
5521 struct frame_id sr_id)
5522 {
5523 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5524 sr_sal, sr_id,
5525 bp_step_resume);
5526 }
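
/* Illustrative only: callers in this file typically build up SR_SAL
   with the pattern

     struct symtab_and_line sr_sal;

     init_sal (&sr_sal);
     sr_sal.pc = ecs->stop_func_start;
     sr_sal.pspace = get_frame_program_space (frame);
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

   passing null_frame_id when the breakpoint should fire in whatever
   frame first reaches that PC, or a real frame ID (as
   insert_step_resume_breakpoint_at_caller does) to restrict it to a
   particular frame.  */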
5527
5528 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5529 This is used to skip a potential signal handler.
5530
5531 This is called with the interrupted function's frame. The signal
5532 handler, when it returns, will resume the interrupted function at
5533 RETURN_FRAME.pc. */
5534
5535 static void
5536 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5537 {
5538 struct symtab_and_line sr_sal;
5539 struct gdbarch *gdbarch;
5540
5541 gdb_assert (return_frame != NULL);
5542 init_sal (&sr_sal); /* initialize to zeros */
5543
5544 gdbarch = get_frame_arch (return_frame);
5545 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5546 sr_sal.section = find_pc_overlay (sr_sal.pc);
5547 sr_sal.pspace = get_frame_program_space (return_frame);
5548
5549 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5550 get_stack_frame_id (return_frame),
5551 bp_hp_step_resume);
5552 }
5553
5554 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5555 is used to skip a function after stepping into it (for "next" or if
5556 the called function has no debugging information).
5557
5558 The current function has almost always been reached by single
5559 stepping a call or return instruction. NEXT_FRAME belongs to the
5560 current function, and the breakpoint will be set at the caller's
5561 resume address.
5562
5563 This is a separate function rather than reusing
5564 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5565 get_prev_frame, which may stop prematurely (see the implementation
5566 of frame_unwind_caller_id for an example). */
5567
5568 static void
5569 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5570 {
5571 struct symtab_and_line sr_sal;
5572 struct gdbarch *gdbarch;
5573
5574 /* We shouldn't have gotten here if we don't know where the call site
5575 is. */
5576 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5577
5578 init_sal (&sr_sal); /* initialize to zeros */
5579
5580 gdbarch = frame_unwind_caller_arch (next_frame);
5581 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5582 frame_unwind_caller_pc (next_frame));
5583 sr_sal.section = find_pc_overlay (sr_sal.pc);
5584 sr_sal.pspace = frame_unwind_program_space (next_frame);
5585
5586 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5587 frame_unwind_caller_id (next_frame));
5588 }
5589
5590 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5591 new breakpoint at the target of a jmp_buf. The handling of
5592 longjmp-resume uses the same mechanisms used for handling
5593 "step-resume" breakpoints. */
5594
5595 static void
5596 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5597 {
5598 /* There should never be more than one longjmp-resume breakpoint per
5599 thread, so we should never be setting a new
5600 longjmp_resume_breakpoint when one is already active. */
5601 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5602
5603 if (debug_infrun)
5604 fprintf_unfiltered (gdb_stdlog,
5605 "infrun: inserting longjmp-resume breakpoint at %s\n",
5606 paddress (gdbarch, pc));
5607
5608 inferior_thread ()->control.exception_resume_breakpoint =
5609 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5610 }
5611
5612 /* Insert an exception resume breakpoint. TP is the thread throwing
5613 the exception. The block B is the block of the unwinder debug hook
5614 function. FRAME is the frame corresponding to the call to this
5615 function. SYM is the symbol of the function argument holding the
5616 target PC of the exception. */
5617
5618 static void
5619 insert_exception_resume_breakpoint (struct thread_info *tp,
5620 struct block *b,
5621 struct frame_info *frame,
5622 struct symbol *sym)
5623 {
5624 volatile struct gdb_exception e;
5625
5626 /* We want to ignore errors here. */
5627 TRY_CATCH (e, RETURN_MASK_ERROR)
5628 {
5629 struct symbol *vsym;
5630 struct value *value;
5631 CORE_ADDR handler;
5632 struct breakpoint *bp;
5633
5634 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5635 value = read_var_value (vsym, frame);
5636 /* If the value was optimized out, revert to the old behavior. */
5637 if (! value_optimized_out (value))
5638 {
5639 handler = value_as_address (value);
5640
5641 if (debug_infrun)
5642 fprintf_unfiltered (gdb_stdlog,
5643 "infrun: exception resume at %lx\n",
5644 (unsigned long) handler);
5645
5646 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5647 handler, bp_exception_resume);
5648
5649 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5650 frame = NULL;
5651
5652 bp->thread = tp->num;
5653 inferior_thread ()->control.exception_resume_breakpoint = bp;
5654 }
5655 }
5656 }
5657
5658 /* A helper for check_exception_resume that sets an
5659 exception-breakpoint based on a SystemTap probe. */
5660
5661 static void
5662 insert_exception_resume_from_probe (struct thread_info *tp,
5663 const struct bound_probe *probe,
5664 struct frame_info *frame)
5665 {
5666 struct value *arg_value;
5667 CORE_ADDR handler;
5668 struct breakpoint *bp;
5669
5670 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5671 if (!arg_value)
5672 return;
5673
5674 handler = value_as_address (arg_value);
5675
5676 if (debug_infrun)
5677 fprintf_unfiltered (gdb_stdlog,
5678 "infrun: exception resume at %s\n",
5679 paddress (get_objfile_arch (probe->objfile),
5680 handler));
5681
5682 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5683 handler, bp_exception_resume);
5684 bp->thread = tp->num;
5685 inferior_thread ()->control.exception_resume_breakpoint = bp;
5686 }
5687
5688 /* This is called when an exception has been intercepted. Check to
5689 see whether the exception's destination is of interest, and if so,
5690 set an exception resume breakpoint there. */
5691
5692 static void
5693 check_exception_resume (struct execution_control_state *ecs,
5694 struct frame_info *frame)
5695 {
5696 volatile struct gdb_exception e;
5697 struct bound_probe probe;
5698 struct symbol *func;
5699
5700 /* First see if this exception unwinding breakpoint was set via a
5701 SystemTap probe point. If so, the probe has two arguments: the
5702 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5703 set a breakpoint there. */
5704 probe = find_probe_by_pc (get_frame_pc (frame));
5705 if (probe.probe)
5706 {
5707 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5708 return;
5709 }
5710
5711 func = get_frame_function (frame);
5712 if (!func)
5713 return;
5714
5715 TRY_CATCH (e, RETURN_MASK_ERROR)
5716 {
5717 struct block *b;
5718 struct block_iterator iter;
5719 struct symbol *sym;
5720 int argno = 0;
5721
5722 /* The exception breakpoint is a thread-specific breakpoint on
5723 the unwinder's debug hook, declared as:
5724
5725 void _Unwind_DebugHook (void *cfa, void *handler);
5726
5727 The CFA argument indicates the frame to which control is
5728 about to be transferred. HANDLER is the destination PC.
5729
5730 We ignore the CFA and set a temporary breakpoint at HANDLER.
5731 This is not extremely efficient but it avoids issues in gdb
5732 with computing the DWARF CFA, and it also works even in weird
5733 cases such as throwing an exception from inside a signal
5734 handler. */
5735
5736 b = SYMBOL_BLOCK_VALUE (func);
5737 ALL_BLOCK_SYMBOLS (b, iter, sym)
5738 {
5739 if (!SYMBOL_IS_ARGUMENT (sym))
5740 continue;
5741
5742 if (argno == 0)
5743 ++argno;
5744 else
5745 {
5746 insert_exception_resume_breakpoint (ecs->event_thread,
5747 b, frame, sym);
5748 break;
5749 }
5750 }
5751 }
5752 }
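
/* Illustrative: for a C++ "throw" this typically means GDB has hit a
   breakpoint on _Unwind_DebugHook in libgcc (or on the equivalent
   SystemTap probe), read the HANDLER argument, and planted a momentary
   bp_exception_resume breakpoint at that landing pad, so that a step
   or "next" over the throw stops where the exception is caught instead
   of running away.  */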
5753
5754 static void
5755 stop_stepping (struct execution_control_state *ecs)
5756 {
5757 if (debug_infrun)
5758 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5759
5760 clear_step_over_info ();
5761
5762 /* Let callers know we don't want to wait for the inferior anymore. */
5763 ecs->wait_some_more = 0;
5764 }
5765
5766 /* Called when we should continue running the inferior, because the
5767 current event doesn't cause a user visible stop. This does the
5768 resuming part; waiting for the next event is done elsewhere. */
5769
5770 static void
5771 keep_going (struct execution_control_state *ecs)
5772 {
5773 /* Make sure normal_stop is called if we get a QUIT handled before
5774 reaching resume. */
5775 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5776
5777 /* Save the pc before execution, to compare with pc after stop. */
5778 ecs->event_thread->prev_pc
5779 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5780
5781 if (ecs->event_thread->control.trap_expected
5782 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5783 {
5784 /* We haven't yet gotten our trap, and either: intercepted a
5785 non-signal event (e.g., a fork); or took a signal which we
5786 are supposed to pass through to the inferior. Simply
5787 continue. */
5788 discard_cleanups (old_cleanups);
5789 resume (currently_stepping (ecs->event_thread),
5790 ecs->event_thread->suspend.stop_signal);
5791 }
5792 else
5793 {
5794 volatile struct gdb_exception e;
5795 struct regcache *regcache = get_current_regcache ();
5796
5797 /* Either the trap was not expected, but we are continuing
5798 anyway (if we got a signal, the user asked it be passed to
5799 the child)
5800 -- or --
5801 We got our expected trap, but decided we should resume from
5802 it.
5803
5804 We're going to run this baby now!
5805
5806 Note that insert_breakpoints won't try to re-insert
5807 already inserted breakpoints. Therefore, we don't
5808 care if breakpoints were already inserted, or not. */
5809
5810 /* If we need to step over a breakpoint, and we're not using
5811 displaced stepping to do so, insert all breakpoints
5812 (watchpoints, etc.) but the one we're stepping over, step one
5813 instruction, and then re-insert the breakpoint when that step
5814 is finished. */
5815 if ((ecs->hit_singlestep_breakpoint
5816 || thread_still_needs_step_over (ecs->event_thread))
5817 && !use_displaced_stepping (get_regcache_arch (regcache)))
5818 {
5819 set_step_over_info (get_regcache_aspace (regcache),
5820 regcache_read_pc (regcache));
5821 }
5822 else
5823 clear_step_over_info ();
5824
5825 /* Stop stepping if inserting breakpoints fails. */
5826 TRY_CATCH (e, RETURN_MASK_ERROR)
5827 {
5828 insert_breakpoints ();
5829 }
5830 if (e.reason < 0)
5831 {
5832 exception_print (gdb_stderr, e);
5833 stop_stepping (ecs);
5834 return;
5835 }
5836
5837 ecs->event_thread->control.trap_expected
5838 = (ecs->event_thread->stepping_over_breakpoint
5839 || ecs->hit_singlestep_breakpoint);
5840
5841 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5842 explicitly specifies that such a signal should be delivered
5843 to the target program). Typically, that would occur when a
5844 user is debugging a target monitor on a simulator: the target
5845 monitor sets a breakpoint; the simulator encounters this
5846 breakpoint and halts the simulation handing control to GDB;
5847 GDB, noting that the stop address doesn't map to any known
5848 breakpoint, returns control back to the simulator; the
5849 simulator then delivers the hardware equivalent of a
5850 GDB_SIGNAL_TRAP to the program being debugged. */
5851 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5852 && !signal_program[ecs->event_thread->suspend.stop_signal])
5853 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5854
5855 discard_cleanups (old_cleanups);
5856 resume (currently_stepping (ecs->event_thread),
5857 ecs->event_thread->suspend.stop_signal);
5858 }
5859
5860 prepare_to_wait (ecs);
5861 }
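
/* Illustrative summary of the non-displaced step-over path above: the
   breakpoint location being stepped over is recorded via
   set_step_over_info so that insert_breakpoints leaves that one
   address unarmed, the thread is resumed for a single step with
   trap_expected set, and the breakpoint is re-inserted once the
   step-over has finished.  */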
5862
5863 /* This function normally comes after a resume, before
5864 handle_inferior_event exits. It takes care of any last bits of
5865 housekeeping, and sets the all-important wait_some_more flag. */
5866
5867 static void
5868 prepare_to_wait (struct execution_control_state *ecs)
5869 {
5870 if (debug_infrun)
5871 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5872
5873 /* This is the old end of the while loop. Let everybody know we
5874 want to wait for the inferior some more and get called again
5875 soon. */
5876 ecs->wait_some_more = 1;
5877 }
5878
5879 /* Several print_*_reason functions to print why the inferior has stopped.
5880 We always print something when the inferior exits, or receives a signal.
5881 The rest of the cases are dealt with later on in normal_stop and
5882 print_it_typical. Ideally there should be a call to one of these
5883 print_*_reason functions from handle_inferior_event each time
5884 stop_stepping is called. */
5885
5886 /* Print why the inferior has stopped.
5887 We are done with a step/next/si/ni command, so print why the inferior
5888 has stopped. For now the CLI prints nothing; the MI reason is emitted
5889 only if we are not in the middle of doing a "step n" operation for n > 1. */
5890
5891 static void
5892 print_end_stepping_range_reason (void)
5893 {
5894 if ((!inferior_thread ()->step_multi
5895 || !inferior_thread ()->control.stop_step)
5896 && ui_out_is_mi_like_p (current_uiout))
5897 ui_out_field_string (current_uiout, "reason",
5898 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5899 }
5900
5901 /* The inferior was terminated by a signal, print why it stopped. */
5902
5903 static void
5904 print_signal_exited_reason (enum gdb_signal siggnal)
5905 {
5906 struct ui_out *uiout = current_uiout;
5907
5908 annotate_signalled ();
5909 if (ui_out_is_mi_like_p (uiout))
5910 ui_out_field_string
5911 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5912 ui_out_text (uiout, "\nProgram terminated with signal ");
5913 annotate_signal_name ();
5914 ui_out_field_string (uiout, "signal-name",
5915 gdb_signal_to_name (siggnal));
5916 annotate_signal_name_end ();
5917 ui_out_text (uiout, ", ");
5918 annotate_signal_string ();
5919 ui_out_field_string (uiout, "signal-meaning",
5920 gdb_signal_to_string (siggnal));
5921 annotate_signal_string_end ();
5922 ui_out_text (uiout, ".\n");
5923 ui_out_text (uiout, "The program no longer exists.\n");
5924 }
5925
5926 /* The inferior program is finished, print why it stopped. */
5927
5928 static void
5929 print_exited_reason (int exitstatus)
5930 {
5931 struct inferior *inf = current_inferior ();
5932 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5933 struct ui_out *uiout = current_uiout;
5934
5935 annotate_exited (exitstatus);
5936 if (exitstatus)
5937 {
5938 if (ui_out_is_mi_like_p (uiout))
5939 ui_out_field_string (uiout, "reason",
5940 async_reason_lookup (EXEC_ASYNC_EXITED));
5941 ui_out_text (uiout, "[Inferior ");
5942 ui_out_text (uiout, plongest (inf->num));
5943 ui_out_text (uiout, " (");
5944 ui_out_text (uiout, pidstr);
5945 ui_out_text (uiout, ") exited with code ");
5946 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5947 ui_out_text (uiout, "]\n");
5948 }
5949 else
5950 {
5951 if (ui_out_is_mi_like_p (uiout))
5952 ui_out_field_string
5953 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5954 ui_out_text (uiout, "[Inferior ");
5955 ui_out_text (uiout, plongest (inf->num));
5956 ui_out_text (uiout, " (");
5957 ui_out_text (uiout, pidstr);
5958 ui_out_text (uiout, ") exited normally]\n");
5959 }
5960 /* Support the --return-child-result option. */
5961 return_child_result_value = exitstatus;
5962 }
5963
5964 /* Signal received, print why the inferior has stopped. The signal table
5965 tells us to print about it. */
5966
5967 static void
5968 print_signal_received_reason (enum gdb_signal siggnal)
5969 {
5970 struct ui_out *uiout = current_uiout;
5971
5972 annotate_signal ();
5973
5974 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5975 {
5976 struct thread_info *t = inferior_thread ();
5977
5978 ui_out_text (uiout, "\n[");
5979 ui_out_field_string (uiout, "thread-name",
5980 target_pid_to_str (t->ptid));
5981 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5982 ui_out_text (uiout, " stopped");
5983 }
5984 else
5985 {
5986 ui_out_text (uiout, "\nProgram received signal ");
5987 annotate_signal_name ();
5988 if (ui_out_is_mi_like_p (uiout))
5989 ui_out_field_string
5990 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5991 ui_out_field_string (uiout, "signal-name",
5992 gdb_signal_to_name (siggnal));
5993 annotate_signal_name_end ();
5994 ui_out_text (uiout, ", ");
5995 annotate_signal_string ();
5996 ui_out_field_string (uiout, "signal-meaning",
5997 gdb_signal_to_string (siggnal));
5998 annotate_signal_string_end ();
5999 }
6000 ui_out_text (uiout, ".\n");
6001 }
6002
6003 /* Reverse execution: target ran out of history info, print why the inferior
6004 has stopped. */
6005
6006 static void
6007 print_no_history_reason (void)
6008 {
6009 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
6010 }
6011
6012 /* Print current location without a level number, if we have changed
6013 functions or hit a breakpoint. Print source line if we have one.
6014 bpstat_print contains the logic deciding in detail what to print,
6015 based on the event(s) that just occurred. */
6016
6017 void
6018 print_stop_event (struct target_waitstatus *ws)
6019 {
6020 int bpstat_ret;
6021 int source_flag;
6022 int do_frame_printing = 1;
6023 struct thread_info *tp = inferior_thread ();
6024
6025 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6026 switch (bpstat_ret)
6027 {
6028 case PRINT_UNKNOWN:
6029 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6030 should) carry around the function and does (or should) use
6031 that when doing a frame comparison. */
6032 if (tp->control.stop_step
6033 && frame_id_eq (tp->control.step_frame_id,
6034 get_frame_id (get_current_frame ()))
6035 && step_start_function == find_pc_function (stop_pc))
6036 {
6037 /* Finished step, just print source line. */
6038 source_flag = SRC_LINE;
6039 }
6040 else
6041 {
6042 /* Print location and source line. */
6043 source_flag = SRC_AND_LOC;
6044 }
6045 break;
6046 case PRINT_SRC_AND_LOC:
6047 /* Print location and source line. */
6048 source_flag = SRC_AND_LOC;
6049 break;
6050 case PRINT_SRC_ONLY:
6051 source_flag = SRC_LINE;
6052 break;
6053 case PRINT_NOTHING:
6054 /* Something bogus. */
6055 source_flag = SRC_LINE;
6056 do_frame_printing = 0;
6057 break;
6058 default:
6059 internal_error (__FILE__, __LINE__, _("Unknown value."));
6060 }
6061
6062 /* The behavior of this routine with respect to the source
6063 flag is:
6064 SRC_LINE: Print only source line
6065 LOCATION: Print only location
6066 SRC_AND_LOC: Print location and source line. */
6067 if (do_frame_printing)
6068 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6069
6070 /* Display the auto-display expressions. */
6071 do_displays ();
6072 }
6073
6074 /* Here to return control to GDB when the inferior stops for real.
6075 Print appropriate messages, remove breakpoints, give terminal our modes.
6076
6077 STOP_PRINT_FRAME nonzero means print the executing frame
6078 (pc, function, args, file, line number and line text).
6079 BREAKPOINTS_FAILED nonzero means stop was due to error
6080 attempting to insert breakpoints. */
6081
6082 void
6083 normal_stop (void)
6084 {
6085 struct target_waitstatus last;
6086 ptid_t last_ptid;
6087 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6088
6089 get_last_target_status (&last_ptid, &last);
6090
6091 /* If an exception is thrown from this point on, make sure to
6092 propagate GDB's knowledge of the executing state to the
6093 frontend/user running state. A QUIT is an easy exception to see
6094 here, so do this before any filtered output. */
6095 if (!non_stop)
6096 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6097 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6098 && last.kind != TARGET_WAITKIND_EXITED
6099 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6100 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6101
6102 /* As with the notification of thread events, we want to delay
6103 notifying the user that we've switched thread context until
6104 the inferior actually stops.
6105
6106 There's no point in saying anything if the inferior has exited.
6107 Note that SIGNALLED here means "exited with a signal", not
6108 "received a signal".
6109
6110 Also skip saying anything in non-stop mode. In that mode we
6111 don't want GDB to switch threads behind the user's back, to avoid
6112 races where the user is typing a command to apply to thread x,
6113 but GDB switches to thread y before the user finishes entering
6114 the command. So fetch_inferior_event installs a cleanup to restore
6115 the current thread back to the thread the user had selected right
6116 after this event is handled; we're not really switching, only
6117 informing of a stop. */
6118 if (!non_stop
6119 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6120 && target_has_execution
6121 && last.kind != TARGET_WAITKIND_SIGNALLED
6122 && last.kind != TARGET_WAITKIND_EXITED
6123 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6124 {
6125 target_terminal_ours_for_output ();
6126 printf_filtered (_("[Switching to %s]\n"),
6127 target_pid_to_str (inferior_ptid));
6128 annotate_thread_changed ();
6129 previous_inferior_ptid = inferior_ptid;
6130 }
6131
6132 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6133 {
6134 gdb_assert (sync_execution || !target_can_async_p ());
6135
6136 target_terminal_ours_for_output ();
6137 printf_filtered (_("No unwaited-for children left.\n"));
6138 }
6139
6140 if (!breakpoints_always_inserted_mode () && target_has_execution)
6141 {
6142 if (remove_breakpoints ())
6143 {
6144 target_terminal_ours_for_output ();
6145 printf_filtered (_("Cannot remove breakpoints because "
6146 "program is no longer writable.\nFurther "
6147 "execution is probably impossible.\n"));
6148 }
6149 }
6150
6151 /* If an auto-display called a function and that got a signal,
6152 delete that auto-display to avoid an infinite recursion. */
6153
6154 if (stopped_by_random_signal)
6155 disable_current_display ();
6156
6157 /* Don't print a message if in the middle of doing a "step n"
6158 operation for n > 1. */
6159 if (target_has_execution
6160 && last.kind != TARGET_WAITKIND_SIGNALLED
6161 && last.kind != TARGET_WAITKIND_EXITED
6162 && inferior_thread ()->step_multi
6163 && inferior_thread ()->control.stop_step)
6164 goto done;
6165
6166 target_terminal_ours ();
6167 async_enable_stdin ();
6168
6169 /* Set the current source location. This will also happen if we
6170 display the frame below, but the current SAL will be incorrect
6171 during a user hook-stop function. */
6172 if (has_stack_frames () && !stop_stack_dummy)
6173 set_current_sal_from_frame (get_current_frame (), 1);
6174
6175 /* Let the user/frontend see the threads as stopped. */
6176 do_cleanups (old_chain);
6177
6178 /* Look up the hook_stop and run it (CLI internally handles problem
6179 of stop_command's pre-hook not existing). */
6180 if (stop_command)
6181 catch_errors (hook_stop_stub, stop_command,
6182 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6183
6184 if (!has_stack_frames ())
6185 goto done;
6186
6187 if (last.kind == TARGET_WAITKIND_SIGNALLED
6188 || last.kind == TARGET_WAITKIND_EXITED)
6189 goto done;
6190
6191 /* Select innermost stack frame - i.e., current frame is frame 0,
6192 and current location is based on that.
6193 Don't do this on return from a stack dummy routine,
6194 or if the program has exited. */
6195
6196 if (!stop_stack_dummy)
6197 {
6198 select_frame (get_current_frame ());
6199
6200 /* If --batch-silent is enabled then there's no need to print the current
6201 source location, and trying to do so risks an error message about
6202 missing source files. */
6203 if (stop_print_frame && !batch_silent)
6204 print_stop_event (&last);
6205 }
6206
6207 /* Save the function value return registers, if we care.
6208 We might be about to restore their previous contents. */
6209 if (inferior_thread ()->control.proceed_to_finish
6210 && execution_direction != EXEC_REVERSE)
6211 {
6212 /* This should not be necessary. */
6213 if (stop_registers)
6214 regcache_xfree (stop_registers);
6215
6216 /* NB: The copy goes through to the target picking up the value of
6217 all the registers. */
6218 stop_registers = regcache_dup (get_current_regcache ());
6219 }
6220
6221 if (stop_stack_dummy == STOP_STACK_DUMMY)
6222 {
6223 /* Pop the empty frame that contains the stack dummy.
6224 This also restores inferior state prior to the call
6225 (struct infcall_suspend_state). */
6226 struct frame_info *frame = get_current_frame ();
6227
6228 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6229 frame_pop (frame);
6230 /* frame_pop() calls reinit_frame_cache as the last thing it
6231 does which means there's currently no selected frame. We
6232 don't need to re-establish a selected frame if the dummy call
6233 returns normally, that will be done by
6234 restore_infcall_control_state. However, we do have to handle
6235 the case where the dummy call is returning after being
6236 stopped (e.g. the dummy call previously hit a breakpoint).
6237 We can't know which case we have so just always re-establish
6238 a selected frame here. */
6239 select_frame (get_current_frame ());
6240 }
6241
6242 done:
6243 annotate_stopped ();
6244
6245 /* Suppress the stop observer if we're in the middle of:
6246
6247 - a step n (n > 1), as there are still more steps to be done.
6248
6249 - a "finish" command, as the observer will be called in
6250 finish_command_continuation, so it can include the inferior
6251 function's return value.
6252
6253 - calling an inferior function, as we pretend the inferior didn't
6254 run at all. The return value of the call is handled by the
6255 expression evaluator, through call_function_by_hand. */
6256
6257 if (!target_has_execution
6258 || last.kind == TARGET_WAITKIND_SIGNALLED
6259 || last.kind == TARGET_WAITKIND_EXITED
6260 || last.kind == TARGET_WAITKIND_NO_RESUMED
6261 || (!(inferior_thread ()->step_multi
6262 && inferior_thread ()->control.stop_step)
6263 && !(inferior_thread ()->control.stop_bpstat
6264 && inferior_thread ()->control.proceed_to_finish)
6265 && !inferior_thread ()->control.in_infcall))
6266 {
6267 if (!ptid_equal (inferior_ptid, null_ptid))
6268 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6269 stop_print_frame);
6270 else
6271 observer_notify_normal_stop (NULL, stop_print_frame);
6272 }
6273
6274 if (target_has_execution)
6275 {
6276 if (last.kind != TARGET_WAITKIND_SIGNALLED
6277 && last.kind != TARGET_WAITKIND_EXITED)
6278 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6279 Delete any breakpoint that is to be deleted at the next stop. */
6280 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6281 }
6282
6283 /* Try to get rid of automatically added inferiors that are no
6284 longer needed. Keeping those around slows things down linearly.
6285 Note that this never removes the current inferior. */
6286 prune_inferiors ();
6287 }
6288
6289 static int
6290 hook_stop_stub (void *cmd)
6291 {
6292 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6293 return (0);
6294 }
6295 \f
6296 int
6297 signal_stop_state (int signo)
6298 {
6299 return signal_stop[signo];
6300 }
6301
6302 int
6303 signal_print_state (int signo)
6304 {
6305 return signal_print[signo];
6306 }
6307
6308 int
6309 signal_pass_state (int signo)
6310 {
6311 return signal_program[signo];
6312 }
6313
6314 static void
6315 signal_cache_update (int signo)
6316 {
6317 if (signo == -1)
6318 {
6319 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6320 signal_cache_update (signo);
6321
6322 return;
6323 }
6324
6325 signal_pass[signo] = (signal_stop[signo] == 0
6326 && signal_print[signo] == 0
6327 && signal_program[signo] == 1
6328 && signal_catch[signo] == 0);
6329 }
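
/* Worked example (illustrative): after "handle SIGPIPE nostop noprint
   pass", signal_stop[GDB_SIGNAL_PIPE] and signal_print[GDB_SIGNAL_PIPE]
   are 0, signal_program[GDB_SIGNAL_PIPE] is 1, and with no signal
   catchpoint signal_catch[GDB_SIGNAL_PIPE] is 0, so signal_pass ends up
   1: the target is then free to deliver SIGPIPE to the inferior without
   reporting a stop to GDB.  Any other combination forces the signal to
   be reported to GDB first.  */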
6330
6331 int
6332 signal_stop_update (int signo, int state)
6333 {
6334 int ret = signal_stop[signo];
6335
6336 signal_stop[signo] = state;
6337 signal_cache_update (signo);
6338 return ret;
6339 }
6340
6341 int
6342 signal_print_update (int signo, int state)
6343 {
6344 int ret = signal_print[signo];
6345
6346 signal_print[signo] = state;
6347 signal_cache_update (signo);
6348 return ret;
6349 }
6350
6351 int
6352 signal_pass_update (int signo, int state)
6353 {
6354 int ret = signal_program[signo];
6355
6356 signal_program[signo] = state;
6357 signal_cache_update (signo);
6358 return ret;
6359 }
6360
6361 /* Update the global 'signal_catch' from INFO and notify the
6362 target. */
6363
6364 void
6365 signal_catch_update (const unsigned int *info)
6366 {
6367 int i;
6368
6369 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6370 signal_catch[i] = info[i] > 0;
6371 signal_cache_update (-1);
6372 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6373 }
6374
6375 static void
6376 sig_print_header (void)
6377 {
6378 printf_filtered (_("Signal Stop\tPrint\tPass "
6379 "to program\tDescription\n"));
6380 }
6381
6382 static void
6383 sig_print_info (enum gdb_signal oursig)
6384 {
6385 const char *name = gdb_signal_to_name (oursig);
6386 int name_padding = 13 - strlen (name);
6387
6388 if (name_padding <= 0)
6389 name_padding = 0;
6390
6391 printf_filtered ("%s", name);
6392 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6393 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6394 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6395 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6396 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6397 }
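
/* Illustrative sample of the resulting table row, using GDB's usual
   defaults for SIGINT (stop and print, but do not pass to the program):

     SIGINT        Yes     Yes     No              Interrupt

   with the exact column spacing coming from the tabs printed above.  */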
6398
6399 /* Specify how various signals in the inferior should be handled. */
6400
6401 static void
6402 handle_command (char *args, int from_tty)
6403 {
6404 char **argv;
6405 int digits, wordlen;
6406 int sigfirst, signum, siglast;
6407 enum gdb_signal oursig;
6408 int allsigs;
6409 int nsigs;
6410 unsigned char *sigs;
6411 struct cleanup *old_chain;
6412
6413 if (args == NULL)
6414 {
6415 error_no_arg (_("signal to handle"));
6416 }
6417
6418 /* Allocate and zero an array of flags for which signals to handle. */
6419
6420 nsigs = (int) GDB_SIGNAL_LAST;
6421 sigs = (unsigned char *) alloca (nsigs);
6422 memset (sigs, 0, nsigs);
6423
6424 /* Break the command line up into args. */
6425
6426 argv = gdb_buildargv (args);
6427 old_chain = make_cleanup_freeargv (argv);
6428
6429 /* Walk through the args, looking for signal numbers, signal names, and
6430 actions. Signal numbers and signal names may be interspersed with
6431 actions, with the actions being performed for all signals cumulatively
6432 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
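
/* For example (illustrative): "handle SIGUSR1 SIGUSR2 nostop noprint pass"
   applies all three actions to both signals, while "handle 5-9 print"
   applies "print" to the internal signal numbers 5 through 9.  */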
6433
6434 while (*argv != NULL)
6435 {
6436 wordlen = strlen (*argv);
6437 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6438 {;
6439 }
6440 allsigs = 0;
6441 sigfirst = siglast = -1;
6442
6443 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6444 {
6445 /* Apply action to all signals except those used by the
6446 debugger. Silently skip those. */
6447 allsigs = 1;
6448 sigfirst = 0;
6449 siglast = nsigs - 1;
6450 }
6451 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6452 {
6453 SET_SIGS (nsigs, sigs, signal_stop);
6454 SET_SIGS (nsigs, sigs, signal_print);
6455 }
6456 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6457 {
6458 UNSET_SIGS (nsigs, sigs, signal_program);
6459 }
6460 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6461 {
6462 SET_SIGS (nsigs, sigs, signal_print);
6463 }
6464 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6465 {
6466 SET_SIGS (nsigs, sigs, signal_program);
6467 }
6468 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6469 {
6470 UNSET_SIGS (nsigs, sigs, signal_stop);
6471 }
6472 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6473 {
6474 SET_SIGS (nsigs, sigs, signal_program);
6475 }
6476 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6477 {
6478 UNSET_SIGS (nsigs, sigs, signal_print);
6479 UNSET_SIGS (nsigs, sigs, signal_stop);
6480 }
6481 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6482 {
6483 UNSET_SIGS (nsigs, sigs, signal_program);
6484 }
6485 else if (digits > 0)
6486 {
6487 /* It is numeric. The numeric signal refers to our own
6488 internal signal numbering from target.h, not to a host/target
6489 signal number. This is a feature; users really should be
6490 using symbolic names, and the common ones like
6491 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6492
6493 sigfirst = siglast = (int)
6494 gdb_signal_from_command (atoi (*argv));
6495 if ((*argv)[digits] == '-')
6496 {
6497 siglast = (int)
6498 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6499 }
6500 if (sigfirst > siglast)
6501 {
6502 /* Bet he didn't figure we'd think of this case... */
6503 signum = sigfirst;
6504 sigfirst = siglast;
6505 siglast = signum;
6506 }
6507 }
6508 else
6509 {
6510 oursig = gdb_signal_from_name (*argv);
6511 if (oursig != GDB_SIGNAL_UNKNOWN)
6512 {
6513 sigfirst = siglast = (int) oursig;
6514 }
6515 else
6516 {
6517 /* Not a number and not a recognized flag word => complain. */
6518 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6519 }
6520 }
6521
6522 /* If any signal numbers or symbol names were found, set flags for
6523 which signals to apply actions to. */
6524
6525 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6526 {
6527 switch ((enum gdb_signal) signum)
6528 {
6529 case GDB_SIGNAL_TRAP:
6530 case GDB_SIGNAL_INT:
6531 if (!allsigs && !sigs[signum])
6532 {
6533 if (query (_("%s is used by the debugger.\n\
6534 Are you sure you want to change it? "),
6535 gdb_signal_to_name ((enum gdb_signal) signum)))
6536 {
6537 sigs[signum] = 1;
6538 }
6539 else
6540 {
6541 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6542 gdb_flush (gdb_stdout);
6543 }
6544 }
6545 break;
6546 case GDB_SIGNAL_0:
6547 case GDB_SIGNAL_DEFAULT:
6548 case GDB_SIGNAL_UNKNOWN:
6549 /* Make sure that "all" doesn't print these. */
6550 break;
6551 default:
6552 sigs[signum] = 1;
6553 break;
6554 }
6555 }
6556
6557 argv++;
6558 }
6559
6560 for (signum = 0; signum < nsigs; signum++)
6561 if (sigs[signum])
6562 {
6563 signal_cache_update (-1);
6564 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6565 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6566
6567 if (from_tty)
6568 {
6569 /* Show the results. */
6570 sig_print_header ();
6571 for (; signum < nsigs; signum++)
6572 if (sigs[signum])
6573 sig_print_info (signum);
6574 }
6575
6576 break;
6577 }
6578
6579 do_cleanups (old_chain);
6580 }
6581
6582 /* Complete the "handle" command. */
6583
6584 static VEC (char_ptr) *
6585 handle_completer (struct cmd_list_element *ignore,
6586 const char *text, const char *word)
6587 {
6588 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6589 static const char * const keywords[] =
6590 {
6591 "all",
6592 "stop",
6593 "ignore",
6594 "print",
6595 "pass",
6596 "nostop",
6597 "noignore",
6598 "noprint",
6599 "nopass",
6600 NULL,
6601 };
6602
6603 vec_signals = signal_completer (ignore, text, word);
6604 vec_keywords = complete_on_enum (keywords, word, word);
6605
6606 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6607 VEC_free (char_ptr, vec_signals);
6608 VEC_free (char_ptr, vec_keywords);
6609 return return_val;
6610 }
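/* For illustration (not part of the upstream file): with the completer
   above, "handle SIG<TAB>" offers the signal names produced by
   signal_completer, while "handle no<TAB>" offers the keyword candidates
   "nostop", "noignore", "noprint" and "nopass" from the KEYWORDS table;
   the two result vectors are then merged into a single completion list.  */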
6611
6612 static void
6613 xdb_handle_command (char *args, int from_tty)
6614 {
6615 char **argv;
6616 struct cleanup *old_chain;
6617
6618 if (args == NULL)
6619 error_no_arg (_("xdb command"));
6620
6621 /* Break the command line up into args. */
6622
6623 argv = gdb_buildargv (args);
6624 old_chain = make_cleanup_freeargv (argv);
6625 if (argv[1] != (char *) NULL)
6626 {
6627 char *argBuf;
6628 int bufLen;
6629
6630 bufLen = strlen (argv[0]) + 20;
6631 argBuf = (char *) xmalloc (bufLen);
6632 if (argBuf)
6633 {
6634 int validFlag = 1;
6635 enum gdb_signal oursig;
6636
6637 oursig = gdb_signal_from_name (argv[0]);
6638 memset (argBuf, 0, bufLen);
6639 if (strcmp (argv[1], "Q") == 0)
6640 sprintf (argBuf, "%s %s", argv[0], "noprint");
6641 else
6642 {
6643 if (strcmp (argv[1], "s") == 0)
6644 {
6645 if (!signal_stop[oursig])
6646 sprintf (argBuf, "%s %s", argv[0], "stop");
6647 else
6648 sprintf (argBuf, "%s %s", argv[0], "nostop");
6649 }
6650 else if (strcmp (argv[1], "i") == 0)
6651 {
6652 if (!signal_program[oursig])
6653 sprintf (argBuf, "%s %s", argv[0], "pass");
6654 else
6655 sprintf (argBuf, "%s %s", argv[0], "nopass");
6656 }
6657 else if (strcmp (argv[1], "r") == 0)
6658 {
6659 if (!signal_print[oursig])
6660 sprintf (argBuf, "%s %s", argv[0], "print");
6661 else
6662 sprintf (argBuf, "%s %s", argv[0], "noprint");
6663 }
6664 else
6665 validFlag = 0;
6666 }
6667 if (validFlag)
6668 handle_command (argBuf, from_tty);
6669 else
6670 printf_filtered (_("Invalid signal handling flag.\n"));
6671 if (argBuf)
6672 xfree (argBuf);
6673 }
6674 }
6675 do_cleanups (old_chain);
6676 }
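/* For illustration: an XDB command such as "z SIGINT s" is rewritten
   above into either "SIGINT stop" or "SIGINT nostop" (depending on the
   current setting) and handed to handle_command, so the XDB flags are
   thin wrappers around the regular "handle" command.  */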
6677
6678 enum gdb_signal
6679 gdb_signal_from_command (int num)
6680 {
6681 if (num >= 1 && num <= 15)
6682 return (enum gdb_signal) num;
6683 error (_("Only signals 1-15 are valid as numeric signals.\n\
6684 Use \"info signals\" for a list of symbolic signals."));
6685 }
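/* For illustration: a command like "handle 5 nostop" reaches this
   function with NUM == 5, and the result is interpreted in GDB's own
   internal signal numbering (see the comment in handle_command above);
   for the common signals such as SIGHUP, SIGINT and SIGALRM that
   numbering happens to match the traditional values, which is why small
   numeric arguments behave as users expect.  */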
6686
6687 /* Print current contents of the tables set by the handle command.
6688 It is possible we should just be printing signals actually used
6689 by the current target (but for things to work right when switching
6690 targets, all signals should be in the signal tables). */
6691
6692 static void
6693 signals_info (char *signum_exp, int from_tty)
6694 {
6695 enum gdb_signal oursig;
6696
6697 sig_print_header ();
6698
6699 if (signum_exp)
6700 {
6701 /* First see if this is a symbol name. */
6702 oursig = gdb_signal_from_name (signum_exp);
6703 if (oursig == GDB_SIGNAL_UNKNOWN)
6704 {
6705 /* No, try numeric. */
6706 oursig =
6707 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6708 }
6709 sig_print_info (oursig);
6710 return;
6711 }
6712
6713 printf_filtered ("\n");
6714 /* These ugly casts brought to you by the native VAX compiler. */
6715 for (oursig = GDB_SIGNAL_FIRST;
6716 (int) oursig < (int) GDB_SIGNAL_LAST;
6717 oursig = (enum gdb_signal) ((int) oursig + 1))
6718 {
6719 QUIT;
6720
6721 if (oursig != GDB_SIGNAL_UNKNOWN
6722 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6723 sig_print_info (oursig);
6724 }
6725
6726 printf_filtered (_("\nUse the \"handle\" command "
6727 "to change these tables.\n"));
6728 }
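/* For illustration: "info signals" with no argument prints the whole
   table via the loop above, while "info signals SIGSEGV" prints the row
   for that signal only; a numeric argument in the 1-15 range is also
   accepted and goes through gdb_signal_from_command.  */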
6729
6730 /* Check if it makes sense to read $_siginfo from the current thread
6731 at this point. If not, throw an error. */
6732
6733 static void
6734 validate_siginfo_access (void)
6735 {
6736 /* No current inferior, no siginfo. */
6737 if (ptid_equal (inferior_ptid, null_ptid))
6738 error (_("No thread selected."));
6739
6740 /* Don't try to read from a dead thread. */
6741 if (is_exited (inferior_ptid))
6742 	    error (_("The current thread has terminated."));
6743
6744 /* ... or from a spinning thread. */
6745 if (is_running (inferior_ptid))
6746 error (_("Selected thread is running."));
6747 }
6748
6749 /* The $_siginfo convenience variable is a bit special.  We don't know
6750 	for sure the type of the value until we actually have a chance to
6751 	fetch the data.  The type can change depending on gdbarch, so it is
6752 	also dependent on which thread you have selected.  We handle this by:
6753
6754 	  1. making $_siginfo be an internalvar that creates a new value on
6755 	  access, and
6756
6757 	  2. making the value of $_siginfo be an lval_computed value.  */
6758
6759 /* This function implements the lval_computed support for reading a
6760 $_siginfo value. */
6761
6762 static void
6763 siginfo_value_read (struct value *v)
6764 {
6765 LONGEST transferred;
6766
6767 validate_siginfo_access ();
6768
6769 transferred =
6770 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6771 NULL,
6772 value_contents_all_raw (v),
6773 value_offset (v),
6774 TYPE_LENGTH (value_type (v)));
6775
6776 if (transferred != TYPE_LENGTH (value_type (v)))
6777 error (_("Unable to read siginfo"));
6778 }
6779
6780 /* This function implements the lval_computed support for writing a
6781 $_siginfo value. */
6782
6783 static void
6784 siginfo_value_write (struct value *v, struct value *fromval)
6785 {
6786 LONGEST transferred;
6787
6788 validate_siginfo_access ();
6789
6790 transferred = target_write (&current_target,
6791 TARGET_OBJECT_SIGNAL_INFO,
6792 NULL,
6793 value_contents_all_raw (fromval),
6794 value_offset (v),
6795 TYPE_LENGTH (value_type (fromval)));
6796
6797 if (transferred != TYPE_LENGTH (value_type (fromval)))
6798 error (_("Unable to write siginfo"));
6799 }
6800
6801 static const struct lval_funcs siginfo_value_funcs =
6802 {
6803 siginfo_value_read,
6804 siginfo_value_write
6805 };
6806
6807 /* Return a new value with the correct type for the siginfo object of
6808 the current thread using architecture GDBARCH. Return a void value
6809 if there's no object available. */
6810
6811 static struct value *
6812 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6813 void *ignore)
6814 {
6815 if (target_has_stack
6816 && !ptid_equal (inferior_ptid, null_ptid)
6817 && gdbarch_get_siginfo_type_p (gdbarch))
6818 {
6819 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6820
6821 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6822 }
6823
6824 return allocate_value (builtin_type (gdbarch)->builtin_void);
6825 }
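/* For illustration: the pieces above cooperate as follows.
   siginfo_make_value builds a value of the gdbarch-specific siginfo type
   backed by SIGINFO_VALUE_FUNCS, so "print $_siginfo" only touches the
   target when the contents are actually fetched (siginfo_value_read),
   and assigning to a field of $_siginfo (e.g. si_signo on Linux targets)
   goes back out through siginfo_value_write.  */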
6826
6827 \f
6828 /* infcall_suspend_state contains state about the program itself like its
6829 registers and any signal it received when it last stopped.
6830 This state must be restored regardless of how the inferior function call
6831 ends (either successfully, or after it hits a breakpoint or signal)
6832 if the program is to properly continue where it left off. */
6833
6834 struct infcall_suspend_state
6835 {
6836 struct thread_suspend_state thread_suspend;
6837 #if 0 /* Currently unused and empty structures are not valid C. */
6838 struct inferior_suspend_state inferior_suspend;
6839 #endif
6840
6841 /* Other fields: */
6842 CORE_ADDR stop_pc;
6843 struct regcache *registers;
6844
6845   /* The gdbarch defining the format of SIGINFO_DATA, or NULL if no
6846 	siginfo was saved.  */
6847   struct gdbarch *siginfo_gdbarch;
6848
6849   /* The buffer's format is defined by SIGINFO_GDBARCH and its length is
6850 	TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For a different gdbarch
6851 	the content would be invalid.  */
6852   gdb_byte *siginfo_data;
6852 };
6853
6854 struct infcall_suspend_state *
6855 save_infcall_suspend_state (void)
6856 {
6857 struct infcall_suspend_state *inf_state;
6858 struct thread_info *tp = inferior_thread ();
6859 #if 0
6860 struct inferior *inf = current_inferior ();
6861 #endif
6862 struct regcache *regcache = get_current_regcache ();
6863 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6864 gdb_byte *siginfo_data = NULL;
6865
6866 if (gdbarch_get_siginfo_type_p (gdbarch))
6867 {
6868 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6869 size_t len = TYPE_LENGTH (type);
6870 struct cleanup *back_to;
6871
6872 siginfo_data = xmalloc (len);
6873 back_to = make_cleanup (xfree, siginfo_data);
6874
6875 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6876 siginfo_data, 0, len) == len)
6877 discard_cleanups (back_to);
6878 else
6879 {
6880 /* Errors ignored. */
6881 do_cleanups (back_to);
6882 siginfo_data = NULL;
6883 }
6884 }
6885
6886 inf_state = XCNEW (struct infcall_suspend_state);
6887
6888 if (siginfo_data)
6889 {
6890 inf_state->siginfo_gdbarch = gdbarch;
6891 inf_state->siginfo_data = siginfo_data;
6892 }
6893
6894 inf_state->thread_suspend = tp->suspend;
6895 #if 0 /* Currently unused and empty structures are not valid C. */
6896 inf_state->inferior_suspend = inf->suspend;
6897 #endif
6898
6899 /* run_inferior_call will not use the signal due to its `proceed' call with
6900 GDB_SIGNAL_0 anyway. */
6901 tp->suspend.stop_signal = GDB_SIGNAL_0;
6902
6903 inf_state->stop_pc = stop_pc;
6904
6905 inf_state->registers = regcache_dup (regcache);
6906
6907 return inf_state;
6908 }
6909
6910 /* Restore inferior session state to INF_STATE. */
6911
6912 void
6913 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6914 {
6915 struct thread_info *tp = inferior_thread ();
6916 #if 0
6917 struct inferior *inf = current_inferior ();
6918 #endif
6919 struct regcache *regcache = get_current_regcache ();
6920 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6921
6922 tp->suspend = inf_state->thread_suspend;
6923 #if 0 /* Currently unused and empty structures are not valid C. */
6924 inf->suspend = inf_state->inferior_suspend;
6925 #endif
6926
6927 stop_pc = inf_state->stop_pc;
6928
6929 if (inf_state->siginfo_gdbarch == gdbarch)
6930 {
6931 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6932
6933 /* Errors ignored. */
6934 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6935 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6936 }
6937
6938 /* The inferior can be gone if the user types "print exit(0)"
6939 (and perhaps other times). */
6940 if (target_has_execution)
6941 /* NB: The register write goes through to the target. */
6942 regcache_cpy (regcache, inf_state->registers);
6943
6944 discard_infcall_suspend_state (inf_state);
6945 }
6946
6947 static void
6948 do_restore_infcall_suspend_state_cleanup (void *state)
6949 {
6950 restore_infcall_suspend_state (state);
6951 }
6952
6953 struct cleanup *
6954 make_cleanup_restore_infcall_suspend_state
6955 (struct infcall_suspend_state *inf_state)
6956 {
6957 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6958 }
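/* For illustration, a minimal usage sketch (the real caller lives in the
   inferior-call code):

     struct infcall_suspend_state *state = save_infcall_suspend_state ();
     struct cleanup *back_to
       = make_cleanup_restore_infcall_suspend_state (state);
     ... run the inferior call ...
     do_cleanups (back_to);  // restores registers, siginfo and stop state

   restore_infcall_suspend_state frees STATE, so it must not be restored
   (or discarded) a second time.  */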
6959
6960 void
6961 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6962 {
6963 regcache_xfree (inf_state->registers);
6964 xfree (inf_state->siginfo_data);
6965 xfree (inf_state);
6966 }
6967
6968 struct regcache *
6969 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6970 {
6971 return inf_state->registers;
6972 }
6973
6974 /* infcall_control_state contains state regarding gdb's control of the
6975 inferior itself like stepping control. It also contains session state like
6976 the user's currently selected frame. */
6977
6978 struct infcall_control_state
6979 {
6980 struct thread_control_state thread_control;
6981 struct inferior_control_state inferior_control;
6982
6983 /* Other fields: */
6984 enum stop_stack_kind stop_stack_dummy;
6985 int stopped_by_random_signal;
6986 int stop_after_trap;
6987
6988   /* ID of the selected frame when the inferior function call was made.  */
6989 struct frame_id selected_frame_id;
6990 };
6991
6992 /* Save all of the information associated with the inferior<==>gdb
6993 connection. */
6994
6995 struct infcall_control_state *
6996 save_infcall_control_state (void)
6997 {
6998 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6999 struct thread_info *tp = inferior_thread ();
7000 struct inferior *inf = current_inferior ();
7001
7002 inf_status->thread_control = tp->control;
7003 inf_status->inferior_control = inf->control;
7004
7005 tp->control.step_resume_breakpoint = NULL;
7006 tp->control.exception_resume_breakpoint = NULL;
7007
7008 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7009 chain. If caller's caller is walking the chain, they'll be happier if we
7010 hand them back the original chain when restore_infcall_control_state is
7011 called. */
7012 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7013
7014 /* Other fields: */
7015 inf_status->stop_stack_dummy = stop_stack_dummy;
7016 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7017 inf_status->stop_after_trap = stop_after_trap;
7018
7019 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7020
7021 return inf_status;
7022 }
7023
7024 static int
7025 restore_selected_frame (void *args)
7026 {
7027 struct frame_id *fid = (struct frame_id *) args;
7028 struct frame_info *frame;
7029
7030 frame = frame_find_by_id (*fid);
7031
7032   /* If frame_find_by_id () fails, the previously selected frame no
7033 	longer exists; warn and return 0 so the caller selects another frame.  */
7034 if (frame == NULL)
7035 {
7036 warning (_("Unable to restore previously selected frame."));
7037 return 0;
7038 }
7039
7040 select_frame (frame);
7041
7042 return (1);
7043 }
7044
7045 /* Restore inferior session state to INF_STATUS. */
7046
7047 void
7048 restore_infcall_control_state (struct infcall_control_state *inf_status)
7049 {
7050 struct thread_info *tp = inferior_thread ();
7051 struct inferior *inf = current_inferior ();
7052
7053 if (tp->control.step_resume_breakpoint)
7054 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7055
7056 if (tp->control.exception_resume_breakpoint)
7057 tp->control.exception_resume_breakpoint->disposition
7058 = disp_del_at_next_stop;
7059
7060 /* Handle the bpstat_copy of the chain. */
7061 bpstat_clear (&tp->control.stop_bpstat);
7062
7063 tp->control = inf_status->thread_control;
7064 inf->control = inf_status->inferior_control;
7065
7066 /* Other fields: */
7067 stop_stack_dummy = inf_status->stop_stack_dummy;
7068 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7069 stop_after_trap = inf_status->stop_after_trap;
7070
7071 if (target_has_stack)
7072 {
7073 /* The point of catch_errors is that if the stack is clobbered,
7074 walking the stack might encounter a garbage pointer and
7075 error() trying to dereference it. */
7076 if (catch_errors
7077 (restore_selected_frame, &inf_status->selected_frame_id,
7078 "Unable to restore previously selected frame:\n",
7079 RETURN_MASK_ERROR) == 0)
7080 /* Error in restoring the selected frame. Select the innermost
7081 frame. */
7082 select_frame (get_current_frame ());
7083 }
7084
7085 xfree (inf_status);
7086 }
7087
7088 static void
7089 do_restore_infcall_control_state_cleanup (void *sts)
7090 {
7091 restore_infcall_control_state (sts);
7092 }
7093
7094 struct cleanup *
7095 make_cleanup_restore_infcall_control_state
7096 (struct infcall_control_state *inf_status)
7097 {
7098 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7099 }
7100
7101 void
7102 discard_infcall_control_state (struct infcall_control_state *inf_status)
7103 {
7104 if (inf_status->thread_control.step_resume_breakpoint)
7105 inf_status->thread_control.step_resume_breakpoint->disposition
7106 = disp_del_at_next_stop;
7107
7108 if (inf_status->thread_control.exception_resume_breakpoint)
7109 inf_status->thread_control.exception_resume_breakpoint->disposition
7110 = disp_del_at_next_stop;
7111
7112 /* See save_infcall_control_state for info on stop_bpstat. */
7113 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7114
7115 xfree (inf_status);
7116 }
7117 \f
7118 /* restore_inferior_ptid() will be used by the cleanup machinery
7119 to restore the inferior_ptid value saved in a call to
7120 save_inferior_ptid(). */
7121
7122 static void
7123 restore_inferior_ptid (void *arg)
7124 {
7125 ptid_t *saved_ptid_ptr = arg;
7126
7127 inferior_ptid = *saved_ptid_ptr;
7128 xfree (arg);
7129 }
7130
7131 /* Save the value of inferior_ptid so that it may be restored by a
7132 later call to do_cleanups(). Returns the struct cleanup pointer
7133 needed for later doing the cleanup. */
7134
7135 struct cleanup *
7136 save_inferior_ptid (void)
7137 {
7138 ptid_t *saved_ptid_ptr;
7139
7140 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7141 *saved_ptid_ptr = inferior_ptid;
7142 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7143 }
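/* For illustration, the usual pattern for temporarily switching
   inferior_ptid:

     struct cleanup *old_chain = save_inferior_ptid ();
     inferior_ptid = some_other_ptid;   // hypothetical ptid value
     ... operate on that thread ...
     do_cleanups (old_chain);           // restores inferior_ptid and frees
                                        // the saved copy
*/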
7144
7145 /* See inferior.h. */
7146
7147 void
7148 clear_exit_convenience_vars (void)
7149 {
7150 clear_internalvar (lookup_internalvar ("_exitsignal"));
7151 clear_internalvar (lookup_internalvar ("_exitcode"));
7152 }
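/* For illustration: $_exitcode and $_exitsignal are the convenience
   variables cleared above.  After an inferior exits normally,
   "print $_exitcode" shows its exit status; after it is killed by a
   signal, "print $_exitsignal" shows the signal number.  Clearing both
   here resets them (presumably before a new run) so stale values from a
   previous inferior are not reported.  */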
7153 \f
7154
7155 /* User interface for reverse debugging:
7156 	Set exec-direction / show exec-direction commands
7157 	(returns an error unless the target supports reverse execution).  */
7158
7159 int execution_direction = EXEC_FORWARD;
7160 static const char exec_forward[] = "forward";
7161 static const char exec_reverse[] = "reverse";
7162 static const char *exec_direction = exec_forward;
7163 static const char *const exec_direction_names[] = {
7164 exec_forward,
7165 exec_reverse,
7166 NULL
7167 };
7168
7169 static void
7170 set_exec_direction_func (char *args, int from_tty,
7171 struct cmd_list_element *cmd)
7172 {
7173 if (target_can_execute_reverse)
7174 {
7175 if (!strcmp (exec_direction, exec_forward))
7176 execution_direction = EXEC_FORWARD;
7177 else if (!strcmp (exec_direction, exec_reverse))
7178 execution_direction = EXEC_REVERSE;
7179 }
7180 else
7181 {
7182 exec_direction = exec_forward;
7183 error (_("Target does not support this operation."));
7184 }
7185 }
7186
7187 static void
7188 show_exec_direction_func (struct ui_file *out, int from_tty,
7189 struct cmd_list_element *cmd, const char *value)
7190 {
7191 switch (execution_direction) {
7192 case EXEC_FORWARD:
7193 fprintf_filtered (out, _("Forward.\n"));
7194 break;
7195 case EXEC_REVERSE:
7196 fprintf_filtered (out, _("Reverse.\n"));
7197 break;
7198 default:
7199 internal_error (__FILE__, __LINE__,
7200 _("bogus execution_direction value: %d"),
7201 (int) execution_direction);
7202 }
7203 }
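/* For illustration: on a reverse-capable target (for example one using
   the record/replay machinery), "set exec-direction reverse" flips
   EXECUTION_DIRECTION so that subsequent step/continue commands run
   backwards, and "show exec-direction" reports "Forward." or "Reverse."
   via the function above.  */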
7204
7205 static void
7206 show_schedule_multiple (struct ui_file *file, int from_tty,
7207 struct cmd_list_element *c, const char *value)
7208 {
7209 fprintf_filtered (file, _("Resuming the execution of threads "
7210 "of all processes is %s.\n"), value);
7211 }
7212
7213 /* Implementation of `siginfo' variable. */
7214
7215 static const struct internalvar_funcs siginfo_funcs =
7216 {
7217 siginfo_make_value,
7218 NULL,
7219 NULL
7220 };
7221
7222 void
7223 _initialize_infrun (void)
7224 {
7225 int i;
7226 int numsigs;
7227 struct cmd_list_element *c;
7228
7229 add_info ("signals", signals_info, _("\
7230 What debugger does when program gets various signals.\n\
7231 Specify a signal as argument to print info on that signal only."));
7232 add_info_alias ("handle", "signals", 0);
7233
7234 c = add_com ("handle", class_run, handle_command, _("\
7235 Specify how to handle signals.\n\
7236 Usage: handle SIGNAL [ACTIONS]\n\
7237 Args are signals and actions to apply to those signals.\n\
7238 If no actions are specified, the current settings for the specified signals\n\
7239 will be displayed instead.\n\
7240 \n\
7241 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7242 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7243 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7244 The special arg \"all\" is recognized to mean all signals except those\n\
7245 used by the debugger, typically SIGTRAP and SIGINT.\n\
7246 \n\
7247 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7248 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7249 Stop means reenter debugger if this signal happens (implies print).\n\
7250 Print means print a message if this signal happens.\n\
7251 Pass means let program see this signal; otherwise program doesn't know.\n\
7252 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7253 Pass and Stop may be combined.\n\
7254 \n\
7255 Multiple signals may be specified. Signal numbers and signal names\n\
7256 may be interspersed with actions, with the actions being performed for\n\
7257 all signals cumulatively specified."));
7258 set_cmd_completer (c, handle_completer);
7259
7260 if (xdb_commands)
7261 {
7262 add_com ("lz", class_info, signals_info, _("\
7263 What debugger does when program gets various signals.\n\
7264 Specify a signal as argument to print info on that signal only."));
7265 add_com ("z", class_run, xdb_handle_command, _("\
7266 Specify how to handle a signal.\n\
7267 Args are signals and actions to apply to those signals.\n\
7268 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7269 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7270 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7271 The special arg \"all\" is recognized to mean all signals except those\n\
7272 used by the debugger, typically SIGTRAP and SIGINT.\n\
7273 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7274 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7275 nopass), \"Q\" (noprint)\n\
7276 Stop means reenter debugger if this signal happens (implies print).\n\
7277 Print means print a message if this signal happens.\n\
7278 Pass means let program see this signal; otherwise program doesn't know.\n\
7279 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7280 Pass and Stop may be combined."));
7281 }
7282
7283 if (!dbx_commands)
7284 stop_command = add_cmd ("stop", class_obscure,
7285 not_just_help_class_command, _("\
7286 There is no `stop' command, but you can set a hook on `stop'.\n\
7287 This allows you to set a list of commands to be run each time execution\n\
7288 of the program stops."), &cmdlist);
7289
7290 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7291 Set inferior debugging."), _("\
7292 Show inferior debugging."), _("\
7293 When non-zero, inferior specific debugging is enabled."),
7294 NULL,
7295 show_debug_infrun,
7296 &setdebuglist, &showdebuglist);
7297
7298 add_setshow_boolean_cmd ("displaced", class_maintenance,
7299 &debug_displaced, _("\
7300 Set displaced stepping debugging."), _("\
7301 Show displaced stepping debugging."), _("\
7302 When non-zero, displaced stepping specific debugging is enabled."),
7303 NULL,
7304 show_debug_displaced,
7305 &setdebuglist, &showdebuglist);
7306
7307 add_setshow_boolean_cmd ("non-stop", no_class,
7308 &non_stop_1, _("\
7309 Set whether gdb controls the inferior in non-stop mode."), _("\
7310 Show whether gdb controls the inferior in non-stop mode."), _("\
7311 When debugging a multi-threaded program and this setting is\n\
7312 off (the default, also called all-stop mode), when one thread stops\n\
7313 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7314 all other threads in the program while you interact with the thread of\n\
7315 interest. When you continue or step a thread, you can allow the other\n\
7316 threads to run, or have them remain stopped, but while you inspect any\n\
7317 thread's state, all threads stop.\n\
7318 \n\
7319 In non-stop mode, when one thread stops, other threads can continue\n\
7320 to run freely. You'll be able to step each thread independently,\n\
7321 leave it stopped or free to run as needed."),
7322 set_non_stop,
7323 show_non_stop,
7324 &setlist,
7325 &showlist);
7326
7327 numsigs = (int) GDB_SIGNAL_LAST;
7328 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7329 signal_print = (unsigned char *)
7330 xmalloc (sizeof (signal_print[0]) * numsigs);
7331 signal_program = (unsigned char *)
7332 xmalloc (sizeof (signal_program[0]) * numsigs);
7333 signal_catch = (unsigned char *)
7334 xmalloc (sizeof (signal_catch[0]) * numsigs);
7335   signal_pass = (unsigned char *)
7336     xmalloc (sizeof (signal_pass[0]) * numsigs);
7337 for (i = 0; i < numsigs; i++)
7338 {
7339 signal_stop[i] = 1;
7340 signal_print[i] = 1;
7341 signal_program[i] = 1;
7342 signal_catch[i] = 0;
7343 }
7344
7345 /* Signals caused by debugger's own actions
7346 should not be given to the program afterwards. */
7347 signal_program[GDB_SIGNAL_TRAP] = 0;
7348 signal_program[GDB_SIGNAL_INT] = 0;
7349
7350 /* Signals that are not errors should not normally enter the debugger. */
7351 signal_stop[GDB_SIGNAL_ALRM] = 0;
7352 signal_print[GDB_SIGNAL_ALRM] = 0;
7353 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7354 signal_print[GDB_SIGNAL_VTALRM] = 0;
7355 signal_stop[GDB_SIGNAL_PROF] = 0;
7356 signal_print[GDB_SIGNAL_PROF] = 0;
7357 signal_stop[GDB_SIGNAL_CHLD] = 0;
7358 signal_print[GDB_SIGNAL_CHLD] = 0;
7359 signal_stop[GDB_SIGNAL_IO] = 0;
7360 signal_print[GDB_SIGNAL_IO] = 0;
7361 signal_stop[GDB_SIGNAL_POLL] = 0;
7362 signal_print[GDB_SIGNAL_POLL] = 0;
7363 signal_stop[GDB_SIGNAL_URG] = 0;
7364 signal_print[GDB_SIGNAL_URG] = 0;
7365 signal_stop[GDB_SIGNAL_WINCH] = 0;
7366 signal_print[GDB_SIGNAL_WINCH] = 0;
7367 signal_stop[GDB_SIGNAL_PRIO] = 0;
7368 signal_print[GDB_SIGNAL_PRIO] = 0;
7369
7370 /* These signals are used internally by user-level thread
7371 implementations. (See signal(5) on Solaris.) Like the above
7372 signals, a healthy program receives and handles them as part of
7373 its normal operation. */
7374 signal_stop[GDB_SIGNAL_LWP] = 0;
7375 signal_print[GDB_SIGNAL_LWP] = 0;
7376 signal_stop[GDB_SIGNAL_WAITING] = 0;
7377 signal_print[GDB_SIGNAL_WAITING] = 0;
7378 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7379 signal_print[GDB_SIGNAL_CANCEL] = 0;
7380
7381 /* Update cached state. */
7382 signal_cache_update (-1);
7383
7384 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7385 &stop_on_solib_events, _("\
7386 Set stopping for shared library events."), _("\
7387 Show stopping for shared library events."), _("\
7388 If nonzero, gdb will give control to the user when the dynamic linker\n\
7389 notifies gdb of shared library events. The most common event of interest\n\
7390 to the user would be loading/unloading of a new library."),
7391 set_stop_on_solib_events,
7392 show_stop_on_solib_events,
7393 &setlist, &showlist);
7394
7395 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7396 follow_fork_mode_kind_names,
7397 &follow_fork_mode_string, _("\
7398 Set debugger response to a program call of fork or vfork."), _("\
7399 Show debugger response to a program call of fork or vfork."), _("\
7400 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7401 parent - the original process is debugged after a fork\n\
7402 child - the new process is debugged after a fork\n\
7403 The unfollowed process will continue to run.\n\
7404 By default, the debugger will follow the parent process."),
7405 NULL,
7406 show_follow_fork_mode_string,
7407 &setlist, &showlist);
7408
7409 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7410 follow_exec_mode_names,
7411 &follow_exec_mode_string, _("\
7412 Set debugger response to a program call of exec."), _("\
7413 Show debugger response to a program call of exec."), _("\
7414 An exec call replaces the program image of a process.\n\
7415 \n\
7416 follow-exec-mode can be:\n\
7417 \n\
7418 new - the debugger creates a new inferior and rebinds the process\n\
7419 to this new inferior. The program the process was running before\n\
7420 the exec call can be restarted afterwards by restarting the original\n\
7421 inferior.\n\
7422 \n\
7423 same - the debugger keeps the process bound to the same inferior.\n\
7424 The new executable image replaces the previous executable loaded in\n\
7425 the inferior. Restarting the inferior after the exec call restarts\n\
7426 the executable the process was running after the exec call.\n\
7427 \n\
7428 By default, the debugger will use the same inferior."),
7429 NULL,
7430 show_follow_exec_mode_string,
7431 &setlist, &showlist);
7432
7433 add_setshow_enum_cmd ("scheduler-locking", class_run,
7434 scheduler_enums, &scheduler_mode, _("\
7435 Set mode for locking scheduler during execution."), _("\
7436 Show mode for locking scheduler during execution."), _("\
7437 off == no locking (threads may preempt at any time)\n\
7438 on == full locking (no thread except the current thread may run)\n\
7439 step == scheduler locked during every single-step operation.\n\
7440 In this mode, no other thread may run during a step command.\n\
7441 Other threads may run while stepping over a function call ('next')."),
7442 set_schedlock_func, /* traps on target vector */
7443 show_scheduler_mode,
7444 &setlist, &showlist);
7445
7446 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7447 Set mode for resuming threads of all processes."), _("\
7448 Show mode for resuming threads of all processes."), _("\
7449 When on, execution commands (such as 'continue' or 'next') resume all\n\
7450 threads of all processes. When off (which is the default), execution\n\
7451 commands only resume the threads of the current process. The set of\n\
7452 threads that are resumed is further refined by the scheduler-locking\n\
7453 mode (see help set scheduler-locking)."),
7454 NULL,
7455 show_schedule_multiple,
7456 &setlist, &showlist);
7457
7458 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7459 Set mode of the step operation."), _("\
7460 Show mode of the step operation."), _("\
7461 When set, doing a step over a function without debug line information\n\
7462 will stop at the first instruction of that function. Otherwise, the\n\
7463 function is skipped and the step command stops at a different source line."),
7464 NULL,
7465 show_step_stop_if_no_debug,
7466 &setlist, &showlist);
7467
7468 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7469 &can_use_displaced_stepping, _("\
7470 Set debugger's willingness to use displaced stepping."), _("\
7471 Show debugger's willingness to use displaced stepping."), _("\
7472 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7473 supported by the target architecture. If off, gdb will not use displaced\n\
7474 stepping to step over breakpoints, even if such is supported by the target\n\
7475 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7476 if the target architecture supports it and non-stop mode is active, but will not\n\
7477 use it in all-stop mode (see help set non-stop)."),
7478 NULL,
7479 show_can_use_displaced_stepping,
7480 &setlist, &showlist);
7481
7482 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7483 &exec_direction, _("Set direction of execution.\n\
7484 Options are 'forward' or 'reverse'."),
7485 _("Show direction of execution (forward/reverse)."),
7486 _("Tells gdb whether to execute forward or backward."),
7487 set_exec_direction_func, show_exec_direction_func,
7488 &setlist, &showlist);
7489
7490 /* Set/show detach-on-fork: user-settable mode. */
7491
7492 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7493 Set whether gdb will detach the child of a fork."), _("\
7494 Show whether gdb will detach the child of a fork."), _("\
7495 Tells gdb whether to detach the child of a fork."),
7496 NULL, NULL, &setlist, &showlist);
7497
7498 /* Set/show disable address space randomization mode. */
7499
7500 add_setshow_boolean_cmd ("disable-randomization", class_support,
7501 &disable_randomization, _("\
7502 Set disabling of debuggee's virtual address space randomization."), _("\
7503 Show disabling of debuggee's virtual address space randomization."), _("\
7504 When this mode is on (which is the default), randomization of the virtual\n\
7505 address space is disabled. Standalone programs run with the randomization\n\
7506 enabled by default on some platforms."),
7507 &set_disable_randomization,
7508 &show_disable_randomization,
7509 &setlist, &showlist);
7510
7511 /* ptid initializations */
7512 inferior_ptid = null_ptid;
7513 target_last_wait_ptid = minus_one_ptid;
7514
7515 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7516 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7517 observer_attach_thread_exit (infrun_thread_thread_exit);
7518 observer_attach_inferior_exit (infrun_inferior_exit);
7519
7520 /* Explicitly create without lookup, since that tries to create a
7521 value with a void typed value, and when we get here, gdbarch
7522 isn't initialized yet. At this point, we're quite sure there
7523 isn't another convenience variable of the same name. */
7524 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7525
7526 add_setshow_boolean_cmd ("observer", no_class,
7527 &observer_mode_1, _("\
7528 Set whether gdb controls the inferior in observer mode."), _("\
7529 Show whether gdb controls the inferior in observer mode."), _("\
7530 In observer mode, GDB can get data from the inferior, but not\n\
7531 affect its execution. Registers and memory may not be changed,\n\
7532 breakpoints may not be set, and the program cannot be interrupted\n\
7533 or signalled."),
7534 set_observer_mode,
7535 show_observer_mode,
7536 &setlist,
7537 &showlist);
7538 }