gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2012 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "gdb_string.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55 #include "continuations.h"
56 #include "interps.h"
57 #include "skip.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111    over such functions.  */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149
150 /* Support for disabling address space randomization. */
151
152 int disable_randomization = 1;
153
154 static void
155 show_disable_randomization (struct ui_file *file, int from_tty,
156 struct cmd_list_element *c, const char *value)
157 {
158 if (target_supports_disable_randomization ())
159 fprintf_filtered (file,
160 _("Disabling randomization of debuggee's "
161 "virtual address space is %s.\n"),
162 value);
163 else
164 fputs_filtered (_("Disabling randomization of debuggee's "
165 "virtual address space is unsupported on\n"
166 "this platform.\n"), file);
167 }
168
169 static void
170 set_disable_randomization (char *args, int from_tty,
171 struct cmd_list_element *c)
172 {
173 if (!target_supports_disable_randomization ())
174 error (_("Disabling randomization of debuggee's "
175 "virtual address space is unsupported on\n"
176 "this platform."));
177 }
178
179
180 /* If the program uses ELF-style shared libraries, then calls to
181 functions in shared libraries go through stubs, which live in a
182 table called the PLT (Procedure Linkage Table). The first time the
183 function is called, the stub sends control to the dynamic linker,
184 which looks up the function's real address, patches the stub so
185 that future calls will go directly to the function, and then passes
186 control to the function.
187
188 If we are stepping at the source level, we don't want to see any of
189 this --- we just want to skip over the stub and the dynamic linker.
190 The simple approach is to single-step until control leaves the
191 dynamic linker.
192
193 However, on some systems (e.g., Red Hat's 5.2 distribution) the
194 dynamic linker calls functions in the shared C library, so you
195 can't tell from the PC alone whether the dynamic linker is still
196 running. In this case, we use a step-resume breakpoint to get us
197 past the dynamic linker, as if we were using "next" to step over a
198 function call.
199
200 in_solib_dynsym_resolve_code() says whether we're in the dynamic
201 linker code or not. Normally, this means we single-step. However,
202    if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
203 address where we can place a step-resume breakpoint to get past the
204 linker's symbol resolution function.
205
206 in_solib_dynsym_resolve_code() can generally be implemented in a
207 pretty portable way, by comparing the PC against the address ranges
208 of the dynamic linker's sections.
209
210 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
211 it depends on internal details of the dynamic linker. It's usually
212 not too hard to figure out where to put a breakpoint, but it
213 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
214 sanity checking. If it can't figure things out, returning zero and
215 getting the (possibly confusing) stepping behavior is better than
216 signalling an error, which will obscure the change in the
217 inferior's state. */
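
/* Illustrative sketch (not part of this file's logic): how the two hooks
   described above fit together.  in_solib_dynsym_resolve_code and the
   gdbarch skip_solib_resolver hook are the current spellings; the control
   flow below is an assumption condensed from the strategy described in the
   comment above, not copied from handle_inferior_event.  */
#if 0
static CORE_ADDR
solib_stub_resume_address (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  /* Not in the dynamic linker: nothing special to do.  */
  if (!in_solib_dynsym_resolve_code (pc))
    return 0;

  /* Ask the port for an address past the resolver.  A return of zero
     means "couldn't figure it out; keep single-stepping".  */
  return gdbarch_skip_solib_resolver (gdbarch, pc);
}
#endif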
218
219 /* This function returns TRUE if pc is the address of an instruction
220 that lies within the dynamic linker (such as the event hook, or the
221 dld itself).
222
223 This function must be used only when a dynamic linker event has
224 been caught, and the inferior is being stepped out of the hook, or
225 undefined results are guaranteed. */
226
227 #ifndef SOLIB_IN_DYNAMIC_LINKER
228 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
229 #endif
230
231 /* "Observer mode" is somewhat like a more extreme version of
232 non-stop, in which all GDB operations that might affect the
233 target's execution have been disabled. */
234
235 static int non_stop_1 = 0;
236
237 int observer_mode = 0;
238 static int observer_mode_1 = 0;
239
240 static void
241 set_observer_mode (char *args, int from_tty,
242 struct cmd_list_element *c)
243 {
244 extern int pagination_enabled;
245
246 if (target_has_execution)
247 {
248 observer_mode_1 = observer_mode;
249 error (_("Cannot change this setting while the inferior is running."));
250 }
251
252 observer_mode = observer_mode_1;
253
254 may_write_registers = !observer_mode;
255 may_write_memory = !observer_mode;
256 may_insert_breakpoints = !observer_mode;
257 may_insert_tracepoints = !observer_mode;
258 /* We can insert fast tracepoints in or out of observer mode,
259      but we enable them automatically when entering this mode.  */
260 if (observer_mode)
261 may_insert_fast_tracepoints = 1;
262 may_stop = !observer_mode;
263 update_target_permissions ();
264
265 /* Going *into* observer mode we must force non-stop, then
266 going out we leave it that way. */
267 if (observer_mode)
268 {
269 target_async_permitted = 1;
270 pagination_enabled = 0;
271 non_stop = non_stop_1 = 1;
272 }
273
274 if (from_tty)
275 printf_filtered (_("Observer mode is now %s.\n"),
276 (observer_mode ? "on" : "off"));
277 }
278
279 static void
280 show_observer_mode (struct ui_file *file, int from_tty,
281 struct cmd_list_element *c, const char *value)
282 {
283 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
284 }
285
286 /* This updates the value of observer mode based on changes in
287 permissions. Note that we are deliberately ignoring the values of
288 may-write-registers and may-write-memory, since the user may have
289 reason to enable these during a session, for instance to turn on a
290 debugging-related global. */
291
292 void
293 update_observer_mode (void)
294 {
295 int newval;
296
297 newval = (!may_insert_breakpoints
298 && !may_insert_tracepoints
299 && may_insert_fast_tracepoints
300 && !may_stop
301 && non_stop);
302
303 /* Let the user know if things change. */
304 if (newval != observer_mode)
305 printf_filtered (_("Observer mode is now %s.\n"),
306 (newval ? "on" : "off"));
307
308 observer_mode = observer_mode_1 = newval;
309 }
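
/* For reference, a sketch of the permission settings that
   update_observer_mode treats as equivalent to observer mode being on.
   The command spellings are assumptions; the variables they control are
   the ones tested above:

     (gdb) set may-insert-breakpoints off
     (gdb) set may-insert-tracepoints off
     (gdb) set may-insert-fast-tracepoints on
     (gdb) set may-stop off
     (gdb) set non-stop on

   Flipping any of these back prints "Observer mode is now off."  */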
310
311 /* Tables of how to react to signals; the user sets them. */
312
313 static unsigned char *signal_stop;
314 static unsigned char *signal_print;
315 static unsigned char *signal_program;
316
317 /* Table of signals that the target may silently handle.
318 This is automatically determined from the flags above,
319 and simply cached here. */
320 static unsigned char *signal_pass;
321
322 #define SET_SIGS(nsigs,sigs,flags) \
323 do { \
324 int signum = (nsigs); \
325 while (signum-- > 0) \
326 if ((sigs)[signum]) \
327 (flags)[signum] = 1; \
328 } while (0)
329
330 #define UNSET_SIGS(nsigs,sigs,flags) \
331 do { \
332 int signum = (nsigs); \
333 while (signum-- > 0) \
334 if ((sigs)[signum]) \
335 (flags)[signum] = 0; \
336 } while (0)
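
/* Usage sketch (an assumption mirroring how the "handle" command applies
   these macros): after parsing the command's signal list into SIGS, a
   clause such as "stop print nopass" becomes

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);
     UNSET_SIGS (nsigs, sigs, signal_program);

   i.e. each keyword just flips the corresponding per-signal flag array.  */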
337
338 /* Value to pass to target_resume() to cause all threads to resume. */
339
340 #define RESUME_ALL minus_one_ptid
341
342 /* Command list pointer for the "stop" placeholder. */
343
344 static struct cmd_list_element *stop_command;
345
346 /* Function inferior was in as of last step command. */
347
348 static struct symbol *step_start_function;
349
350 /* Nonzero if we want to give control to the user when we're notified
351 of shared library events by the dynamic linker. */
352 int stop_on_solib_events;
353 static void
354 show_stop_on_solib_events (struct ui_file *file, int from_tty,
355 struct cmd_list_element *c, const char *value)
356 {
357 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
358 value);
359 }
360
361 /* Nonzero means we are expecting a trace trap
362 and should stop the inferior and return silently when it happens. */
363
364 int stop_after_trap;
365
366 /* Save register contents here when executing a "finish" command or when
367 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
368 Thus this contains the return value from the called function (assuming
369 values are returned in a register). */
370
371 struct regcache *stop_registers;
372
373 /* Nonzero after stop if current stack frame should be printed. */
374
375 static int stop_print_frame;
376
377 /* This is a cached copy of the pid/waitstatus of the last event
378 returned by target_wait()/deprecated_target_wait_hook(). This
379 information is returned by get_last_target_status(). */
380 static ptid_t target_last_wait_ptid;
381 static struct target_waitstatus target_last_waitstatus;
382
383 static void context_switch (ptid_t ptid);
384
385 void init_thread_stepping_state (struct thread_info *tss);
386
387 void init_infwait_state (void);
388
389 static const char follow_fork_mode_child[] = "child";
390 static const char follow_fork_mode_parent[] = "parent";
391
392 static const char *const follow_fork_mode_kind_names[] = {
393 follow_fork_mode_child,
394 follow_fork_mode_parent,
395 NULL
396 };
397
398 static const char *follow_fork_mode_string = follow_fork_mode_parent;
399 static void
400 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
401 struct cmd_list_element *c, const char *value)
402 {
403 fprintf_filtered (file,
404 _("Debugger response to a program "
405 "call of fork or vfork is \"%s\".\n"),
406 value);
407 }
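
/* Usage sketch (command spelling assumed; the mode strings are the ones
   defined above):

     (gdb) set follow-fork-mode child    -- stay with the child after fork
     (gdb) set follow-fork-mode parent   -- default: stay with the parent
     (gdb) show follow-fork-mode

   Combined with "set detach-on-fork off", both sides of the fork can be
   kept under debug.  */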
408 \f
409
410 /* Tell the target to follow the fork we're stopped at. Returns true
411 if the inferior should be resumed; false, if the target for some
412 reason decided it's best not to resume. */
413
414 static int
415 follow_fork (void)
416 {
417 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
418 int should_resume = 1;
419 struct thread_info *tp;
420
421 /* Copy user stepping state to the new inferior thread. FIXME: the
422 followed fork child thread should have a copy of most of the
423 parent thread structure's run control related fields, not just these.
424 Initialized to avoid "may be used uninitialized" warnings from gcc. */
425 struct breakpoint *step_resume_breakpoint = NULL;
426 struct breakpoint *exception_resume_breakpoint = NULL;
427 CORE_ADDR step_range_start = 0;
428 CORE_ADDR step_range_end = 0;
429 struct frame_id step_frame_id = { 0 };
430
431 if (!non_stop)
432 {
433 ptid_t wait_ptid;
434 struct target_waitstatus wait_status;
435
436 /* Get the last target status returned by target_wait(). */
437 get_last_target_status (&wait_ptid, &wait_status);
438
439 /* If not stopped at a fork event, then there's nothing else to
440 do. */
441 if (wait_status.kind != TARGET_WAITKIND_FORKED
442 && wait_status.kind != TARGET_WAITKIND_VFORKED)
443 return 1;
444
445 /* Check if we switched over from WAIT_PTID, since the event was
446 reported. */
447 if (!ptid_equal (wait_ptid, minus_one_ptid)
448 && !ptid_equal (inferior_ptid, wait_ptid))
449 {
450 /* We did. Switch back to WAIT_PTID thread, to tell the
451 target to follow it (in either direction). We'll
452 afterwards refuse to resume, and inform the user what
453 happened. */
454 switch_to_thread (wait_ptid);
455 should_resume = 0;
456 }
457 }
458
459 tp = inferior_thread ();
460
461 /* If there were any forks/vforks that were caught and are now to be
462 followed, then do so now. */
463 switch (tp->pending_follow.kind)
464 {
465 case TARGET_WAITKIND_FORKED:
466 case TARGET_WAITKIND_VFORKED:
467 {
468 ptid_t parent, child;
469
470 /* If the user did a next/step, etc, over a fork call,
471 preserve the stepping state in the fork child. */
472 if (follow_child && should_resume)
473 {
474 step_resume_breakpoint = clone_momentary_breakpoint
475 (tp->control.step_resume_breakpoint);
476 step_range_start = tp->control.step_range_start;
477 step_range_end = tp->control.step_range_end;
478 step_frame_id = tp->control.step_frame_id;
479 exception_resume_breakpoint
480 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
481
482 /* For now, delete the parent's sr breakpoint, otherwise,
483 parent/child sr breakpoints are considered duplicates,
484 and the child version will not be installed. Remove
485 this when the breakpoints module becomes aware of
486 inferiors and address spaces. */
487 delete_step_resume_breakpoint (tp);
488 tp->control.step_range_start = 0;
489 tp->control.step_range_end = 0;
490 tp->control.step_frame_id = null_frame_id;
491 delete_exception_resume_breakpoint (tp);
492 }
493
494 parent = inferior_ptid;
495 child = tp->pending_follow.value.related_pid;
496
497 /* Tell the target to do whatever is necessary to follow
498 either parent or child. */
499 if (target_follow_fork (follow_child))
500 {
501 /* Target refused to follow, or there's some other reason
502 we shouldn't resume. */
503 should_resume = 0;
504 }
505 else
506 {
507 /* This pending follow fork event is now handled, one way
508 or another. The previous selected thread may be gone
509              from the lists by now, but if it is still around, we need
510 to clear the pending follow request. */
511 tp = find_thread_ptid (parent);
512 if (tp)
513 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
514
515 /* This makes sure we don't try to apply the "Switched
516              over from WAIT_PTID" logic above.  */
517 nullify_last_target_wait_ptid ();
518
519 /* If we followed the child, switch to it... */
520 if (follow_child)
521 {
522 switch_to_thread (child);
523
524 /* ... and preserve the stepping state, in case the
525 user was stepping over the fork call. */
526 if (should_resume)
527 {
528 tp = inferior_thread ();
529 tp->control.step_resume_breakpoint
530 = step_resume_breakpoint;
531 tp->control.step_range_start = step_range_start;
532 tp->control.step_range_end = step_range_end;
533 tp->control.step_frame_id = step_frame_id;
534 tp->control.exception_resume_breakpoint
535 = exception_resume_breakpoint;
536 }
537 else
538 {
539 /* If we get here, it was because we're trying to
540 resume from a fork catchpoint, but, the user
541 has switched threads away from the thread that
542 forked. In that case, the resume command
543 issued is most likely not applicable to the
544 child, so just warn, and refuse to resume. */
545 warning (_("Not resuming: switched threads "
546 "before following fork child.\n"));
547 }
548
549 /* Reset breakpoints in the child as appropriate. */
550 follow_inferior_reset_breakpoints ();
551 }
552 else
553 switch_to_thread (parent);
554 }
555 }
556 break;
557 case TARGET_WAITKIND_SPURIOUS:
558 /* Nothing to follow. */
559 break;
560 default:
561 internal_error (__FILE__, __LINE__,
562 "Unexpected pending_follow.kind %d\n",
563 tp->pending_follow.kind);
564 break;
565 }
566
567 return should_resume;
568 }
569
570 void
571 follow_inferior_reset_breakpoints (void)
572 {
573 struct thread_info *tp = inferior_thread ();
574
575 /* Was there a step_resume breakpoint? (There was if the user
576 did a "next" at the fork() call.) If so, explicitly reset its
577 thread number.
578
579 step_resumes are a form of bp that are made to be per-thread.
580 Since we created the step_resume bp when the parent process
581 was being debugged, and now are switching to the child process,
582 from the breakpoint package's viewpoint, that's a switch of
583 "threads". We must update the bp's notion of which thread
584 it is for, or it'll be ignored when it triggers. */
585
586 if (tp->control.step_resume_breakpoint)
587 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
588
589 if (tp->control.exception_resume_breakpoint)
590 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
591
592 /* Reinsert all breakpoints in the child. The user may have set
593 breakpoints after catching the fork, in which case those
594 were never set in the child, but only in the parent. This makes
595 sure the inserted breakpoints match the breakpoint list. */
596
597 breakpoint_re_set ();
598 insert_breakpoints ();
599 }
600
601 /* The vfork child has exited or execed: resume those threads of the
602    parent that the user wanted to be executing.  */
603
604 static int
605 proceed_after_vfork_done (struct thread_info *thread,
606 void *arg)
607 {
608 int pid = * (int *) arg;
609
610 if (ptid_get_pid (thread->ptid) == pid
611 && is_running (thread->ptid)
612 && !is_executing (thread->ptid)
613 && !thread->stop_requested
614 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
615 {
616 if (debug_infrun)
617 fprintf_unfiltered (gdb_stdlog,
618 "infrun: resuming vfork parent thread %s\n",
619 target_pid_to_str (thread->ptid));
620
621 switch_to_thread (thread->ptid);
622 clear_proceed_status ();
623 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
624 }
625
626 return 0;
627 }
628
629 /* Called whenever we notice an exec or exit event, to handle
630 detaching or resuming a vfork parent. */
631
632 static void
633 handle_vfork_child_exec_or_exit (int exec)
634 {
635 struct inferior *inf = current_inferior ();
636
637 if (inf->vfork_parent)
638 {
639 int resume_parent = -1;
640
641 /* This exec or exit marks the end of the shared memory region
642 between the parent and the child. If the user wanted to
643 detach from the parent, now is the time. */
644
645 if (inf->vfork_parent->pending_detach)
646 {
647 struct thread_info *tp;
648 struct cleanup *old_chain;
649 struct program_space *pspace;
650 struct address_space *aspace;
651
652 /* follow-fork child, detach-on-fork on. */
653
654 old_chain = make_cleanup_restore_current_thread ();
655
656       /* We're letting go of the parent.  */
657 tp = any_live_thread_of_process (inf->vfork_parent->pid);
658 switch_to_thread (tp->ptid);
659
660 /* We're about to detach from the parent, which implicitly
661 removes breakpoints from its address space. There's a
662 catch here: we want to reuse the spaces for the child,
663 but, parent/child are still sharing the pspace at this
664 point, although the exec in reality makes the kernel give
665 the child a fresh set of new pages. The problem here is
666          that the breakpoints module, being unaware of this, would
667          likely choose the child process to write to the parent
668 address space. Swapping the child temporarily away from
669 the spaces has the desired effect. Yes, this is "sort
670 of" a hack. */
671
672 pspace = inf->pspace;
673 aspace = inf->aspace;
674 inf->aspace = NULL;
675 inf->pspace = NULL;
676
677 if (debug_infrun || info_verbose)
678 {
679 target_terminal_ours ();
680
681 if (exec)
682 fprintf_filtered (gdb_stdlog,
683 "Detaching vfork parent process "
684 "%d after child exec.\n",
685 inf->vfork_parent->pid);
686 else
687 fprintf_filtered (gdb_stdlog,
688 "Detaching vfork parent process "
689 "%d after child exit.\n",
690 inf->vfork_parent->pid);
691 }
692
693 target_detach (NULL, 0);
694
695 /* Put it back. */
696 inf->pspace = pspace;
697 inf->aspace = aspace;
698
699 do_cleanups (old_chain);
700 }
701 else if (exec)
702 {
703 /* We're staying attached to the parent, so, really give the
704 child a new address space. */
705 inf->pspace = add_program_space (maybe_new_address_space ());
706 inf->aspace = inf->pspace->aspace;
707 inf->removable = 1;
708 set_current_program_space (inf->pspace);
709
710 resume_parent = inf->vfork_parent->pid;
711
712 /* Break the bonds. */
713 inf->vfork_parent->vfork_child = NULL;
714 }
715 else
716 {
717 struct cleanup *old_chain;
718 struct program_space *pspace;
719
720 /* If this is a vfork child exiting, then the pspace and
721 aspaces were shared with the parent. Since we're
722 reporting the process exit, we'll be mourning all that is
723 found in the address space, and switching to null_ptid,
724 preparing to start a new inferior. But, since we don't
725 want to clobber the parent's address/program spaces, we
726 go ahead and create a new one for this exiting
727 inferior. */
728
729 /* Switch to null_ptid, so that clone_program_space doesn't want
730 to read the selected frame of a dead process. */
731 old_chain = save_inferior_ptid ();
732 inferior_ptid = null_ptid;
733
734 /* This inferior is dead, so avoid giving the breakpoints
735 module the option to write through to it (cloning a
736 program space resets breakpoints). */
737 inf->aspace = NULL;
738 inf->pspace = NULL;
739 pspace = add_program_space (maybe_new_address_space ());
740 set_current_program_space (pspace);
741 inf->removable = 1;
742 clone_program_space (pspace, inf->vfork_parent->pspace);
743 inf->pspace = pspace;
744 inf->aspace = pspace->aspace;
745
746 /* Put back inferior_ptid. We'll continue mourning this
747 inferior. */
748 do_cleanups (old_chain);
749
750 resume_parent = inf->vfork_parent->pid;
751 /* Break the bonds. */
752 inf->vfork_parent->vfork_child = NULL;
753 }
754
755 inf->vfork_parent = NULL;
756
757 gdb_assert (current_program_space == inf->pspace);
758
759 if (non_stop && resume_parent != -1)
760 {
761 /* If the user wanted the parent to be running, let it go
762 free now. */
763 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
764
765 if (debug_infrun)
766 fprintf_unfiltered (gdb_stdlog,
767 "infrun: resuming vfork parent process %d\n",
768 resume_parent);
769
770 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
771
772 do_cleanups (old_chain);
773 }
774 }
775 }
776
777 /* Enum strings for "set|show follow-exec-mode".  */
778
779 static const char follow_exec_mode_new[] = "new";
780 static const char follow_exec_mode_same[] = "same";
781 static const char *const follow_exec_mode_names[] =
782 {
783 follow_exec_mode_new,
784 follow_exec_mode_same,
785 NULL,
786 };
787
788 static const char *follow_exec_mode_string = follow_exec_mode_same;
789 static void
790 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
791 struct cmd_list_element *c, const char *value)
792 {
793 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
794 }
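
/* Usage sketch (command spelling assumed; the values are the enum strings
   defined above):

     (gdb) set follow-exec-mode same   -- default: reuse the inferior
     (gdb) set follow-exec-mode new    -- keep the pre-exec inferior around
                                          and create a fresh one for the
                                          exec'd program

   follow_exec below implements the "new" case by adding a new inferior and
   program space and switching to them.  */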
795
796 /* EXECD_PATHNAME is assumed to be non-NULL. */
797
798 static void
799 follow_exec (ptid_t pid, char *execd_pathname)
800 {
801 struct thread_info *th = inferior_thread ();
802 struct inferior *inf = current_inferior ();
803
804 /* This is an exec event that we actually wish to pay attention to.
805 Refresh our symbol table to the newly exec'd program, remove any
806 momentary bp's, etc.
807
808 If there are breakpoints, they aren't really inserted now,
809 since the exec() transformed our inferior into a fresh set
810 of instructions.
811
812 We want to preserve symbolic breakpoints on the list, since
813 we have hopes that they can be reset after the new a.out's
814 symbol table is read.
815
816 However, any "raw" breakpoints must be removed from the list
817 (e.g., the solib bp's), since their address is probably invalid
818 now.
819
820 And, we DON'T want to call delete_breakpoints() here, since
821 that may write the bp's "shadow contents" (the instruction
822      value that was overwritten with a TRAP instruction).  Since
823 we now have a new a.out, those shadow contents aren't valid. */
824
825 mark_breakpoints_out ();
826
827 update_breakpoints_after_exec ();
828
829 /* If there was one, it's gone now. We cannot truly step-to-next
830 statement through an exec(). */
831 th->control.step_resume_breakpoint = NULL;
832 th->control.exception_resume_breakpoint = NULL;
833 th->control.step_range_start = 0;
834 th->control.step_range_end = 0;
835
836 /* The target reports the exec event to the main thread, even if
837 some other thread does the exec, and even if the main thread was
838 already stopped --- if debugging in non-stop mode, it's possible
839 the user had the main thread held stopped in the previous image
840 --- release it now. This is the same behavior as step-over-exec
841 with scheduler-locking on in all-stop mode. */
842 th->stop_requested = 0;
843
844 /* What is this a.out's name? */
845 printf_unfiltered (_("%s is executing new program: %s\n"),
846 target_pid_to_str (inferior_ptid),
847 execd_pathname);
848
849 /* We've followed the inferior through an exec. Therefore, the
850 inferior has essentially been killed & reborn. */
851
852 gdb_flush (gdb_stdout);
853
854 breakpoint_init_inferior (inf_execd);
855
856 if (gdb_sysroot && *gdb_sysroot)
857 {
858 char *name = alloca (strlen (gdb_sysroot)
859 + strlen (execd_pathname)
860 + 1);
861
862 strcpy (name, gdb_sysroot);
863 strcat (name, execd_pathname);
864 execd_pathname = name;
865 }
866
867 /* Reset the shared library package. This ensures that we get a
868 shlib event when the child reaches "_start", at which point the
869 dld will have had a chance to initialize the child. */
870 /* Also, loading a symbol file below may trigger symbol lookups, and
871 we don't want those to be satisfied by the libraries of the
872 previous incarnation of this process. */
873 no_shared_libraries (NULL, 0);
874
875 if (follow_exec_mode_string == follow_exec_mode_new)
876 {
877 struct program_space *pspace;
878
879 /* The user wants to keep the old inferior and program spaces
880 around. Create a new fresh one, and switch to it. */
881
882 inf = add_inferior (current_inferior ()->pid);
883 pspace = add_program_space (maybe_new_address_space ());
884 inf->pspace = pspace;
885 inf->aspace = pspace->aspace;
886
887 exit_inferior_num_silent (current_inferior ()->num);
888
889 set_current_inferior (inf);
890 set_current_program_space (pspace);
891 }
892
893 gdb_assert (current_program_space == inf->pspace);
894
895 /* That a.out is now the one to use. */
896 exec_file_attach (execd_pathname, 0);
897
898   /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
899      (Position Independent Executable) main symbol file gets applied by
900      solib_create_inferior_hook below; breakpoint_re_set would otherwise try
901      to insert the breakpoints with a zero displacement and fail.  */
902
903 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
904 NULL, 0);
905
906 set_initial_language ();
907
908 #ifdef SOLIB_CREATE_INFERIOR_HOOK
909 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
910 #else
911 solib_create_inferior_hook (0);
912 #endif
913
914 jit_inferior_created_hook ();
915
916 breakpoint_re_set ();
917
918 /* Reinsert all breakpoints. (Those which were symbolic have
919 been reset to the proper address in the new a.out, thanks
920 to symbol_file_command...). */
921 insert_breakpoints ();
922
923 /* The next resume of this inferior should bring it to the shlib
924 startup breakpoints. (If the user had also set bp's on
925 "main" from the old (parent) process, then they'll auto-
926 matically get reset there in the new process.). */
927 }
928
929 /* Non-zero if we are just simulating a single-step.  This is needed
930 because we cannot remove the breakpoints in the inferior process
931 until after the `wait' in `wait_for_inferior'. */
932 static int singlestep_breakpoints_inserted_p = 0;
933
934 /* The thread we inserted single-step breakpoints for. */
935 static ptid_t singlestep_ptid;
936
937 /* PC when we started this single-step. */
938 static CORE_ADDR singlestep_pc;
939
940 /* If another thread hit the singlestep breakpoint, we save the original
941 thread here so that we can resume single-stepping it later. */
942 static ptid_t saved_singlestep_ptid;
943 static int stepping_past_singlestep_breakpoint;
944
945 /* If not equal to null_ptid, this means that after stepping over a breakpoint
946 is finished, we need to switch to deferred_step_ptid, and step it.
947
948 The use case is when one thread has hit a breakpoint, and then the user
949 has switched to another thread and issued 'step'. We need to step over
950    the breakpoint in the thread which hit it, but then continue
951 stepping the thread user has selected. */
952 static ptid_t deferred_step_ptid;
953 \f
954 /* Displaced stepping. */
955
956 /* In non-stop debugging mode, we must take special care to manage
957 breakpoints properly; in particular, the traditional strategy for
958 stepping a thread past a breakpoint it has hit is unsuitable.
959 'Displaced stepping' is a tactic for stepping one thread past a
960 breakpoint it has hit while ensuring that other threads running
961 concurrently will hit the breakpoint as they should.
962
963 The traditional way to step a thread T off a breakpoint in a
964 multi-threaded program in all-stop mode is as follows:
965
966 a0) Initially, all threads are stopped, and breakpoints are not
967 inserted.
968 a1) We single-step T, leaving breakpoints uninserted.
969 a2) We insert breakpoints, and resume all threads.
970
971 In non-stop debugging, however, this strategy is unsuitable: we
972 don't want to have to stop all threads in the system in order to
973 continue or step T past a breakpoint. Instead, we use displaced
974 stepping:
975
976 n0) Initially, T is stopped, other threads are running, and
977 breakpoints are inserted.
978 n1) We copy the instruction "under" the breakpoint to a separate
979 location, outside the main code stream, making any adjustments
980 to the instruction, register, and memory state as directed by
981 T's architecture.
982 n2) We single-step T over the instruction at its new location.
983 n3) We adjust the resulting register and memory state as directed
984 by T's architecture. This includes resetting T's PC to point
985 back into the main instruction stream.
986 n4) We resume T.
987
988 This approach depends on the following gdbarch methods:
989
990 - gdbarch_max_insn_length and gdbarch_displaced_step_location
991 indicate where to copy the instruction, and how much space must
992 be reserved there. We use these in step n1.
993
994    - gdbarch_displaced_step_copy_insn copies an instruction to a new
995 address, and makes any necessary adjustments to the instruction,
996 register contents, and memory. We use this in step n1.
997
998 - gdbarch_displaced_step_fixup adjusts registers and memory after
999      we have successfully single-stepped the instruction, to yield the
1000 same effect the instruction would have had if we had executed it
1001 at its original address. We use this in step n3.
1002
1003 - gdbarch_displaced_step_free_closure provides cleanup.
1004
1005 The gdbarch_displaced_step_copy_insn and
1006 gdbarch_displaced_step_fixup functions must be written so that
1007 copying an instruction with gdbarch_displaced_step_copy_insn,
1008 single-stepping across the copied instruction, and then applying
1009    gdbarch_displaced_step_fixup should have the same effects on the
1010 thread's memory and registers as stepping the instruction in place
1011 would have. Exactly which responsibilities fall to the copy and
1012 which fall to the fixup is up to the author of those functions.
1013
1014 See the comments in gdbarch.sh for details.
1015
1016 Note that displaced stepping and software single-step cannot
1017 currently be used in combination, although with some care I think
1018 they could be made to. Software single-step works by placing
1019 breakpoints on all possible subsequent instructions; if the
1020 displaced instruction is a PC-relative jump, those breakpoints
1021 could fall in very strange places --- on pages that aren't
1022 executable, or at addresses that are not proper instruction
1023 boundaries. (We do generally let other threads run while we wait
1024 to hit the software single-step breakpoint, and they might
1025 encounter such a corrupted instruction.) One way to work around
1026 this would be to have gdbarch_displaced_step_copy_insn fully
1027 simulate the effect of PC-relative instructions (and return NULL)
1028 on architectures that use software single-stepping.
1029
1030 In non-stop mode, we can have independent and simultaneous step
1031 requests, so more than one thread may need to simultaneously step
1032 over a breakpoint. The current implementation assumes there is
1033 only one scratch space per process. In this case, we have to
1034 serialize access to the scratch space. If thread A wants to step
1035 over a breakpoint, but we are currently waiting for some other
1036 thread to complete a displaced step, we leave thread A stopped and
1037 place it in the displaced_step_request_queue. Whenever a displaced
1038 step finishes, we pick the next thread in the queue and start a new
1039 displaced step operation on it. See displaced_step_prepare and
1040 displaced_step_fixup for details. */
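
/* Condensed sketch of steps n1-n4 above in terms of the gdbarch hooks just
   listed.  Illustrative only; the real sequence, including the request
   queue, scratch-area save/restore, and error handling, lives in
   displaced_step_prepare and displaced_step_fixup below.  */
#if 0
/* One displaced step of the thread whose regcache is REGCACHE, on
   architecture GDBARCH, resumed as PTID.  */
static void
displaced_step_sketch (struct gdbarch *gdbarch, struct regcache *regcache,
                       ptid_t ptid)
{
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  struct displaced_step_closure *closure;

  /* n1: copy the instruction out of line, adjusting as needed.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  target_resume (ptid, 1, TARGET_SIGNAL_0);

  /* ... once the step reports back as a SIGTRAP ...  */

  /* n3: make registers/memory look as if the insn ran at ORIGINAL.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);

  /* n4: resume the thread normally; free the closure when done.  */
  gdbarch_displaced_step_free_closure (gdbarch, closure);
}
#endif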
1041
1042 struct displaced_step_request
1043 {
1044 ptid_t ptid;
1045 struct displaced_step_request *next;
1046 };
1047
1048 /* Per-inferior displaced stepping state. */
1049 struct displaced_step_inferior_state
1050 {
1051 /* Pointer to next in linked list. */
1052 struct displaced_step_inferior_state *next;
1053
1054 /* The process this displaced step state refers to. */
1055 int pid;
1056
1057 /* A queue of pending displaced stepping requests. One entry per
1058 thread that needs to do a displaced step. */
1059 struct displaced_step_request *step_request_queue;
1060
1061 /* If this is not null_ptid, this is the thread carrying out a
1062 displaced single-step in process PID. This thread's state will
1063 require fixing up once it has completed its step. */
1064 ptid_t step_ptid;
1065
1066 /* The architecture the thread had when we stepped it. */
1067 struct gdbarch *step_gdbarch;
1068
1069   /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1070 for post-step cleanup. */
1071 struct displaced_step_closure *step_closure;
1072
1073 /* The address of the original instruction, and the copy we
1074 made. */
1075 CORE_ADDR step_original, step_copy;
1076
1077 /* Saved contents of copy area. */
1078 gdb_byte *step_saved_copy;
1079 };
1080
1081 /* The list of states of processes involved in displaced stepping
1082 presently. */
1083 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1084
1085 /* Get the displaced stepping state of process PID. */
1086
1087 static struct displaced_step_inferior_state *
1088 get_displaced_stepping_state (int pid)
1089 {
1090 struct displaced_step_inferior_state *state;
1091
1092 for (state = displaced_step_inferior_states;
1093 state != NULL;
1094 state = state->next)
1095 if (state->pid == pid)
1096 return state;
1097
1098 return NULL;
1099 }
1100
1101 /* Add a new displaced stepping state for process PID to the displaced
1102    stepping state list, or return a pointer to the existing entry if
1103    one is already present.  Never returns NULL.  */
1104
1105 static struct displaced_step_inferior_state *
1106 add_displaced_stepping_state (int pid)
1107 {
1108 struct displaced_step_inferior_state *state;
1109
1110 for (state = displaced_step_inferior_states;
1111 state != NULL;
1112 state = state->next)
1113 if (state->pid == pid)
1114 return state;
1115
1116 state = xcalloc (1, sizeof (*state));
1117 state->pid = pid;
1118 state->next = displaced_step_inferior_states;
1119 displaced_step_inferior_states = state;
1120
1121 return state;
1122 }
1123
1124 /* If the inferior is in displaced stepping, and ADDR equals the starting
1125    address of the copy area, return the corresponding displaced_step_closure.
1126    Otherwise, return NULL.  */
1127
1128 struct displaced_step_closure*
1129 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1130 {
1131 struct displaced_step_inferior_state *displaced
1132 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1133
1134   /* Is ADDR the start of the copy area of an in-progress displaced step?  */
1135 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1136 && (displaced->step_copy == addr))
1137 return displaced->step_closure;
1138
1139 return NULL;
1140 }
1141
1142 /* Remove the displaced stepping state of process PID. */
1143
1144 static void
1145 remove_displaced_stepping_state (int pid)
1146 {
1147 struct displaced_step_inferior_state *it, **prev_next_p;
1148
1149 gdb_assert (pid != 0);
1150
1151 it = displaced_step_inferior_states;
1152 prev_next_p = &displaced_step_inferior_states;
1153 while (it)
1154 {
1155 if (it->pid == pid)
1156 {
1157 *prev_next_p = it->next;
1158 xfree (it);
1159 return;
1160 }
1161
1162 prev_next_p = &it->next;
1163 it = *prev_next_p;
1164 }
1165 }
1166
1167 static void
1168 infrun_inferior_exit (struct inferior *inf)
1169 {
1170 remove_displaced_stepping_state (inf->pid);
1171 }
1172
1173 /* Enum strings for "set|show displaced-stepping". */
1174
1175 static const char can_use_displaced_stepping_auto[] = "auto";
1176 static const char can_use_displaced_stepping_on[] = "on";
1177 static const char can_use_displaced_stepping_off[] = "off";
1178 static const char *const can_use_displaced_stepping_enum[] =
1179 {
1180 can_use_displaced_stepping_auto,
1181 can_use_displaced_stepping_on,
1182 can_use_displaced_stepping_off,
1183 NULL,
1184 };
1185
1186 /* If ON, and the architecture supports it, GDB will use displaced
1187 stepping to step over breakpoints. If OFF, or if the architecture
1188 doesn't support it, GDB will instead use the traditional
1189 hold-and-step approach. If AUTO (which is the default), GDB will
1190 decide which technique to use to step over breakpoints depending on
1191 which of all-stop or non-stop mode is active --- displaced stepping
1192 in non-stop mode; hold-and-step in all-stop mode. */
1193
1194 static const char *can_use_displaced_stepping =
1195 can_use_displaced_stepping_auto;
1196
1197 static void
1198 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1199 struct cmd_list_element *c,
1200 const char *value)
1201 {
1202 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1203 fprintf_filtered (file,
1204 _("Debugger's willingness to use displaced stepping "
1205 "to step over breakpoints is %s (currently %s).\n"),
1206 value, non_stop ? "on" : "off");
1207 else
1208 fprintf_filtered (file,
1209 _("Debugger's willingness to use displaced stepping "
1210 "to step over breakpoints is %s.\n"), value);
1211 }
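
/* Usage sketch (command spelling assumed; the values are the enum strings
   defined above):

     (gdb) set displaced-stepping auto   -- default: on in non-stop mode
     (gdb) set displaced-stepping on
     (gdb) set displaced-stepping off
     (gdb) show displaced-stepping

   use_displaced_stepping below turns this setting, plus architecture
   support, into the actual yes/no decision.  */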
1212
1213 /* Return non-zero if displaced stepping can/should be used to step
1214 over breakpoints. */
1215
1216 static int
1217 use_displaced_stepping (struct gdbarch *gdbarch)
1218 {
1219 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1220 && non_stop)
1221 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1222 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1223 && !RECORD_IS_USED);
1224 }
1225
1226 /* Clean out any stray displaced stepping state. */
1227 static void
1228 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1229 {
1230 /* Indicate that there is no cleanup pending. */
1231 displaced->step_ptid = null_ptid;
1232
1233 if (displaced->step_closure)
1234 {
1235 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1236 displaced->step_closure);
1237 displaced->step_closure = NULL;
1238 }
1239 }
1240
1241 static void
1242 displaced_step_clear_cleanup (void *arg)
1243 {
1244 struct displaced_step_inferior_state *state = arg;
1245
1246 displaced_step_clear (state);
1247 }
1248
1249 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1250 void
1251 displaced_step_dump_bytes (struct ui_file *file,
1252 const gdb_byte *buf,
1253 size_t len)
1254 {
1255 int i;
1256
1257 for (i = 0; i < len; i++)
1258 fprintf_unfiltered (file, "%02x ", buf[i]);
1259 fputs_unfiltered ("\n", file);
1260 }
1261
1262 /* Prepare to single-step, using displaced stepping.
1263
1264 Note that we cannot use displaced stepping when we have a signal to
1265 deliver. If we have a signal to deliver and an instruction to step
1266 over, then after the step, there will be no indication from the
1267 target whether the thread entered a signal handler or ignored the
1268 signal and stepped over the instruction successfully --- both cases
1269 result in a simple SIGTRAP. In the first case we mustn't do a
1270 fixup, and in the second case we must --- but we can't tell which.
1271 Comments in the code for 'random signals' in handle_inferior_event
1272 explain how we handle this case instead.
1273
1274 Returns 1 if preparing was successful -- this thread is going to be
1275 stepped now; or 0 if displaced stepping this thread got queued. */
1276 static int
1277 displaced_step_prepare (ptid_t ptid)
1278 {
1279 struct cleanup *old_cleanups, *ignore_cleanups;
1280 struct regcache *regcache = get_thread_regcache (ptid);
1281 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1282 CORE_ADDR original, copy;
1283 ULONGEST len;
1284 struct displaced_step_closure *closure;
1285 struct displaced_step_inferior_state *displaced;
1286
1287 /* We should never reach this function if the architecture does not
1288 support displaced stepping. */
1289 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1290
1291 /* We have to displaced step one thread at a time, as we only have
1292 access to a single scratch space per inferior. */
1293
1294 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1295
1296 if (!ptid_equal (displaced->step_ptid, null_ptid))
1297 {
1298 /* Already waiting for a displaced step to finish. Defer this
1299          request and place it in the queue.  */
1300 struct displaced_step_request *req, *new_req;
1301
1302 if (debug_displaced)
1303 fprintf_unfiltered (gdb_stdlog,
1304 "displaced: defering step of %s\n",
1305 target_pid_to_str (ptid));
1306
1307 new_req = xmalloc (sizeof (*new_req));
1308 new_req->ptid = ptid;
1309 new_req->next = NULL;
1310
1311 if (displaced->step_request_queue)
1312 {
1313 for (req = displaced->step_request_queue;
1314 req && req->next;
1315 req = req->next)
1316 ;
1317 req->next = new_req;
1318 }
1319 else
1320 displaced->step_request_queue = new_req;
1321
1322 return 0;
1323 }
1324 else
1325 {
1326 if (debug_displaced)
1327 fprintf_unfiltered (gdb_stdlog,
1328 "displaced: stepping %s now\n",
1329 target_pid_to_str (ptid));
1330 }
1331
1332 displaced_step_clear (displaced);
1333
1334 old_cleanups = save_inferior_ptid ();
1335 inferior_ptid = ptid;
1336
1337 original = regcache_read_pc (regcache);
1338
1339 copy = gdbarch_displaced_step_location (gdbarch);
1340 len = gdbarch_max_insn_length (gdbarch);
1341
1342 /* Save the original contents of the copy area. */
1343 displaced->step_saved_copy = xmalloc (len);
1344 ignore_cleanups = make_cleanup (free_current_contents,
1345 &displaced->step_saved_copy);
1346 read_memory (copy, displaced->step_saved_copy, len);
1347 if (debug_displaced)
1348 {
1349 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1350 paddress (gdbarch, copy));
1351 displaced_step_dump_bytes (gdb_stdlog,
1352 displaced->step_saved_copy,
1353 len);
1354 };
1355
1356 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1357 original, copy, regcache);
1358
1359 /* We don't support the fully-simulated case at present. */
1360 gdb_assert (closure);
1361
1362 /* Save the information we need to fix things up if the step
1363 succeeds. */
1364 displaced->step_ptid = ptid;
1365 displaced->step_gdbarch = gdbarch;
1366 displaced->step_closure = closure;
1367 displaced->step_original = original;
1368 displaced->step_copy = copy;
1369
1370 make_cleanup (displaced_step_clear_cleanup, displaced);
1371
1372 /* Resume execution at the copy. */
1373 regcache_write_pc (regcache, copy);
1374
1375 discard_cleanups (ignore_cleanups);
1376
1377 do_cleanups (old_cleanups);
1378
1379 if (debug_displaced)
1380 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1381 paddress (gdbarch, copy));
1382
1383 return 1;
1384 }
1385
1386 static void
1387 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1388 const gdb_byte *myaddr, int len)
1389 {
1390 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1391
1392 inferior_ptid = ptid;
1393 write_memory (memaddr, myaddr, len);
1394 do_cleanups (ptid_cleanup);
1395 }
1396
1397 /* Restore the contents of the copy area for thread PTID. */
1398
1399 static void
1400 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1401 ptid_t ptid)
1402 {
1403 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1404
1405 write_memory_ptid (ptid, displaced->step_copy,
1406 displaced->step_saved_copy, len);
1407 if (debug_displaced)
1408 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1409 target_pid_to_str (ptid),
1410 paddress (displaced->step_gdbarch,
1411 displaced->step_copy));
1412 }
1413
1414 static void
1415 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1416 {
1417 struct cleanup *old_cleanups;
1418 struct displaced_step_inferior_state *displaced
1419 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1420
1421 /* Was any thread of this process doing a displaced step? */
1422 if (displaced == NULL)
1423 return;
1424
1425   /* Was this event for the thread we displaced?  */
1426 if (ptid_equal (displaced->step_ptid, null_ptid)
1427 || ! ptid_equal (displaced->step_ptid, event_ptid))
1428 return;
1429
1430 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1431
1432 displaced_step_restore (displaced, displaced->step_ptid);
1433
1434 /* Did the instruction complete successfully? */
1435 if (signal == TARGET_SIGNAL_TRAP)
1436 {
1437 /* Fix up the resulting state. */
1438 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1439 displaced->step_closure,
1440 displaced->step_original,
1441 displaced->step_copy,
1442 get_thread_regcache (displaced->step_ptid));
1443 }
1444 else
1445 {
1446 /* Since the instruction didn't complete, all we can do is
1447 relocate the PC. */
1448 struct regcache *regcache = get_thread_regcache (event_ptid);
1449 CORE_ADDR pc = regcache_read_pc (regcache);
1450
1451 pc = displaced->step_original + (pc - displaced->step_copy);
1452 regcache_write_pc (regcache, pc);
1453 }
1454
1455 do_cleanups (old_cleanups);
1456
1457 displaced->step_ptid = null_ptid;
1458
1459 /* Are there any pending displaced stepping requests? If so, run
1460 one now. Leave the state object around, since we're likely to
1461 need it again soon. */
1462 while (displaced->step_request_queue)
1463 {
1464 struct displaced_step_request *head;
1465 ptid_t ptid;
1466 struct regcache *regcache;
1467 struct gdbarch *gdbarch;
1468 CORE_ADDR actual_pc;
1469 struct address_space *aspace;
1470
1471 head = displaced->step_request_queue;
1472 ptid = head->ptid;
1473 displaced->step_request_queue = head->next;
1474 xfree (head);
1475
1476 context_switch (ptid);
1477
1478 regcache = get_thread_regcache (ptid);
1479 actual_pc = regcache_read_pc (regcache);
1480 aspace = get_regcache_aspace (regcache);
1481
1482 if (breakpoint_here_p (aspace, actual_pc))
1483 {
1484 if (debug_displaced)
1485 fprintf_unfiltered (gdb_stdlog,
1486 "displaced: stepping queued %s now\n",
1487 target_pid_to_str (ptid));
1488
1489 displaced_step_prepare (ptid);
1490
1491 gdbarch = get_regcache_arch (regcache);
1492
1493 if (debug_displaced)
1494 {
1495 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1496 gdb_byte buf[4];
1497
1498 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1499 paddress (gdbarch, actual_pc));
1500 read_memory (actual_pc, buf, sizeof (buf));
1501 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1502 }
1503
1504 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1505 displaced->step_closure))
1506 target_resume (ptid, 1, TARGET_SIGNAL_0);
1507 else
1508 target_resume (ptid, 0, TARGET_SIGNAL_0);
1509
1510 /* Done, we're stepping a thread. */
1511 break;
1512 }
1513 else
1514 {
1515 int step;
1516 struct thread_info *tp = inferior_thread ();
1517
1518 /* The breakpoint we were sitting under has since been
1519 removed. */
1520 tp->control.trap_expected = 0;
1521
1522 /* Go back to what we were trying to do. */
1523 step = currently_stepping (tp);
1524
1525 if (debug_displaced)
1526 fprintf_unfiltered (gdb_stdlog,
1527 "breakpoint is gone %s: step(%d)\n",
1528 target_pid_to_str (tp->ptid), step);
1529
1530 target_resume (ptid, step, TARGET_SIGNAL_0);
1531 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1532
1533 /* This request was discarded. See if there's any other
1534 thread waiting for its turn. */
1535 }
1536 }
1537 }
1538
1539 /* Update global variables holding ptids to hold NEW_PTID if they were
1540 holding OLD_PTID. */
1541 static void
1542 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1543 {
1544 struct displaced_step_request *it;
1545 struct displaced_step_inferior_state *displaced;
1546
1547 if (ptid_equal (inferior_ptid, old_ptid))
1548 inferior_ptid = new_ptid;
1549
1550 if (ptid_equal (singlestep_ptid, old_ptid))
1551 singlestep_ptid = new_ptid;
1552
1553 if (ptid_equal (deferred_step_ptid, old_ptid))
1554 deferred_step_ptid = new_ptid;
1555
1556 for (displaced = displaced_step_inferior_states;
1557 displaced;
1558 displaced = displaced->next)
1559 {
1560 if (ptid_equal (displaced->step_ptid, old_ptid))
1561 displaced->step_ptid = new_ptid;
1562
1563 for (it = displaced->step_request_queue; it; it = it->next)
1564 if (ptid_equal (it->ptid, old_ptid))
1565 it->ptid = new_ptid;
1566 }
1567 }
1568
1569 \f
1570 /* Resuming. */
1571
1572 /* Things to clean up if we QUIT out of resume (). */
1573 static void
1574 resume_cleanups (void *ignore)
1575 {
1576 normal_stop ();
1577 }
1578
1579 static const char schedlock_off[] = "off";
1580 static const char schedlock_on[] = "on";
1581 static const char schedlock_step[] = "step";
1582 static const char *const scheduler_enums[] = {
1583 schedlock_off,
1584 schedlock_on,
1585 schedlock_step,
1586 NULL
1587 };
1588 static const char *scheduler_mode = schedlock_off;
1589 static void
1590 show_scheduler_mode (struct ui_file *file, int from_tty,
1591 struct cmd_list_element *c, const char *value)
1592 {
1593 fprintf_filtered (file,
1594 _("Mode for locking scheduler "
1595 "during execution is \"%s\".\n"),
1596 value);
1597 }
1598
1599 static void
1600 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1601 {
1602 if (!target_can_lock_scheduler)
1603 {
1604 scheduler_mode = schedlock_off;
1605 error (_("Target '%s' cannot support this command."), target_shortname);
1606 }
1607 }
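
/* Usage sketch (command spelling assumed; the values are the enum strings
   defined above):

     (gdb) set scheduler-locking off    -- default: all threads may run
     (gdb) set scheduler-locking on     -- only the current thread runs
     (gdb) set scheduler-locking step   -- lock only while stepping

   On targets that cannot lock the scheduler, set_schedlock_func above
   resets the mode to "off" and reports an error.  */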
1608
1609 /* True if execution commands resume all threads of all processes by
1610 default; otherwise, resume only threads of the current inferior
1611 process. */
1612 int sched_multi = 0;
1613
1614 /* Try to set up for software single stepping over the specified location.
1615 Return 1 if target_resume() should use hardware single step.
1616
1617 GDBARCH the current gdbarch.
1618 PC the location to step over. */
1619
1620 static int
1621 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1622 {
1623 int hw_step = 1;
1624
1625 if (execution_direction == EXEC_FORWARD
1626 && gdbarch_software_single_step_p (gdbarch)
1627 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1628 {
1629 hw_step = 0;
1630 /* Do not pull these breakpoints until after a `wait' in
1631 `wait_for_inferior'. */
1632 singlestep_breakpoints_inserted_p = 1;
1633 singlestep_ptid = inferior_ptid;
1634 singlestep_pc = pc;
1635 }
1636 return hw_step;
1637 }
1638
1639 /* Return a ptid representing the set of threads that we will proceed,
1640    from the perspective of the user/frontend.  We may actually resume
1641 fewer threads at first, e.g., if a thread is stopped at a
1642 breakpoint that needs stepping-off, but that should not be visible
1643 to the user/frontend, and neither should the frontend/user be
1644 allowed to proceed any of the threads that happen to be stopped for
1645 internal run control handling, if a previous command wanted them
1646 resumed. */
1647
1648 ptid_t
1649 user_visible_resume_ptid (int step)
1650 {
1651 /* By default, resume all threads of all processes. */
1652 ptid_t resume_ptid = RESUME_ALL;
1653
1654 /* Maybe resume only all threads of the current process. */
1655 if (!sched_multi && target_supports_multi_process ())
1656 {
1657 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1658 }
1659
1660 /* Maybe resume a single thread after all. */
1661 if (non_stop)
1662 {
1663 /* With non-stop mode on, threads are always handled
1664 individually. */
1665 resume_ptid = inferior_ptid;
1666 }
1667 else if ((scheduler_mode == schedlock_on)
1668 || (scheduler_mode == schedlock_step
1669 && (step || singlestep_breakpoints_inserted_p)))
1670 {
1671 /* User-settable 'scheduler' mode requires solo thread resume. */
1672 resume_ptid = inferior_ptid;
1673 }
1674
1675 return resume_ptid;
1676 }
1677
1678 /* Resume the inferior, but allow a QUIT. This is useful if the user
1679 wants to interrupt some lengthy single-stepping operation
1680 (for child processes, the SIGINT goes to the inferior, and so
1681 we get a SIGINT random_signal, but for remote debugging and perhaps
1682 other targets, that's not true).
1683
1684 STEP nonzero if we should step (zero to continue instead).
1685 SIG is the signal to give the inferior (zero for none). */
1686 void
1687 resume (int step, enum target_signal sig)
1688 {
1689 int should_resume = 1;
1690 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1691 struct regcache *regcache = get_current_regcache ();
1692 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1693 struct thread_info *tp = inferior_thread ();
1694 CORE_ADDR pc = regcache_read_pc (regcache);
1695 struct address_space *aspace = get_regcache_aspace (regcache);
1696
1697 QUIT;
1698
1699 if (current_inferior ()->waiting_for_vfork_done)
1700 {
1701 /* Don't try to single-step a vfork parent that is waiting for
1702 the child to get out of the shared memory region (by exec'ing
1703 or exiting). This is particularly important on software
1704 single-step archs, as the child process would trip on the
1705 software single step breakpoint inserted for the parent
1706 process. Since the parent will not actually execute any
1707 instruction until the child is out of the shared region (such
1708 are vfork's semantics), it is safe to simply continue it.
1709 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1710 the parent, and tell it to `keep_going', which automatically
1711 re-sets it to stepping. */
1712 if (debug_infrun)
1713 fprintf_unfiltered (gdb_stdlog,
1714 "infrun: resume : clear step\n");
1715 step = 0;
1716 }
1717
1718 if (debug_infrun)
1719 fprintf_unfiltered (gdb_stdlog,
1720 "infrun: resume (step=%d, signal=%d), "
1721 "trap_expected=%d, current thread [%s] at %s\n",
1722 step, sig, tp->control.trap_expected,
1723 target_pid_to_str (inferior_ptid),
1724 paddress (gdbarch, pc));
1725
1726 /* Normally, by the time we reach `resume', the breakpoints are either
1727 removed or inserted, as appropriate. The exception is if we're sitting
1728 at a permanent breakpoint; we need to step over it, but permanent
1729 breakpoints can't be removed. So we have to test for it here. */
1730 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1731 {
1732 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1733 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1734 else
1735 error (_("\
1736 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1737 how to step past a permanent breakpoint on this architecture. Try using\n\
1738 a command like `return' or `jump' to continue execution."));
1739 }
1740
1741 /* If enabled, step over breakpoints by executing a copy of the
1742 instruction at a different address.
1743
1744 We can't use displaced stepping when we have a signal to deliver;
1745 the comments for displaced_step_prepare explain why. The
1746 comments in the handle_inferior event for dealing with 'random
1747 signals' explain what we do instead.
1748
1749 We can't use displaced stepping when we are waiting for a vfork_done
1750 event; displaced stepping breaks the vfork child in the same way a
1751 software single-step breakpoint would. */
1752 if (use_displaced_stepping (gdbarch)
1753 && (tp->control.trap_expected
1754 || (step && gdbarch_software_single_step_p (gdbarch)))
1755 && sig == TARGET_SIGNAL_0
1756 && !current_inferior ()->waiting_for_vfork_done)
1757 {
1758 struct displaced_step_inferior_state *displaced;
1759
1760 if (!displaced_step_prepare (inferior_ptid))
1761 {
1762 /* Got placed in displaced stepping queue. Will be resumed
1763 later when all the currently queued displaced stepping
1764 requests finish. The thread is not executing at this point,
1765 and the call to set_executing will be made later. But we
1766 need to call set_running here, since from the frontend's point of view,
1767 the thread is running. */
1768 set_running (inferior_ptid, 1);
1769 discard_cleanups (old_cleanups);
1770 return;
1771 }
1772
1773 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1774 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1775 displaced->step_closure);
1776 }
1777
1778 /* Do we need to do it the hard way, w/temp breakpoints? */
1779 else if (step)
1780 step = maybe_software_singlestep (gdbarch, pc);
1781
1782 /* Currently, our software single-step implementation leads to different
1783 results than hardware single-stepping in one situation: when stepping
1784 into delivering a signal which has an associated signal handler,
1785 hardware single-step will stop at the first instruction of the handler,
1786 while software single-step will simply skip execution of the handler.
1787
1788 For now, this difference in behavior is accepted since there is no
1789 easy way to actually implement single-stepping into a signal handler
1790 without kernel support.
1791
1792 However, there is one scenario where this difference leads to follow-on
1793 problems: if we're stepping off a breakpoint by removing all breakpoints
1794 and then single-stepping. In this case, the software single-step
1795 behavior means that even if there is a *breakpoint* in the signal
1796 handler, GDB still would not stop.
1797
1798 Fortunately, we can at least fix this particular issue. We detect
1799 here the case where we are about to deliver a signal while software
1800 single-stepping with breakpoints removed. In this situation, we
1801 revert the decisions to remove all breakpoints and insert single-
1802 step breakpoints, and instead we install a step-resume breakpoint
1803 at the current address, deliver the signal without stepping, and
1804 once we arrive back at the step-resume breakpoint, actually step
1805 over the breakpoint we originally wanted to step over. */
1806 if (singlestep_breakpoints_inserted_p
1807 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1808 {
1809 /* If we have nested signals or a pending signal is delivered
1810 immediately after a handler returns, we might already have
1811 a step-resume breakpoint set on the earlier handler. We cannot
1812 set another step-resume breakpoint; just continue on until the
1813 original breakpoint is hit. */
1814 if (tp->control.step_resume_breakpoint == NULL)
1815 {
1816 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1817 tp->step_after_step_resume_breakpoint = 1;
1818 }
1819
1820 remove_single_step_breakpoints ();
1821 singlestep_breakpoints_inserted_p = 0;
1822
1823 insert_breakpoints ();
1824 tp->control.trap_expected = 0;
1825 }
1826
1827 if (should_resume)
1828 {
1829 ptid_t resume_ptid;
1830
1831 /* If STEP is set, it's a request to use hardware stepping
1832 facilities. But in that case, we should never
1833 use singlestep breakpoints. */
1834 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1835
1836 /* Decide the set of threads to ask the target to resume. Start
1837 by assuming everything will be resumed, then narrow the set
1838 by applying increasingly restrictive conditions. */
1839 resume_ptid = user_visible_resume_ptid (step);
1840
1841 /* Maybe resume a single thread after all. */
1842 if (singlestep_breakpoints_inserted_p
1843 && stepping_past_singlestep_breakpoint)
1844 {
1845 /* The situation here is as follows. In thread T1 we wanted to
1846 single-step. Lacking hardware single-stepping we've
1847 set breakpoint at the PC of the next instruction -- call it
1848 P. After resuming, we've hit that breakpoint in thread T2.
1849 Now we've removed the original breakpoint, inserted a breakpoint
1850 at P+1, and try to step to advance T2 past the breakpoint.
1851 We need to step only T2; if T1 is allowed to run freely,
1852 it can run past P, and if other threads are allowed to run,
1853 they can hit the breakpoint at P+1, and nested hits of single-step
1854 breakpoints are not something we'd want -- that's complicated
1855 to support, and has no value. */
1856 resume_ptid = inferior_ptid;
1857 }
1858 else if ((step || singlestep_breakpoints_inserted_p)
1859 && tp->control.trap_expected)
1860 {
1861 /* We're allowing a thread to run past a breakpoint it has
1862 hit, by single-stepping the thread with the breakpoint
1863 removed. In which case, we need to single-step only this
1864 thread, and keep others stopped, as they can miss this
1865 breakpoint if allowed to run.
1866
1867 The current code actually removes all breakpoints when
1868 doing this, not just the one being stepped over, so if we
1869 let other threads run, we can actually miss any
1870 breakpoint, not just the one at PC. */
1871 resume_ptid = inferior_ptid;
1872 }
1873
1874 if (gdbarch_cannot_step_breakpoint (gdbarch))
1875 {
1876 /* Most targets can step a breakpoint instruction, thus
1877 executing it normally. But if this one cannot, just
1878 continue and we will hit it anyway. */
1879 if (step && breakpoint_inserted_here_p (aspace, pc))
1880 step = 0;
1881 }
1882
1883 if (debug_displaced
1884 && use_displaced_stepping (gdbarch)
1885 && tp->control.trap_expected)
1886 {
1887 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1888 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1889 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1890 gdb_byte buf[4];
1891
1892 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1893 paddress (resume_gdbarch, actual_pc));
1894 read_memory (actual_pc, buf, sizeof (buf));
1895 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1896 }
1897
1898 /* Install inferior's terminal modes. */
1899 target_terminal_inferior ();
1900
1901 /* Avoid confusing the next resume, if the next stop/resume
1902 happens to apply to another thread. */
1903 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1904
1905 /* Advise target which signals may be handled silently. If we have
1906 removed breakpoints because we are stepping over one (which can
1907 happen only if we are not using displaced stepping), we need to
1908 receive all signals to avoid accidentally skipping a breakpoint
1909 during execution of a signal handler. */
1910 if ((step || singlestep_breakpoints_inserted_p)
1911 && tp->control.trap_expected
1912 && !use_displaced_stepping (gdbarch))
1913 target_pass_signals (0, NULL);
1914 else
1915 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1916
1917 target_resume (resume_ptid, step, sig);
1918 }
1919
1920 discard_cleanups (old_cleanups);
1921 }
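
/* Illustrative sketch only, not part of GDB proper (the function name
   below is hypothetical): the simplest use of resume -- let the
   current thread continue without stepping and without delivering a
   signal, the same call handle_inferior_event makes further down when
   it just wants the inferior to keep running.  */
#if 0
static void
example_plain_continue (void)
{
  resume (0, TARGET_SIGNAL_0);
}
#endif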
1922 \f
1923 /* Proceeding. */
1924
1925 /* Clear out all variables saying what to do when inferior is continued.
1926 First do this, then set the ones you want, then call `proceed'. */
1927
1928 static void
1929 clear_proceed_status_thread (struct thread_info *tp)
1930 {
1931 if (debug_infrun)
1932 fprintf_unfiltered (gdb_stdlog,
1933 "infrun: clear_proceed_status_thread (%s)\n",
1934 target_pid_to_str (tp->ptid));
1935
1936 tp->control.trap_expected = 0;
1937 tp->control.step_range_start = 0;
1938 tp->control.step_range_end = 0;
1939 tp->control.step_frame_id = null_frame_id;
1940 tp->control.step_stack_frame_id = null_frame_id;
1941 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1942 tp->stop_requested = 0;
1943
1944 tp->control.stop_step = 0;
1945
1946 tp->control.proceed_to_finish = 0;
1947
1948 /* Discard any remaining commands or status from previous stop. */
1949 bpstat_clear (&tp->control.stop_bpstat);
1950 }
1951
1952 static int
1953 clear_proceed_status_callback (struct thread_info *tp, void *data)
1954 {
1955 if (is_exited (tp->ptid))
1956 return 0;
1957
1958 clear_proceed_status_thread (tp);
1959 return 0;
1960 }
1961
1962 void
1963 clear_proceed_status (void)
1964 {
1965 if (!non_stop)
1966 {
1967 /* In all-stop mode, delete the per-thread status of all
1968 threads, even if inferior_ptid is null_ptid, there may be
1969 threads on the list. E.g., we may be launching a new
1970 process, while selecting the executable. */
1971 iterate_over_threads (clear_proceed_status_callback, NULL);
1972 }
1973
1974 if (!ptid_equal (inferior_ptid, null_ptid))
1975 {
1976 struct inferior *inferior;
1977
1978 if (non_stop)
1979 {
1980 /* If in non-stop mode, only delete the per-thread status of
1981 the current thread. */
1982 clear_proceed_status_thread (inferior_thread ());
1983 }
1984
1985 inferior = current_inferior ();
1986 inferior->control.stop_soon = NO_STOP_QUIETLY;
1987 }
1988
1989 stop_after_trap = 0;
1990
1991 observer_notify_about_to_proceed ();
1992
1993 if (stop_registers)
1994 {
1995 regcache_xfree (stop_registers);
1996 stop_registers = NULL;
1997 }
1998 }
1999
2000 /* Check the current thread against the thread that reported the most recent
2001 event. If a step-over is required, return TRUE and set the current thread
2002 to the old thread. Otherwise return FALSE.
2003
2004 This should be suitable for any targets that support threads. */
2005
2006 static int
2007 prepare_to_proceed (int step)
2008 {
2009 ptid_t wait_ptid;
2010 struct target_waitstatus wait_status;
2011 int schedlock_enabled;
2012
2013 /* With non-stop mode on, threads are always handled individually. */
2014 gdb_assert (! non_stop);
2015
2016 /* Get the last target status returned by target_wait(). */
2017 get_last_target_status (&wait_ptid, &wait_status);
2018
2019 /* Make sure we were stopped at a breakpoint. */
2020 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2021 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
2022 && wait_status.value.sig != TARGET_SIGNAL_ILL
2023 && wait_status.value.sig != TARGET_SIGNAL_SEGV
2024 && wait_status.value.sig != TARGET_SIGNAL_EMT))
2025 {
2026 return 0;
2027 }
2028
2029 schedlock_enabled = (scheduler_mode == schedlock_on
2030 || (scheduler_mode == schedlock_step
2031 && step));
2032
2033 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2034 if (schedlock_enabled)
2035 return 0;
2036
2037 /* Don't switch over if we're about to resume some process other
2038 than WAIT_PTID's, and schedule-multiple is off. */
2039 if (!sched_multi
2040 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2041 return 0;
2042
2043 /* Maybe switch over to WAIT_PTID's thread. */
2044 if (!ptid_equal (wait_ptid, minus_one_ptid)
2045 && !ptid_equal (inferior_ptid, wait_ptid))
2046 {
2047 struct regcache *regcache = get_thread_regcache (wait_ptid);
2048
2049 if (breakpoint_here_p (get_regcache_aspace (regcache),
2050 regcache_read_pc (regcache)))
2051 {
2052 /* If stepping, remember current thread to switch back to. */
2053 if (step)
2054 deferred_step_ptid = inferior_ptid;
2055
2056 /* Switch back to the WAIT_PTID thread. */
2057 switch_to_thread (wait_ptid);
2058
2059 if (debug_infrun)
2060 fprintf_unfiltered (gdb_stdlog,
2061 "infrun: prepare_to_proceed (step=%d), "
2062 "switched to [%s]\n",
2063 step, target_pid_to_str (inferior_ptid));
2064
2065 /* We return 1 to indicate that there is a breakpoint here,
2066 so we need to step over it before continuing to avoid
2067 hitting it straight away. */
2068 return 1;
2069 }
2070 }
2071
2072 return 0;
2073 }
2074
2075 /* Basic routine for continuing the program in various fashions.
2076
2077 ADDR is the address to resume at, or -1 for resume where stopped.
2078 SIGGNAL is the signal to give it, or 0 for none,
2079 or -1 for act according to how it stopped.
2080 STEP is nonzero if we should trap after one instruction.
2081 -1 means return after that and print nothing.
2082 You should probably set various step_... variables
2083 before calling here, if you are stepping.
2084
2085 You should call clear_proceed_status before calling proceed. */
2086
2087 void
2088 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2089 {
2090 struct regcache *regcache;
2091 struct gdbarch *gdbarch;
2092 struct thread_info *tp;
2093 CORE_ADDR pc;
2094 struct address_space *aspace;
2095 int oneproc = 0;
2096
2097 /* If we're stopped at a fork/vfork, follow the branch set by the
2098 "set follow-fork-mode" command; otherwise, we'll just proceed
2099 resuming the current thread. */
2100 if (!follow_fork ())
2101 {
2102 /* The target for some reason decided not to resume. */
2103 normal_stop ();
2104 if (target_can_async_p ())
2105 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2106 return;
2107 }
2108
2109 /* We'll update this if & when we switch to a new thread. */
2110 previous_inferior_ptid = inferior_ptid;
2111
2112 regcache = get_current_regcache ();
2113 gdbarch = get_regcache_arch (regcache);
2114 aspace = get_regcache_aspace (regcache);
2115 pc = regcache_read_pc (regcache);
2116
2117 if (step > 0)
2118 step_start_function = find_pc_function (pc);
2119 if (step < 0)
2120 stop_after_trap = 1;
2121
2122 if (addr == (CORE_ADDR) -1)
2123 {
2124 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2125 && execution_direction != EXEC_REVERSE)
2126 /* There is a breakpoint at the address we will resume at,
2127 step one instruction before inserting breakpoints so that
2128 we do not stop right away (and report a second hit at this
2129 breakpoint).
2130
2131 Note, we don't do this in reverse, because we won't
2132 actually be executing the breakpoint insn anyway.
2133 We'll be (un-)executing the previous instruction. */
2134
2135 oneproc = 1;
2136 else if (gdbarch_single_step_through_delay_p (gdbarch)
2137 && gdbarch_single_step_through_delay (gdbarch,
2138 get_current_frame ()))
2139 /* We stepped onto an instruction that needs to be stepped
2140 again before re-inserting the breakpoint, do so. */
2141 oneproc = 1;
2142 }
2143 else
2144 {
2145 regcache_write_pc (regcache, addr);
2146 }
2147
2148 if (debug_infrun)
2149 fprintf_unfiltered (gdb_stdlog,
2150 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2151 paddress (gdbarch, addr), siggnal, step);
2152
2153 if (non_stop)
2154 /* In non-stop, each thread is handled individually. The context
2155 must already be set to the right thread here. */
2156 ;
2157 else
2158 {
2159 /* In a multi-threaded task we may select another thread and
2160 then continue or step.
2161
2162 But if the old thread was stopped at a breakpoint, it will
2163 immediately cause another breakpoint stop without any
2164 execution (i.e. it will report a breakpoint hit incorrectly).
2165 So we must step over it first.
2166
2167 prepare_to_proceed checks the current thread against the
2168 thread that reported the most recent event. If a step-over
2169 is required it returns TRUE and sets the current thread to
2170 the old thread. */
2171 if (prepare_to_proceed (step))
2172 oneproc = 1;
2173 }
2174
2175 /* prepare_to_proceed may change the current thread. */
2176 tp = inferior_thread ();
2177
2178 if (oneproc)
2179 {
2180 tp->control.trap_expected = 1;
2181 /* If displaced stepping is enabled, we can step over the
2182 breakpoint without hitting it, so leave all breakpoints
2183 inserted. Otherwise we need to disable all breakpoints, step
2184 one instruction, and then re-add them when that step is
2185 finished. */
2186 if (!use_displaced_stepping (gdbarch))
2187 remove_breakpoints ();
2188 }
2189
2190 /* We can insert breakpoints if we're not trying to step over one,
2191 or if we are stepping over one but we're using displaced stepping
2192 to do so. */
2193 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2194 insert_breakpoints ();
2195
2196 if (!non_stop)
2197 {
2198 /* Pass the last stop signal to the thread we're resuming,
2199 irrespective of whether the current thread is the thread that
2200 got the last event or not. This was historically GDB's
2201 behaviour before keeping a stop_signal per thread. */
2202
2203 struct thread_info *last_thread;
2204 ptid_t last_ptid;
2205 struct target_waitstatus last_status;
2206
2207 get_last_target_status (&last_ptid, &last_status);
2208 if (!ptid_equal (inferior_ptid, last_ptid)
2209 && !ptid_equal (last_ptid, null_ptid)
2210 && !ptid_equal (last_ptid, minus_one_ptid))
2211 {
2212 last_thread = find_thread_ptid (last_ptid);
2213 if (last_thread)
2214 {
2215 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2216 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2217 }
2218 }
2219 }
2220
2221 if (siggnal != TARGET_SIGNAL_DEFAULT)
2222 tp->suspend.stop_signal = siggnal;
2223 /* If this signal should not be seen by program,
2224 give it zero. Used for debugging signals. */
2225 else if (!signal_program[tp->suspend.stop_signal])
2226 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2227
2228 annotate_starting ();
2229
2230 /* Make sure that output from GDB appears before output from the
2231 inferior. */
2232 gdb_flush (gdb_stdout);
2233
2234 /* Refresh prev_pc value just prior to resuming. This used to be
2235 done in stop_stepping, however, setting prev_pc there did not handle
2236 scenarios such as inferior function calls or returning from
2237 a function via the return command. In those cases, the prev_pc
2238 value was not set properly for subsequent commands. The prev_pc value
2239 is used to initialize the starting line number in the ecs. With an
2240 invalid value, the gdb next command ends up stopping at the position
2241 represented by the next line table entry past our start position.
2242 On platforms that generate one line table entry per line, this
2243 is not a problem. However, on the ia64, the compiler generates
2244 extraneous line table entries that do not increase the line number.
2245 When we issue the gdb next command on the ia64 after an inferior call
2246 or a return command, we often end up a few instructions forward, still
2247 within the original line we started in.
2248
2249 An attempt was made to refresh the prev_pc at the same time the
2250 execution_control_state is initialized (for instance, just before
2251 waiting for an inferior event). But this approach did not work
2252 because of platforms that use ptrace, where the pc register cannot
2253 be read unless the inferior is stopped. At that point, we are not
2254 guaranteed the inferior is stopped and so the regcache_read_pc() call
2255 can fail. Setting the prev_pc value here ensures the value is updated
2256 correctly when the inferior is stopped. */
2257 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2258
2259 /* Fill in with reasonable starting values. */
2260 init_thread_stepping_state (tp);
2261
2262 /* Reset to normal state. */
2263 init_infwait_state ();
2264
2265 /* Resume inferior. */
2266 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2267
2268 /* Wait for it to stop (if not standalone)
2269 and in any case decode why it stopped, and act accordingly. */
2270 /* Do this only if we are not using the event loop, or if the target
2271 does not support asynchronous execution. */
2272 if (!target_can_async_p ())
2273 {
2274 wait_for_inferior ();
2275 normal_stop ();
2276 }
2277 }
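
/* Illustrative sketch only, not part of GDB proper (the function name
   below is hypothetical): the calling convention documented above --
   clear the per-thread proceed status first, then call proceed.
   (CORE_ADDR) -1 means "resume where stopped" and TARGET_SIGNAL_DEFAULT
   means "act according to how the inferior stopped".  */
#if 0
static void
example_plain_proceed (void)
{
  clear_proceed_status ();
  /* A stepping command would set the step_... fields of the current
     thread between these two calls.  */
  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
}
#endif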
2278 \f
2279
2280 /* Start remote-debugging of a machine over a serial link. */
2281
2282 void
2283 start_remote (int from_tty)
2284 {
2285 struct inferior *inferior;
2286
2287 inferior = current_inferior ();
2288 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2289
2290 /* Always go on waiting for the target, regardless of the mode. */
2291 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2292 indicate to wait_for_inferior that a target should timeout if
2293 nothing is returned (instead of just blocking). Because of this,
2294 targets expecting an immediate response need to, internally, set
2295 things up so that the target_wait() is forced to eventually
2296 timeout. */
2297 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2298 differentiate to its caller what the state of the target is after
2299 the initial open has been performed. Here we're assuming that
2300 the target has stopped. It should be possible to eventually have
2301 target_open() return to the caller an indication that the target
2302 is currently running and GDB state should be set to the same as
2303 for an async run. */
2304 wait_for_inferior ();
2305
2306 /* Now that the inferior has stopped, do any bookkeeping like
2307 loading shared libraries. We want to do this before normal_stop,
2308 so that the displayed frame is up to date. */
2309 post_create_inferior (&current_target, from_tty);
2310
2311 normal_stop ();
2312 }
2313
2314 /* Initialize static vars when a new inferior begins. */
2315
2316 void
2317 init_wait_for_inferior (void)
2318 {
2319 /* These are meaningless until the first time through wait_for_inferior. */
2320
2321 breakpoint_init_inferior (inf_starting);
2322
2323 clear_proceed_status ();
2324
2325 stepping_past_singlestep_breakpoint = 0;
2326 deferred_step_ptid = null_ptid;
2327
2328 target_last_wait_ptid = minus_one_ptid;
2329
2330 previous_inferior_ptid = inferior_ptid;
2331 init_infwait_state ();
2332
2333 /* Discard any skipped inlined frames. */
2334 clear_inline_frame_state (minus_one_ptid);
2335 }
2336
2337 \f
2338 /* This enum encodes possible reasons for doing a target_wait, so that
2339 wfi can call target_wait in one place. (Ultimately the call will be
2340 moved out of the infinite loop entirely.) */
2341
2342 enum infwait_states
2343 {
2344 infwait_normal_state,
2345 infwait_thread_hop_state,
2346 infwait_step_watch_state,
2347 infwait_nonstep_watch_state
2348 };
2349
2350 /* The PTID we'll do a target_wait on. */
2351 ptid_t waiton_ptid;
2352
2353 /* Current inferior wait state. */
2354 enum infwait_states infwait_state;
2355
2356 /* Data to be passed around while handling an event. This data is
2357 discarded between events. */
2358 struct execution_control_state
2359 {
2360 ptid_t ptid;
2361 /* The thread that got the event, if this was a thread event; NULL
2362 otherwise. */
2363 struct thread_info *event_thread;
2364
2365 struct target_waitstatus ws;
2366 int random_signal;
2367 int stop_func_filled_in;
2368 CORE_ADDR stop_func_start;
2369 CORE_ADDR stop_func_end;
2370 char *stop_func_name;
2371 int new_thread_event;
2372 int wait_some_more;
2373 };
2374
2375 static void handle_inferior_event (struct execution_control_state *ecs);
2376
2377 static void handle_step_into_function (struct gdbarch *gdbarch,
2378 struct execution_control_state *ecs);
2379 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2380 struct execution_control_state *ecs);
2381 static void check_exception_resume (struct execution_control_state *,
2382 struct frame_info *, struct symbol *);
2383
2384 static void stop_stepping (struct execution_control_state *ecs);
2385 static void prepare_to_wait (struct execution_control_state *ecs);
2386 static void keep_going (struct execution_control_state *ecs);
2387
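/* Illustrative sketch only, not part of GDB proper (the function name
   below is hypothetical): the pattern used by the callers in this file
   for driving one event through the machinery -- zero a stack-allocated
   execution_control_state, fill in the wait result, and hand it to
   handle_inferior_event.  */
#if 0
static void
example_handle_one_event (void)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  memset (ecs, 0, sizeof (*ecs));
  ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
  handle_inferior_event (ecs);
}
#endif
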
2388 /* Callback for iterate over threads. If the thread is stopped, but
2389 the user/frontend doesn't know about that yet, go through
2390 normal_stop, as if the thread had just stopped now. ARG points at
2391 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2392 ptid_is_pid(PTID) is true, applies to all threads of the process
2393 pointed at by PTID. Otherwise, apply only to the thread pointed by
2394 PTID. */
2395
2396 static int
2397 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2398 {
2399 ptid_t ptid = * (ptid_t *) arg;
2400
2401 if ((ptid_equal (info->ptid, ptid)
2402 || ptid_equal (minus_one_ptid, ptid)
2403 || (ptid_is_pid (ptid)
2404 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2405 && is_running (info->ptid)
2406 && !is_executing (info->ptid))
2407 {
2408 struct cleanup *old_chain;
2409 struct execution_control_state ecss;
2410 struct execution_control_state *ecs = &ecss;
2411
2412 memset (ecs, 0, sizeof (*ecs));
2413
2414 old_chain = make_cleanup_restore_current_thread ();
2415
2416 switch_to_thread (info->ptid);
2417
2418 /* Go through handle_inferior_event/normal_stop, so we always
2419 have consistent output as if the stop event had been
2420 reported. */
2421 ecs->ptid = info->ptid;
2422 ecs->event_thread = find_thread_ptid (info->ptid);
2423 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2424 ecs->ws.value.sig = TARGET_SIGNAL_0;
2425
2426 handle_inferior_event (ecs);
2427
2428 if (!ecs->wait_some_more)
2429 {
2430 struct thread_info *tp;
2431
2432 normal_stop ();
2433
2434 /* Finish off the continuations. */
2435 tp = inferior_thread ();
2436 do_all_intermediate_continuations_thread (tp, 1);
2437 do_all_continuations_thread (tp, 1);
2438 }
2439
2440 do_cleanups (old_chain);
2441 }
2442
2443 return 0;
2444 }
2445
2446 /* This function is attached as a "thread_stop_requested" observer.
2447 Cleanup local state that assumed the PTID was to be resumed, and
2448 report the stop to the frontend. */
2449
2450 static void
2451 infrun_thread_stop_requested (ptid_t ptid)
2452 {
2453 struct displaced_step_inferior_state *displaced;
2454
2455 /* PTID was requested to stop. Remove it from the displaced
2456 stepping queue, so we don't try to resume it automatically. */
2457
2458 for (displaced = displaced_step_inferior_states;
2459 displaced;
2460 displaced = displaced->next)
2461 {
2462 struct displaced_step_request *it, **prev_next_p;
2463
2464 it = displaced->step_request_queue;
2465 prev_next_p = &displaced->step_request_queue;
2466 while (it)
2467 {
2468 if (ptid_match (it->ptid, ptid))
2469 {
2470 *prev_next_p = it->next;
2471 it->next = NULL;
2472 xfree (it);
2473 }
2474 else
2475 {
2476 prev_next_p = &it->next;
2477 }
2478
2479 it = *prev_next_p;
2480 }
2481 }
2482
2483 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2484 }
2485
2486 static void
2487 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2488 {
2489 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2490 nullify_last_target_wait_ptid ();
2491 }
2492
2493 /* Callback for iterate_over_threads. */
2494
2495 static int
2496 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2497 {
2498 if (is_exited (info->ptid))
2499 return 0;
2500
2501 delete_step_resume_breakpoint (info);
2502 delete_exception_resume_breakpoint (info);
2503 return 0;
2504 }
2505
2506 /* In all-stop, delete the step resume breakpoint of any thread that
2507 had one. In non-stop, delete the step resume breakpoint of the
2508 thread that just stopped. */
2509
2510 static void
2511 delete_step_thread_step_resume_breakpoint (void)
2512 {
2513 if (!target_has_execution
2514 || ptid_equal (inferior_ptid, null_ptid))
2515 /* If the inferior has exited, we have already deleted the step
2516 resume breakpoints out of GDB's lists. */
2517 return;
2518
2519 if (non_stop)
2520 {
2521 /* If in non-stop mode, only delete the step-resume or
2522 longjmp-resume breakpoint of the thread that just stopped
2523 stepping. */
2524 struct thread_info *tp = inferior_thread ();
2525
2526 delete_step_resume_breakpoint (tp);
2527 delete_exception_resume_breakpoint (tp);
2528 }
2529 else
2530 /* In all-stop mode, delete all step-resume and longjmp-resume
2531 breakpoints of any thread that had them. */
2532 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2533 }
2534
2535 /* A cleanup wrapper. */
2536
2537 static void
2538 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2539 {
2540 delete_step_thread_step_resume_breakpoint ();
2541 }
2542
2543 /* Pretty print the results of target_wait, for debugging purposes. */
2544
2545 static void
2546 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2547 const struct target_waitstatus *ws)
2548 {
2549 char *status_string = target_waitstatus_to_string (ws);
2550 struct ui_file *tmp_stream = mem_fileopen ();
2551 char *text;
2552
2553 /* The text is split over several lines because it was getting too long.
2554 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2555 output as a unit; we want only one timestamp printed if debug_timestamp
2556 is set. */
2557
2558 fprintf_unfiltered (tmp_stream,
2559 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2560 if (PIDGET (waiton_ptid) != -1)
2561 fprintf_unfiltered (tmp_stream,
2562 " [%s]", target_pid_to_str (waiton_ptid));
2563 fprintf_unfiltered (tmp_stream, ", status) =\n");
2564 fprintf_unfiltered (tmp_stream,
2565 "infrun: %d [%s],\n",
2566 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2567 fprintf_unfiltered (tmp_stream,
2568 "infrun: %s\n",
2569 status_string);
2570
2571 text = ui_file_xstrdup (tmp_stream, NULL);
2572
2573 /* This uses %s in part to handle %'s in the text, but also to avoid
2574 a gcc error: the format attribute requires a string literal. */
2575 fprintf_unfiltered (gdb_stdlog, "%s", text);
2576
2577 xfree (status_string);
2578 xfree (text);
2579 ui_file_delete (tmp_stream);
2580 }
2581
2582 /* Prepare and stabilize the inferior for detaching it. E.g.,
2583 detaching while a thread is displaced stepping is a recipe for
2584 crashing it, as nothing would readjust the PC out of the scratch
2585 pad. */
2586
2587 void
2588 prepare_for_detach (void)
2589 {
2590 struct inferior *inf = current_inferior ();
2591 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2592 struct cleanup *old_chain_1;
2593 struct displaced_step_inferior_state *displaced;
2594
2595 displaced = get_displaced_stepping_state (inf->pid);
2596
2597 /* Is any thread of this process displaced stepping? If not,
2598 there's nothing else to do. */
2599 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2600 return;
2601
2602 if (debug_infrun)
2603 fprintf_unfiltered (gdb_stdlog,
2604 "displaced-stepping in-process while detaching");
2605
2606 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2607 inf->detaching = 1;
2608
2609 while (!ptid_equal (displaced->step_ptid, null_ptid))
2610 {
2611 struct cleanup *old_chain_2;
2612 struct execution_control_state ecss;
2613 struct execution_control_state *ecs;
2614
2615 ecs = &ecss;
2616 memset (ecs, 0, sizeof (*ecs));
2617
2618 overlay_cache_invalid = 1;
2619
2620 if (deprecated_target_wait_hook)
2621 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2622 else
2623 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2624
2625 if (debug_infrun)
2626 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2627
2628 /* If an error happens while handling the event, propagate GDB's
2629 knowledge of the executing state to the frontend/user running
2630 state. */
2631 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2632 &minus_one_ptid);
2633
2634 /* In non-stop mode, each thread is handled individually.
2635 Switch early, so the global state is set correctly for this
2636 thread. */
2637 if (non_stop
2638 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2639 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2640 context_switch (ecs->ptid);
2641
2642 /* Now figure out what to do with the result of the wait. */
2643 handle_inferior_event (ecs);
2644
2645 /* No error, don't finish the state yet. */
2646 discard_cleanups (old_chain_2);
2647
2648 /* Breakpoints and watchpoints are not installed on the target
2649 at this point, and signals are passed directly to the
2650 inferior, so this must mean the process is gone. */
2651 if (!ecs->wait_some_more)
2652 {
2653 discard_cleanups (old_chain_1);
2654 error (_("Program exited while detaching"));
2655 }
2656 }
2657
2658 discard_cleanups (old_chain_1);
2659 }
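
/* Illustrative sketch only, not part of GDB proper (the function name
   below is hypothetical, and the exact detach interface is an
   assumption): a detach path would typically call prepare_for_detach
   first, so that any displaced-stepping thread is stabilized before
   the target connection goes away.  */
#if 0
static void
example_detach (char *args, int from_tty)
{
  prepare_for_detach ();
  target_detach (args, from_tty);
}
#endif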
2660
2661 /* Wait for control to return from inferior to debugger.
2662
2663 If inferior gets a signal, we may decide to start it up again
2664 instead of returning. That is why there is a loop in this function.
2665 When this function actually returns it means the inferior
2666 should be left stopped and GDB should read more commands. */
2667
2668 void
2669 wait_for_inferior (void)
2670 {
2671 struct cleanup *old_cleanups;
2672 struct execution_control_state ecss;
2673 struct execution_control_state *ecs;
2674
2675 if (debug_infrun)
2676 fprintf_unfiltered
2677 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2678
2679 old_cleanups =
2680 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2681
2682 ecs = &ecss;
2683 memset (ecs, 0, sizeof (*ecs));
2684
2685 while (1)
2686 {
2687 struct cleanup *old_chain;
2688
2689 overlay_cache_invalid = 1;
2690
2691 if (deprecated_target_wait_hook)
2692 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2693 else
2694 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2695
2696 if (debug_infrun)
2697 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2698
2699 /* If an error happens while handling the event, propagate GDB's
2700 knowledge of the executing state to the frontend/user running
2701 state. */
2702 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2703
2704 /* Now figure out what to do with the result of the wait. */
2705 handle_inferior_event (ecs);
2706
2707 /* No error, don't finish the state yet. */
2708 discard_cleanups (old_chain);
2709
2710 if (!ecs->wait_some_more)
2711 break;
2712 }
2713
2714 do_cleanups (old_cleanups);
2715 }
2716
2717 /* Asynchronous version of wait_for_inferior. It is called by the
2718 event loop whenever a change of state is detected on the file
2719 descriptor corresponding to the target. It can be called more than
2720 once to complete a single execution command. In such cases we need
2721 to keep the state in a global variable ECSS. If it is the last time
2722 that this function is called for a single execution command, then
2723 report to the user that the inferior has stopped, and do the
2724 necessary cleanups. */
2725
2726 void
2727 fetch_inferior_event (void *client_data)
2728 {
2729 struct execution_control_state ecss;
2730 struct execution_control_state *ecs = &ecss;
2731 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2732 struct cleanup *ts_old_chain;
2733 int was_sync = sync_execution;
2734 int cmd_done = 0;
2735
2736 memset (ecs, 0, sizeof (*ecs));
2737
2738 /* We're handling a live event, so make sure we're doing live
2739 debugging. If we're looking at traceframes while the target is
2740 running, we're going to need to get back to that mode after
2741 handling the event. */
2742 if (non_stop)
2743 {
2744 make_cleanup_restore_current_traceframe ();
2745 set_current_traceframe (-1);
2746 }
2747
2748 if (non_stop)
2749 /* In non-stop mode, the user/frontend should not notice a thread
2750 switch due to internal events. Make sure we revert to the
2751 user selected thread and frame after handling the event and
2752 running any breakpoint commands. */
2753 make_cleanup_restore_current_thread ();
2754
2755 overlay_cache_invalid = 1;
2756
2757 make_cleanup_restore_integer (&execution_direction);
2758 execution_direction = target_execution_direction ();
2759
2760 if (deprecated_target_wait_hook)
2761 ecs->ptid =
2762 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2763 else
2764 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2765
2766 if (debug_infrun)
2767 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2768
2769 if (non_stop
2770 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2771 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2772 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2773 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2774 /* In non-stop mode, each thread is handled individually. Switch
2775 early, so the global state is set correctly for this
2776 thread. */
2777 context_switch (ecs->ptid);
2778
2779 /* If an error happens while handling the event, propagate GDB's
2780 knowledge of the executing state to the frontend/user running
2781 state. */
2782 if (!non_stop)
2783 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2784 else
2785 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2786
2787 /* This cleanup runs before make_cleanup_restore_current_thread above,
2788 so it still applies to the thread which has thrown the exception. */
2789 make_bpstat_clear_actions_cleanup ();
2790
2791 /* Now figure out what to do with the result of the wait. */
2792 handle_inferior_event (ecs);
2793
2794 if (!ecs->wait_some_more)
2795 {
2796 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2797
2798 delete_step_thread_step_resume_breakpoint ();
2799
2800 /* We may not find an inferior if this was a process exit. */
2801 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2802 normal_stop ();
2803
2804 if (target_has_execution
2805 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2806 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2807 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2808 && ecs->event_thread->step_multi
2809 && ecs->event_thread->control.stop_step)
2810 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2811 else
2812 {
2813 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2814 cmd_done = 1;
2815 }
2816 }
2817
2818 /* No error, don't finish the thread states yet. */
2819 discard_cleanups (ts_old_chain);
2820
2821 /* Revert thread and frame. */
2822 do_cleanups (old_chain);
2823
2824 /* If the inferior was in sync execution mode, and now isn't,
2825 restore the prompt (a synchronous execution command has finished,
2826 and we're ready for input). */
2827 if (interpreter_async && was_sync && !sync_execution)
2828 display_gdb_prompt (0);
2829
2830 if (cmd_done
2831 && !was_sync
2832 && exec_done_display_p
2833 && (ptid_equal (inferior_ptid, null_ptid)
2834 || !is_running (inferior_ptid)))
2835 printf_unfiltered (_("completed.\n"));
2836 }
2837
2838 /* Record the frame and location we're currently stepping through. */
2839 void
2840 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2841 {
2842 struct thread_info *tp = inferior_thread ();
2843
2844 tp->control.step_frame_id = get_frame_id (frame);
2845 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2846
2847 tp->current_symtab = sal.symtab;
2848 tp->current_line = sal.line;
2849 }
2850
2851 /* Clear context switchable stepping state. */
2852
2853 void
2854 init_thread_stepping_state (struct thread_info *tss)
2855 {
2856 tss->stepping_over_breakpoint = 0;
2857 tss->step_after_step_resume_breakpoint = 0;
2858 }
2859
2860 /* Return the cached copy of the last pid/waitstatus returned by
2861 target_wait()/deprecated_target_wait_hook(). The data is actually
2862 cached by handle_inferior_event(), which gets called immediately
2863 after target_wait()/deprecated_target_wait_hook(). */
2864
2865 void
2866 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2867 {
2868 *ptidp = target_last_wait_ptid;
2869 *status = target_last_waitstatus;
2870 }
2871
2872 void
2873 nullify_last_target_wait_ptid (void)
2874 {
2875 target_last_wait_ptid = minus_one_ptid;
2876 }
2877
2878 /* Switch thread contexts. */
2879
2880 static void
2881 context_switch (ptid_t ptid)
2882 {
2883 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2884 {
2885 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2886 target_pid_to_str (inferior_ptid));
2887 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2888 target_pid_to_str (ptid));
2889 }
2890
2891 switch_to_thread (ptid);
2892 }
2893
2894 static void
2895 adjust_pc_after_break (struct execution_control_state *ecs)
2896 {
2897 struct regcache *regcache;
2898 struct gdbarch *gdbarch;
2899 struct address_space *aspace;
2900 CORE_ADDR breakpoint_pc;
2901
2902 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2903 we aren't, just return.
2904
2905 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2906 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2907 implemented by software breakpoints should be handled through the normal
2908 breakpoint layer.
2909
2910 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2911 different signals (SIGILL or SIGEMT for instance), but it is less
2912 clear where the PC is pointing afterwards. It may not match
2913 gdbarch_decr_pc_after_break. I don't know any specific target that
2914 generates these signals at breakpoints (the code has been in GDB since at
2915 least 1992) so I can not guess how to handle them here.
2916
2917 In earlier versions of GDB, a target with
2918 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2919 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2920 target with both of these set in GDB history, and it seems unlikely to be
2921 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2922
2923 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2924 return;
2925
2926 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2927 return;
2928
2929 /* In reverse execution, when a breakpoint is hit, the instruction
2930 under it has already been de-executed. The reported PC always
2931 points at the breakpoint address, so adjusting it further would
2932 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2933 architecture:
2934
2935 B1 0x08000000 : INSN1
2936 B2 0x08000001 : INSN2
2937 0x08000002 : INSN3
2938 PC -> 0x08000003 : INSN4
2939
2940 Say you're stopped at 0x08000003 as above. Reverse continuing
2941 from that point should hit B2 as below. Reading the PC when the
2942 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2943 been de-executed already.
2944
2945 B1 0x08000000 : INSN1
2946 B2 PC -> 0x08000001 : INSN2
2947 0x08000002 : INSN3
2948 0x08000003 : INSN4
2949
2950 We can't apply the same logic as for forward execution, because
2951 we would wrongly adjust the PC to 0x08000000, since there's a
2952 breakpoint at PC - 1. We'd then report a hit on B1, although
2953 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2954 behaviour. */
2955 if (execution_direction == EXEC_REVERSE)
2956 return;
2957
2958 /* If this target does not decrement the PC after breakpoints, then
2959 we have nothing to do. */
2960 regcache = get_thread_regcache (ecs->ptid);
2961 gdbarch = get_regcache_arch (regcache);
2962 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2963 return;
2964
2965 aspace = get_regcache_aspace (regcache);
2966
2967 /* Find the location where (if we've hit a breakpoint) the
2968 breakpoint would be. */
2969 breakpoint_pc = regcache_read_pc (regcache)
2970 - gdbarch_decr_pc_after_break (gdbarch);
2971
2972 /* Check whether there actually is a software breakpoint inserted at
2973 that location.
2974
2975 If in non-stop mode, a race condition is possible where we've
2976 removed a breakpoint, but stop events for that breakpoint were
2977 already queued and arrive later. To suppress those spurious
2978 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2979 and retire them after a number of stop events are reported. */
2980 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2981 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2982 {
2983 struct cleanup *old_cleanups = NULL;
2984
2985 if (RECORD_IS_USED)
2986 old_cleanups = record_gdb_operation_disable_set ();
2987
2988 /* When using hardware single-step, a SIGTRAP is reported for both
2989 a completed single-step and a software breakpoint. Need to
2990 differentiate between the two, as the latter needs adjusting
2991 but the former does not.
2992
2993 The SIGTRAP can be due to a completed hardware single-step only if
2994 - we didn't insert software single-step breakpoints
2995 - the thread to be examined is still the current thread
2996 - this thread is currently being stepped
2997
2998 If any of these events did not occur, we must have stopped due
2999 to hitting a software breakpoint, and have to back up to the
3000 breakpoint address.
3001
3002 As a special case, we could have hardware single-stepped a
3003 software breakpoint. In this case (prev_pc == breakpoint_pc),
3004 we also need to back up to the breakpoint address. */
3005
3006 if (singlestep_breakpoints_inserted_p
3007 || !ptid_equal (ecs->ptid, inferior_ptid)
3008 || !currently_stepping (ecs->event_thread)
3009 || ecs->event_thread->prev_pc == breakpoint_pc)
3010 regcache_write_pc (regcache, breakpoint_pc);
3011
3012 if (RECORD_IS_USED)
3013 do_cleanups (old_cleanups);
3014 }
3015 }
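
/* Illustrative sketch only, not part of GDB proper (the function name
   and numbers below are hypothetical): the adjustment above in
   isolation.  On an architecture where gdbarch_decr_pc_after_break ()
   is 1, a breakpoint planted at 0x08000000 is reported with
   PC == 0x08000001, and subtracting the decrement recovers the
   breakpoint address.  */
#if 0
static CORE_ADDR
example_rewind_reported_pc (struct gdbarch *gdbarch, CORE_ADDR reported_pc)
{
  /* E.g. 0x08000001 - 1 == 0x08000000.  */
  return reported_pc - gdbarch_decr_pc_after_break (gdbarch);
}
#endif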
3016
3017 void
3018 init_infwait_state (void)
3019 {
3020 waiton_ptid = pid_to_ptid (-1);
3021 infwait_state = infwait_normal_state;
3022 }
3023
3024 void
3025 error_is_running (void)
3026 {
3027 error (_("Cannot execute this command while "
3028 "the selected thread is running."));
3029 }
3030
3031 void
3032 ensure_not_running (void)
3033 {
3034 if (is_running (inferior_ptid))
3035 error_is_running ();
3036 }
3037
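/* Return non-zero if FRAME was stepped into from the frame identified
   by STEP_FRAME_ID; that is, STEP_FRAME_ID is found among FRAME's
   callers with only inlined frames in between. */
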
3038 static int
3039 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3040 {
3041 for (frame = get_prev_frame (frame);
3042 frame != NULL;
3043 frame = get_prev_frame (frame))
3044 {
3045 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3046 return 1;
3047 if (get_frame_type (frame) != INLINE_FRAME)
3048 break;
3049 }
3050
3051 return 0;
3052 }
3053
3054 /* Auxiliary function that handles syscall entry/return events.
3055 It returns 1 if the inferior should keep going (and GDB
3056 should ignore the event), or 0 if the event deserves to be
3057 processed. */
3058
3059 static int
3060 handle_syscall_event (struct execution_control_state *ecs)
3061 {
3062 struct regcache *regcache;
3063 struct gdbarch *gdbarch;
3064 int syscall_number;
3065
3066 if (!ptid_equal (ecs->ptid, inferior_ptid))
3067 context_switch (ecs->ptid);
3068
3069 regcache = get_thread_regcache (ecs->ptid);
3070 gdbarch = get_regcache_arch (regcache);
3071 syscall_number = ecs->ws.value.syscall_number;
3072 stop_pc = regcache_read_pc (regcache);
3073
3074 if (catch_syscall_enabled () > 0
3075 && catching_syscall_number (syscall_number) > 0)
3076 {
3077 if (debug_infrun)
3078 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3079 syscall_number);
3080
3081 ecs->event_thread->control.stop_bpstat
3082 = bpstat_stop_status (get_regcache_aspace (regcache),
3083 stop_pc, ecs->ptid, &ecs->ws);
3084 ecs->random_signal
3085 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3086
3087 if (!ecs->random_signal)
3088 {
3089 /* Catchpoint hit. */
3090 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3091 return 0;
3092 }
3093 }
3094
3095 /* If no catchpoint triggered for this, then keep going. */
3096 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3097 keep_going (ecs);
3098 return 1;
3099 }
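
/* Illustrative sketch only, not part of GDB proper (the function name
   below is hypothetical): how a caller honors the return convention
   documented above.  A return of 1 means the event was uninteresting
   and the inferior has already been resumed; 0 means a syscall
   catchpoint triggered and the stop must be processed further.  */
#if 0
static void
example_dispatch_syscall_event (struct execution_control_state *ecs)
{
  if (handle_syscall_event (ecs))
    return;			/* Already resumed; nothing else to do.  */

  /* Otherwise, continue with the normal stop processing.  */
}
#endif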
3100
3101 /* Clear the supplied execution_control_state's stop_func_* fields. */
3102
3103 static void
3104 clear_stop_func (struct execution_control_state *ecs)
3105 {
3106 ecs->stop_func_filled_in = 0;
3107 ecs->stop_func_start = 0;
3108 ecs->stop_func_end = 0;
3109 ecs->stop_func_name = NULL;
3110 }
3111
3112 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3113
3114 static void
3115 fill_in_stop_func (struct gdbarch *gdbarch,
3116 struct execution_control_state *ecs)
3117 {
3118 if (!ecs->stop_func_filled_in)
3119 {
3120 /* Don't care about return value; stop_func_start and stop_func_name
3121 will both be 0 if it doesn't work. */
3122 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3123 &ecs->stop_func_start, &ecs->stop_func_end);
3124 ecs->stop_func_start
3125 += gdbarch_deprecated_function_start_offset (gdbarch);
3126
3127 ecs->stop_func_filled_in = 1;
3128 }
3129 }
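
/* Illustrative sketch only, not part of GDB proper (the function name
   below is hypothetical): the lazy-fill pattern -- call
   fill_in_stop_func before reading any ecs->stop_func_* field;
   repeated calls are cheap thanks to the stop_func_filled_in flag.  */
#if 0
static const char *
example_stop_function_name (struct gdbarch *gdbarch,
			    struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);
  return ecs->stop_func_name;
}
#endif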
3130
3131 /* Given an execution control state that has been freshly filled in
3132 by an event from the inferior, figure out what it means and take
3133 appropriate action. */
3134
3135 static void
3136 handle_inferior_event (struct execution_control_state *ecs)
3137 {
3138 struct frame_info *frame;
3139 struct gdbarch *gdbarch;
3140 int stopped_by_watchpoint;
3141 int stepped_after_stopped_by_watchpoint = 0;
3142 struct symtab_and_line stop_pc_sal;
3143 enum stop_kind stop_soon;
3144
3145 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3146 {
3147 /* We had an event in the inferior, but we are not interested in
3148 handling it at this level. The lower layers have already
3149 done what needs to be done, if anything.
3150
3151 One of the possible circumstances for this is when the
3152 inferior produces output for the console. The inferior has
3153 not stopped, and we are ignoring the event. Another possible
3154 circumstance is any event which the lower level knows will be
3155 reported multiple times without an intervening resume. */
3156 if (debug_infrun)
3157 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3158 prepare_to_wait (ecs);
3159 return;
3160 }
3161
3162 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3163 && target_can_async_p () && !sync_execution)
3164 {
3165 /* There were no unwaited-for children left in the target, but,
3166 we're not synchronously waiting for events either. Just
3167 ignore. Otherwise, if we were running a synchronous
3168 execution command, we need to cancel it and give the user
3169 back the terminal. */
3170 if (debug_infrun)
3171 fprintf_unfiltered (gdb_stdlog,
3172 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3173 prepare_to_wait (ecs);
3174 return;
3175 }
3176
3177 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3178 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3179 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3180 {
3181 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3182
3183 gdb_assert (inf);
3184 stop_soon = inf->control.stop_soon;
3185 }
3186 else
3187 stop_soon = NO_STOP_QUIETLY;
3188
3189 /* Cache the last pid/waitstatus. */
3190 target_last_wait_ptid = ecs->ptid;
3191 target_last_waitstatus = ecs->ws;
3192
3193 /* Always clear state belonging to the previous time we stopped. */
3194 stop_stack_dummy = STOP_NONE;
3195
3196 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3197 {
3198 /* No unwaited-for children left. IOW, all resumed children
3199 have exited. */
3200 if (debug_infrun)
3201 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3202
3203 stop_print_frame = 0;
3204 stop_stepping (ecs);
3205 return;
3206 }
3207
3208 /* If it's a new process, add it to the thread database. */
3209
3210 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3211 && !ptid_equal (ecs->ptid, minus_one_ptid)
3212 && !in_thread_list (ecs->ptid));
3213
3214 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3215 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3216 add_thread (ecs->ptid);
3217
3218 ecs->event_thread = find_thread_ptid (ecs->ptid);
3219
3220 /* Dependent on valid ECS->EVENT_THREAD. */
3221 adjust_pc_after_break (ecs);
3222
3223 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3224 reinit_frame_cache ();
3225
3226 breakpoint_retire_moribund ();
3227
3228 /* First, distinguish signals caused by the debugger from signals
3229 that have to do with the program's own actions. Note that
3230 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3231 on the operating system version. Here we detect when a SIGILL or
3232 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3233 something similar for SIGSEGV, since a SIGSEGV will be generated
3234 when we're trying to execute a breakpoint instruction on a
3235 non-executable stack. This happens for call dummy breakpoints
3236 for architectures like SPARC that place call dummies on the
3237 stack. */
3238 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3239 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3240 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3241 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3242 {
3243 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3244
3245 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3246 regcache_read_pc (regcache)))
3247 {
3248 if (debug_infrun)
3249 fprintf_unfiltered (gdb_stdlog,
3250 "infrun: Treating signal as SIGTRAP\n");
3251 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3252 }
3253 }
3254
3255 /* Mark the non-executing threads accordingly. In all-stop, all
3256 threads of all processes are stopped when we get any event
3257 reported. In non-stop mode, only the event thread stops. If
3258 we're handling a process exit in non-stop mode, there's nothing
3259 to do, as threads of the dead process are gone, and threads of
3260 any other process were left running. */
3261 if (!non_stop)
3262 set_executing (minus_one_ptid, 0);
3263 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3264 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3265 set_executing (ecs->ptid, 0);
3266
3267 switch (infwait_state)
3268 {
3269 case infwait_thread_hop_state:
3270 if (debug_infrun)
3271 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3272 break;
3273
3274 case infwait_normal_state:
3275 if (debug_infrun)
3276 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3277 break;
3278
3279 case infwait_step_watch_state:
3280 if (debug_infrun)
3281 fprintf_unfiltered (gdb_stdlog,
3282 "infrun: infwait_step_watch_state\n");
3283
3284 stepped_after_stopped_by_watchpoint = 1;
3285 break;
3286
3287 case infwait_nonstep_watch_state:
3288 if (debug_infrun)
3289 fprintf_unfiltered (gdb_stdlog,
3290 "infrun: infwait_nonstep_watch_state\n");
3291 insert_breakpoints ();
3292
3293 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3294 handle things like signals arriving and other things happening
3295 in combination correctly? */
3296 stepped_after_stopped_by_watchpoint = 1;
3297 break;
3298
3299 default:
3300 internal_error (__FILE__, __LINE__, _("bad switch"));
3301 }
3302
3303 infwait_state = infwait_normal_state;
3304 waiton_ptid = pid_to_ptid (-1);
3305
3306 switch (ecs->ws.kind)
3307 {
3308 case TARGET_WAITKIND_LOADED:
3309 if (debug_infrun)
3310 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3311 /* Ignore gracefully during startup of the inferior, as it might
3312 be the shell which has just loaded some objects; otherwise,
3313 add the symbols for the newly loaded objects. Also ignore at
3314 the beginning of an attach or remote session; we will query
3315 the full list of libraries once the connection is
3316 established. */
3317 if (stop_soon == NO_STOP_QUIETLY)
3318 {
3319 struct regcache *regcache;
3320
3321 if (!ptid_equal (ecs->ptid, inferior_ptid))
3322 context_switch (ecs->ptid);
3323 regcache = get_thread_regcache (ecs->ptid);
3324
3325 handle_solib_event ();
3326
3327 ecs->event_thread->control.stop_bpstat
3328 = bpstat_stop_status (get_regcache_aspace (regcache),
3329 stop_pc, ecs->ptid, &ecs->ws);
3330 ecs->random_signal
3331 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3332
3333 if (!ecs->random_signal)
3334 {
3335 /* A catchpoint triggered. */
3336 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3337 goto process_event_stop_test;
3338 }
3339
3340 /* If requested, stop when the dynamic linker notifies
3341 gdb of events. This allows the user to get control
3342 and place breakpoints in initializer routines for
3343 dynamically loaded objects (among other things). */
3344 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3345 if (stop_on_solib_events)
3346 {
3347 /* Make sure we print "Stopped due to solib-event" in
3348 normal_stop. */
3349 stop_print_frame = 1;
3350
3351 stop_stepping (ecs);
3352 return;
3353 }
3354 }
3355
3356 /* If we are skipping through a shell, or through shared library
3357 loading that we aren't interested in, resume the program. If
3358 we're running the program normally, also resume. But stop if
3359 we're attaching or setting up a remote connection. */
3360 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3361 {
3362 /* Loading of shared libraries might have changed breakpoint
3363 addresses. Make sure new breakpoints are inserted. */
3364 if (stop_soon == NO_STOP_QUIETLY
3365 && !breakpoints_always_inserted_mode ())
3366 insert_breakpoints ();
3367 resume (0, TARGET_SIGNAL_0);
3368 prepare_to_wait (ecs);
3369 return;
3370 }
3371
3372 break;
3373
3374 case TARGET_WAITKIND_SPURIOUS:
3375 if (debug_infrun)
3376 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3377 resume (0, TARGET_SIGNAL_0);
3378 prepare_to_wait (ecs);
3379 return;
3380
3381 case TARGET_WAITKIND_EXITED:
3382 if (debug_infrun)
3383 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3384 inferior_ptid = ecs->ptid;
3385 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3386 set_current_program_space (current_inferior ()->pspace);
3387 handle_vfork_child_exec_or_exit (0);
3388 target_terminal_ours (); /* Must do this before mourn anyway. */
3389 print_exited_reason (ecs->ws.value.integer);
3390
3391 /* Record the exit code in the convenience variable $_exitcode, so
3392 that the user can inspect this again later. */
3393 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3394 (LONGEST) ecs->ws.value.integer);
3395
3396 /* Also record this in the inferior itself. */
3397 current_inferior ()->has_exit_code = 1;
3398 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3399
3400 gdb_flush (gdb_stdout);
3401 target_mourn_inferior ();
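/* Any software single-step breakpoints in the dead inferior are now
   stale; forget about them.  */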
3402 singlestep_breakpoints_inserted_p = 0;
3403 cancel_single_step_breakpoints ();
3404 stop_print_frame = 0;
3405 stop_stepping (ecs);
3406 return;
3407
3408 case TARGET_WAITKIND_SIGNALLED:
3409 if (debug_infrun)
3410 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3411 inferior_ptid = ecs->ptid;
3412 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3413 set_current_program_space (current_inferior ()->pspace);
3414 handle_vfork_child_exec_or_exit (0);
3415 stop_print_frame = 0;
3416 target_terminal_ours (); /* Must do this before mourn anyway. */
3417
3418 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3419 reach here unless the inferior is dead. However, for years
3420 target_kill() was called here, which hints that fatal signals aren't
3421 really fatal on some systems. If that's true, then some changes
3422 may be needed. */
3423 target_mourn_inferior ();
3424
3425 print_signal_exited_reason (ecs->ws.value.sig);
3426 singlestep_breakpoints_inserted_p = 0;
3427 cancel_single_step_breakpoints ();
3428 stop_stepping (ecs);
3429 return;
3430
3431 /* The following are the only cases in which we keep going;
3432 the above cases end in a continue or goto. */
3433 case TARGET_WAITKIND_FORKED:
3434 case TARGET_WAITKIND_VFORKED:
3435 if (debug_infrun)
3436 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3437
3438 /* Check whether the inferior is displaced stepping. */
3439 {
3440 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3441 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3442 struct displaced_step_inferior_state *displaced
3443 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3444
3445 /* If displaced stepping is in use, and thread ecs->ptid is the
3446 one that was displaced stepping, finish that step now. */
3447 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3448 {
3449 struct inferior *parent_inf
3450 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3451 struct regcache *child_regcache;
3452 CORE_ADDR parent_pc;
3453
3454 /* GDB has received TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3455 indicating that the displaced stepping of the fork/vfork syscall
3456 instruction has completed. Perform cleanup for the parent process here. Note
3457 that this operation also cleans up the child process for vfork,
3458 because their pages are shared. */
3459 displaced_step_fixup (ecs->ptid, TARGET_SIGNAL_TRAP);
3460
3461 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3462 {
3463 /* Restore scratch pad for child process. */
3464 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3465 }
3466
3467 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3468 the child's PC is also within the scratchpad. Set the child's PC
3469 to the parent's PC value, which has already been fixed up.
3470 FIXME: we use the parent's aspace here, although we're touching
3471 the child, because the child hasn't been added to the inferior
3472 list yet at this point. */
3473
3474 child_regcache
3475 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3476 gdbarch,
3477 parent_inf->aspace);
3478 /* Read PC value of parent process. */
3479 parent_pc = regcache_read_pc (regcache);
3480
3481 if (debug_displaced)
3482 fprintf_unfiltered (gdb_stdlog,
3483 "displaced: write child pc from %s to %s\n",
3484 paddress (gdbarch,
3485 regcache_read_pc (child_regcache)),
3486 paddress (gdbarch, parent_pc));
3487
3488 regcache_write_pc (child_regcache, parent_pc);
3489 }
3490 }
3491
3492 if (!ptid_equal (ecs->ptid, inferior_ptid))
3493 {
3494 context_switch (ecs->ptid);
3495 reinit_frame_cache ();
3496 }
3497
3498 /* Immediately detach breakpoints from the child before there's
3499 any chance of letting the user delete breakpoints from the
3500 breakpoint lists. If we don't do this early, it's easy to
3501 leave leftover traps in the child, viz: "break foo; catch
3502 fork; c; <fork>; del; c; <child calls foo>". We only follow
3503 the fork on the last `continue', and by that time the
3504 breakpoint at "foo" is long gone from the breakpoint table.
3505 If we vforked, then we don't need to unpatch here, since both
3506 parent and child are sharing the same memory pages; we'll
3507 need to unpatch at follow/detach time instead to be certain
3508 that new breakpoints added between catchpoint hit time and
3509 vfork follow are detached. */
3510 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3511 {
3512 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3513
3514 /* This won't actually modify the breakpoint list, but will
3515 physically remove the breakpoints from the child. */
3516 detach_breakpoints (child_pid);
3517 }
3518
3519 if (singlestep_breakpoints_inserted_p)
3520 {
3521 /* Pull the single step breakpoints out of the target. */
3522 remove_single_step_breakpoints ();
3523 singlestep_breakpoints_inserted_p = 0;
3524 }
3525
3526 /* In case the event is caught by a catchpoint, remember that
3527 the event is to be followed at the next resume of the thread,
3528 and not immediately. */
3529 ecs->event_thread->pending_follow = ecs->ws;
3530
3531 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3532
3533 ecs->event_thread->control.stop_bpstat
3534 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3535 stop_pc, ecs->ptid, &ecs->ws);
3536
3537 /* Note that we're interested in knowing whether the bpstat actually
3538 causes a stop, not just whether it may explain the signal.
3539 Software watchpoints, for example, always appear in the
3540 bpstat. */
3541 ecs->random_signal
3542 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3543
3544 /* If no catchpoint triggered for this, then keep going. */
3545 if (ecs->random_signal)
3546 {
3547 ptid_t parent;
3548 ptid_t child;
3549 int should_resume;
3550 int follow_child
3551 = (follow_fork_mode_string == follow_fork_mode_child);
3552
3553 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3554
3555 should_resume = follow_fork ();
3556
3557 parent = ecs->ptid;
3558 child = ecs->ws.value.related_pid;
3559
3560 /* In non-stop mode, also resume the other branch. */
3561 if (non_stop && !detach_fork)
3562 {
3563 if (follow_child)
3564 switch_to_thread (parent);
3565 else
3566 switch_to_thread (child);
3567
3568 ecs->event_thread = inferior_thread ();
3569 ecs->ptid = inferior_ptid;
3570 keep_going (ecs);
3571 }
3572
3573 if (follow_child)
3574 switch_to_thread (child);
3575 else
3576 switch_to_thread (parent);
3577
3578 ecs->event_thread = inferior_thread ();
3579 ecs->ptid = inferior_ptid;
3580
3581 if (should_resume)
3582 keep_going (ecs);
3583 else
3584 stop_stepping (ecs);
3585 return;
3586 }
3587 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3588 goto process_event_stop_test;
3589
3590 case TARGET_WAITKIND_VFORK_DONE:
3591 /* Done with the shared memory region. Re-insert breakpoints in
3592 the parent, and keep going. */
3593
3594 if (debug_infrun)
3595 fprintf_unfiltered (gdb_stdlog,
3596 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3597
3598 if (!ptid_equal (ecs->ptid, inferior_ptid))
3599 context_switch (ecs->ptid);
3600
3601 current_inferior ()->waiting_for_vfork_done = 0;
3602 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3603 /* This also takes care of reinserting breakpoints in the
3604 previously locked inferior. */
3605 keep_going (ecs);
3606 return;
3607
3608 case TARGET_WAITKIND_EXECD:
3609 if (debug_infrun)
3610 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3611
3612 if (!ptid_equal (ecs->ptid, inferior_ptid))
3613 {
3614 context_switch (ecs->ptid);
3615 reinit_frame_cache ();
3616 }
3617
3618 singlestep_breakpoints_inserted_p = 0;
3619 cancel_single_step_breakpoints ();
3620
3621 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3622
3623 /* Do whatever is necessary to the parent branch of the vfork. */
3624 handle_vfork_child_exec_or_exit (1);
3625
3626 /* This causes the eventpoints and symbol table to be reset.
3627 Must do this now, before trying to determine whether to
3628 stop. */
3629 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3630
3631 ecs->event_thread->control.stop_bpstat
3632 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3633 stop_pc, ecs->ptid, &ecs->ws);
3634 ecs->random_signal
3635 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3636
3637 /* Note that this may be referenced from inside
3638 bpstat_stop_status above, through inferior_has_execd. */
3639 xfree (ecs->ws.value.execd_pathname);
3640 ecs->ws.value.execd_pathname = NULL;
3641
3642 /* If no catchpoint triggered for this, then keep going. */
3643 if (ecs->random_signal)
3644 {
3645 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3646 keep_going (ecs);
3647 return;
3648 }
3649 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3650 goto process_event_stop_test;
3651
3652 /* Be careful not to try to gather much state about a thread
3653 that's in a syscall. It's frequently a losing proposition. */
3654 case TARGET_WAITKIND_SYSCALL_ENTRY:
3655 if (debug_infrun)
3656 fprintf_unfiltered (gdb_stdlog,
3657 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3658 /* Getting the current syscall number. */
3659 if (handle_syscall_event (ecs) != 0)
3660 return;
3661 goto process_event_stop_test;
3662
3663 /* Before examining the threads further, step this thread to
3664 get it entirely out of the syscall. (We get notice of the
3665 event when the thread is just on the verge of exiting a
3666 syscall. Stepping one instruction seems to get it back
3667 into user code.) */
3668 case TARGET_WAITKIND_SYSCALL_RETURN:
3669 if (debug_infrun)
3670 fprintf_unfiltered (gdb_stdlog,
3671 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3672 if (handle_syscall_event (ecs) != 0)
3673 return;
3674 goto process_event_stop_test;
3675
3676 case TARGET_WAITKIND_STOPPED:
3677 if (debug_infrun)
3678 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3679 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3680 break;
3681
3682 case TARGET_WAITKIND_NO_HISTORY:
3683 if (debug_infrun)
3684 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3685 /* Reverse execution: target ran out of history info. */
3686 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3687 print_no_history_reason ();
3688 stop_stepping (ecs);
3689 return;
3690 }
3691
3692 if (ecs->new_thread_event)
3693 {
3694 if (non_stop)
3695 /* Non-stop assumes that the target handles adding new threads
3696 to the thread list. */
3697 internal_error (__FILE__, __LINE__,
3698 "targets should add new threads to the thread "
3699 "list themselves in non-stop mode.");
3700
3701 /* We may want to consider not doing a resume here in order to
3702 give the user a chance to play with the new thread. It might
3703 be good to make that a user-settable option. */
3704
3705 /* At this point, all threads are stopped (happens automatically
3706 in either the OS or the native code). Therefore we need to
3707 continue all threads in order to make progress. */
3708
3709 if (!ptid_equal (ecs->ptid, inferior_ptid))
3710 context_switch (ecs->ptid);
3711 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3712 prepare_to_wait (ecs);
3713 return;
3714 }
3715
3716 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3717 {
3718 /* Do we need to clean up the state of a thread that has
3719 completed a displaced single-step? (Doing so usually affects
3720 the PC, so do it here, before we set stop_pc.) */
3721 displaced_step_fixup (ecs->ptid,
3722 ecs->event_thread->suspend.stop_signal);
3723
3724 /* If we either finished a single-step or hit a breakpoint, but
3725 the user wanted this thread to be stopped, pretend we got a
3726 SIG0 (generic unsignaled stop). */
3727
3728 if (ecs->event_thread->stop_requested
3729 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3730 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3731 }
3732
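/* Cache the event thread's PC in the global STOP_PC; the stop tests
   below all compare against it.  */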
3733 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3734
3735 if (debug_infrun)
3736 {
3737 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3738 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3739 struct cleanup *old_chain = save_inferior_ptid ();
3740
3741 inferior_ptid = ecs->ptid;
3742
3743 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3744 paddress (gdbarch, stop_pc));
3745 if (target_stopped_by_watchpoint ())
3746 {
3747 CORE_ADDR addr;
3748
3749 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3750
3751 if (target_stopped_data_address (&current_target, &addr))
3752 fprintf_unfiltered (gdb_stdlog,
3753 "infrun: stopped data address = %s\n",
3754 paddress (gdbarch, addr));
3755 else
3756 fprintf_unfiltered (gdb_stdlog,
3757 "infrun: (no data address available)\n");
3758 }
3759
3760 do_cleanups (old_chain);
3761 }
3762
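/* STEPPING_PAST_SINGLESTEP_BREAKPOINT is set further below when a
   software single-step breakpoint is hit by a thread other than the
   one being single-stepped; check whether that detour has finished so
   we can switch back to the saved thread.  */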
3763 if (stepping_past_singlestep_breakpoint)
3764 {
3765 gdb_assert (singlestep_breakpoints_inserted_p);
3766 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3767 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3768
3769 stepping_past_singlestep_breakpoint = 0;
3770
3771 /* We've either finished single-stepping past the single-step
3772 breakpoint, or stopped for some other reason. It would be nice if
3773 we could tell, but we can't reliably. */
3774 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3775 {
3776 if (debug_infrun)
3777 fprintf_unfiltered (gdb_stdlog,
3778 "infrun: stepping_past_"
3779 "singlestep_breakpoint\n");
3780 /* Pull the single step breakpoints out of the target. */
3781 remove_single_step_breakpoints ();
3782 singlestep_breakpoints_inserted_p = 0;
3783
3784 ecs->random_signal = 0;
3785 ecs->event_thread->control.trap_expected = 0;
3786
3787 context_switch (saved_singlestep_ptid);
3788 if (deprecated_context_hook)
3789 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3790
3791 resume (1, TARGET_SIGNAL_0);
3792 prepare_to_wait (ecs);
3793 return;
3794 }
3795 }
3796
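/* DEFERRED_STEP_PTID is non-null when, in all-stop mode, the user's
   step was deferred so that another thread could first be stepped
   over a breakpoint; once that step-over finishes, switch back to the
   deferred thread.  */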
3797 if (!ptid_equal (deferred_step_ptid, null_ptid))
3798 {
3799 /* In non-stop mode, there's never a deferred_step_ptid set. */
3800 gdb_assert (!non_stop);
3801
3802 /* If we stopped for some other reason than single-stepping, ignore
3803 the fact that we were supposed to switch back. */
3804 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3805 {
3806 if (debug_infrun)
3807 fprintf_unfiltered (gdb_stdlog,
3808 "infrun: handling deferred step\n");
3809
3810 /* Pull the single step breakpoints out of the target. */
3811 if (singlestep_breakpoints_inserted_p)
3812 {
3813 remove_single_step_breakpoints ();
3814 singlestep_breakpoints_inserted_p = 0;
3815 }
3816
3817 ecs->event_thread->control.trap_expected = 0;
3818
3819 /* Note: We do not call context_switch at this point, as the
3820 context is already set up for stepping the original thread. */
3821 switch_to_thread (deferred_step_ptid);
3822 deferred_step_ptid = null_ptid;
3823 /* Suppress spurious "Switching to ..." message. */
3824 previous_inferior_ptid = inferior_ptid;
3825
3826 resume (1, TARGET_SIGNAL_0);
3827 prepare_to_wait (ecs);
3828 return;
3829 }
3830
3831 deferred_step_ptid = null_ptid;
3832 }
3833
3834 /* See if a thread hit a thread-specific breakpoint that was meant for
3835 another thread. If so, then step that thread past the breakpoint,
3836 and continue it. */
3837
3838 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3839 {
3840 int thread_hop_needed = 0;
3841 struct address_space *aspace =
3842 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3843
3844 /* Check if a regular breakpoint has been hit before checking
3845 for a potential single step breakpoint. Otherwise, GDB will
3846 not see this breakpoint hit when stepping onto breakpoints. */
3847 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3848 {
3849 ecs->random_signal = 0;
3850 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3851 thread_hop_needed = 1;
3852 }
3853 else if (singlestep_breakpoints_inserted_p)
3854 {
3855 /* We have not context switched yet, so this should be true
3856 no matter which thread hit the singlestep breakpoint. */
3857 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3858 if (debug_infrun)
3859 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3860 "trap for %s\n",
3861 target_pid_to_str (ecs->ptid));
3862
3863 ecs->random_signal = 0;
3864 /* The call to in_thread_list is necessary because PTIDs sometimes
3865 change when we go from single-threaded to multi-threaded. If
3866 the singlestep_ptid is still in the list, assume that it is
3867 really different from ecs->ptid. */
3868 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3869 && in_thread_list (singlestep_ptid))
3870 {
3871 /* If the PC of the thread we were trying to single-step
3872 has changed, discard this event (which we were going
3873 to ignore anyway), and pretend we saw that thread
3874 trap. This prevents us continuously moving the
3875 single-step breakpoint forward, one instruction at a
3876 time. If the PC has changed, then the thread we were
3877 trying to single-step has trapped or been signalled,
3878 but the event has not been reported to GDB yet.
3879
3880 There might be some cases where this loses signal
3881 information, if a signal has arrived at exactly the
3882 same time that the PC changed, but this is the best
3883 we can do with the information available. Perhaps we
3884 should arrange to report all events for all threads
3885 when they stop, or to re-poll the remote looking for
3886 this particular thread (i.e. temporarily enable
3887 schedlock). */
3888
3889 CORE_ADDR new_singlestep_pc
3890 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3891
3892 if (new_singlestep_pc != singlestep_pc)
3893 {
3894 enum target_signal stop_signal;
3895
3896 if (debug_infrun)
3897 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3898 " but expected thread advanced also\n");
3899
3900 /* The current context still belongs to
3901 singlestep_ptid. Don't swap here, since that's
3902 the context we want to use. Just fudge our
3903 state and continue. */
3904 stop_signal = ecs->event_thread->suspend.stop_signal;
3905 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3906 ecs->ptid = singlestep_ptid;
3907 ecs->event_thread = find_thread_ptid (ecs->ptid);
3908 ecs->event_thread->suspend.stop_signal = stop_signal;
3909 stop_pc = new_singlestep_pc;
3910 }
3911 else
3912 {
3913 if (debug_infrun)
3914 fprintf_unfiltered (gdb_stdlog,
3915 "infrun: unexpected thread\n");
3916
3917 thread_hop_needed = 1;
3918 stepping_past_singlestep_breakpoint = 1;
3919 saved_singlestep_ptid = singlestep_ptid;
3920 }
3921 }
3922 }
3923
3924 if (thread_hop_needed)
3925 {
3926 struct regcache *thread_regcache;
3927 int remove_status = 0;
3928
3929 if (debug_infrun)
3930 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3931
3932 /* Switch context before touching inferior memory; the
3933 previous thread may have exited. */
3934 if (!ptid_equal (inferior_ptid, ecs->ptid))
3935 context_switch (ecs->ptid);
3936
3937 /* Saw a breakpoint, but it was hit by the wrong thread.
3938 Just continue. */
3939
3940 if (singlestep_breakpoints_inserted_p)
3941 {
3942 /* Pull the single step breakpoints out of the target. */
3943 remove_single_step_breakpoints ();
3944 singlestep_breakpoints_inserted_p = 0;
3945 }
3946
3947 /* If the arch can displace step, don't remove the
3948 breakpoints. */
3949 thread_regcache = get_thread_regcache (ecs->ptid);
3950 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3951 remove_status = remove_breakpoints ();
3952
3953 /* Did we fail to remove breakpoints? If so, we cannot
3954 step the other thread past the breakpoint, so report
3955 an error. (There's at least one situation in which we
3956 can fail to remove the bp's: on HP-UX systems that use
3957 ttrace, we can't change the address space of a vforking
3958 child process until the child exits (well, okay, not
3959 then either :-) or execs.) */
3960 if (remove_status != 0)
3961 error (_("Cannot step over breakpoint hit in wrong thread"));
3962 else
3963 { /* Single step */
3964 if (!non_stop)
3965 {
3966 /* Only need to require the next event from this
3967 thread in all-stop mode. */
3968 waiton_ptid = ecs->ptid;
3969 infwait_state = infwait_thread_hop_state;
3970 }
3971
3972 ecs->event_thread->stepping_over_breakpoint = 1;
3973 keep_going (ecs);
3974 return;
3975 }
3976 }
3977 else if (singlestep_breakpoints_inserted_p)
3978 {
3979 ecs->random_signal = 0;
3980 }
3981 }
3982 else
3983 ecs->random_signal = 1;
3984
3985 /* See if something interesting happened to the non-current thread. If
3986 so, then switch to that thread. */
3987 if (!ptid_equal (ecs->ptid, inferior_ptid))
3988 {
3989 if (debug_infrun)
3990 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3991
3992 context_switch (ecs->ptid);
3993
3994 if (deprecated_context_hook)
3995 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3996 }
3997
3998 /* At this point, get hold of the now-current thread's frame. */
3999 frame = get_current_frame ();
4000 gdbarch = get_frame_arch (frame);
4001
4002 if (singlestep_breakpoints_inserted_p)
4003 {
4004 /* Pull the single step breakpoints out of the target. */
4005 remove_single_step_breakpoints ();
4006 singlestep_breakpoints_inserted_p = 0;
4007 }
4008
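/* If this stop is just the completion of the extra step requested
   above to get past a watchpoint trap (see the infwait_*_watch_state
   handling), it is not a fresh watchpoint hit; otherwise ask the
   target which watchpoints, if any, triggered.  */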
4009 if (stepped_after_stopped_by_watchpoint)
4010 stopped_by_watchpoint = 0;
4011 else
4012 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4013
4014 /* If necessary, step over this watchpoint. We'll be back to display
4015 it in a moment. */
4016 if (stopped_by_watchpoint
4017 && (target_have_steppable_watchpoint
4018 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4019 {
4020 /* At this point, we are stopped at an instruction which has
4021 attempted to write to a piece of memory under control of
4022 a watchpoint. The instruction hasn't actually executed
4023 yet. If we were to evaluate the watchpoint expression
4024 now, we would get the old value, and therefore no change
4025 would seem to have occurred.
4026
4027 In order to make watchpoints work `right', we really need
4028 to complete the memory write, and then evaluate the
4029 watchpoint expression. We do this by single-stepping the
4030 target.
4031
4032 It may not be necessary to disable the watchpoint to step over
4033 it. For example, the PA can (with some kernel cooperation)
4034 single step over a watchpoint without disabling the watchpoint.
4035
4036 It is far more common to need to disable a watchpoint to step
4037 the inferior over it. If we have non-steppable watchpoints,
4038 we must disable the current watchpoint; it's simplest to
4039 disable all watchpoints and breakpoints. */
4040 int hw_step = 1;
4041
4042 if (!target_have_steppable_watchpoint)
4043 {
4044 remove_breakpoints ();
4045 /* See comment in resume why we need to stop bypassing signals
4046 while breakpoints have been removed. */
4047 target_pass_signals (0, NULL);
4048 }
4049 /* Single step */
4050 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4051 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
4052 waiton_ptid = ecs->ptid;
4053 if (target_have_steppable_watchpoint)
4054 infwait_state = infwait_step_watch_state;
4055 else
4056 infwait_state = infwait_nonstep_watch_state;
4057 prepare_to_wait (ecs);
4058 return;
4059 }
4060
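/* Reset the per-event state that the stop tests below fill in.  */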
4061 clear_stop_func (ecs);
4062 ecs->event_thread->stepping_over_breakpoint = 0;
4063 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4064 ecs->event_thread->control.stop_step = 0;
4065 stop_print_frame = 1;
4066 ecs->random_signal = 0;
4067 stopped_by_random_signal = 0;
4068
4069 /* Hide inlined functions starting here, unless we just performed stepi or
4070 nexti. After stepi and nexti, always show the innermost frame (not any
4071 inline function call sites). */
4072 if (ecs->event_thread->control.step_range_end != 1)
4073 {
4074 struct address_space *aspace =
4075 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4076
4077 /* skip_inline_frames is expensive, so we avoid it if we can
4078 determine that the address is one where functions cannot have
4079 been inlined. This improves performance with inferiors that
4080 load a lot of shared libraries, because the solib event
4081 breakpoint is defined as the address of a function (i.e. not
4082 inline). Note that we have to check the previous PC as well
4083 as the current one to catch cases when we have just
4084 single-stepped off a breakpoint prior to reinstating it.
4085 Note that we're assuming that the code we single-step to is
4086 not inline, but that's not definitive: there's nothing
4087 preventing the event breakpoint function from containing
4088 inlined code, and the single-step ending up there. If the
4089 user had set a breakpoint on that inlined code, the missing
4090 skip_inline_frames call would break things. Fortunately
4091 that's an extremely unlikely scenario. */
4092 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4093 && !(ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4094 && ecs->event_thread->control.trap_expected
4095 && pc_at_non_inline_function (aspace,
4096 ecs->event_thread->prev_pc,
4097 &ecs->ws)))
4098 skip_inline_frames (ecs->ptid);
4099 }
4100
4101 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4102 && ecs->event_thread->control.trap_expected
4103 && gdbarch_single_step_through_delay_p (gdbarch)
4104 && currently_stepping (ecs->event_thread))
4105 {
4106 /* We're trying to step off a breakpoint. Turns out that we're
4107 also on an instruction that needs to be stepped multiple
4108 times before it has fully executed. E.g., on architectures
4109 with a delay slot, it needs to be stepped twice: once for
4110 the instruction and once for the delay slot. */
4111 int step_through_delay
4112 = gdbarch_single_step_through_delay (gdbarch, frame);
4113
4114 if (debug_infrun && step_through_delay)
4115 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4116 if (ecs->event_thread->control.step_range_end == 0
4117 && step_through_delay)
4118 {
4119 /* The user issued a continue when stopped at a breakpoint.
4120 Set up for another trap and get out of here. */
4121 ecs->event_thread->stepping_over_breakpoint = 1;
4122 keep_going (ecs);
4123 return;
4124 }
4125 else if (step_through_delay)
4126 {
4127 /* The user issued a step when stopped at a breakpoint.
4128 Maybe we should stop, maybe we should not - the delay
4129 slot *might* correspond to a line of source. In any
4130 case, don't decide that here, just set
4131 ecs->stepping_over_breakpoint, making sure we
4132 single-step again before breakpoints are re-inserted. */
4133 ecs->event_thread->stepping_over_breakpoint = 1;
4134 }
4135 }
4136
4137 /* Look at the cause of the stop, and decide what to do.
4138 The alternatives are:
4139 1) stop_stepping and return; to really stop and return to the debugger,
4140 2) keep_going and return to start up again
4141 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4142 3) set ecs->random_signal to 1, and the decision between 1 and 2
4143 will be made according to the signal handling tables. */
4144
4145 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4146 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4147 || stop_soon == STOP_QUIETLY_REMOTE)
4148 {
4149 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4150 && stop_after_trap)
4151 {
4152 if (debug_infrun)
4153 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4154 stop_print_frame = 0;
4155 stop_stepping (ecs);
4156 return;
4157 }
4158
4159 /* This originates from start_remote(), start_inferior() and
4160 the shared library hook functions. */
4161 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4162 {
4163 if (debug_infrun)
4164 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4165 stop_stepping (ecs);
4166 return;
4167 }
4168
4169 /* This originates from attach_command(). We need to overwrite
4170 the stop_signal here, because some kernels don't ignore a
4171 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4172 See more comments in inferior.h. On the other hand, if we
4173 get a non-SIGSTOP, report it to the user - assume the backend
4174 will handle the SIGSTOP if it should show up later.
4175
4176 Also consider that the attach is complete when we see a
4177 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4178 target extended-remote report it instead of a SIGSTOP
4179 (e.g. gdbserver). We already rely on SIGTRAP being our
4180 signal, so this is no exception.
4181
4182 Also consider that the attach is complete when we see a
4183 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4184 the target to stop all threads of the inferior, in case the
4185 low level attach operation doesn't stop them implicitly. If
4186 they weren't stopped implicitly, then the stub will report a
4187 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4188 other than GDB's request. */
4189 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4190 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4191 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4192 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4193 {
4194 stop_stepping (ecs);
4195 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4196 return;
4197 }
4198
4199 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4200 handles this event. */
4201 ecs->event_thread->control.stop_bpstat
4202 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4203 stop_pc, ecs->ptid, &ecs->ws);
4204
4205 /* Set the following in case a breakpoint condition called a
4206 function. */
4207 stop_print_frame = 1;
4208
4209 /* This is where we handle "moribund" watchpoints. Unlike
4210 software breakpoint traps, hardware watchpoint traps are
4211 always distinguishable from random traps. If no high-level
4212 watchpoint is associated with the reported stop data address
4213 anymore, then the bpstat does not explain the signal ---
4214 simply make sure to ignore it if `stopped_by_watchpoint' is
4215 set. */
4216
4217 if (debug_infrun
4218 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4219 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4220 && stopped_by_watchpoint)
4221 fprintf_unfiltered (gdb_stdlog,
4222 "infrun: no user watchpoint explains "
4223 "watchpoint SIGTRAP, ignoring\n");
4224
4225 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4226 at one stage in the past included checks for an inferior
4227 function call's call dummy's return breakpoint. The original
4228 comment, that went with the test, read:
4229
4230 ``End of a stack dummy. Some systems (e.g. Sony news) give
4231 another signal besides SIGTRAP, so check here as well as
4232 above.''
4233
4234 If someone ever tries to get call dummies on a
4235 non-executable stack to work (where the target would stop
4236 with something like a SIGSEGV), then those tests might need
4237 to be re-instated. Given, however, that the tests were only
4238 enabled when momentary breakpoints were not being used, I
4239 suspect that it won't be the case.
4240
4241 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4242 be necessary for call dummies on a non-executable stack on
4243 SPARC. */
4244
4245 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4246 ecs->random_signal
4247 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4248 || stopped_by_watchpoint
4249 || ecs->event_thread->control.trap_expected
4250 || (ecs->event_thread->control.step_range_end
4251 && (ecs->event_thread->control.step_resume_breakpoint
4252 == NULL)));
4253 else
4254 {
4255 ecs->random_signal = !bpstat_explains_signal
4256 (ecs->event_thread->control.stop_bpstat);
4257 if (!ecs->random_signal)
4258 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4259 }
4260 }
4261
4262 /* When we reach this point, we've pretty much decided
4263 that the reason for stopping must've been a random
4264 (unexpected) signal. */
4265
4266 else
4267 ecs->random_signal = 1;
4268
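/* The catchpoint cases above (solib, fork/vfork, exec and syscall
   events) jump here so they can share the stepping and breakpoint
   logic below with the normal TARGET_WAITKIND_STOPPED path.  */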
4269 process_event_stop_test:
4270
4271 /* Re-fetch current thread's frame in case we did a
4272 "goto process_event_stop_test" above. */
4273 frame = get_current_frame ();
4274 gdbarch = get_frame_arch (frame);
4275
4276 /* For the program's own signals, act according to
4277 the signal handling tables. */
4278
4279 if (ecs->random_signal)
4280 {
4281 /* Signal not for debugging purposes. */
4282 int printed = 0;
4283 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4284
4285 if (debug_infrun)
4286 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4287 ecs->event_thread->suspend.stop_signal);
4288
4289 stopped_by_random_signal = 1;
4290
4291 if (signal_print[ecs->event_thread->suspend.stop_signal])
4292 {
4293 printed = 1;
4294 target_terminal_ours_for_output ();
4295 print_signal_received_reason
4296 (ecs->event_thread->suspend.stop_signal);
4297 }
4298 /* Always stop on signals if we're either just gaining control
4299 of the program, or the user explicitly requested this thread
4300 to remain stopped. */
4301 if (stop_soon != NO_STOP_QUIETLY
4302 || ecs->event_thread->stop_requested
4303 || (!inf->detaching
4304 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4305 {
4306 stop_stepping (ecs);
4307 return;
4308 }
4309 /* If not going to stop, give terminal back
4310 if we took it away. */
4311 else if (printed)
4312 target_terminal_inferior ();
4313
4314 /* Clear the signal if it should not be passed. */
4315 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4316 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4317
4318 if (ecs->event_thread->prev_pc == stop_pc
4319 && ecs->event_thread->control.trap_expected
4320 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4321 {
4322 /* We were just starting a new sequence, attempting to
4323 single-step off of a breakpoint and expecting a SIGTRAP.
4324 Instead, this signal arrives. This signal will take us out
4325 of the stepping range, so GDB needs to remember to resume
4326 stepping off that breakpoint once the signal handler
4327 returns. */
4328 /* To simplify things, "continue" is forced to use the same
4329 code paths as single-step - set a breakpoint at the
4330 signal return address and then, once hit, step off that
4331 breakpoint. */
4332 if (debug_infrun)
4333 fprintf_unfiltered (gdb_stdlog,
4334 "infrun: signal arrived while stepping over "
4335 "breakpoint\n");
4336
4337 insert_hp_step_resume_breakpoint_at_frame (frame);
4338 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4339 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4340 ecs->event_thread->control.trap_expected = 0;
4341 keep_going (ecs);
4342 return;
4343 }
4344
4345 if (ecs->event_thread->control.step_range_end != 0
4346 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4347 && (ecs->event_thread->control.step_range_start <= stop_pc
4348 && stop_pc < ecs->event_thread->control.step_range_end)
4349 && frame_id_eq (get_stack_frame_id (frame),
4350 ecs->event_thread->control.step_stack_frame_id)
4351 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4352 {
4353 /* The inferior is about to take a signal that will take it
4354 out of the single step range. Set a breakpoint at the
4355 current PC (which is presumably where the signal handler
4356 will eventually return) and then allow the inferior to
4357 run free.
4358
4359 Note that this is only needed for a signal delivered
4360 while in the single-step range. Nested signals aren't a
4361 problem as they eventually all return. */
4362 if (debug_infrun)
4363 fprintf_unfiltered (gdb_stdlog,
4364 "infrun: signal may take us out of "
4365 "single-step range\n");
4366
4367 insert_hp_step_resume_breakpoint_at_frame (frame);
4368 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4369 ecs->event_thread->control.trap_expected = 0;
4370 keep_going (ecs);
4371 return;
4372 }
4373
4374 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4375 when either there's a nested signal, or when there's a
4376 pending signal enabled just as the signal handler returns
4377 (leaving the inferior at the step-resume-breakpoint without
4378 actually executing it). Either way continue until the
4379 breakpoint is really hit. */
4380 keep_going (ecs);
4381 return;
4382 }
4383
4384 /* Handle cases caused by hitting a breakpoint. */
4385 {
4386 CORE_ADDR jmp_buf_pc;
4387 struct bpstat_what what;
4388
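/* bpstat_what condenses the chain of breakpoint hits at this stop
   into a single highest-priority action to take.  */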
4389 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4390
4391 if (what.call_dummy)
4392 {
4393 stop_stack_dummy = what.call_dummy;
4394 }
4395
4396 /* If we hit an internal event that triggers symbol changes, the
4397 current frame will be invalidated within bpstat_what (e.g., if
4398 we hit an internal solib event). Re-fetch it. */
4399 frame = get_current_frame ();
4400 gdbarch = get_frame_arch (frame);
4401
4402 switch (what.main_action)
4403 {
4404 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4405 /* If we hit the breakpoint at longjmp while stepping, we
4406 install a momentary breakpoint at the target of the
4407 jmp_buf. */
4408
4409 if (debug_infrun)
4410 fprintf_unfiltered (gdb_stdlog,
4411 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4412
4413 ecs->event_thread->stepping_over_breakpoint = 1;
4414
4415 if (what.is_longjmp)
4416 {
4417 if (!gdbarch_get_longjmp_target_p (gdbarch)
4418 || !gdbarch_get_longjmp_target (gdbarch,
4419 frame, &jmp_buf_pc))
4420 {
4421 if (debug_infrun)
4422 fprintf_unfiltered (gdb_stdlog,
4423 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4424 "(!gdbarch_get_longjmp_target)\n");
4425 keep_going (ecs);
4426 return;
4427 }
4428
4429 /* We're going to replace the current step-resume breakpoint
4430 with a longjmp-resume breakpoint. */
4431 delete_step_resume_breakpoint (ecs->event_thread);
4432
4433 /* Insert a breakpoint at resume address. */
4434 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4435 }
4436 else
4437 {
4438 struct symbol *func = get_frame_function (frame);
4439
4440 if (func)
4441 check_exception_resume (ecs, frame, func);
4442 }
4443 keep_going (ecs);
4444 return;
4445
4446 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4447 if (debug_infrun)
4448 fprintf_unfiltered (gdb_stdlog,
4449 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4450
4451 if (what.is_longjmp)
4452 {
4453 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4454 != NULL);
4455 delete_step_resume_breakpoint (ecs->event_thread);
4456 }
4457 else
4458 {
4459 /* There are several cases to consider.
4460
4461 1. The initiating frame no longer exists. In this case
4462 we must stop, because the exception has gone too far.
4463
4464 2. The initiating frame exists, and is the same as the
4465 current frame. We stop, because the exception has been
4466 caught.
4467
4468 3. The initiating frame exists and is different from
4469 the current frame. This means the exception has been
4470 caught beneath the initiating frame, so keep going. */
4471 struct frame_info *init_frame
4472 = frame_find_by_id (ecs->event_thread->initiating_frame);
4473
4474 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4475 != NULL);
4476 delete_exception_resume_breakpoint (ecs->event_thread);
4477
4478 if (init_frame)
4479 {
4480 struct frame_id current_id
4481 = get_frame_id (get_current_frame ());
4482 if (frame_id_eq (current_id,
4483 ecs->event_thread->initiating_frame))
4484 {
4485 /* Case 2. Fall through. */
4486 }
4487 else
4488 {
4489 /* Case 3. */
4490 keep_going (ecs);
4491 return;
4492 }
4493 }
4494
4495 /* For Cases 1 and 2, remove the step-resume breakpoint,
4496 if it exists. */
4497 delete_step_resume_breakpoint (ecs->event_thread);
4498 }
4499
4500 ecs->event_thread->control.stop_step = 1;
4501 print_end_stepping_range_reason ();
4502 stop_stepping (ecs);
4503 return;
4504
4505 case BPSTAT_WHAT_SINGLE:
4506 if (debug_infrun)
4507 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4508 ecs->event_thread->stepping_over_breakpoint = 1;
4509 /* Still need to check other stuff, at least the case
4510 where we are stepping and step out of the stepping range. */
4511 break;
4512
4513 case BPSTAT_WHAT_STEP_RESUME:
4514 if (debug_infrun)
4515 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4516
4517 delete_step_resume_breakpoint (ecs->event_thread);
4518 if (ecs->event_thread->control.proceed_to_finish
4519 && execution_direction == EXEC_REVERSE)
4520 {
4521 struct thread_info *tp = ecs->event_thread;
4522
4523 /* We are finishing a function in reverse, and just hit
4524 the step-resume breakpoint at the start address of the
4525 function, and we're almost there -- just need to back
4526 up by one more single-step, which should take us back
4527 to the function call. */
4528 tp->control.step_range_start = tp->control.step_range_end = 1;
4529 keep_going (ecs);
4530 return;
4531 }
4532 fill_in_stop_func (gdbarch, ecs);
4533 if (stop_pc == ecs->stop_func_start
4534 && execution_direction == EXEC_REVERSE)
4535 {
4536 /* We are stepping over a function call in reverse, and
4537 just hit the step-resume breakpoint at the start
4538 address of the function. Go back to single-stepping,
4539 which should take us back to the function call. */
4540 ecs->event_thread->stepping_over_breakpoint = 1;
4541 keep_going (ecs);
4542 return;
4543 }
4544 break;
4545
4546 case BPSTAT_WHAT_STOP_NOISY:
4547 if (debug_infrun)
4548 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4549 stop_print_frame = 1;
4550
4551 /* We are about to nuke the step_resume_breakpoint via the
4552 cleanup chain, so no need to worry about it here. */
4553
4554 stop_stepping (ecs);
4555 return;
4556
4557 case BPSTAT_WHAT_STOP_SILENT:
4558 if (debug_infrun)
4559 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4560 stop_print_frame = 0;
4561
4562 /* We are about to nuke the step_resume_breakpoint via the
4563 cleanup chain, so no need to worry about it here. */
4564
4565 stop_stepping (ecs);
4566 return;
4567
4568 case BPSTAT_WHAT_HP_STEP_RESUME:
4569 if (debug_infrun)
4570 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4571
4572 delete_step_resume_breakpoint (ecs->event_thread);
4573 if (ecs->event_thread->step_after_step_resume_breakpoint)
4574 {
4575 /* Back when the step-resume breakpoint was inserted, we
4576 were trying to single-step off a breakpoint. Go back
4577 to doing that. */
4578 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4579 ecs->event_thread->stepping_over_breakpoint = 1;
4580 keep_going (ecs);
4581 return;
4582 }
4583 break;
4584
4585 case BPSTAT_WHAT_KEEP_CHECKING:
4586 break;
4587 }
4588 }
4589
4590 /* We come here if we hit a breakpoint but should not
4591 stop for it. Possibly we also were stepping
4592 and should stop for that. So fall through and
4593 test for stepping. But, if not stepping,
4594 do not stop. */
4595
4596 /* In all-stop mode, if we're currently stepping but have stopped in
4597 some other thread, we need to switch back to the stepped thread. */
4598 if (!non_stop)
4599 {
4600 struct thread_info *tp;
4601
4602 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4603 ecs->event_thread);
4604 if (tp)
4605 {
4606 /* However, if the current thread is blocked on some internal
4607 breakpoint, and we simply need to step over that breakpoint
4608 to get it going again, do that first. */
4609 if ((ecs->event_thread->control.trap_expected
4610 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4611 || ecs->event_thread->stepping_over_breakpoint)
4612 {
4613 keep_going (ecs);
4614 return;
4615 }
4616
4617 /* If the stepping thread exited, then don't try to switch
4618 back and resume it, which could fail in several different
4619 ways depending on the target. Instead, just keep going.
4620
4621 We can find a stepping dead thread in the thread list in
4622 two cases:
4623
4624 - The target supports thread exit events, and when the
4625 target tries to delete the thread from the thread list,
4626 inferior_ptid pointed at the exiting thread. In such
4627 case, calling delete_thread does not really remove the
4628 thread from the list; instead, the thread is left listed,
4629 with 'exited' state.
4630
4631 - The target's debug interface does not support thread
4632 exit events, and so we have no idea whatsoever if the
4633 previously stepping thread is still alive. For that
4634 reason, we need to synchronously query the target
4635 now. */
4636 if (is_exited (tp->ptid)
4637 || !target_thread_alive (tp->ptid))
4638 {
4639 if (debug_infrun)
4640 fprintf_unfiltered (gdb_stdlog,
4641 "infrun: not switching back to "
4642 "stepped thread, it has vanished\n");
4643
4644 delete_thread (tp->ptid);
4645 keep_going (ecs);
4646 return;
4647 }
4648
4649 /* Otherwise, we no longer expect a trap in the current thread.
4650 Clear the trap_expected flag before switching back -- this is
4651 what keep_going would do as well, if we called it. */
4652 ecs->event_thread->control.trap_expected = 0;
4653
4654 if (debug_infrun)
4655 fprintf_unfiltered (gdb_stdlog,
4656 "infrun: switching back to stepped thread\n");
4657
4658 ecs->event_thread = tp;
4659 ecs->ptid = tp->ptid;
4660 context_switch (ecs->ptid);
4661 keep_going (ecs);
4662 return;
4663 }
4664 }
4665
4666 if (ecs->event_thread->control.step_resume_breakpoint)
4667 {
4668 if (debug_infrun)
4669 fprintf_unfiltered (gdb_stdlog,
4670 "infrun: step-resume breakpoint is inserted\n");
4671
4672 /* Having a step-resume breakpoint overrides anything
4673 else having to do with stepping commands until
4674 that breakpoint is reached. */
4675 keep_going (ecs);
4676 return;
4677 }
4678
4679 if (ecs->event_thread->control.step_range_end == 0)
4680 {
4681 if (debug_infrun)
4682 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4683 /* Likewise if we aren't even stepping. */
4684 keep_going (ecs);
4685 return;
4686 }
4687
4688 /* Re-fetch current thread's frame in case the code above caused
4689 the frame cache to be re-initialized, making our FRAME variable
4690 a dangling pointer. */
4691 frame = get_current_frame ();
4692 gdbarch = get_frame_arch (frame);
4693 fill_in_stop_func (gdbarch, ecs);
4694
4695 /* If stepping through a line, keep going if still within it.
4696
4697 Note that step_range_end is the address of the first instruction
4698 beyond the step range, and NOT the address of the last instruction
4699 within it!
4700
4701 Note also that during reverse execution, we may be stepping
4702 through a function epilogue and therefore must detect when
4703 the current-frame changes in the middle of a line. */
4704
4705 if (stop_pc >= ecs->event_thread->control.step_range_start
4706 && stop_pc < ecs->event_thread->control.step_range_end
4707 && (execution_direction != EXEC_REVERSE
4708 || frame_id_eq (get_frame_id (frame),
4709 ecs->event_thread->control.step_frame_id)))
4710 {
4711 if (debug_infrun)
4712 fprintf_unfiltered
4713 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4714 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4715 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4716
4717 /* When stepping backward, stop at beginning of line range
4718 (unless it's the function entry point, in which case
4719 keep going back to the call point). */
4720 if (stop_pc == ecs->event_thread->control.step_range_start
4721 && stop_pc != ecs->stop_func_start
4722 && execution_direction == EXEC_REVERSE)
4723 {
4724 ecs->event_thread->control.stop_step = 1;
4725 print_end_stepping_range_reason ();
4726 stop_stepping (ecs);
4727 }
4728 else
4729 keep_going (ecs);
4730
4731 return;
4732 }
4733
4734 /* We stepped out of the stepping range. */
4735
4736 /* If we are stepping at the source level and entered the runtime
4737 loader dynamic symbol resolution code...
4738
4739 EXEC_FORWARD: we keep on single stepping until we exit the run
4740 time loader code and reach the callee's address.
4741
4742 EXEC_REVERSE: we've already executed the callee (backward), and
4743 the runtime loader code is handled just like any other
4744 undebuggable function call. Now we need only keep stepping
4745 backward through the trampoline code, and that's handled further
4746 down, so there is nothing for us to do here. */
4747
4748 if (execution_direction != EXEC_REVERSE
4749 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4750 && in_solib_dynsym_resolve_code (stop_pc))
4751 {
4752 CORE_ADDR pc_after_resolver =
4753 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4754
4755 if (debug_infrun)
4756 fprintf_unfiltered (gdb_stdlog,
4757 "infrun: stepped into dynsym resolve code\n");
4758
4759 if (pc_after_resolver)
4760 {
4761 /* Set up a step-resume breakpoint at the address
4762 indicated by SKIP_SOLIB_RESOLVER. */
4763 struct symtab_and_line sr_sal;
4764
4765 init_sal (&sr_sal);
4766 sr_sal.pc = pc_after_resolver;
4767 sr_sal.pspace = get_frame_program_space (frame);
4768
4769 insert_step_resume_breakpoint_at_sal (gdbarch,
4770 sr_sal, null_frame_id);
4771 }
4772
4773 keep_going (ecs);
4774 return;
4775 }
4776
4777 if (ecs->event_thread->control.step_range_end != 1
4778 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4779 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4780 && get_frame_type (frame) == SIGTRAMP_FRAME)
4781 {
4782 if (debug_infrun)
4783 fprintf_unfiltered (gdb_stdlog,
4784 "infrun: stepped into signal trampoline\n");
4785 /* The inferior, while doing a "step" or "next", has ended up in
4786 a signal trampoline (either by a signal being delivered or by
4787 the signal handler returning). Just single-step until the
4788 inferior leaves the trampoline (either by calling the handler
4789 or returning). */
4790 keep_going (ecs);
4791 return;
4792 }
4793
4794 /* Check for subroutine calls. The check for the current frame
4795 equalling the step ID is not necessary - the check of the
4796 previous frame's ID is sufficient - but it is a common case and
4797 cheaper than checking the previous frame's ID.
4798
4799 NOTE: frame_id_eq will never report two invalid frame IDs as
4800 being equal, so to get into this block, both the current and
4801 previous frame must have valid frame IDs. */
4802 /* The outer_frame_id check is a heuristic to detect stepping
4803 through startup code. If we step over an instruction which
4804 sets the stack pointer from an invalid value to a valid value,
4805 we may detect that as a subroutine call from the mythical
4806 "outermost" function. This could be fixed by marking
4807 outermost frames as !stack_p,code_p,special_p. Then the
4808 initial outermost frame, before sp was valid, would
4809 have code_addr == &_start. See the comment in frame_id_eq
4810 for more. */
4811 if (!frame_id_eq (get_stack_frame_id (frame),
4812 ecs->event_thread->control.step_stack_frame_id)
4813 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4814 ecs->event_thread->control.step_stack_frame_id)
4815 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4816 outer_frame_id)
4817 || step_start_function != find_pc_function (stop_pc))))
4818 {
4819 CORE_ADDR real_stop_pc;
4820
4821 if (debug_infrun)
4822 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4823
4824 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4825 || ((ecs->event_thread->control.step_range_end == 1)
4826 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4827 ecs->stop_func_start)))
4828 {
4829 /* I presume that step_over_calls is only 0 when we're
4830 supposed to be stepping at the assembly language level
4831 ("stepi"). Just stop. */
4832 /* Also, maybe we just did a "nexti" inside a prolog, so we
4833 thought it was a subroutine call but it was not. Stop as
4834 well. FENN */
4835 /* And this works the same backward as frontward. MVS */
4836 ecs->event_thread->control.stop_step = 1;
4837 print_end_stepping_range_reason ();
4838 stop_stepping (ecs);
4839 return;
4840 }
4841
4842 /* Reverse stepping through solib trampolines. */
4843
4844 if (execution_direction == EXEC_REVERSE
4845 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4846 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4847 || (ecs->stop_func_start == 0
4848 && in_solib_dynsym_resolve_code (stop_pc))))
4849 {
4850 /* Any solib trampoline code can be handled in reverse
4851 by simply continuing to single-step. We have already
4852 executed the solib function (backwards), and a few
4853 steps will take us back through the trampoline to the
4854 caller. */
4855 keep_going (ecs);
4856 return;
4857 }
4858
4859 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4860 {
4861 /* We're doing a "next".
4862
4863 Normal (forward) execution: set a breakpoint at the
4864 callee's return address (the address at which the caller
4865 will resume).
4866
4867 Reverse (backward) execution: set the step-resume
4868 breakpoint at the start of the function that we just
4869 stepped into (backwards), and continue to there. When we
4870 get there, we'll need to single-step back to the caller. */
4871
4872 if (execution_direction == EXEC_REVERSE)
4873 {
4874 struct symtab_and_line sr_sal;
4875
4876 /* Normal function call return (static or dynamic). */
4877 init_sal (&sr_sal);
4878 sr_sal.pc = ecs->stop_func_start;
4879 sr_sal.pspace = get_frame_program_space (frame);
4880 insert_step_resume_breakpoint_at_sal (gdbarch,
4881 sr_sal, null_frame_id);
4882 }
4883 else
4884 insert_step_resume_breakpoint_at_caller (frame);
4885
4886 keep_going (ecs);
4887 return;
4888 }
4889
4890 /* If we are in a function call trampoline (a stub between the
4891 calling routine and the real function), locate the real
4892 function. That's what tells us (a) whether we want to step
4893 into it at all, and (b) what prologue we want to run to the
4894 end of, if we do step into it. */
4895 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4896 if (real_stop_pc == 0)
4897 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4898 if (real_stop_pc != 0)
4899 ecs->stop_func_start = real_stop_pc;
4900
4901 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4902 {
4903 struct symtab_and_line sr_sal;
4904
4905 init_sal (&sr_sal);
4906 sr_sal.pc = ecs->stop_func_start;
4907 sr_sal.pspace = get_frame_program_space (frame);
4908
4909 insert_step_resume_breakpoint_at_sal (gdbarch,
4910 sr_sal, null_frame_id);
4911 keep_going (ecs);
4912 return;
4913 }
4914
4915 /* If we have line number information for the function we are
4916 thinking of stepping into and the function isn't on the skip
4917 list, step into it.
4918
4919 If there are several symtabs at that PC (e.g. with include
4920 files), we just want to know whether *any* of them have line
4921 numbers. find_pc_line handles this. */
4922 {
4923 struct symtab_and_line tmp_sal;
4924
4925 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4926 if (tmp_sal.line != 0
4927 && !function_pc_is_marked_for_skip (ecs->stop_func_start))
4928 {
4929 if (execution_direction == EXEC_REVERSE)
4930 handle_step_into_function_backward (gdbarch, ecs);
4931 else
4932 handle_step_into_function (gdbarch, ecs);
4933 return;
4934 }
4935 }
4936
4937 /* If we have no line number and the step-stop-if-no-debug is
4938 set, we stop the step so that the user has a chance to switch
4939 to assembly mode. */
4940 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4941 && step_stop_if_no_debug)
4942 {
4943 ecs->event_thread->control.stop_step = 1;
4944 print_end_stepping_range_reason ();
4945 stop_stepping (ecs);
4946 return;
4947 }
4948
4949 if (execution_direction == EXEC_REVERSE)
4950 {
4951 /* Set a breakpoint at callee's start address.
4952 From there we can step once and be back in the caller. */
4953 struct symtab_and_line sr_sal;
4954
4955 init_sal (&sr_sal);
4956 sr_sal.pc = ecs->stop_func_start;
4957 sr_sal.pspace = get_frame_program_space (frame);
4958 insert_step_resume_breakpoint_at_sal (gdbarch,
4959 sr_sal, null_frame_id);
4960 }
4961 else
4962 /* Set a breakpoint at callee's return address (the address
4963 at which the caller will resume). */
4964 insert_step_resume_breakpoint_at_caller (frame);
4965
4966 keep_going (ecs);
4967 return;
4968 }
4969
4970 /* Reverse stepping through solib trampolines. */
4971
4972 if (execution_direction == EXEC_REVERSE
4973 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4974 {
4975 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4976 || (ecs->stop_func_start == 0
4977 && in_solib_dynsym_resolve_code (stop_pc)))
4978 {
4979 /* Any solib trampoline code can be handled in reverse
4980 by simply continuing to single-step. We have already
4981 executed the solib function (backwards), and a few
4982 steps will take us back through the trampoline to the
4983 caller. */
4984 keep_going (ecs);
4985 return;
4986 }
4987 else if (in_solib_dynsym_resolve_code (stop_pc))
4988 {
4989 /* Stepped backward into the solib dynsym resolver.
4990 Set a breakpoint at its start and continue, then
4991 one more step will take us out. */
4992 struct symtab_and_line sr_sal;
4993
4994 init_sal (&sr_sal);
4995 sr_sal.pc = ecs->stop_func_start;
4996 sr_sal.pspace = get_frame_program_space (frame);
4997 insert_step_resume_breakpoint_at_sal (gdbarch,
4998 sr_sal, null_frame_id);
4999 keep_going (ecs);
5000 return;
5001 }
5002 }
5003
5004 /* If we're in the return path from a shared library trampoline,
5005 we want to proceed through the trampoline when stepping. */
5006 if (gdbarch_in_solib_return_trampoline (gdbarch,
5007 stop_pc, ecs->stop_func_name))
5008 {
5009 /* Determine where this trampoline returns. */
5010 CORE_ADDR real_stop_pc;
5011
5012 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5013
5014 if (debug_infrun)
5015 fprintf_unfiltered (gdb_stdlog,
5016 "infrun: stepped into solib return tramp\n");
5017
5018 /* Only proceed through if we know where it's going. */
5019 if (real_stop_pc)
5020 {
5021 /* And put the step-breakpoint there and go until there. */
5022 struct symtab_and_line sr_sal;
5023
5024 init_sal (&sr_sal); /* initialize to zeroes */
5025 sr_sal.pc = real_stop_pc;
5026 sr_sal.section = find_pc_overlay (sr_sal.pc);
5027 sr_sal.pspace = get_frame_program_space (frame);
5028
5029 /* Do not specify what the fp should be when we stop since
5030 on some machines the prologue is where the new fp value
5031 is established. */
5032 insert_step_resume_breakpoint_at_sal (gdbarch,
5033 sr_sal, null_frame_id);
5034
5035 /* Restart without fiddling with the step ranges or
5036 other state. */
5037 keep_going (ecs);
5038 return;
5039 }
5040 }
5041
5042 stop_pc_sal = find_pc_line (stop_pc, 0);
5043
5044 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5045 the trampoline processing logic; however, there are some trampolines
5046 that have no names, so we should do trampoline handling first. */
5047 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5048 && ecs->stop_func_name == NULL
5049 && stop_pc_sal.line == 0)
5050 {
5051 if (debug_infrun)
5052 fprintf_unfiltered (gdb_stdlog,
5053 "infrun: stepped into undebuggable function\n");
5054
5055 /* The inferior just stepped into, or returned to, an
5056 undebuggable function (where there is no debugging information
5057 and no line number corresponding to the address where the
5058 inferior stopped). Since we want to skip this kind of code,
5059 we keep going until the inferior returns from this
5060 function - unless the user has asked us not to (via
5061 set step-mode) or we no longer know how to get back
5062 to the call site. */
5063 if (step_stop_if_no_debug
5064 || !frame_id_p (frame_unwind_caller_id (frame)))
5065 {
5066 /* If we have no line number and the step-stop-if-no-debug
5067 is set, we stop the step so that the user has a chance to
5068 switch to assembly mode. */
5069 ecs->event_thread->control.stop_step = 1;
5070 print_end_stepping_range_reason ();
5071 stop_stepping (ecs);
5072 return;
5073 }
5074 else
5075 {
5076 /* Set a breakpoint at callee's return address (the address
5077 at which the caller will resume). */
5078 insert_step_resume_breakpoint_at_caller (frame);
5079 keep_going (ecs);
5080 return;
5081 }
5082 }
5083
5084 if (ecs->event_thread->control.step_range_end == 1)
5085 {
5086 /* It is stepi or nexti. We always want to stop stepping after
5087 one instruction. */
5088 if (debug_infrun)
5089 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5090 ecs->event_thread->control.stop_step = 1;
5091 print_end_stepping_range_reason ();
5092 stop_stepping (ecs);
5093 return;
5094 }
5095
5096 if (stop_pc_sal.line == 0)
5097 {
5098 /* We have no line number information. That means to stop
5099 stepping (does this always happen right after one instruction,
5100 when we do "s" in a function with no line numbers,
5101 or can this happen as a result of a return or longjmp?). */
5102 if (debug_infrun)
5103 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5104 ecs->event_thread->control.stop_step = 1;
5105 print_end_stepping_range_reason ();
5106 stop_stepping (ecs);
5107 return;
5108 }
5109
5110 /* Look for "calls" to inlined functions, part one. If the inline
5111 frame machinery detected some skipped call sites, we have entered
5112 a new inline function. */
5113
5114 if (frame_id_eq (get_frame_id (get_current_frame ()),
5115 ecs->event_thread->control.step_frame_id)
5116 && inline_skipped_frames (ecs->ptid))
5117 {
5118 struct symtab_and_line call_sal;
5119
5120 if (debug_infrun)
5121 fprintf_unfiltered (gdb_stdlog,
5122 "infrun: stepped into inlined function\n");
5123
5124 find_frame_sal (get_current_frame (), &call_sal);
5125
5126 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5127 {
5128 /* For "step", we're going to stop. But if the call site
5129 for this inlined function is on the same source line as
5130 we were previously stepping, go down into the function
5131 first. Otherwise stop at the call site. */
5132
5133 if (call_sal.line == ecs->event_thread->current_line
5134 && call_sal.symtab == ecs->event_thread->current_symtab)
5135 step_into_inline_frame (ecs->ptid);
5136
5137 ecs->event_thread->control.stop_step = 1;
5138 print_end_stepping_range_reason ();
5139 stop_stepping (ecs);
5140 return;
5141 }
5142 else
5143 {
5144 /* For "next", we should stop at the call site if it is on a
5145 different source line. Otherwise continue through the
5146 inlined function. */
5147 if (call_sal.line == ecs->event_thread->current_line
5148 && call_sal.symtab == ecs->event_thread->current_symtab)
5149 keep_going (ecs);
5150 else
5151 {
5152 ecs->event_thread->control.stop_step = 1;
5153 print_end_stepping_range_reason ();
5154 stop_stepping (ecs);
5155 }
5156 return;
5157 }
5158 }
5159
5160 /* Look for "calls" to inlined functions, part two. If we are still
5161 in the same real function we were stepping through, but we have
5162 to go further up to find the exact frame ID, we are stepping
5163 through a more inlined call beyond its call site. */
5164
5165 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5166 && !frame_id_eq (get_frame_id (get_current_frame ()),
5167 ecs->event_thread->control.step_frame_id)
5168 && stepped_in_from (get_current_frame (),
5169 ecs->event_thread->control.step_frame_id))
5170 {
5171 if (debug_infrun)
5172 fprintf_unfiltered (gdb_stdlog,
5173 "infrun: stepping through inlined function\n");
5174
5175 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5176 keep_going (ecs);
5177 else
5178 {
5179 ecs->event_thread->control.stop_step = 1;
5180 print_end_stepping_range_reason ();
5181 stop_stepping (ecs);
5182 }
5183 return;
5184 }
5185
5186 if ((stop_pc == stop_pc_sal.pc)
5187 && (ecs->event_thread->current_line != stop_pc_sal.line
5188 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5189 {
5190 /* We are at the start of a different line. So stop. Note that
5191 we don't stop if we step into the middle of a different line.
5192 That is said to make things like for (;;) statements work
5193 better. */
5194 if (debug_infrun)
5195 fprintf_unfiltered (gdb_stdlog,
5196 "infrun: stepped to a different line\n");
5197 ecs->event_thread->control.stop_step = 1;
5198 print_end_stepping_range_reason ();
5199 stop_stepping (ecs);
5200 return;
5201 }
5202
5203 /* We aren't done stepping.
5204
5205 Optimize by setting the stepping range to the line.
5206 (We might not be in the original line, but if we entered a
5207 new line in mid-statement, we continue stepping. This makes
5208 things like for(;;) statements work better.) */
5209
5210 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5211 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5212 set_step_info (frame, stop_pc_sal);
5213
5214 if (debug_infrun)
5215 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5216 keep_going (ecs);
5217 }
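
/* For illustration of the mid-line test above (a sketch; exact code
   generation depends on the compiler): for a loop such as

     for (i = 0; i < n; i++)    <- one source line, several pieces
       body ();

   the increment and the condition test usually do not sit at the first
   address recorded for the "for" line, so a step landing on them has
   stop_pc != stop_pc_sal.pc; no stop is reported, the step range is
   simply reset to that line and stepping continues.  */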
5218
5219 /* Is thread TP in the middle of single-stepping? */
5220
5221 static int
5222 currently_stepping (struct thread_info *tp)
5223 {
5224 return ((tp->control.step_range_end
5225 && tp->control.step_resume_breakpoint == NULL)
5226 || tp->control.trap_expected
5227 || bpstat_should_step ());
5228 }
5229
5230 /* Returns true if any thread *but* the one passed in "data" is in the
5231 middle of stepping or of handling a "next". */
5232
5233 static int
5234 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5235 {
5236 if (tp == data)
5237 return 0;
5238
5239 return (tp->control.step_range_end
5240 || tp->control.trap_expected);
5241 }
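
/* For illustration, the callback above is meant for use with
   iterate_over_threads (from gdbthread.h), which returns the first
   thread for which the callback returns nonzero, or NULL if there is
   none.  A sketch, assuming ECS names the execution control state as
   elsewhere in this file:

     if (iterate_over_threads (currently_stepping_or_nexting_callback,
                               ecs->event_thread) != NULL)
       {
         ...some other thread is stepping or doing a "next"...
       }
*/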
5242
5243 /* Inferior has stepped into a subroutine call with source code that
5244 we should not step over. Do step to the first line of code in
5245 it. */
5246
5247 static void
5248 handle_step_into_function (struct gdbarch *gdbarch,
5249 struct execution_control_state *ecs)
5250 {
5251 struct symtab *s;
5252 struct symtab_and_line stop_func_sal, sr_sal;
5253
5254 fill_in_stop_func (gdbarch, ecs);
5255
5256 s = find_pc_symtab (stop_pc);
5257 if (s && s->language != language_asm)
5258 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5259 ecs->stop_func_start);
5260
5261 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5262 /* Use the step_resume_break to step until the end of the prologue,
5263 even if that involves jumps (as it seems to on the vax under
5264 4.2). */
5265 /* If the prologue ends in the middle of a source line, continue to
5266 the end of that source line (if it is still within the function).
5267 Otherwise, just go to end of prologue. */
5268 if (stop_func_sal.end
5269 && stop_func_sal.pc != ecs->stop_func_start
5270 && stop_func_sal.end < ecs->stop_func_end)
5271 ecs->stop_func_start = stop_func_sal.end;
5272
5273 /* Architectures which require breakpoint adjustment might not be able
5274 to place a breakpoint at the computed address. If so, the test
5275 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5276 ecs->stop_func_start to an address at which a breakpoint may be
5277 legitimately placed.
5278
5279 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5280 made, GDB will enter an infinite loop when stepping through
5281 optimized code consisting of VLIW instructions which contain
5282 subinstructions corresponding to different source lines. On
5283 FR-V, it's not permitted to place a breakpoint on any but the
5284 first subinstruction of a VLIW instruction. When a breakpoint is
5285 set, GDB will adjust the breakpoint address to the beginning of
5286 the VLIW instruction. Thus, we need to make the corresponding
5287 adjustment here when computing the stop address. */
5288
5289 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5290 {
5291 ecs->stop_func_start
5292 = gdbarch_adjust_breakpoint_address (gdbarch,
5293 ecs->stop_func_start);
5294 }
5295
5296 if (ecs->stop_func_start == stop_pc)
5297 {
5298 /* We are already there: stop now. */
5299 ecs->event_thread->control.stop_step = 1;
5300 print_end_stepping_range_reason ();
5301 stop_stepping (ecs);
5302 return;
5303 }
5304 else
5305 {
5306 /* Put the step-breakpoint there and go until there. */
5307 init_sal (&sr_sal); /* initialize to zeroes */
5308 sr_sal.pc = ecs->stop_func_start;
5309 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5310 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5311
5312 /* Do not specify what the fp should be when we stop since on
5313 some machines the prologue is where the new fp value is
5314 established. */
5315 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5316
5317 /* And make sure stepping stops right away then. */
5318 ecs->event_thread->control.step_range_end
5319 = ecs->event_thread->control.step_range_start;
5320 }
5321 keep_going (ecs);
5322 }
5323
5324 /* Inferior has stepped backward into a subroutine call with source
5325 code that we should not step over. Do step to the beginning of the
5326 last line of code in it. */
5327
5328 static void
5329 handle_step_into_function_backward (struct gdbarch *gdbarch,
5330 struct execution_control_state *ecs)
5331 {
5332 struct symtab *s;
5333 struct symtab_and_line stop_func_sal;
5334
5335 fill_in_stop_func (gdbarch, ecs);
5336
5337 s = find_pc_symtab (stop_pc);
5338 if (s && s->language != language_asm)
5339 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5340 ecs->stop_func_start);
5341
5342 stop_func_sal = find_pc_line (stop_pc, 0);
5343
5344 /* OK, we're just going to keep stepping here. */
5345 if (stop_func_sal.pc == stop_pc)
5346 {
5347 /* We're there already. Just stop stepping now. */
5348 ecs->event_thread->control.stop_step = 1;
5349 print_end_stepping_range_reason ();
5350 stop_stepping (ecs);
5351 }
5352 else
5353 {
5354 /* Else just reset the step range and keep going.
5355 No step-resume breakpoint; they don't work for
5356 epilogues, which can have multiple entry paths. */
5357 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5358 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5359 keep_going (ecs);
5360 }
5361 return;
5362 }
5363
5364 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5365 This is used both to skip over functions and to skip over other code. */
5366
5367 static void
5368 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5369 struct symtab_and_line sr_sal,
5370 struct frame_id sr_id,
5371 enum bptype sr_type)
5372 {
5373 /* There should never be more than one step-resume or longjmp-resume
5374 breakpoint per thread, so we should never be setting a new
5375 step_resume_breakpoint when one is already active. */
5376 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5377 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5378
5379 if (debug_infrun)
5380 fprintf_unfiltered (gdb_stdlog,
5381 "infrun: inserting step-resume breakpoint at %s\n",
5382 paddress (gdbarch, sr_sal.pc));
5383
5384 inferior_thread ()->control.step_resume_breakpoint
5385 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5386 }
5387
5388 void
5389 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5390 struct symtab_and_line sr_sal,
5391 struct frame_id sr_id)
5392 {
5393 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5394 sr_sal, sr_id,
5395 bp_step_resume);
5396 }
5397
5398 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5399 This is used to skip a potential signal handler.
5400
5401 This is called with the interrupted function's frame. The signal
5402 handler, when it returns, will resume the interrupted function at
5403 RETURN_FRAME.pc. */
5404
5405 static void
5406 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5407 {
5408 struct symtab_and_line sr_sal;
5409 struct gdbarch *gdbarch;
5410
5411 gdb_assert (return_frame != NULL);
5412 init_sal (&sr_sal); /* initialize to zeros */
5413
5414 gdbarch = get_frame_arch (return_frame);
5415 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5416 sr_sal.section = find_pc_overlay (sr_sal.pc);
5417 sr_sal.pspace = get_frame_program_space (return_frame);
5418
5419 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5420 get_stack_frame_id (return_frame),
5421 bp_hp_step_resume);
5422 }
5423
5424 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5425 is used to skip a function after stepping into it (for "next" or if
5426 the called function has no debugging information).
5427
5428 The current function has almost always been reached by single
5429 stepping a call or return instruction. NEXT_FRAME belongs to the
5430 current function, and the breakpoint will be set at the caller's
5431 resume address.
5432
5433 This is a separate function rather than reusing
5434 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5435 get_prev_frame, which may stop prematurely (see the implementation
5436 of frame_unwind_caller_id for an example). */
5437
5438 static void
5439 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5440 {
5441 struct symtab_and_line sr_sal;
5442 struct gdbarch *gdbarch;
5443
5444 /* We shouldn't have gotten here if we don't know where the call site
5445 is. */
5446 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5447
5448 init_sal (&sr_sal); /* initialize to zeros */
5449
5450 gdbarch = frame_unwind_caller_arch (next_frame);
5451 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5452 frame_unwind_caller_pc (next_frame));
5453 sr_sal.section = find_pc_overlay (sr_sal.pc);
5454 sr_sal.pspace = frame_unwind_program_space (next_frame);
5455
5456 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5457 frame_unwind_caller_id (next_frame));
5458 }
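
/* For illustration (a sketch; names are made up), the typical "next"
   scenario this supports:

     void caller (void)
     {
       callee ();   // "next" single-steps this call and lands at
       x = 1;       // callee's entry; the breakpoint set above goes on
     }              // the caller's resume address, i.e. at "x = 1"

   after which the inferior is simply allowed to run to it.  */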
5459
5460 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5461 new breakpoint at the target of a jmp_buf. The handling of
5462 longjmp-resume uses the same mechanisms used for handling
5463 "step-resume" breakpoints. */
5464
5465 static void
5466 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5467 {
5468 /* There should never be more than one step-resume or longjmp-resume
5469 breakpoint per thread, so we should never be setting a new
5470 longjmp_resume_breakpoint when one is already active. */
5471 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5472
5473 if (debug_infrun)
5474 fprintf_unfiltered (gdb_stdlog,
5475 "infrun: inserting longjmp-resume breakpoint at %s\n",
5476 paddress (gdbarch, pc));
5477
5478 inferior_thread ()->control.step_resume_breakpoint =
5479 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5480 }
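
/* For illustration, this is roughly how the bp_longjmp handling earlier
   in this file uses the function once the inferior stops at a longjmp
   breakpoint (a sketch, not a copy of that code):

     CORE_ADDR jmp_buf_pc;

     if (gdbarch_get_longjmp_target_p (gdbarch)
         && gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
       insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);

   gdbarch_get_longjmp_target extracts the saved PC from the jmp_buf
   passed to longjmp, so the resume breakpoint lands at the
   corresponding setjmp return site.  */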
5481
5482 /* Insert an exception resume breakpoint. TP is the thread throwing
5483 the exception. The block B is the block of the unwinder debug hook
5484 function. FRAME is the frame corresponding to the call to this
5485 function. SYM is the symbol of the function argument holding the
5486 target PC of the exception. */
5487
5488 static void
5489 insert_exception_resume_breakpoint (struct thread_info *tp,
5490 struct block *b,
5491 struct frame_info *frame,
5492 struct symbol *sym)
5493 {
5494 volatile struct gdb_exception e;
5495
5496 /* We want to ignore errors here. */
5497 TRY_CATCH (e, RETURN_MASK_ERROR)
5498 {
5499 struct symbol *vsym;
5500 struct value *value;
5501 CORE_ADDR handler;
5502 struct breakpoint *bp;
5503
5504 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5505 value = read_var_value (vsym, frame);
5506 /* If the value was optimized out, revert to the old behavior. */
5507 if (! value_optimized_out (value))
5508 {
5509 handler = value_as_address (value);
5510
5511 if (debug_infrun)
5512 fprintf_unfiltered (gdb_stdlog,
5513 "infrun: exception resume at %lx\n",
5514 (unsigned long) handler);
5515
5516 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5517 handler, bp_exception_resume);
5518 bp->thread = tp->num;
5519 inferior_thread ()->control.exception_resume_breakpoint = bp;
5520 }
5521 }
5522 }
5523
5524 /* This is called when an exception has been intercepted. Check to
5525 see whether the exception's destination is of interest, and if so,
5526 set an exception resume breakpoint there. */
5527
5528 static void
5529 check_exception_resume (struct execution_control_state *ecs,
5530 struct frame_info *frame, struct symbol *func)
5531 {
5532 volatile struct gdb_exception e;
5533
5534 TRY_CATCH (e, RETURN_MASK_ERROR)
5535 {
5536 struct block *b;
5537 struct dict_iterator iter;
5538 struct symbol *sym;
5539 int argno = 0;
5540
5541 /* The exception breakpoint is a thread-specific breakpoint on
5542 the unwinder's debug hook, declared as:
5543
5544 void _Unwind_DebugHook (void *cfa, void *handler);
5545
5546 The CFA argument indicates the frame to which control is
5547 about to be transferred. HANDLER is the destination PC.
5548
5549 We ignore the CFA and set a temporary breakpoint at HANDLER.
5550 This is not extremely efficient but it avoids issues in gdb
5551 with computing the DWARF CFA, and it also works even in weird
5552 cases such as throwing an exception from inside a signal
5553 handler. */
5554
5555 b = SYMBOL_BLOCK_VALUE (func);
5556 ALL_BLOCK_SYMBOLS (b, iter, sym)
5557 {
5558 if (!SYMBOL_IS_ARGUMENT (sym))
5559 continue;
5560
5561 if (argno == 0)
5562 ++argno;
5563 else
5564 {
5565 insert_exception_resume_breakpoint (ecs->event_thread,
5566 b, frame, sym);
5567 break;
5568 }
5569 }
5570 }
5571 }
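
/* For illustration, on the inferior side the unwinder debug hook
   described above is provided by libgcc and is essentially an empty
   function that exists only so a debugger can stop on it (a sketch;
   the exact definition is a libgcc detail and may differ):

     void
     _Unwind_DebugHook (void *cfa, void *handler)
     {
       asm ("");   // keeps the call from being optimized away
     }

   GDB relies only on the signature documented in the comment above.  */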
5572
5573 static void
5574 stop_stepping (struct execution_control_state *ecs)
5575 {
5576 if (debug_infrun)
5577 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5578
5579 /* Let callers know we don't want to wait for the inferior anymore. */
5580 ecs->wait_some_more = 0;
5581 }
5582
5583 /* This function handles various cases where we need to continue
5584 waiting for the inferior. */
5585 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5586
5587 static void
5588 keep_going (struct execution_control_state *ecs)
5589 {
5590 /* Make sure normal_stop is called if we get a QUIT handled before
5591 reaching resume. */
5592 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5593
5594 /* Save the pc before execution, to compare with pc after stop. */
5595 ecs->event_thread->prev_pc
5596 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5597
5598 /* If we did not do break;, it means we should keep running the
5599 inferior and not return to the debugger. */
5600
5601 if (ecs->event_thread->control.trap_expected
5602 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5603 {
5604 /* We took a signal (which we are supposed to pass through to
5605 the inferior, else we'd not get here) and we haven't yet
5606 gotten our trap. Simply continue. */
5607
5608 discard_cleanups (old_cleanups);
5609 resume (currently_stepping (ecs->event_thread),
5610 ecs->event_thread->suspend.stop_signal);
5611 }
5612 else
5613 {
5614 /* Either the trap was not expected, but we are continuing
5615 anyway (the user asked that this signal be passed to the
5616 child)
5617 -- or --
5618 The signal was SIGTRAP, i.e. it was our signal, but we
5619 decided we should resume from it.
5620
5621 We're going to run this baby now!
5622
5623 Note that insert_breakpoints won't try to re-insert
5624 already inserted breakpoints. Therefore, we don't
5625 care if breakpoints were already inserted, or not. */
5626
5627 if (ecs->event_thread->stepping_over_breakpoint)
5628 {
5629 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5630
5631 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5632 /* Since we can't do a displaced step, we have to remove
5633 the breakpoint while we step it. To keep things
5634 simple, we remove them all. */
5635 remove_breakpoints ();
5636 }
5637 else
5638 {
5639 volatile struct gdb_exception e;
5640
5641 /* Stop stepping when inserting breakpoints
5642 has failed. */
5643 TRY_CATCH (e, RETURN_MASK_ERROR)
5644 {
5645 insert_breakpoints ();
5646 }
5647 if (e.reason < 0)
5648 {
5649 exception_print (gdb_stderr, e);
5650 stop_stepping (ecs);
5651 return;
5652 }
5653 }
5654
5655 ecs->event_thread->control.trap_expected
5656 = ecs->event_thread->stepping_over_breakpoint;
5657
5658 /* Do not deliver TARGET_SIGNAL_TRAP (except when the user explicitly
5659 specifies that such a signal should be delivered to the
5660 target program).
5661 
5662 Typically, this would occur when a user is debugging a
5663 target monitor on a simulator: the target monitor sets a
5664 breakpoint; the simulator encounters this breakpoint and
5665 halts the simulation handing control to GDB; GDB, noting
5666 that the breakpoint isn't valid, returns control back to the
5667 simulator; the simulator then delivers the hardware
5668 equivalent of a TARGET_SIGNAL_TRAP to the program being debugged. */
5669
5670 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5671 && !signal_program[ecs->event_thread->suspend.stop_signal])
5672 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5673
5674 discard_cleanups (old_cleanups);
5675 resume (currently_stepping (ecs->event_thread),
5676 ecs->event_thread->suspend.stop_signal);
5677 }
5678
5679 prepare_to_wait (ecs);
5680 }
5681
5682 /* This function normally comes after a resume, before
5683 handle_inferior_event exits. It takes care of any last bits of
5684 housekeeping, and sets the all-important wait_some_more flag. */
5685
5686 static void
5687 prepare_to_wait (struct execution_control_state *ecs)
5688 {
5689 if (debug_infrun)
5690 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5691
5692 /* This is the old end of the while loop. Let everybody know we
5693 want to wait for the inferior some more and get called again
5694 soon. */
5695 ecs->wait_some_more = 1;
5696 }
5697
5698 /* Several print_*_reason functions to print why the inferior has stopped.
5699 We always print something when the inferior exits, or receives a signal.
5700 The rest of the cases are dealt with later on in normal_stop and
5701 print_it_typical. Ideally there should be a call to one of these
5702 print_*_reason functions from handle_inferior_event each time
5703 stop_stepping is called. */
5704
5705 /* Print why the inferior has stopped.
5706 We are done with a step/next/si/ni command. On the CLI nothing is
5707 printed here; in MI a "reason" field is emitted, but only if we are
5708 not in the middle of doing a "step n" operation for n > 1. */
5709
5710 static void
5711 print_end_stepping_range_reason (void)
5712 {
5713 if ((!inferior_thread ()->step_multi
5714 || !inferior_thread ()->control.stop_step)
5715 && ui_out_is_mi_like_p (current_uiout))
5716 ui_out_field_string (current_uiout, "reason",
5717 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5718 }
5719
5720 /* The inferior was terminated by a signal, print why it stopped. */
5721
5722 static void
5723 print_signal_exited_reason (enum target_signal siggnal)
5724 {
5725 struct ui_out *uiout = current_uiout;
5726
5727 annotate_signalled ();
5728 if (ui_out_is_mi_like_p (uiout))
5729 ui_out_field_string
5730 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5731 ui_out_text (uiout, "\nProgram terminated with signal ");
5732 annotate_signal_name ();
5733 ui_out_field_string (uiout, "signal-name",
5734 target_signal_to_name (siggnal));
5735 annotate_signal_name_end ();
5736 ui_out_text (uiout, ", ");
5737 annotate_signal_string ();
5738 ui_out_field_string (uiout, "signal-meaning",
5739 target_signal_to_string (siggnal));
5740 annotate_signal_string_end ();
5741 ui_out_text (uiout, ".\n");
5742 ui_out_text (uiout, "The program no longer exists.\n");
5743 }
5744
5745 /* The inferior program is finished, print why it stopped. */
5746
5747 static void
5748 print_exited_reason (int exitstatus)
5749 {
5750 struct inferior *inf = current_inferior ();
5751 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5752 struct ui_out *uiout = current_uiout;
5753
5754 annotate_exited (exitstatus);
5755 if (exitstatus)
5756 {
5757 if (ui_out_is_mi_like_p (uiout))
5758 ui_out_field_string (uiout, "reason",
5759 async_reason_lookup (EXEC_ASYNC_EXITED));
5760 ui_out_text (uiout, "[Inferior ");
5761 ui_out_text (uiout, plongest (inf->num));
5762 ui_out_text (uiout, " (");
5763 ui_out_text (uiout, pidstr);
5764 ui_out_text (uiout, ") exited with code ");
5765 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5766 ui_out_text (uiout, "]\n");
5767 }
5768 else
5769 {
5770 if (ui_out_is_mi_like_p (uiout))
5771 ui_out_field_string
5772 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5773 ui_out_text (uiout, "[Inferior ");
5774 ui_out_text (uiout, plongest (inf->num));
5775 ui_out_text (uiout, " (");
5776 ui_out_text (uiout, pidstr);
5777 ui_out_text (uiout, ") exited normally]\n");
5778 }
5779 /* Support the --return-child-result option. */
5780 return_child_result_value = exitstatus;
5781 }
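
/* For illustration, the CLI output built above looks like this
   (inferior number and pid are example values; the pid string comes
   from target_pid_to_str and is target dependent):

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]

   A nonzero exit status is printed in octal, per the "0%o" format.  */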
5782
5783 /* Signal received, print why the inferior has stopped. The signal table
5784 tells us to print about it. */
5785
5786 static void
5787 print_signal_received_reason (enum target_signal siggnal)
5788 {
5789 struct ui_out *uiout = current_uiout;
5790
5791 annotate_signal ();
5792
5793 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5794 {
5795 struct thread_info *t = inferior_thread ();
5796
5797 ui_out_text (uiout, "\n[");
5798 ui_out_field_string (uiout, "thread-name",
5799 target_pid_to_str (t->ptid));
5800 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5801 ui_out_text (uiout, " stopped");
5802 }
5803 else
5804 {
5805 ui_out_text (uiout, "\nProgram received signal ");
5806 annotate_signal_name ();
5807 if (ui_out_is_mi_like_p (uiout))
5808 ui_out_field_string
5809 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5810 ui_out_field_string (uiout, "signal-name",
5811 target_signal_to_name (siggnal));
5812 annotate_signal_name_end ();
5813 ui_out_text (uiout, ", ");
5814 annotate_signal_string ();
5815 ui_out_field_string (uiout, "signal-meaning",
5816 target_signal_to_string (siggnal));
5817 annotate_signal_string_end ();
5818 }
5819 ui_out_text (uiout, ".\n");
5820 }
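
/* For illustration, a typical message produced above reads:

     Program received signal SIGSEGV, Segmentation fault.

   with the two fields supplied by target_signal_to_name and
   target_signal_to_string respectively.  */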
5821
5822 /* Reverse execution: target ran out of history info, print why the inferior
5823 has stopped. */
5824
5825 static void
5826 print_no_history_reason (void)
5827 {
5828 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5829 }
5830
5831 /* Here to return control to GDB when the inferior stops for real.
5832 Print appropriate messages, remove breakpoints, give terminal our modes.
5833
5834 The global stop_print_frame, when set, means print the executing
5835 frame (pc, function, args, file, line number and line text). */
5838
5839 void
5840 normal_stop (void)
5841 {
5842 struct target_waitstatus last;
5843 ptid_t last_ptid;
5844 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5845
5846 get_last_target_status (&last_ptid, &last);
5847
5848 /* If an exception is thrown from this point on, make sure to
5849 propagate GDB's knowledge of the executing state to the
5850 frontend/user running state. A QUIT is an easy exception to see
5851 here, so do this before any filtered output. */
5852 if (!non_stop)
5853 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5854 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5855 && last.kind != TARGET_WAITKIND_EXITED
5856 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5857 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5858
5859 /* In non-stop mode, we don't want GDB to switch threads behind the
5860 user's back, to avoid races where the user is typing a command to
5861 apply to thread x, but GDB switches to thread y before the user
5862 finishes entering the command. */
5863
5864 /* As with the notification of thread events, we want to delay
5865 notifying the user that we've switched thread context until
5866 the inferior actually stops.
5867
5868 There's no point in saying anything if the inferior has exited.
5869 Note that SIGNALLED here means "exited with a signal", not
5870 "received a signal". */
5871 if (!non_stop
5872 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5873 && target_has_execution
5874 && last.kind != TARGET_WAITKIND_SIGNALLED
5875 && last.kind != TARGET_WAITKIND_EXITED
5876 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5877 {
5878 target_terminal_ours_for_output ();
5879 printf_filtered (_("[Switching to %s]\n"),
5880 target_pid_to_str (inferior_ptid));
5881 annotate_thread_changed ();
5882 previous_inferior_ptid = inferior_ptid;
5883 }
5884
5885 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
5886 {
5887 gdb_assert (sync_execution || !target_can_async_p ());
5888
5889 target_terminal_ours_for_output ();
5890 printf_filtered (_("No unwaited-for children left.\n"));
5891 }
5892
5893 if (!breakpoints_always_inserted_mode () && target_has_execution)
5894 {
5895 if (remove_breakpoints ())
5896 {
5897 target_terminal_ours_for_output ();
5898 printf_filtered (_("Cannot remove breakpoints because "
5899 "program is no longer writable.\nFurther "
5900 "execution is probably impossible.\n"));
5901 }
5902 }
5903
5904 /* If an auto-display called a function and that got a signal,
5905 delete that auto-display to avoid an infinite recursion. */
5906
5907 if (stopped_by_random_signal)
5908 disable_current_display ();
5909
5910 /* Don't print a message if in the middle of doing a "step n"
5911 operation for n > 1. */
5912 if (target_has_execution
5913 && last.kind != TARGET_WAITKIND_SIGNALLED
5914 && last.kind != TARGET_WAITKIND_EXITED
5915 && inferior_thread ()->step_multi
5916 && inferior_thread ()->control.stop_step)
5917 goto done;
5918
5919 target_terminal_ours ();
5920 async_enable_stdin ();
5921
5922 /* Set the current source location. This will also happen if we
5923 display the frame below, but the current SAL will be incorrect
5924 during a user hook-stop function. */
5925 if (has_stack_frames () && !stop_stack_dummy)
5926 set_current_sal_from_frame (get_current_frame (), 1);
5927
5928 /* Let the user/frontend see the threads as stopped. */
5929 do_cleanups (old_chain);
5930
5931 /* Look up the hook_stop and run it (CLI internally handles problem
5932 of stop_command's pre-hook not existing). */
5933 if (stop_command)
5934 catch_errors (hook_stop_stub, stop_command,
5935 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5936
5937 if (!has_stack_frames ())
5938 goto done;
5939
5940 if (last.kind == TARGET_WAITKIND_SIGNALLED
5941 || last.kind == TARGET_WAITKIND_EXITED)
5942 goto done;
5943
5944 /* Select innermost stack frame - i.e., current frame is frame 0,
5945 and current location is based on that.
5946 Don't do this on return from a stack dummy routine,
5947 or if the program has exited. */
5948
5949 if (!stop_stack_dummy)
5950 {
5951 select_frame (get_current_frame ());
5952
5953 /* Print current location without a level number, if
5954 we have changed functions or hit a breakpoint.
5955 Print source line if we have one.
5956 bpstat_print() contains the logic deciding in detail
5957 what to print, based on the event(s) that just occurred. */
5958
5959 /* If --batch-silent is enabled then there's no need to print the current
5960 source location, and trying to do so risks causing an error message about
5961 missing source files. */
5962 if (stop_print_frame && !batch_silent)
5963 {
5964 int bpstat_ret;
5965 int source_flag;
5966 int do_frame_printing = 1;
5967 struct thread_info *tp = inferior_thread ();
5968
5969 bpstat_ret = bpstat_print (tp->control.stop_bpstat, last.kind);
5970 switch (bpstat_ret)
5971 {
5972 case PRINT_UNKNOWN:
5973 /* FIXME: cagney/2002-12-01: A frame ID does (or should) carry
5974 around the function, and the frame comparison should (or could)
5975 use that, making the separate step_start_function check redundant. */
5976 if (tp->control.stop_step
5977 && frame_id_eq (tp->control.step_frame_id,
5978 get_frame_id (get_current_frame ()))
5979 && step_start_function == find_pc_function (stop_pc))
5980 source_flag = SRC_LINE; /* Finished step, just
5981 print source line. */
5982 else
5983 source_flag = SRC_AND_LOC; /* Print location and
5984 source line. */
5985 break;
5986 case PRINT_SRC_AND_LOC:
5987 source_flag = SRC_AND_LOC; /* Print location and
5988 source line. */
5989 break;
5990 case PRINT_SRC_ONLY:
5991 source_flag = SRC_LINE;
5992 break;
5993 case PRINT_NOTHING:
5994 source_flag = SRC_LINE; /* something bogus */
5995 do_frame_printing = 0;
5996 break;
5997 default:
5998 internal_error (__FILE__, __LINE__, _("Unknown value."));
5999 }
6000
6001 /* The behavior of this routine with respect to the source
6002 flag is:
6003 SRC_LINE: Print only source line
6004 LOCATION: Print only location
6005 SRC_AND_LOC: Print location and source line. */
6006 if (do_frame_printing)
6007 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
6008
6009 /* Display the auto-display expressions. */
6010 do_displays ();
6011 }
6012 }
6013
6014 /* Save the function value return registers, if we care.
6015 We might be about to restore their previous contents. */
6016 if (inferior_thread ()->control.proceed_to_finish
6017 && execution_direction != EXEC_REVERSE)
6018 {
6019 /* This should not be necessary. */
6020 if (stop_registers)
6021 regcache_xfree (stop_registers);
6022
6023 /* NB: The copy goes through to the target picking up the value of
6024 all the registers. */
6025 stop_registers = regcache_dup (get_current_regcache ());
6026 }
6027
6028 if (stop_stack_dummy == STOP_STACK_DUMMY)
6029 {
6030 /* Pop the empty frame that contains the stack dummy.
6031 This also restores inferior state prior to the call
6032 (struct infcall_suspend_state). */
6033 struct frame_info *frame = get_current_frame ();
6034
6035 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6036 frame_pop (frame);
6037 /* frame_pop() calls reinit_frame_cache as the last thing it
6038 does which means there's currently no selected frame. We
6039 don't need to re-establish a selected frame if the dummy call
6040 returns normally, that will be done by
6041 restore_infcall_control_state. However, we do have to handle
6042 the case where the dummy call is returning after being
6043 stopped (e.g. the dummy call previously hit a breakpoint).
6044 We can't know which case we have so just always re-establish
6045 a selected frame here. */
6046 select_frame (get_current_frame ());
6047 }
6048
6049 done:
6050 annotate_stopped ();
6051
6052 /* Suppress the stop observer if we're in the middle of:
6053
6054 - a step n (n > 1), as there are still more steps to be done.
6055
6056 - a "finish" command, as the observer will be called in
6057 finish_command_continuation, so it can include the inferior
6058 function's return value.
6059
6060 - calling an inferior function, as we pretend the inferior didn't
6061 run at all. The return value of the call is handled by the
6062 expression evaluator, through call_function_by_hand. */
6063
6064 if (!target_has_execution
6065 || last.kind == TARGET_WAITKIND_SIGNALLED
6066 || last.kind == TARGET_WAITKIND_EXITED
6067 || last.kind == TARGET_WAITKIND_NO_RESUMED
6068 || (!(inferior_thread ()->step_multi
6069 && inferior_thread ()->control.stop_step)
6070 && !(inferior_thread ()->control.stop_bpstat
6071 && inferior_thread ()->control.proceed_to_finish)
6072 && !inferior_thread ()->control.in_infcall))
6073 {
6074 if (!ptid_equal (inferior_ptid, null_ptid))
6075 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6076 stop_print_frame);
6077 else
6078 observer_notify_normal_stop (NULL, stop_print_frame);
6079 }
6080
6081 if (target_has_execution)
6082 {
6083 if (last.kind != TARGET_WAITKIND_SIGNALLED
6084 && last.kind != TARGET_WAITKIND_EXITED)
6085 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6086 Delete any breakpoint that is to be deleted at the next stop. */
6087 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6088 }
6089
6090 /* Try to get rid of automatically added inferiors that are no
6091 longer needed. Keeping those around slows down things linearly.
6092 Note that this never removes the current inferior. */
6093 prune_inferiors ();
6094 }
6095
6096 static int
6097 hook_stop_stub (void *cmd)
6098 {
6099 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6100 return (0);
6101 }
6102 \f
6103 int
6104 signal_stop_state (int signo)
6105 {
6106 return signal_stop[signo];
6107 }
6108
6109 int
6110 signal_print_state (int signo)
6111 {
6112 return signal_print[signo];
6113 }
6114
6115 int
6116 signal_pass_state (int signo)
6117 {
6118 return signal_program[signo];
6119 }
6120
6121 static void
6122 signal_cache_update (int signo)
6123 {
6124 if (signo == -1)
6125 {
6126 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6127 signal_cache_update (signo);
6128
6129 return;
6130 }
6131
6132 signal_pass[signo] = (signal_stop[signo] == 0
6133 && signal_print[signo] == 0
6134 && signal_program[signo] == 1);
6135 }
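
/* For illustration, after "handle SIGUSR1 nostop noprint pass" the
   entry for that signal has signal_stop == 0, signal_print == 0 and
   signal_program == 1, so the computation above sets signal_pass to 1,
   i.e. the target is free to deliver the signal straight to the
   inferior without involving GDB.  */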
6136
6137 int
6138 signal_stop_update (int signo, int state)
6139 {
6140 int ret = signal_stop[signo];
6141
6142 signal_stop[signo] = state;
6143 signal_cache_update (signo);
6144 return ret;
6145 }
6146
6147 int
6148 signal_print_update (int signo, int state)
6149 {
6150 int ret = signal_print[signo];
6151
6152 signal_print[signo] = state;
6153 signal_cache_update (signo);
6154 return ret;
6155 }
6156
6157 int
6158 signal_pass_update (int signo, int state)
6159 {
6160 int ret = signal_program[signo];
6161
6162 signal_program[signo] = state;
6163 signal_cache_update (signo);
6164 return ret;
6165 }
6166
6167 static void
6168 sig_print_header (void)
6169 {
6170 printf_filtered (_("Signal Stop\tPrint\tPass "
6171 "to program\tDescription\n"));
6172 }
6173
6174 static void
6175 sig_print_info (enum target_signal oursig)
6176 {
6177 const char *name = target_signal_to_name (oursig);
6178 int name_padding = 13 - strlen (name);
6179
6180 if (name_padding <= 0)
6181 name_padding = 0;
6182
6183 printf_filtered ("%s", name);
6184 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6185 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6186 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6187 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6188 printf_filtered ("%s\n", target_signal_to_string (oursig));
6189 }
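
/* For illustration, "info signals" prints one such row per signal; with
   the default settings the SIGINT row looks roughly like:

     Signal        Stop      Print   Pass to program Description
     SIGINT        Yes       Yes     No              Interrupt

   SIGINT and SIGTRAP are not passed by default because the debugger
   itself uses them, as the query in handle_command below notes.  */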
6190
6191 /* Specify how various signals in the inferior should be handled. */
6192
6193 static void
6194 handle_command (char *args, int from_tty)
6195 {
6196 char **argv;
6197 int digits, wordlen;
6198 int sigfirst, signum, siglast;
6199 enum target_signal oursig;
6200 int allsigs;
6201 int nsigs;
6202 unsigned char *sigs;
6203 struct cleanup *old_chain;
6204
6205 if (args == NULL)
6206 {
6207 error_no_arg (_("signal to handle"));
6208 }
6209
6210 /* Allocate and zero an array of flags for which signals to handle. */
6211
6212 nsigs = (int) TARGET_SIGNAL_LAST;
6213 sigs = (unsigned char *) alloca (nsigs);
6214 memset (sigs, 0, nsigs);
6215
6216 /* Break the command line up into args. */
6217
6218 argv = gdb_buildargv (args);
6219 old_chain = make_cleanup_freeargv (argv);
6220
6221 /* Walk through the args, looking for signal oursigs, signal names, and
6222 actions. Signal numbers and signal names may be interspersed with
6223 actions, with the actions being performed for all signals cumulatively
6224 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6225
6226 while (*argv != NULL)
6227 {
6228 wordlen = strlen (*argv);
6229 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6230 {;
6231 }
6232 allsigs = 0;
6233 sigfirst = siglast = -1;
6234
6235 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6236 {
6237 /* Apply action to all signals except those used by the
6238 debugger. Silently skip those. */
6239 allsigs = 1;
6240 sigfirst = 0;
6241 siglast = nsigs - 1;
6242 }
6243 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6244 {
6245 SET_SIGS (nsigs, sigs, signal_stop);
6246 SET_SIGS (nsigs, sigs, signal_print);
6247 }
6248 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6249 {
6250 UNSET_SIGS (nsigs, sigs, signal_program);
6251 }
6252 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6253 {
6254 SET_SIGS (nsigs, sigs, signal_print);
6255 }
6256 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6257 {
6258 SET_SIGS (nsigs, sigs, signal_program);
6259 }
6260 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6261 {
6262 UNSET_SIGS (nsigs, sigs, signal_stop);
6263 }
6264 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6265 {
6266 SET_SIGS (nsigs, sigs, signal_program);
6267 }
6268 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6269 {
6270 UNSET_SIGS (nsigs, sigs, signal_print);
6271 UNSET_SIGS (nsigs, sigs, signal_stop);
6272 }
6273 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6274 {
6275 UNSET_SIGS (nsigs, sigs, signal_program);
6276 }
6277 else if (digits > 0)
6278 {
6279 /* It is numeric. The numeric signal refers to our own
6280 internal signal numbering from target.h, not to the host/target
6281 signal number. This is a feature; users really should be
6282 using symbolic names anyway, and the common ones like
6283 SIGHUP, SIGINT, SIGALRM, etc. will work right regardless. */
6284
6285 sigfirst = siglast = (int)
6286 target_signal_from_command (atoi (*argv));
6287 if ((*argv)[digits] == '-')
6288 {
6289 siglast = (int)
6290 target_signal_from_command (atoi ((*argv) + digits + 1));
6291 }
6292 if (sigfirst > siglast)
6293 {
6294 /* Bet he didn't figure we'd think of this case... */
6295 signum = sigfirst;
6296 sigfirst = siglast;
6297 siglast = signum;
6298 }
6299 }
6300 else
6301 {
6302 oursig = target_signal_from_name (*argv);
6303 if (oursig != TARGET_SIGNAL_UNKNOWN)
6304 {
6305 sigfirst = siglast = (int) oursig;
6306 }
6307 else
6308 {
6309 /* Not a number and not a recognized flag word => complain. */
6310 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6311 }
6312 }
6313
6314 /* If any signal numbers or symbol names were found, set flags for
6315 which signals to apply actions to. */
6316
6317 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6318 {
6319 switch ((enum target_signal) signum)
6320 {
6321 case TARGET_SIGNAL_TRAP:
6322 case TARGET_SIGNAL_INT:
6323 if (!allsigs && !sigs[signum])
6324 {
6325 if (query (_("%s is used by the debugger.\n\
6326 Are you sure you want to change it? "),
6327 target_signal_to_name ((enum target_signal) signum)))
6328 {
6329 sigs[signum] = 1;
6330 }
6331 else
6332 {
6333 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6334 gdb_flush (gdb_stdout);
6335 }
6336 }
6337 break;
6338 case TARGET_SIGNAL_0:
6339 case TARGET_SIGNAL_DEFAULT:
6340 case TARGET_SIGNAL_UNKNOWN:
6341 /* Make sure that "all" doesn't print these. */
6342 break;
6343 default:
6344 sigs[signum] = 1;
6345 break;
6346 }
6347 }
6348
6349 argv++;
6350 }
6351
6352 for (signum = 0; signum < nsigs; signum++)
6353 if (sigs[signum])
6354 {
6355 signal_cache_update (-1);
6356 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6357
6358 if (from_tty)
6359 {
6360 /* Show the results. */
6361 sig_print_header ();
6362 for (; signum < nsigs; signum++)
6363 if (sigs[signum])
6364 sig_print_info (signum);
6365 }
6366
6367 break;
6368 }
6369
6370 do_cleanups (old_chain);
6371 }
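
/* For illustration, some accepted forms of the command parsed above
   (signal names, internal signal numbers and <LOW>-<HIGH> ranges may be
   mixed freely with the action keywords):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGINT stop print nopass
     (gdb) handle 14-17 noprint
     (gdb) handle all ignore

   Keywords may be abbreviated as long as they remain unambiguous; that
   is what the wordlen/strncmp tests above implement.  */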
6372
6373 static void
6374 xdb_handle_command (char *args, int from_tty)
6375 {
6376 char **argv;
6377 struct cleanup *old_chain;
6378
6379 if (args == NULL)
6380 error_no_arg (_("xdb command"));
6381
6382 /* Break the command line up into args. */
6383
6384 argv = gdb_buildargv (args);
6385 old_chain = make_cleanup_freeargv (argv);
6386 if (argv[1] != (char *) NULL)
6387 {
6388 char *argBuf;
6389 int bufLen;
6390
6391 bufLen = strlen (argv[0]) + 20;
6392 argBuf = (char *) xmalloc (bufLen);
6393 if (argBuf)
6394 {
6395 int validFlag = 1;
6396 enum target_signal oursig;
6397
6398 oursig = target_signal_from_name (argv[0]);
6399 memset (argBuf, 0, bufLen);
6400 if (strcmp (argv[1], "Q") == 0)
6401 sprintf (argBuf, "%s %s", argv[0], "noprint");
6402 else
6403 {
6404 if (strcmp (argv[1], "s") == 0)
6405 {
6406 if (!signal_stop[oursig])
6407 sprintf (argBuf, "%s %s", argv[0], "stop");
6408 else
6409 sprintf (argBuf, "%s %s", argv[0], "nostop");
6410 }
6411 else if (strcmp (argv[1], "i") == 0)
6412 {
6413 if (!signal_program[oursig])
6414 sprintf (argBuf, "%s %s", argv[0], "pass");
6415 else
6416 sprintf (argBuf, "%s %s", argv[0], "nopass");
6417 }
6418 else if (strcmp (argv[1], "r") == 0)
6419 {
6420 if (!signal_print[oursig])
6421 sprintf (argBuf, "%s %s", argv[0], "print");
6422 else
6423 sprintf (argBuf, "%s %s", argv[0], "noprint");
6424 }
6425 else
6426 validFlag = 0;
6427 }
6428 if (validFlag)
6429 handle_command (argBuf, from_tty);
6430 else
6431 printf_filtered (_("Invalid signal handling flag.\n"));
6432 if (argBuf)
6433 xfree (argBuf);
6434 }
6435 }
6436 do_cleanups (old_chain);
6437 }
6438
6439 /* Print current contents of the tables set by the handle command.
6440 It is possible we should just be printing signals actually used
6441 by the current target (but for things to work right when switching
6442 targets, all signals should be in the signal tables). */
6443
6444 static void
6445 signals_info (char *signum_exp, int from_tty)
6446 {
6447 enum target_signal oursig;
6448
6449 sig_print_header ();
6450
6451 if (signum_exp)
6452 {
6453 /* First see if this is a symbol name. */
6454 oursig = target_signal_from_name (signum_exp);
6455 if (oursig == TARGET_SIGNAL_UNKNOWN)
6456 {
6457 /* No, try numeric. */
6458 oursig =
6459 target_signal_from_command (parse_and_eval_long (signum_exp));
6460 }
6461 sig_print_info (oursig);
6462 return;
6463 }
6464
6465 printf_filtered ("\n");
6466 /* These ugly casts brought to you by the native VAX compiler. */
6467 for (oursig = TARGET_SIGNAL_FIRST;
6468 (int) oursig < (int) TARGET_SIGNAL_LAST;
6469 oursig = (enum target_signal) ((int) oursig + 1))
6470 {
6471 QUIT;
6472
6473 if (oursig != TARGET_SIGNAL_UNKNOWN
6474 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6475 sig_print_info (oursig);
6476 }
6477
6478 printf_filtered (_("\nUse the \"handle\" command "
6479 "to change these tables.\n"));
6480 }
6481
6482 /* Check if it makes sense to read $_siginfo from the current thread
6483 at this point. If not, throw an error. */
6484
6485 static void
6486 validate_siginfo_access (void)
6487 {
6488 /* No current inferior, no siginfo. */
6489 if (ptid_equal (inferior_ptid, null_ptid))
6490 error (_("No thread selected."));
6491
6492 /* Don't try to read from a dead thread. */
6493 if (is_exited (inferior_ptid))
6494 error (_("The current thread has terminated."));
6495
6496 /* ... or from a spinning thread. */
6497 if (is_running (inferior_ptid))
6498 error (_("Selected thread is running."));
6499 }
6500
6501 /* The $_siginfo convenience variable is a bit special. We don't know
6502 for sure the type of the value until we actually have a chance to
6503 fetch the data. The type can change depending on gdbarch, so it is
6504 also dependent on which thread you have selected. We solve this by:
6505
6506 1. making $_siginfo be an internalvar that creates a new value on
6507 access.
6508
6509 2. making the value of $_siginfo be an lval_computed value. */
6510
6511 /* This function implements the lval_computed support for reading a
6512 $_siginfo value. */
6513
6514 static void
6515 siginfo_value_read (struct value *v)
6516 {
6517 LONGEST transferred;
6518
6519 validate_siginfo_access ();
6520
6521 transferred =
6522 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6523 NULL,
6524 value_contents_all_raw (v),
6525 value_offset (v),
6526 TYPE_LENGTH (value_type (v)));
6527
6528 if (transferred != TYPE_LENGTH (value_type (v)))
6529 error (_("Unable to read siginfo"));
6530 }
6531
6532 /* This function implements the lval_computed support for writing a
6533 $_siginfo value. */
6534
6535 static void
6536 siginfo_value_write (struct value *v, struct value *fromval)
6537 {
6538 LONGEST transferred;
6539
6540 validate_siginfo_access ();
6541
6542 transferred = target_write (&current_target,
6543 TARGET_OBJECT_SIGNAL_INFO,
6544 NULL,
6545 value_contents_all_raw (fromval),
6546 value_offset (v),
6547 TYPE_LENGTH (value_type (fromval)));
6548
6549 if (transferred != TYPE_LENGTH (value_type (fromval)))
6550 error (_("Unable to write siginfo"));
6551 }
6552
6553 static const struct lval_funcs siginfo_value_funcs =
6554 {
6555 siginfo_value_read,
6556 siginfo_value_write
6557 };
6558
6559 /* Return a new value with the correct type for the siginfo object of
6560 the current thread using architecture GDBARCH. Return a void value
6561 if there's no object available. */
6562
6563 static struct value *
6564 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6565 {
6566 if (target_has_stack
6567 && !ptid_equal (inferior_ptid, null_ptid)
6568 && gdbarch_get_siginfo_type_p (gdbarch))
6569 {
6570 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6571
6572 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6573 }
6574
6575 return allocate_value (builtin_type (gdbarch)->builtin_void);
6576 }
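
/* For illustration, on a target whose gdbarch provides a siginfo type
   (e.g. GNU/Linux) the value built above lets the user do things like:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   The exact member names depend on the type returned by
   gdbarch_get_siginfo_type for the current architecture.  */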
6577
6578 \f
6579 /* infcall_suspend_state contains state about the program itself like its
6580 registers and any signal it received when it last stopped.
6581 This state must be restored regardless of how the inferior function call
6582 ends (either successfully, or after it hits a breakpoint or signal)
6583 if the program is to properly continue where it left off. */
6584
6585 struct infcall_suspend_state
6586 {
6587 struct thread_suspend_state thread_suspend;
6588 struct inferior_suspend_state inferior_suspend;
6589
6590 /* Other fields: */
6591 CORE_ADDR stop_pc;
6592 struct regcache *registers;
6593
6594 /* Format of SIGINFO_DATA or NULL if it is not present. */
6595 struct gdbarch *siginfo_gdbarch;
6596
6597 /* The inferior's siginfo, in the format of SIGINFO_GDBARCH, with a length
6598 of TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For a different gdbarch
6599 the content would be invalid. */
6600 gdb_byte *siginfo_data;
6601 };
6602
6603 struct infcall_suspend_state *
6604 save_infcall_suspend_state (void)
6605 {
6606 struct infcall_suspend_state *inf_state;
6607 struct thread_info *tp = inferior_thread ();
6608 struct inferior *inf = current_inferior ();
6609 struct regcache *regcache = get_current_regcache ();
6610 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6611 gdb_byte *siginfo_data = NULL;
6612
6613 if (gdbarch_get_siginfo_type_p (gdbarch))
6614 {
6615 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6616 size_t len = TYPE_LENGTH (type);
6617 struct cleanup *back_to;
6618
6619 siginfo_data = xmalloc (len);
6620 back_to = make_cleanup (xfree, siginfo_data);
6621
6622 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6623 siginfo_data, 0, len) == len)
6624 discard_cleanups (back_to);
6625 else
6626 {
6627 /* Errors ignored. */
6628 do_cleanups (back_to);
6629 siginfo_data = NULL;
6630 }
6631 }
6632
6633 inf_state = XZALLOC (struct infcall_suspend_state);
6634
6635 if (siginfo_data)
6636 {
6637 inf_state->siginfo_gdbarch = gdbarch;
6638 inf_state->siginfo_data = siginfo_data;
6639 }
6640
6641 inf_state->thread_suspend = tp->suspend;
6642 inf_state->inferior_suspend = inf->suspend;
6643
6644 /* run_inferior_call will not use this signal anyway, since it calls
6645 `proceed' with TARGET_SIGNAL_0. */
6646 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6647
6648 inf_state->stop_pc = stop_pc;
6649
6650 inf_state->registers = regcache_dup (regcache);
6651
6652 return inf_state;
6653 }
6654
6655 /* Restore inferior session state to INF_STATE. */
6656
6657 void
6658 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6659 {
6660 struct thread_info *tp = inferior_thread ();
6661 struct inferior *inf = current_inferior ();
6662 struct regcache *regcache = get_current_regcache ();
6663 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6664
6665 tp->suspend = inf_state->thread_suspend;
6666 inf->suspend = inf_state->inferior_suspend;
6667
6668 stop_pc = inf_state->stop_pc;
6669
6670 if (inf_state->siginfo_gdbarch == gdbarch)
6671 {
6672 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6673 size_t len = TYPE_LENGTH (type);
6674
6675 /* Errors ignored. */
6676 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6677 inf_state->siginfo_data, 0, len);
6678 }
6679
6680 /* The inferior can be gone if the user types "print exit(0)"
6681 (and perhaps other times). */
6682 if (target_has_execution)
6683 /* NB: The register write goes through to the target. */
6684 regcache_cpy (regcache, inf_state->registers);
6685
6686 discard_infcall_suspend_state (inf_state);
6687 }
6688
6689 static void
6690 do_restore_infcall_suspend_state_cleanup (void *state)
6691 {
6692 restore_infcall_suspend_state (state);
6693 }
6694
6695 struct cleanup *
6696 make_cleanup_restore_infcall_suspend_state
6697 (struct infcall_suspend_state *inf_state)
6698 {
6699 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6700 }
6701
6702 void
6703 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6704 {
6705 regcache_xfree (inf_state->registers);
6706 xfree (inf_state->siginfo_data);
6707 xfree (inf_state);
6708 }
6709
6710 struct regcache *
6711 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6712 {
6713 return inf_state->registers;
6714 }
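/* Editorial sketch (not part of infrun.c), assuming a caller similar to
   the inferior-call code: the suspend state is saved before the call and
   restored through a cleanup, so the registers, the stop PC and any
   pending signal come back no matter how the call ends.  */
#if 0
{
  struct infcall_suspend_state *caller_state;
  struct cleanup *old_chain;

  caller_state = save_infcall_suspend_state ();
  old_chain = make_cleanup_restore_infcall_suspend_state (caller_state);

  /* ... set up the dummy frame and run the inferior call ... */

  /* Running the cleanup restores the saved state and frees it.  */
  do_cleanups (old_chain);
}
#endif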
6715
6716 /* infcall_control_state contains state regarding gdb's control of the
6717 inferior itself like stepping control. It also contains session state like
6718 the user's currently selected frame. */
6719
6720 struct infcall_control_state
6721 {
6722 struct thread_control_state thread_control;
6723 struct inferior_control_state inferior_control;
6724
6725 /* Other fields: */
6726 enum stop_stack_kind stop_stack_dummy;
6727 int stopped_by_random_signal;
6728 int stop_after_trap;
6729
6730 /* ID of the selected frame when the inferior function call was made. */
6731 struct frame_id selected_frame_id;
6732 };
6733
6734 /* Save all of the information associated with the inferior<==>gdb
6735 connection. */
6736
6737 struct infcall_control_state *
6738 save_infcall_control_state (void)
6739 {
6740 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6741 struct thread_info *tp = inferior_thread ();
6742 struct inferior *inf = current_inferior ();
6743
6744 inf_status->thread_control = tp->control;
6745 inf_status->inferior_control = inf->control;
6746
6747 tp->control.step_resume_breakpoint = NULL;
6748 tp->control.exception_resume_breakpoint = NULL;
6749
6750 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6751 chain. If caller's caller is walking the chain, they'll be happier if we
6752 hand them back the original chain when restore_infcall_control_state is
6753 called. */
6754 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6755
6756 /* Other fields: */
6757 inf_status->stop_stack_dummy = stop_stack_dummy;
6758 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6759 inf_status->stop_after_trap = stop_after_trap;
6760
6761 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6762
6763 return inf_status;
6764 }
6765
6766 static int
6767 restore_selected_frame (void *args)
6768 {
6769 struct frame_id *fid = (struct frame_id *) args;
6770 struct frame_info *frame;
6771
6772 frame = frame_find_by_id (*fid);
6773
6774 /* If frame_find_by_id () fails, the previously selected frame no longer
6775 exists (e.g. the stack was clobbered since the call was made). */
6776 if (frame == NULL)
6777 {
6778 warning (_("Unable to restore previously selected frame."));
6779 return 0;
6780 }
6781
6782 select_frame (frame);
6783
6784 return (1);
6785 }
6786
6787 /* Restore inferior session state to INF_STATUS. */
6788
6789 void
6790 restore_infcall_control_state (struct infcall_control_state *inf_status)
6791 {
6792 struct thread_info *tp = inferior_thread ();
6793 struct inferior *inf = current_inferior ();
6794
6795 if (tp->control.step_resume_breakpoint)
6796 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6797
6798 if (tp->control.exception_resume_breakpoint)
6799 tp->control.exception_resume_breakpoint->disposition
6800 = disp_del_at_next_stop;
6801
6802 /* Handle the bpstat_copy of the chain. */
6803 bpstat_clear (&tp->control.stop_bpstat);
6804
6805 tp->control = inf_status->thread_control;
6806 inf->control = inf_status->inferior_control;
6807
6808 /* Other fields: */
6809 stop_stack_dummy = inf_status->stop_stack_dummy;
6810 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6811 stop_after_trap = inf_status->stop_after_trap;
6812
6813 if (target_has_stack)
6814 {
6815 /* The point of catch_errors is that if the stack is clobbered,
6816 walking the stack might encounter a garbage pointer and
6817 error() trying to dereference it. */
6818 if (catch_errors
6819 (restore_selected_frame, &inf_status->selected_frame_id,
6820 "Unable to restore previously selected frame:\n",
6821 RETURN_MASK_ERROR) == 0)
6822 /* Error in restoring the selected frame. Select the innermost
6823 frame. */
6824 select_frame (get_current_frame ());
6825 }
6826
6827 xfree (inf_status);
6828 }
6829
6830 static void
6831 do_restore_infcall_control_state_cleanup (void *sts)
6832 {
6833 restore_infcall_control_state (sts);
6834 }
6835
6836 struct cleanup *
6837 make_cleanup_restore_infcall_control_state
6838 (struct infcall_control_state *inf_status)
6839 {
6840 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6841 }
6842
6843 void
6844 discard_infcall_control_state (struct infcall_control_state *inf_status)
6845 {
6846 if (inf_status->thread_control.step_resume_breakpoint)
6847 inf_status->thread_control.step_resume_breakpoint->disposition
6848 = disp_del_at_next_stop;
6849
6850 if (inf_status->thread_control.exception_resume_breakpoint)
6851 inf_status->thread_control.exception_resume_breakpoint->disposition
6852 = disp_del_at_next_stop;
6853
6854 /* See save_infcall_control_state for info on stop_bpstat. */
6855 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6856
6857 xfree (inf_status);
6858 }
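/* Editorial sketch (not part of infrun.c): the control state follows the
   same save/cleanup pattern, except that the caller decides at the end
   whether to put the old stepping state and selected frame back (by
   letting the restore cleanup run) or to keep the stop state the call
   produced (by calling discard_infcall_control_state instead).  */
#if 0
{
  struct infcall_control_state *inf_status;
  struct cleanup *old_chain;

  inf_status = save_infcall_control_state ();
  old_chain = make_cleanup_restore_infcall_control_state (inf_status);

  /* ... run the inferior call ... */

  /* Keep the new stop state: drop the cleanup and free the saved copy.  */
  discard_cleanups (old_chain);
  discard_infcall_control_state (inf_status);
}
#endif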
6859 \f
6860 int
6861 ptid_match (ptid_t ptid, ptid_t filter)
6862 {
6863 if (ptid_equal (filter, minus_one_ptid))
6864 return 1;
6865 if (ptid_is_pid (filter)
6866 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6867 return 1;
6868 else if (ptid_equal (ptid, filter))
6869 return 1;
6870
6871 return 0;
6872 }
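/* Editorial examples (illustrative only, ptid values made up): FILTER
   acts as a pattern -- minus_one_ptid matches anything, a pid-only ptid
   matches every thread of that process, and anything else must match
   exactly.  */
#if 0
{
  ptid_t lwp = ptid_build (1234, 5678, 0);

  ptid_match (lwp, minus_one_ptid);             /* 1: wildcard filter.  */
  ptid_match (lwp, pid_to_ptid (1234));         /* 1: same process.  */
  ptid_match (lwp, ptid_build (1234, 9999, 0)); /* 0: different LWP.  */
}
#endif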
6873
6874 /* restore_inferior_ptid() will be used by the cleanup machinery
6875 to restore the inferior_ptid value saved in a call to
6876 save_inferior_ptid(). */
6877
6878 static void
6879 restore_inferior_ptid (void *arg)
6880 {
6881 ptid_t *saved_ptid_ptr = arg;
6882
6883 inferior_ptid = *saved_ptid_ptr;
6884 xfree (arg);
6885 }
6886
6887 /* Save the value of inferior_ptid so that it may be restored by a
6888 later call to do_cleanups(). Returns the struct cleanup pointer
6889 needed for later doing the cleanup. */
6890
6891 struct cleanup *
6892 save_inferior_ptid (void)
6893 {
6894 ptid_t *saved_ptid_ptr;
6895
6896 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6897 *saved_ptid_ptr = inferior_ptid;
6898 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6899 }
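/* Editorial sketch (not part of infrun.c): the intended usage pattern is
   to switch inferior_ptid temporarily and let the cleanup put the
   original value back even if an error is thrown in between.  OTHER_PTID
   below is a placeholder for whatever thread the caller wants to operate
   on.  */
#if 0
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = other_ptid;
  /* ... work that temporarily depends on inferior_ptid ... */
  do_cleanups (old_chain);	/* Restores the saved inferior_ptid.  */
}
#endif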
6900 \f
6901
6902 /* User interface for reverse debugging:
6903 Set exec-direction / show exec-direction commands
6904 (errors out unless the target can execute in reverse). */
6905
6906 int execution_direction = EXEC_FORWARD;
6907 static const char exec_forward[] = "forward";
6908 static const char exec_reverse[] = "reverse";
6909 static const char *exec_direction = exec_forward;
6910 static const char *const exec_direction_names[] = {
6911 exec_forward,
6912 exec_reverse,
6913 NULL
6914 };
6915
6916 static void
6917 set_exec_direction_func (char *args, int from_tty,
6918 struct cmd_list_element *cmd)
6919 {
6920 if (target_can_execute_reverse)
6921 {
6922 if (!strcmp (exec_direction, exec_forward))
6923 execution_direction = EXEC_FORWARD;
6924 else if (!strcmp (exec_direction, exec_reverse))
6925 execution_direction = EXEC_REVERSE;
6926 }
6927 else
6928 {
6929 exec_direction = exec_forward;
6930 error (_("Target does not support this operation."));
6931 }
6932 }
6933
6934 static void
6935 show_exec_direction_func (struct ui_file *out, int from_tty,
6936 struct cmd_list_element *cmd, const char *value)
6937 {
6938 switch (execution_direction) {
6939 case EXEC_FORWARD:
6940 fprintf_filtered (out, _("Forward.\n"));
6941 break;
6942 case EXEC_REVERSE:
6943 fprintf_filtered (out, _("Reverse.\n"));
6944 break;
6945 default:
6946 internal_error (__FILE__, __LINE__,
6947 _("bogus execution_direction value: %d"),
6948 (int) execution_direction);
6949 }
6950 }
6951
6952 /* User interface for non-stop mode. */
6953
6954 int non_stop = 0;
6955
6956 static void
6957 set_non_stop (char *args, int from_tty,
6958 struct cmd_list_element *c)
6959 {
6960 if (target_has_execution)
6961 {
6962 non_stop_1 = non_stop;
6963 error (_("Cannot change this setting while the inferior is running."));
6964 }
6965
6966 non_stop = non_stop_1;
6967 }
6968
6969 static void
6970 show_non_stop (struct ui_file *file, int from_tty,
6971 struct cmd_list_element *c, const char *value)
6972 {
6973 fprintf_filtered (file,
6974 _("Controlling the inferior in non-stop mode is %s.\n"),
6975 value);
6976 }
6977
6978 static void
6979 show_schedule_multiple (struct ui_file *file, int from_tty,
6980 struct cmd_list_element *c, const char *value)
6981 {
6982 fprintf_filtered (file, _("Resuming the execution of threads "
6983 "of all processes is %s.\n"), value);
6984 }
6985
6986 void
6987 _initialize_infrun (void)
6988 {
6989 int i;
6990 int numsigs;
6991
6992 add_info ("signals", signals_info, _("\
6993 What debugger does when program gets various signals.\n\
6994 Specify a signal as argument to print info on that signal only."));
6995 add_info_alias ("handle", "signals", 0);
6996
6997 add_com ("handle", class_run, handle_command, _("\
6998 Specify how to handle a signal.\n\
6999 Args are signals and actions to apply to those signals.\n\
7000 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7001 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7002 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7003 The special arg \"all\" is recognized to mean all signals except those\n\
7004 used by the debugger, typically SIGTRAP and SIGINT.\n\
7005 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7006 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7007 Stop means reenter debugger if this signal happens (implies print).\n\
7008 Print means print a message if this signal happens.\n\
7009 Pass means let program see this signal; otherwise program doesn't know.\n\
7010 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7011 Pass and Stop may be combined."));
7012 if (xdb_commands)
7013 {
7014 add_com ("lz", class_info, signals_info, _("\
7015 What debugger does when program gets various signals.\n\
7016 Specify a signal as argument to print info on that signal only."));
7017 add_com ("z", class_run, xdb_handle_command, _("\
7018 Specify how to handle a signal.\n\
7019 Args are signals and actions to apply to those signals.\n\
7020 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7021 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7022 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7023 The special arg \"all\" is recognized to mean all signals except those\n\
7024 used by the debugger, typically SIGTRAP and SIGINT.\n\
7025 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7026 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7027 nopass), \"Q\" (noprint)\n\
7028 Stop means reenter debugger if this signal happens (implies print).\n\
7029 Print means print a message if this signal happens.\n\
7030 Pass means let program see this signal; otherwise program doesn't know.\n\
7031 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7032 Pass and Stop may be combined."));
7033 }
7034
7035 if (!dbx_commands)
7036 stop_command = add_cmd ("stop", class_obscure,
7037 not_just_help_class_command, _("\
7038 There is no `stop' command, but you can set a hook on `stop'.\n\
7039 This allows you to set a list of commands to be run each time execution\n\
7040 of the program stops."), &cmdlist);
7041
7042 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7043 Set inferior debugging."), _("\
7044 Show inferior debugging."), _("\
7045 When non-zero, inferior specific debugging is enabled."),
7046 NULL,
7047 show_debug_infrun,
7048 &setdebuglist, &showdebuglist);
7049
7050 add_setshow_boolean_cmd ("displaced", class_maintenance,
7051 &debug_displaced, _("\
7052 Set displaced stepping debugging."), _("\
7053 Show displaced stepping debugging."), _("\
7054 When non-zero, displaced stepping specific debugging is enabled."),
7055 NULL,
7056 show_debug_displaced,
7057 &setdebuglist, &showdebuglist);
7058
7059 add_setshow_boolean_cmd ("non-stop", no_class,
7060 &non_stop_1, _("\
7061 Set whether gdb controls the inferior in non-stop mode."), _("\
7062 Show whether gdb controls the inferior in non-stop mode."), _("\
7063 When debugging a multi-threaded program and this setting is\n\
7064 off (the default, also called all-stop mode), when one thread stops\n\
7065 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7066 all other threads in the program while you interact with the thread of\n\
7067 interest. When you continue or step a thread, you can allow the other\n\
7068 threads to run, or have them remain stopped, but while you inspect any\n\
7069 thread's state, all threads stop.\n\
7070 \n\
7071 In non-stop mode, when one thread stops, other threads can continue\n\
7072 to run freely. You'll be able to step each thread independently,\n\
7073 leave it stopped or free to run as needed."),
7074 set_non_stop,
7075 show_non_stop,
7076 &setlist,
7077 &showlist);
7078
7079 numsigs = (int) TARGET_SIGNAL_LAST;
7080 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7081 signal_print = (unsigned char *)
7082 xmalloc (sizeof (signal_print[0]) * numsigs);
7083 signal_program = (unsigned char *)
7084 xmalloc (sizeof (signal_program[0]) * numsigs);
7085 signal_pass = (unsigned char *)
7086 xmalloc (sizeof (signal_pass[0]) * numsigs);
7087 for (i = 0; i < numsigs; i++)
7088 {
7089 signal_stop[i] = 1;
7090 signal_print[i] = 1;
7091 signal_program[i] = 1;
7092 }
7093
7094 /* Signals caused by debugger's own actions
7095 should not be given to the program afterwards. */
7096 signal_program[TARGET_SIGNAL_TRAP] = 0;
7097 signal_program[TARGET_SIGNAL_INT] = 0;
7098
7099 /* Signals that are not errors should not normally enter the debugger. */
7100 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7101 signal_print[TARGET_SIGNAL_ALRM] = 0;
7102 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7103 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7104 signal_stop[TARGET_SIGNAL_PROF] = 0;
7105 signal_print[TARGET_SIGNAL_PROF] = 0;
7106 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7107 signal_print[TARGET_SIGNAL_CHLD] = 0;
7108 signal_stop[TARGET_SIGNAL_IO] = 0;
7109 signal_print[TARGET_SIGNAL_IO] = 0;
7110 signal_stop[TARGET_SIGNAL_POLL] = 0;
7111 signal_print[TARGET_SIGNAL_POLL] = 0;
7112 signal_stop[TARGET_SIGNAL_URG] = 0;
7113 signal_print[TARGET_SIGNAL_URG] = 0;
7114 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7115 signal_print[TARGET_SIGNAL_WINCH] = 0;
7116 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7117 signal_print[TARGET_SIGNAL_PRIO] = 0;
7118
7119 /* These signals are used internally by user-level thread
7120 implementations. (See signal(5) on Solaris.) Like the above
7121 signals, a healthy program receives and handles them as part of
7122 its normal operation. */
7123 signal_stop[TARGET_SIGNAL_LWP] = 0;
7124 signal_print[TARGET_SIGNAL_LWP] = 0;
7125 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7126 signal_print[TARGET_SIGNAL_WAITING] = 0;
7127 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7128 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7129
7130 /* Update cached state. */
7131 signal_cache_update (-1);
7132
7133 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7134 &stop_on_solib_events, _("\
7135 Set stopping for shared library events."), _("\
7136 Show stopping for shared library events."), _("\
7137 If nonzero, gdb will give control to the user when the dynamic linker\n\
7138 notifies gdb of shared library events. The most common event of interest\n\
7139 to the user would be loading/unloading of a new library."),
7140 NULL,
7141 show_stop_on_solib_events,
7142 &setlist, &showlist);
7143
7144 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7145 follow_fork_mode_kind_names,
7146 &follow_fork_mode_string, _("\
7147 Set debugger response to a program call of fork or vfork."), _("\
7148 Show debugger response to a program call of fork or vfork."), _("\
7149 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7150 parent - the original process is debugged after a fork\n\
7151 child - the new process is debugged after a fork\n\
7152 The unfollowed process will continue to run.\n\
7153 By default, the debugger will follow the parent process."),
7154 NULL,
7155 show_follow_fork_mode_string,
7156 &setlist, &showlist);
7157
7158 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7159 follow_exec_mode_names,
7160 &follow_exec_mode_string, _("\
7161 Set debugger response to a program call of exec."), _("\
7162 Show debugger response to a program call of exec."), _("\
7163 An exec call replaces the program image of a process.\n\
7164 \n\
7165 follow-exec-mode can be:\n\
7166 \n\
7167 new - the debugger creates a new inferior and rebinds the process\n\
7168 to this new inferior. The program the process was running before\n\
7169 the exec call can be restarted afterwards by restarting the original\n\
7170 inferior.\n\
7171 \n\
7172 same - the debugger keeps the process bound to the same inferior.\n\
7173 The new executable image replaces the previous executable loaded in\n\
7174 the inferior. Restarting the inferior after the exec call restarts\n\
7175 the executable the process was running after the exec call.\n\
7176 \n\
7177 By default, the debugger will use the same inferior."),
7178 NULL,
7179 show_follow_exec_mode_string,
7180 &setlist, &showlist);
7181
7182 add_setshow_enum_cmd ("scheduler-locking", class_run,
7183 scheduler_enums, &scheduler_mode, _("\
7184 Set mode for locking scheduler during execution."), _("\
7185 Show mode for locking scheduler during execution."), _("\
7186 off == no locking (threads may preempt at any time)\n\
7187 on == full locking (no thread except the current thread may run)\n\
7188 step == scheduler locked during every single-step operation.\n\
7189 In this mode, no other thread may run during a step command.\n\
7190 Other threads may run while stepping over a function call ('next')."),
7191 set_schedlock_func, /* traps on target vector */
7192 show_scheduler_mode,
7193 &setlist, &showlist);
7194
7195 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7196 Set mode for resuming threads of all processes."), _("\
7197 Show mode for resuming threads of all processes."), _("\
7198 When on, execution commands (such as 'continue' or 'next') resume all\n\
7199 threads of all processes. When off (which is the default), execution\n\
7200 commands only resume the threads of the current process. The set of\n\
7201 threads that are resumed is further refined by the scheduler-locking\n\
7202 mode (see help set scheduler-locking)."),
7203 NULL,
7204 show_schedule_multiple,
7205 &setlist, &showlist);
7206
7207 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7208 Set mode of the step operation."), _("\
7209 Show mode of the step operation."), _("\
7210 When set, doing a step over a function without debug line information\n\
7211 will stop at the first instruction of that function. Otherwise, the\n\
7212 function is skipped and the step command stops at a different source line."),
7213 NULL,
7214 show_step_stop_if_no_debug,
7215 &setlist, &showlist);
7216
7217 add_setshow_enum_cmd ("displaced-stepping", class_run,
7218 can_use_displaced_stepping_enum,
7219 &can_use_displaced_stepping, _("\
7220 Set debugger's willingness to use displaced stepping."), _("\
7221 Show debugger's willingness to use displaced stepping."), _("\
7222 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7223 supported by the target architecture. If off, gdb will not use displaced\n\
7224 stepping to step over breakpoints, even if such is supported by the target\n\
7225 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7226 if the target architecture supports it and non-stop mode is active, but will not\n\
7227 use it in all-stop mode (see help set non-stop)."),
7228 NULL,
7229 show_can_use_displaced_stepping,
7230 &setlist, &showlist);
7231
7232 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7233 &exec_direction, _("Set direction of execution.\n\
7234 Options are 'forward' or 'reverse'."),
7235 _("Show direction of execution (forward/reverse)."),
7236 _("Tells gdb whether to execute forward or backward."),
7237 set_exec_direction_func, show_exec_direction_func,
7238 &setlist, &showlist);
7239
7240 /* Set/show detach-on-fork: user-settable mode. */
7241
7242 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7243 Set whether gdb will detach the child of a fork."), _("\
7244 Show whether gdb will detach the child of a fork."), _("\
7245 Tells gdb whether to detach the child of a fork."),
7246 NULL, NULL, &setlist, &showlist);
7247
7248 /* Set/show disable address space randomization mode. */
7249
7250 add_setshow_boolean_cmd ("disable-randomization", class_support,
7251 &disable_randomization, _("\
7252 Set disabling of debuggee's virtual address space randomization."), _("\
7253 Show disabling of debuggee's virtual address space randomization."), _("\
7254 When this mode is on (which is the default), randomization of the virtual\n\
7255 address space is disabled. Standalone programs run with the randomization\n\
7256 enabled by default on some platforms."),
7257 &set_disable_randomization,
7258 &show_disable_randomization,
7259 &setlist, &showlist);
7260
7261 /* ptid initializations */
7262 inferior_ptid = null_ptid;
7263 target_last_wait_ptid = minus_one_ptid;
7264
7265 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7266 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7267 observer_attach_thread_exit (infrun_thread_thread_exit);
7268 observer_attach_inferior_exit (infrun_inferior_exit);
7269
7270 /* Explicitly create without lookup, since a lookup would try to create
7271 the variable with a void-typed value, and gdbarch isn't initialized
7272 yet when we get here.  At this point, we're quite sure there isn't
7273 another convenience variable of the same name. */
7274 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7275
7276 add_setshow_boolean_cmd ("observer", no_class,
7277 &observer_mode_1, _("\
7278 Set whether gdb controls the inferior in observer mode."), _("\
7279 Show whether gdb controls the inferior in observer mode."), _("\
7280 In observer mode, GDB can get data from the inferior, but not\n\
7281 affect its execution. Registers and memory may not be changed,\n\
7282 breakpoints may not be set, and the program cannot be interrupted\n\
7283 or signalled."),
7284 set_observer_mode,
7285 show_observer_mode,
7286 &setlist,
7287 &showlist);
7288 }