1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2013 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "gdb_string.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "record-full.h"
53 #include "inline-frame.h"
54 #include "jit.h"
55 #include "tracepoint.h"
56 #include "continuations.h"
57 #include "interps.h"
58 #include "skip.h"
59 #include "probe.h"
60 #include "objfiles.h"
61 #include "completer.h"
62 #include "target-descriptions.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static void set_schedlock_func (char *args, int from_tty,
83 struct cmd_list_element *c);
84
85 static int currently_stepping (struct thread_info *tp);
86
87 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
88 void *data);
89
90 static void xdb_handle_command (char *args, int from_tty);
91
92 static int prepare_to_proceed (int);
93
94 static void print_exited_reason (int exitstatus);
95
96 static void print_signal_exited_reason (enum gdb_signal siggnal);
97
98 static void print_no_history_reason (void);
99
100 static void print_signal_received_reason (enum gdb_signal siggnal);
101
102 static void print_end_stepping_range_reason (void);
103
104 void _initialize_infrun (void);
105
106 void nullify_last_target_wait_ptid (void);
107
108 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
109
110 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
111
112 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
113
114 /* When set, stop the 'step' command if we enter a function which has
115 no line number information. The normal behavior is that we step
116 over such functions. */
117 int step_stop_if_no_debug = 0;
118 static void
119 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
120 struct cmd_list_element *c, const char *value)
121 {
122 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
123 }
124
125 /* In asynchronous mode, but simulating synchronous execution. */
126
127 int sync_execution = 0;
128
129 /* wait_for_inferior and normal_stop use this to notify the user
130 when the inferior stopped in a different thread than it had been
131 running in. */
132
133 static ptid_t previous_inferior_ptid;
134
135 /* Default behavior is to detach newly forked processes (legacy). */
136 int detach_fork = 1;
137
138 int debug_displaced = 0;
139 static void
140 show_debug_displaced (struct ui_file *file, int from_tty,
141 struct cmd_list_element *c, const char *value)
142 {
143 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
144 }
145
146 unsigned int debug_infrun = 0;
147 static void
148 show_debug_infrun (struct ui_file *file, int from_tty,
149 struct cmd_list_element *c, const char *value)
150 {
151 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
152 }
153
154
155 /* Support for disabling address space randomization. */
156
157 int disable_randomization = 1;
158
159 static void
160 show_disable_randomization (struct ui_file *file, int from_tty,
161 struct cmd_list_element *c, const char *value)
162 {
163 if (target_supports_disable_randomization ())
164 fprintf_filtered (file,
165 _("Disabling randomization of debuggee's "
166 "virtual address space is %s.\n"),
167 value);
168 else
169 fputs_filtered (_("Disabling randomization of debuggee's "
170 "virtual address space is unsupported on\n"
171 "this platform.\n"), file);
172 }
173
174 static void
175 set_disable_randomization (char *args, int from_tty,
176 struct cmd_list_element *c)
177 {
178 if (!target_supports_disable_randomization ())
179 error (_("Disabling randomization of debuggee's "
180 "virtual address space is unsupported on\n"
181 "this platform."));
182 }
183
184
185 /* If the program uses ELF-style shared libraries, then calls to
186 functions in shared libraries go through stubs, which live in a
187 table called the PLT (Procedure Linkage Table). The first time the
188 function is called, the stub sends control to the dynamic linker,
189 which looks up the function's real address, patches the stub so
190 that future calls will go directly to the function, and then passes
191 control to the function.
192
193 If we are stepping at the source level, we don't want to see any of
194 this --- we just want to skip over the stub and the dynamic linker.
195 The simple approach is to single-step until control leaves the
196 dynamic linker.
197
198 However, on some systems (e.g., Red Hat's 5.2 distribution) the
199 dynamic linker calls functions in the shared C library, so you
200 can't tell from the PC alone whether the dynamic linker is still
201 running. In this case, we use a step-resume breakpoint to get us
202 past the dynamic linker, as if we were using "next" to step over a
203 function call.
204
205 in_solib_dynsym_resolve_code() says whether we're in the dynamic
206 linker code or not. Normally, this means we single-step. However,
207 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
208 address where we can place a step-resume breakpoint to get past the
209 linker's symbol resolution function.
210
211 in_solib_dynsym_resolve_code() can generally be implemented in a
212 pretty portable way, by comparing the PC against the address ranges
213 of the dynamic linker's sections.
214
215 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
216 it depends on internal details of the dynamic linker. It's usually
217 not too hard to figure out where to put a breakpoint, but it
218 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
219 sanity checking. If it can't figure things out, returning zero and
220 getting the (possibly confusing) stepping behavior is better than
221 signalling an error, which will obscure the change in the
222 inferior's state. */
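/* A minimal sketch, illustrative only and not part of GDB: one portable
   way to implement an in_solib_dynsym_resolve_code-style check, as the
   comment above describes, is to compare PC against the address ranges
   of the dynamic linker's sections.  The range variables below are
   hypothetical placeholders; real implementations obtain these ranges
   from the loaded dynamic linker's section table in the solib backends.  */
#if 0
static CORE_ADDR interp_text_low, interp_text_high;	/* hypothetical */
static CORE_ADDR interp_plt_low, interp_plt_high;	/* hypothetical */

static int
example_in_dynsym_resolve_code (CORE_ADDR pc)
{
  /* PC is "in the dynamic linker" if it falls within the linker's own
     text section, or within the PLT it resolves through.  */
  return ((pc >= interp_text_low && pc < interp_text_high)
	  || (pc >= interp_plt_low && pc < interp_plt_high));
}
#endif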
223
224 /* This function returns TRUE if pc is the address of an instruction
225 that lies within the dynamic linker (such as the event hook, or the
226 dld itself).
227
228 This function must be used only when a dynamic linker event has
229 been caught, and the inferior is being stepped out of the hook, or
230 undefined results are guaranteed. */
231
232 #ifndef SOLIB_IN_DYNAMIC_LINKER
233 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
234 #endif
235
236 /* "Observer mode" is somewhat like a more extreme version of
237 non-stop, in which all GDB operations that might affect the
238 target's execution have been disabled. */
239
240 static int non_stop_1 = 0;
241
242 int observer_mode = 0;
243 static int observer_mode_1 = 0;
244
245 static void
246 set_observer_mode (char *args, int from_tty,
247 struct cmd_list_element *c)
248 {
249 extern int pagination_enabled;
250
251 if (target_has_execution)
252 {
253 observer_mode_1 = observer_mode;
254 error (_("Cannot change this setting while the inferior is running."));
255 }
256
257 observer_mode = observer_mode_1;
258
259 may_write_registers = !observer_mode;
260 may_write_memory = !observer_mode;
261 may_insert_breakpoints = !observer_mode;
262 may_insert_tracepoints = !observer_mode;
263 /* We can insert fast tracepoints in or out of observer mode,
264 but enable them if we're going into this mode. */
265 if (observer_mode)
266 may_insert_fast_tracepoints = 1;
267 may_stop = !observer_mode;
268 update_target_permissions ();
269
270 /* Going *into* observer mode we must force non-stop, then
271 going out we leave it that way. */
272 if (observer_mode)
273 {
274 target_async_permitted = 1;
275 pagination_enabled = 0;
276 non_stop = non_stop_1 = 1;
277 }
278
279 if (from_tty)
280 printf_filtered (_("Observer mode is now %s.\n"),
281 (observer_mode ? "on" : "off"));
282 }
283
284 static void
285 show_observer_mode (struct ui_file *file, int from_tty,
286 struct cmd_list_element *c, const char *value)
287 {
288 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
289 }
290
291 /* This updates the value of observer mode based on changes in
292 permissions. Note that we are deliberately ignoring the values of
293 may-write-registers and may-write-memory, since the user may have
294 reason to enable these during a session, for instance to turn on a
295 debugging-related global. */
296
297 void
298 update_observer_mode (void)
299 {
300 int newval;
301
302 newval = (!may_insert_breakpoints
303 && !may_insert_tracepoints
304 && may_insert_fast_tracepoints
305 && !may_stop
306 && non_stop);
307
308 /* Let the user know if things change. */
309 if (newval != observer_mode)
310 printf_filtered (_("Observer mode is now %s.\n"),
311 (newval ? "on" : "off"));
312
313 observer_mode = observer_mode_1 = newval;
314 }
315
316 /* Tables of how to react to signals; the user sets them. */
317
318 static unsigned char *signal_stop;
319 static unsigned char *signal_print;
320 static unsigned char *signal_program;
321
322 /* Table of signals that are registered with "catch signal". A
323 non-zero entry indicates that the signal is caught by some "catch
324 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
325 signals. */
326 static unsigned char *signal_catch;
327
328 /* Table of signals that the target may silently handle.
329 This is automatically determined from the flags above,
330 and simply cached here. */
331 static unsigned char *signal_pass;
332
333 #define SET_SIGS(nsigs,sigs,flags) \
334 do { \
335 int signum = (nsigs); \
336 while (signum-- > 0) \
337 if ((sigs)[signum]) \
338 (flags)[signum] = 1; \
339 } while (0)
340
341 #define UNSET_SIGS(nsigs,sigs,flags) \
342 do { \
343 int signum = (nsigs); \
344 while (signum-- > 0) \
345 if ((sigs)[signum]) \
346 (flags)[signum] = 0; \
347 } while (0)
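/* Usage sketch (illustrative only): NSIGS and SIGS below are
   hypothetical locals marking which signals a command named; the macros
   fold those marks into one of the per-signal tables declared above,
   for example:

     SET_SIGS (nsigs, sigs, signal_stop);      for a "stop" request
     UNSET_SIGS (nsigs, sigs, signal_print);   for a "noprint" request  */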
348
349 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
350 this function is to avoid exporting `signal_program'. */
351
352 void
353 update_signals_program_target (void)
354 {
355 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
356 }
357
358 /* Value to pass to target_resume() to cause all threads to resume. */
359
360 #define RESUME_ALL minus_one_ptid
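/* Illustrative usage (a sketch, not code from this file): resuming all
   threads forward, with no single-step and no signal to deliver, would
   look like:

     target_resume (RESUME_ALL, 0, GDB_SIGNAL_0);  */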
361
362 /* Command list pointer for the "stop" placeholder. */
363
364 static struct cmd_list_element *stop_command;
365
366 /* Function inferior was in as of last step command. */
367
368 static struct symbol *step_start_function;
369
370 /* Nonzero if we want to give control to the user when we're notified
371 of shared library events by the dynamic linker. */
372 int stop_on_solib_events;
373
374 /* Enable or disable optional shared library event breakpoints
375 as appropriate when the above flag is changed. */
376
377 static void
378 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
379 {
380 update_solib_breakpoints ();
381 }
382
383 static void
384 show_stop_on_solib_events (struct ui_file *file, int from_tty,
385 struct cmd_list_element *c, const char *value)
386 {
387 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
388 value);
389 }
390
391 /* Nonzero means expecting a trace trap
392 and should stop the inferior and return silently when it happens. */
393
394 int stop_after_trap;
395
396 /* Save register contents here when executing a "finish" command or when
397 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
398 Thus this contains the return value from the called function (assuming
399 values are returned in a register). */
400
401 struct regcache *stop_registers;
402
403 /* Nonzero after stop if current stack frame should be printed. */
404
405 static int stop_print_frame;
406
407 /* This is a cached copy of the pid/waitstatus of the last event
408 returned by target_wait()/deprecated_target_wait_hook(). This
409 information is returned by get_last_target_status(). */
410 static ptid_t target_last_wait_ptid;
411 static struct target_waitstatus target_last_waitstatus;
412
413 static void context_switch (ptid_t ptid);
414
415 void init_thread_stepping_state (struct thread_info *tss);
416
417 static void init_infwait_state (void);
418
419 static const char follow_fork_mode_child[] = "child";
420 static const char follow_fork_mode_parent[] = "parent";
421
422 static const char *const follow_fork_mode_kind_names[] = {
423 follow_fork_mode_child,
424 follow_fork_mode_parent,
425 NULL
426 };
427
428 static const char *follow_fork_mode_string = follow_fork_mode_parent;
429 static void
430 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
431 struct cmd_list_element *c, const char *value)
432 {
433 fprintf_filtered (file,
434 _("Debugger response to a program "
435 "call of fork or vfork is \"%s\".\n"),
436 value);
437 }
438 \f
439
440 /* Tell the target to follow the fork we're stopped at. Returns true
441 if the inferior should be resumed; false, if the target for some
442 reason decided it's best not to resume. */
443
444 static int
445 follow_fork (void)
446 {
447 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
448 int should_resume = 1;
449 struct thread_info *tp;
450
451 /* Copy user stepping state to the new inferior thread. FIXME: the
452 followed fork child thread should have a copy of most of the
453 parent thread structure's run control related fields, not just these.
454 Initialized to avoid "may be used uninitialized" warnings from gcc. */
455 struct breakpoint *step_resume_breakpoint = NULL;
456 struct breakpoint *exception_resume_breakpoint = NULL;
457 CORE_ADDR step_range_start = 0;
458 CORE_ADDR step_range_end = 0;
459 struct frame_id step_frame_id = { 0 };
460
461 if (!non_stop)
462 {
463 ptid_t wait_ptid;
464 struct target_waitstatus wait_status;
465
466 /* Get the last target status returned by target_wait(). */
467 get_last_target_status (&wait_ptid, &wait_status);
468
469 /* If not stopped at a fork event, then there's nothing else to
470 do. */
471 if (wait_status.kind != TARGET_WAITKIND_FORKED
472 && wait_status.kind != TARGET_WAITKIND_VFORKED)
473 return 1;
474
475 /* Check if we switched over from WAIT_PTID, since the event was
476 reported. */
477 if (!ptid_equal (wait_ptid, minus_one_ptid)
478 && !ptid_equal (inferior_ptid, wait_ptid))
479 {
480 /* We did. Switch back to WAIT_PTID thread, to tell the
481 target to follow it (in either direction). We'll
482 afterwards refuse to resume, and inform the user what
483 happened. */
484 switch_to_thread (wait_ptid);
485 should_resume = 0;
486 }
487 }
488
489 tp = inferior_thread ();
490
491 /* If there were any forks/vforks that were caught and are now to be
492 followed, then do so now. */
493 switch (tp->pending_follow.kind)
494 {
495 case TARGET_WAITKIND_FORKED:
496 case TARGET_WAITKIND_VFORKED:
497 {
498 ptid_t parent, child;
499
500 /* If the user did a next/step, etc, over a fork call,
501 preserve the stepping state in the fork child. */
502 if (follow_child && should_resume)
503 {
504 step_resume_breakpoint = clone_momentary_breakpoint
505 (tp->control.step_resume_breakpoint);
506 step_range_start = tp->control.step_range_start;
507 step_range_end = tp->control.step_range_end;
508 step_frame_id = tp->control.step_frame_id;
509 exception_resume_breakpoint
510 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
511
512 /* For now, delete the parent's sr breakpoint, otherwise,
513 parent/child sr breakpoints are considered duplicates,
514 and the child version will not be installed. Remove
515 this when the breakpoints module becomes aware of
516 inferiors and address spaces. */
517 delete_step_resume_breakpoint (tp);
518 tp->control.step_range_start = 0;
519 tp->control.step_range_end = 0;
520 tp->control.step_frame_id = null_frame_id;
521 delete_exception_resume_breakpoint (tp);
522 }
523
524 parent = inferior_ptid;
525 child = tp->pending_follow.value.related_pid;
526
527 /* Tell the target to do whatever is necessary to follow
528 either parent or child. */
529 if (target_follow_fork (follow_child))
530 {
531 /* Target refused to follow, or there's some other reason
532 we shouldn't resume. */
533 should_resume = 0;
534 }
535 else
536 {
537 /* This pending follow fork event is now handled, one way
538 or another. The previously selected thread may be gone
539 from the lists by now, but if it is still around, we need
540 to clear the pending follow request. */
541 tp = find_thread_ptid (parent);
542 if (tp)
543 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
544
545 /* This makes sure we don't try to apply the "Switched
546 over from WAIT_PTID" logic above. */
547 nullify_last_target_wait_ptid ();
548
549 /* If we followed the child, switch to it... */
550 if (follow_child)
551 {
552 switch_to_thread (child);
553
554 /* ... and preserve the stepping state, in case the
555 user was stepping over the fork call. */
556 if (should_resume)
557 {
558 tp = inferior_thread ();
559 tp->control.step_resume_breakpoint
560 = step_resume_breakpoint;
561 tp->control.step_range_start = step_range_start;
562 tp->control.step_range_end = step_range_end;
563 tp->control.step_frame_id = step_frame_id;
564 tp->control.exception_resume_breakpoint
565 = exception_resume_breakpoint;
566 }
567 else
568 {
569 /* If we get here, it was because we're trying to
570 resume from a fork catchpoint, but the user
571 has switched threads away from the thread that
572 forked. In that case, the resume command
573 issued is most likely not applicable to the
574 child, so just warn, and refuse to resume. */
575 warning (_("Not resuming: switched threads "
576 "before following fork child.\n"));
577 }
578
579 /* Reset breakpoints in the child as appropriate. */
580 follow_inferior_reset_breakpoints ();
581 }
582 else
583 switch_to_thread (parent);
584 }
585 }
586 break;
587 case TARGET_WAITKIND_SPURIOUS:
588 /* Nothing to follow. */
589 break;
590 default:
591 internal_error (__FILE__, __LINE__,
592 "Unexpected pending_follow.kind %d\n",
593 tp->pending_follow.kind);
594 break;
595 }
596
597 return should_resume;
598 }
599
600 void
601 follow_inferior_reset_breakpoints (void)
602 {
603 struct thread_info *tp = inferior_thread ();
604
605 /* Was there a step_resume breakpoint? (There was if the user
606 did a "next" at the fork() call.) If so, explicitly reset its
607 thread number.
608
609 step_resumes are a form of bp that are made to be per-thread.
610 Since we created the step_resume bp when the parent process
611 was being debugged, and now are switching to the child process,
612 from the breakpoint package's viewpoint, that's a switch of
613 "threads". We must update the bp's notion of which thread
614 it is for, or it'll be ignored when it triggers. */
615
616 if (tp->control.step_resume_breakpoint)
617 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
618
619 if (tp->control.exception_resume_breakpoint)
620 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
621
622 /* Reinsert all breakpoints in the child. The user may have set
623 breakpoints after catching the fork, in which case those
624 were never set in the child, but only in the parent. This makes
625 sure the inserted breakpoints match the breakpoint list. */
626
627 breakpoint_re_set ();
628 insert_breakpoints ();
629 }
630
631 /* The child has exited or execed: resume threads of the parent the
632 user wanted to be executing. */
633
634 static int
635 proceed_after_vfork_done (struct thread_info *thread,
636 void *arg)
637 {
638 int pid = * (int *) arg;
639
640 if (ptid_get_pid (thread->ptid) == pid
641 && is_running (thread->ptid)
642 && !is_executing (thread->ptid)
643 && !thread->stop_requested
644 && thread->suspend.stop_signal == GDB_SIGNAL_0)
645 {
646 if (debug_infrun)
647 fprintf_unfiltered (gdb_stdlog,
648 "infrun: resuming vfork parent thread %s\n",
649 target_pid_to_str (thread->ptid));
650
651 switch_to_thread (thread->ptid);
652 clear_proceed_status ();
653 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
654 }
655
656 return 0;
657 }
658
659 /* Called whenever we notice an exec or exit event, to handle
660 detaching or resuming a vfork parent. */
661
662 static void
663 handle_vfork_child_exec_or_exit (int exec)
664 {
665 struct inferior *inf = current_inferior ();
666
667 if (inf->vfork_parent)
668 {
669 int resume_parent = -1;
670
671 /* This exec or exit marks the end of the shared memory region
672 between the parent and the child. If the user wanted to
673 detach from the parent, now is the time. */
674
675 if (inf->vfork_parent->pending_detach)
676 {
677 struct thread_info *tp;
678 struct cleanup *old_chain;
679 struct program_space *pspace;
680 struct address_space *aspace;
681
682 /* follow-fork child, detach-on-fork on. */
683
684 inf->vfork_parent->pending_detach = 0;
685
686 if (!exec)
687 {
688 /* If we're handling a child exit, then inferior_ptid
689 points at the inferior's pid, not to a thread. */
690 old_chain = save_inferior_ptid ();
691 save_current_program_space ();
692 save_current_inferior ();
693 }
694 else
695 old_chain = save_current_space_and_thread ();
696
697 /* We're letting go of the parent. */
698 tp = any_live_thread_of_process (inf->vfork_parent->pid);
699 switch_to_thread (tp->ptid);
700
701 /* We're about to detach from the parent, which implicitly
702 removes breakpoints from its address space. There's a
703 catch here: we want to reuse the spaces for the child,
704 but, parent/child are still sharing the pspace at this
705 point, although the exec in reality makes the kernel give
706 the child a fresh set of new pages. The problem here is
707 that the breakpoints module, being unaware of this, would
708 likely choose the child process to write to the parent
709 address space. Swapping the child temporarily away from
710 the spaces has the desired effect. Yes, this is "sort
711 of" a hack. */
712
713 pspace = inf->pspace;
714 aspace = inf->aspace;
715 inf->aspace = NULL;
716 inf->pspace = NULL;
717
718 if (debug_infrun || info_verbose)
719 {
720 target_terminal_ours ();
721
722 if (exec)
723 fprintf_filtered (gdb_stdlog,
724 "Detaching vfork parent process "
725 "%d after child exec.\n",
726 inf->vfork_parent->pid);
727 else
728 fprintf_filtered (gdb_stdlog,
729 "Detaching vfork parent process "
730 "%d after child exit.\n",
731 inf->vfork_parent->pid);
732 }
733
734 target_detach (NULL, 0);
735
736 /* Put it back. */
737 inf->pspace = pspace;
738 inf->aspace = aspace;
739
740 do_cleanups (old_chain);
741 }
742 else if (exec)
743 {
744 /* We're staying attached to the parent, so, really give the
745 child a new address space. */
746 inf->pspace = add_program_space (maybe_new_address_space ());
747 inf->aspace = inf->pspace->aspace;
748 inf->removable = 1;
749 set_current_program_space (inf->pspace);
750
751 resume_parent = inf->vfork_parent->pid;
752
753 /* Break the bonds. */
754 inf->vfork_parent->vfork_child = NULL;
755 }
756 else
757 {
758 struct cleanup *old_chain;
759 struct program_space *pspace;
760
761 /* If this is a vfork child exiting, then the pspace and
762 aspaces were shared with the parent. Since we're
763 reporting the process exit, we'll be mourning all that is
764 found in the address space, and switching to null_ptid,
765 preparing to start a new inferior. But, since we don't
766 want to clobber the parent's address/program spaces, we
767 go ahead and create a new one for this exiting
768 inferior. */
769
770 /* Switch to null_ptid, so that clone_program_space doesn't want
771 to read the selected frame of a dead process. */
772 old_chain = save_inferior_ptid ();
773 inferior_ptid = null_ptid;
774
775 /* This inferior is dead, so avoid giving the breakpoints
776 module the option to write through to it (cloning a
777 program space resets breakpoints). */
778 inf->aspace = NULL;
779 inf->pspace = NULL;
780 pspace = add_program_space (maybe_new_address_space ());
781 set_current_program_space (pspace);
782 inf->removable = 1;
783 inf->symfile_flags = SYMFILE_NO_READ;
784 clone_program_space (pspace, inf->vfork_parent->pspace);
785 inf->pspace = pspace;
786 inf->aspace = pspace->aspace;
787
788 /* Put back inferior_ptid. We'll continue mourning this
789 inferior. */
790 do_cleanups (old_chain);
791
792 resume_parent = inf->vfork_parent->pid;
793 /* Break the bonds. */
794 inf->vfork_parent->vfork_child = NULL;
795 }
796
797 inf->vfork_parent = NULL;
798
799 gdb_assert (current_program_space == inf->pspace);
800
801 if (non_stop && resume_parent != -1)
802 {
803 /* If the user wanted the parent to be running, let it go
804 free now. */
805 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
806
807 if (debug_infrun)
808 fprintf_unfiltered (gdb_stdlog,
809 "infrun: resuming vfork parent process %d\n",
810 resume_parent);
811
812 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
813
814 do_cleanups (old_chain);
815 }
816 }
817 }
818
819 /* Enum strings for "set|show follow-exec-mode". */
820
821 static const char follow_exec_mode_new[] = "new";
822 static const char follow_exec_mode_same[] = "same";
823 static const char *const follow_exec_mode_names[] =
824 {
825 follow_exec_mode_new,
826 follow_exec_mode_same,
827 NULL,
828 };
829
830 static const char *follow_exec_mode_string = follow_exec_mode_same;
831 static void
832 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
833 struct cmd_list_element *c, const char *value)
834 {
835 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
836 }
837
838 /* EXECD_PATHNAME is assumed to be non-NULL. */
839
840 static void
841 follow_exec (ptid_t pid, char *execd_pathname)
842 {
843 struct thread_info *th = inferior_thread ();
844 struct inferior *inf = current_inferior ();
845
846 /* This is an exec event that we actually wish to pay attention to.
847 Refresh our symbol table to the newly exec'd program, remove any
848 momentary bp's, etc.
849
850 If there are breakpoints, they aren't really inserted now,
851 since the exec() transformed our inferior into a fresh set
852 of instructions.
853
854 We want to preserve symbolic breakpoints on the list, since
855 we have hopes that they can be reset after the new a.out's
856 symbol table is read.
857
858 However, any "raw" breakpoints must be removed from the list
859 (e.g., the solib bp's), since their address is probably invalid
860 now.
861
862 And, we DON'T want to call delete_breakpoints() here, since
863 that may write the bp's "shadow contents" (the instruction
864 value that was overwritten with a TRAP instruction). Since
865 we now have a new a.out, those shadow contents aren't valid. */
866
867 mark_breakpoints_out ();
868
869 update_breakpoints_after_exec ();
870
871 /* If there was one, it's gone now. We cannot truly step-to-next
872 statement through an exec(). */
873 th->control.step_resume_breakpoint = NULL;
874 th->control.exception_resume_breakpoint = NULL;
875 th->control.step_range_start = 0;
876 th->control.step_range_end = 0;
877
878 /* The target reports the exec event to the main thread, even if
879 some other thread does the exec, and even if the main thread was
880 already stopped --- if debugging in non-stop mode, it's possible
881 the user had the main thread held stopped in the previous image
882 --- release it now. This is the same behavior as step-over-exec
883 with scheduler-locking on in all-stop mode. */
884 th->stop_requested = 0;
885
886 /* What is this a.out's name? */
887 printf_unfiltered (_("%s is executing new program: %s\n"),
888 target_pid_to_str (inferior_ptid),
889 execd_pathname);
890
891 /* We've followed the inferior through an exec. Therefore, the
892 inferior has essentially been killed & reborn. */
893
894 gdb_flush (gdb_stdout);
895
896 breakpoint_init_inferior (inf_execd);
897
898 if (gdb_sysroot && *gdb_sysroot)
899 {
900 char *name = alloca (strlen (gdb_sysroot)
901 + strlen (execd_pathname)
902 + 1);
903
904 strcpy (name, gdb_sysroot);
905 strcat (name, execd_pathname);
906 execd_pathname = name;
907 }
908
909 /* Reset the shared library package. This ensures that we get a
910 shlib event when the child reaches "_start", at which point the
911 dld will have had a chance to initialize the child. */
912 /* Also, loading a symbol file below may trigger symbol lookups, and
913 we don't want those to be satisfied by the libraries of the
914 previous incarnation of this process. */
915 no_shared_libraries (NULL, 0);
916
917 if (follow_exec_mode_string == follow_exec_mode_new)
918 {
919 struct program_space *pspace;
920
921 /* The user wants to keep the old inferior and program spaces
922 around. Create a new fresh one, and switch to it. */
923
924 inf = add_inferior (current_inferior ()->pid);
925 pspace = add_program_space (maybe_new_address_space ());
926 inf->pspace = pspace;
927 inf->aspace = pspace->aspace;
928
929 exit_inferior_num_silent (current_inferior ()->num);
930
931 set_current_inferior (inf);
932 set_current_program_space (pspace);
933 }
934 else
935 {
936 /* The old description may no longer be fit for the new image.
937 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
938 old description; we'll read a new one below. No need to do
939 this on "follow-exec-mode new", as the old inferior stays
940 around (its description is later cleared/refetched on
941 restart). */
942 target_clear_description ();
943 }
944
945 gdb_assert (current_program_space == inf->pspace);
946
947 /* That a.out is now the one to use. */
948 exec_file_attach (execd_pathname, 0);
949
950 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
951 (Position Independent Executable) main symbol file will get applied by
952 solib_create_inferior_hook below. breakpoint_re_set would otherwise fail
953 to insert the breakpoints with the zero displacement. */
954
955 symbol_file_add (execd_pathname,
956 (inf->symfile_flags
957 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
958 NULL, 0);
959
960 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
961 set_initial_language ();
962
963 /* If the target can specify a description, read it. Must do this
964 after flipping to the new executable (because the target supplied
965 description must be compatible with the executable's
966 architecture, and the old executable may e.g., be 32-bit, while
967 the new one 64-bit), and before anything involving memory or
968 registers. */
969 target_find_description ();
970
971 solib_create_inferior_hook (0);
972
973 jit_inferior_created_hook ();
974
975 breakpoint_re_set ();
976
977 /* Reinsert all breakpoints. (Those which were symbolic have
978 been reset to the proper address in the new a.out, thanks
979 to symbol_file_command...). */
980 insert_breakpoints ();
981
982 /* The next resume of this inferior should bring it to the shlib
983 startup breakpoints. (If the user had also set bp's on
984 "main" from the old (parent) process, then they'll auto-
985 matically get reset there in the new process.). */
986 }
987
988 /* Non-zero if we are just simulating a single-step. This is needed
989 because we cannot remove the breakpoints in the inferior process
990 until after the `wait' in `wait_for_inferior'. */
991 static int singlestep_breakpoints_inserted_p = 0;
992
993 /* The thread we inserted single-step breakpoints for. */
994 static ptid_t singlestep_ptid;
995
996 /* PC when we started this single-step. */
997 static CORE_ADDR singlestep_pc;
998
999 /* If another thread hit the singlestep breakpoint, we save the original
1000 thread here so that we can resume single-stepping it later. */
1001 static ptid_t saved_singlestep_ptid;
1002 static int stepping_past_singlestep_breakpoint;
1003
1004 /* If not equal to null_ptid, this means that after stepping over a breakpoint
1005 is finished, we need to switch to deferred_step_ptid, and step it.
1006
1007 The use case is when one thread has hit a breakpoint, and then the user
1008 has switched to another thread and issued 'step'. We need to step over
1009 the breakpoint in the thread which hit it, but then continue
1010 stepping the thread the user has selected. */
1011 static ptid_t deferred_step_ptid;
1012 \f
1013 /* Displaced stepping. */
1014
1015 /* In non-stop debugging mode, we must take special care to manage
1016 breakpoints properly; in particular, the traditional strategy for
1017 stepping a thread past a breakpoint it has hit is unsuitable.
1018 'Displaced stepping' is a tactic for stepping one thread past a
1019 breakpoint it has hit while ensuring that other threads running
1020 concurrently will hit the breakpoint as they should.
1021
1022 The traditional way to step a thread T off a breakpoint in a
1023 multi-threaded program in all-stop mode is as follows:
1024
1025 a0) Initially, all threads are stopped, and breakpoints are not
1026 inserted.
1027 a1) We single-step T, leaving breakpoints uninserted.
1028 a2) We insert breakpoints, and resume all threads.
1029
1030 In non-stop debugging, however, this strategy is unsuitable: we
1031 don't want to have to stop all threads in the system in order to
1032 continue or step T past a breakpoint. Instead, we use displaced
1033 stepping:
1034
1035 n0) Initially, T is stopped, other threads are running, and
1036 breakpoints are inserted.
1037 n1) We copy the instruction "under" the breakpoint to a separate
1038 location, outside the main code stream, making any adjustments
1039 to the instruction, register, and memory state as directed by
1040 T's architecture.
1041 n2) We single-step T over the instruction at its new location.
1042 n3) We adjust the resulting register and memory state as directed
1043 by T's architecture. This includes resetting T's PC to point
1044 back into the main instruction stream.
1045 n4) We resume T.
1046
1047 This approach depends on the following gdbarch methods:
1048
1049 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1050 indicate where to copy the instruction, and how much space must
1051 be reserved there. We use these in step n1.
1052
1053 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1054 address, and makes any necessary adjustments to the instruction,
1055 register contents, and memory. We use this in step n1.
1056
1057 - gdbarch_displaced_step_fixup adjusts registers and memory after
1058 we have successfully single-stepped the instruction, to yield the
1059 same effect the instruction would have had if we had executed it
1060 at its original address. We use this in step n3.
1061
1062 - gdbarch_displaced_step_free_closure provides cleanup.
1063
1064 The gdbarch_displaced_step_copy_insn and
1065 gdbarch_displaced_step_fixup functions must be written so that
1066 copying an instruction with gdbarch_displaced_step_copy_insn,
1067 single-stepping across the copied instruction, and then applying
1068 gdbarch_displaced_step_fixup should have the same effects on the
1069 thread's memory and registers as stepping the instruction in place
1070 would have. Exactly which responsibilities fall to the copy and
1071 which fall to the fixup is up to the author of those functions.
1072
1073 See the comments in gdbarch.sh for details.
1074
1075 Note that displaced stepping and software single-step cannot
1076 currently be used in combination, although with some care I think
1077 they could be made to. Software single-step works by placing
1078 breakpoints on all possible subsequent instructions; if the
1079 displaced instruction is a PC-relative jump, those breakpoints
1080 could fall in very strange places --- on pages that aren't
1081 executable, or at addresses that are not proper instruction
1082 boundaries. (We do generally let other threads run while we wait
1083 to hit the software single-step breakpoint, and they might
1084 encounter such a corrupted instruction.) One way to work around
1085 this would be to have gdbarch_displaced_step_copy_insn fully
1086 simulate the effect of PC-relative instructions (and return NULL)
1087 on architectures that use software single-stepping.
1088
1089 In non-stop mode, we can have independent and simultaneous step
1090 requests, so more than one thread may need to simultaneously step
1091 over a breakpoint. The current implementation assumes there is
1092 only one scratch space per process. In this case, we have to
1093 serialize access to the scratch space. If thread A wants to step
1094 over a breakpoint, but we are currently waiting for some other
1095 thread to complete a displaced step, we leave thread A stopped and
1096 place it in the displaced_step_request_queue. Whenever a displaced
1097 step finishes, we pick the next thread in the queue and start a new
1098 displaced step operation on it. See displaced_step_prepare and
1099 displaced_step_fixup for details. */
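/* Condensed sketch (illustrative only; queueing, scratch-area save and
   restore, and error handling omitted) of steps n1-n4 above, expressed
   with the gdbarch methods the comment names.  The real logic is in
   displaced_step_prepare and displaced_step_fixup below.  */
#if 0
static void
example_displaced_step (struct gdbarch *gdbarch, struct regcache *regcache)
{
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  struct displaced_step_closure *closure;

  /* n1: copy the instruction out of line (the original LEN bytes at
     COPY would be saved first and restored afterwards).  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
					      regcache);
  regcache_write_pc (regcache, copy);

  /* n2: single-step the thread at COPY and wait for it to stop.  */

  /* n3: fix up registers/memory as if the instruction had run at
     ORIGINAL.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);

  /* n4: resume the thread normally.  */
}
#endif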
1100
1101 struct displaced_step_request
1102 {
1103 ptid_t ptid;
1104 struct displaced_step_request *next;
1105 };
1106
1107 /* Per-inferior displaced stepping state. */
1108 struct displaced_step_inferior_state
1109 {
1110 /* Pointer to next in linked list. */
1111 struct displaced_step_inferior_state *next;
1112
1113 /* The process this displaced step state refers to. */
1114 int pid;
1115
1116 /* A queue of pending displaced stepping requests. One entry per
1117 thread that needs to do a displaced step. */
1118 struct displaced_step_request *step_request_queue;
1119
1120 /* If this is not null_ptid, this is the thread carrying out a
1121 displaced single-step in process PID. This thread's state will
1122 require fixing up once it has completed its step. */
1123 ptid_t step_ptid;
1124
1125 /* The architecture the thread had when we stepped it. */
1126 struct gdbarch *step_gdbarch;
1127
1128 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1129 for post-step cleanup. */
1130 struct displaced_step_closure *step_closure;
1131
1132 /* The address of the original instruction, and the copy we
1133 made. */
1134 CORE_ADDR step_original, step_copy;
1135
1136 /* Saved contents of copy area. */
1137 gdb_byte *step_saved_copy;
1138 };
1139
1140 /* The list of states of processes presently involved in displaced
1141 stepping. */
1142 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1143
1144 /* Get the displaced stepping state of process PID. */
1145
1146 static struct displaced_step_inferior_state *
1147 get_displaced_stepping_state (int pid)
1148 {
1149 struct displaced_step_inferior_state *state;
1150
1151 for (state = displaced_step_inferior_states;
1152 state != NULL;
1153 state = state->next)
1154 if (state->pid == pid)
1155 return state;
1156
1157 return NULL;
1158 }
1159
1160 /* Add a new displaced stepping state for process PID to the displaced
1161 stepping state list, or return a pointer to the existing entry if
1162 one already exists. Never returns NULL. */
1163
1164 static struct displaced_step_inferior_state *
1165 add_displaced_stepping_state (int pid)
1166 {
1167 struct displaced_step_inferior_state *state;
1168
1169 for (state = displaced_step_inferior_states;
1170 state != NULL;
1171 state = state->next)
1172 if (state->pid == pid)
1173 return state;
1174
1175 state = xcalloc (1, sizeof (*state));
1176 state->pid = pid;
1177 state->next = displaced_step_inferior_states;
1178 displaced_step_inferior_states = state;
1179
1180 return state;
1181 }
1182
1183 /* If the inferior is in displaced stepping, and ADDR equals the starting
1184 address of the copy area, return the corresponding displaced_step_closure.
1185 Otherwise, return NULL. */
1186
1187 struct displaced_step_closure*
1188 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1189 {
1190 struct displaced_step_inferior_state *displaced
1191 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1192
1193 /* If checking the mode of displaced instruction in copy area. */
1194 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1195 && (displaced->step_copy == addr))
1196 return displaced->step_closure;
1197
1198 return NULL;
1199 }
1200
1201 /* Remove the displaced stepping state of process PID. */
1202
1203 static void
1204 remove_displaced_stepping_state (int pid)
1205 {
1206 struct displaced_step_inferior_state *it, **prev_next_p;
1207
1208 gdb_assert (pid != 0);
1209
1210 it = displaced_step_inferior_states;
1211 prev_next_p = &displaced_step_inferior_states;
1212 while (it)
1213 {
1214 if (it->pid == pid)
1215 {
1216 *prev_next_p = it->next;
1217 xfree (it);
1218 return;
1219 }
1220
1221 prev_next_p = &it->next;
1222 it = *prev_next_p;
1223 }
1224 }
1225
1226 static void
1227 infrun_inferior_exit (struct inferior *inf)
1228 {
1229 remove_displaced_stepping_state (inf->pid);
1230 }
1231
1232 /* If ON, and the architecture supports it, GDB will use displaced
1233 stepping to step over breakpoints. If OFF, or if the architecture
1234 doesn't support it, GDB will instead use the traditional
1235 hold-and-step approach. If AUTO (which is the default), GDB will
1236 decide which technique to use to step over breakpoints depending on
1237 which of all-stop or non-stop mode is active --- displaced stepping
1238 in non-stop mode; hold-and-step in all-stop mode. */
1239
1240 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1241
1242 static void
1243 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1244 struct cmd_list_element *c,
1245 const char *value)
1246 {
1247 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1248 fprintf_filtered (file,
1249 _("Debugger's willingness to use displaced stepping "
1250 "to step over breakpoints is %s (currently %s).\n"),
1251 value, non_stop ? "on" : "off");
1252 else
1253 fprintf_filtered (file,
1254 _("Debugger's willingness to use displaced stepping "
1255 "to step over breakpoints is %s.\n"), value);
1256 }
1257
1258 /* Return non-zero if displaced stepping can/should be used to step
1259 over breakpoints. */
1260
1261 static int
1262 use_displaced_stepping (struct gdbarch *gdbarch)
1263 {
1264 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1265 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1266 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1267 && !RECORD_IS_USED);
1268 }
1269
1270 /* Clean out any stray displaced stepping state. */
1271 static void
1272 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1273 {
1274 /* Indicate that there is no cleanup pending. */
1275 displaced->step_ptid = null_ptid;
1276
1277 if (displaced->step_closure)
1278 {
1279 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1280 displaced->step_closure);
1281 displaced->step_closure = NULL;
1282 }
1283 }
1284
1285 static void
1286 displaced_step_clear_cleanup (void *arg)
1287 {
1288 struct displaced_step_inferior_state *state = arg;
1289
1290 displaced_step_clear (state);
1291 }
1292
1293 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1294 void
1295 displaced_step_dump_bytes (struct ui_file *file,
1296 const gdb_byte *buf,
1297 size_t len)
1298 {
1299 int i;
1300
1301 for (i = 0; i < len; i++)
1302 fprintf_unfiltered (file, "%02x ", buf[i]);
1303 fputs_unfiltered ("\n", file);
1304 }
1305
1306 /* Prepare to single-step, using displaced stepping.
1307
1308 Note that we cannot use displaced stepping when we have a signal to
1309 deliver. If we have a signal to deliver and an instruction to step
1310 over, then after the step, there will be no indication from the
1311 target whether the thread entered a signal handler or ignored the
1312 signal and stepped over the instruction successfully --- both cases
1313 result in a simple SIGTRAP. In the first case we mustn't do a
1314 fixup, and in the second case we must --- but we can't tell which.
1315 Comments in the code for 'random signals' in handle_inferior_event
1316 explain how we handle this case instead.
1317
1318 Returns 1 if preparing was successful -- this thread is going to be
1319 stepped now; or 0 if displaced stepping this thread got queued. */
1320 static int
1321 displaced_step_prepare (ptid_t ptid)
1322 {
1323 struct cleanup *old_cleanups, *ignore_cleanups;
1324 struct thread_info *tp = find_thread_ptid (ptid);
1325 struct regcache *regcache = get_thread_regcache (ptid);
1326 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1327 CORE_ADDR original, copy;
1328 ULONGEST len;
1329 struct displaced_step_closure *closure;
1330 struct displaced_step_inferior_state *displaced;
1331 int status;
1332
1333 /* We should never reach this function if the architecture does not
1334 support displaced stepping. */
1335 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1336
1337 /* Disable range stepping while executing in the scratch pad. We
1338 want a single-step even if executing the displaced instruction in
1339 the scratch buffer lands within the stepping range (e.g., a
1340 jump/branch). */
1341 tp->control.may_range_step = 0;
1342
1343 /* We have to displaced step one thread at a time, as we only have
1344 access to a single scratch space per inferior. */
1345
1346 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1347
1348 if (!ptid_equal (displaced->step_ptid, null_ptid))
1349 {
1350 /* Already waiting for a displaced step to finish. Defer this
1351 request and place it in the queue. */
1352 struct displaced_step_request *req, *new_req;
1353
1354 if (debug_displaced)
1355 fprintf_unfiltered (gdb_stdlog,
1356 "displaced: defering step of %s\n",
1357 target_pid_to_str (ptid));
1358
1359 new_req = xmalloc (sizeof (*new_req));
1360 new_req->ptid = ptid;
1361 new_req->next = NULL;
1362
1363 if (displaced->step_request_queue)
1364 {
1365 for (req = displaced->step_request_queue;
1366 req && req->next;
1367 req = req->next)
1368 ;
1369 req->next = new_req;
1370 }
1371 else
1372 displaced->step_request_queue = new_req;
1373
1374 return 0;
1375 }
1376 else
1377 {
1378 if (debug_displaced)
1379 fprintf_unfiltered (gdb_stdlog,
1380 "displaced: stepping %s now\n",
1381 target_pid_to_str (ptid));
1382 }
1383
1384 displaced_step_clear (displaced);
1385
1386 old_cleanups = save_inferior_ptid ();
1387 inferior_ptid = ptid;
1388
1389 original = regcache_read_pc (regcache);
1390
1391 copy = gdbarch_displaced_step_location (gdbarch);
1392 len = gdbarch_max_insn_length (gdbarch);
1393
1394 /* Save the original contents of the copy area. */
1395 displaced->step_saved_copy = xmalloc (len);
1396 ignore_cleanups = make_cleanup (free_current_contents,
1397 &displaced->step_saved_copy);
1398 status = target_read_memory (copy, displaced->step_saved_copy, len);
1399 if (status != 0)
1400 throw_error (MEMORY_ERROR,
1401 _("Error accessing memory address %s (%s) for "
1402 "displaced-stepping scratch space."),
1403 paddress (gdbarch, copy), safe_strerror (status));
1404 if (debug_displaced)
1405 {
1406 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1407 paddress (gdbarch, copy));
1408 displaced_step_dump_bytes (gdb_stdlog,
1409 displaced->step_saved_copy,
1410 len);
1411 };
1412
1413 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1414 original, copy, regcache);
1415
1416 /* We don't support the fully-simulated case at present. */
1417 gdb_assert (closure);
1418
1419 /* Save the information we need to fix things up if the step
1420 succeeds. */
1421 displaced->step_ptid = ptid;
1422 displaced->step_gdbarch = gdbarch;
1423 displaced->step_closure = closure;
1424 displaced->step_original = original;
1425 displaced->step_copy = copy;
1426
1427 make_cleanup (displaced_step_clear_cleanup, displaced);
1428
1429 /* Resume execution at the copy. */
1430 regcache_write_pc (regcache, copy);
1431
1432 discard_cleanups (ignore_cleanups);
1433
1434 do_cleanups (old_cleanups);
1435
1436 if (debug_displaced)
1437 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1438 paddress (gdbarch, copy));
1439
1440 return 1;
1441 }
1442
1443 static void
1444 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1445 const gdb_byte *myaddr, int len)
1446 {
1447 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1448
1449 inferior_ptid = ptid;
1450 write_memory (memaddr, myaddr, len);
1451 do_cleanups (ptid_cleanup);
1452 }
1453
1454 /* Restore the contents of the copy area for thread PTID. */
1455
1456 static void
1457 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1458 ptid_t ptid)
1459 {
1460 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1461
1462 write_memory_ptid (ptid, displaced->step_copy,
1463 displaced->step_saved_copy, len);
1464 if (debug_displaced)
1465 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1466 target_pid_to_str (ptid),
1467 paddress (displaced->step_gdbarch,
1468 displaced->step_copy));
1469 }
1470
1471 static void
1472 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1473 {
1474 struct cleanup *old_cleanups;
1475 struct displaced_step_inferior_state *displaced
1476 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1477
1478 /* Was any thread of this process doing a displaced step? */
1479 if (displaced == NULL)
1480 return;
1481
1482 /* Was this event for the pid we displaced? */
1483 if (ptid_equal (displaced->step_ptid, null_ptid)
1484 || ! ptid_equal (displaced->step_ptid, event_ptid))
1485 return;
1486
1487 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1488
1489 displaced_step_restore (displaced, displaced->step_ptid);
1490
1491 /* Did the instruction complete successfully? */
1492 if (signal == GDB_SIGNAL_TRAP)
1493 {
1494 /* Fix up the resulting state. */
1495 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1496 displaced->step_closure,
1497 displaced->step_original,
1498 displaced->step_copy,
1499 get_thread_regcache (displaced->step_ptid));
1500 }
1501 else
1502 {
1503 /* Since the instruction didn't complete, all we can do is
1504 relocate the PC. */
1505 struct regcache *regcache = get_thread_regcache (event_ptid);
1506 CORE_ADDR pc = regcache_read_pc (regcache);
1507
1508 pc = displaced->step_original + (pc - displaced->step_copy);
1509 regcache_write_pc (regcache, pc);
1510 }
1511
1512 do_cleanups (old_cleanups);
1513
1514 displaced->step_ptid = null_ptid;
1515
1516 /* Are there any pending displaced stepping requests? If so, run
1517 one now. Leave the state object around, since we're likely to
1518 need it again soon. */
1519 while (displaced->step_request_queue)
1520 {
1521 struct displaced_step_request *head;
1522 ptid_t ptid;
1523 struct regcache *regcache;
1524 struct gdbarch *gdbarch;
1525 CORE_ADDR actual_pc;
1526 struct address_space *aspace;
1527
1528 head = displaced->step_request_queue;
1529 ptid = head->ptid;
1530 displaced->step_request_queue = head->next;
1531 xfree (head);
1532
1533 context_switch (ptid);
1534
1535 regcache = get_thread_regcache (ptid);
1536 actual_pc = regcache_read_pc (regcache);
1537 aspace = get_regcache_aspace (regcache);
1538
1539 if (breakpoint_here_p (aspace, actual_pc))
1540 {
1541 if (debug_displaced)
1542 fprintf_unfiltered (gdb_stdlog,
1543 "displaced: stepping queued %s now\n",
1544 target_pid_to_str (ptid));
1545
1546 displaced_step_prepare (ptid);
1547
1548 gdbarch = get_regcache_arch (regcache);
1549
1550 if (debug_displaced)
1551 {
1552 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1553 gdb_byte buf[4];
1554
1555 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1556 paddress (gdbarch, actual_pc));
1557 read_memory (actual_pc, buf, sizeof (buf));
1558 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1559 }
1560
1561 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1562 displaced->step_closure))
1563 target_resume (ptid, 1, GDB_SIGNAL_0);
1564 else
1565 target_resume (ptid, 0, GDB_SIGNAL_0);
1566
1567 /* Done, we're stepping a thread. */
1568 break;
1569 }
1570 else
1571 {
1572 int step;
1573 struct thread_info *tp = inferior_thread ();
1574
1575 /* The breakpoint we were sitting under has since been
1576 removed. */
1577 tp->control.trap_expected = 0;
1578
1579 /* Go back to what we were trying to do. */
1580 step = currently_stepping (tp);
1581
1582 if (debug_displaced)
1583 fprintf_unfiltered (gdb_stdlog,
1584 "displaced: breakpoint is gone: %s, step(%d)\n",
1585 target_pid_to_str (tp->ptid), step);
1586
1587 target_resume (ptid, step, GDB_SIGNAL_0);
1588 tp->suspend.stop_signal = GDB_SIGNAL_0;
1589
1590 /* This request was discarded. See if there's any other
1591 thread waiting for its turn. */
1592 }
1593 }
1594 }
1595
1596 /* Update global variables holding ptids to hold NEW_PTID if they were
1597 holding OLD_PTID. */
1598 static void
1599 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1600 {
1601 struct displaced_step_request *it;
1602 struct displaced_step_inferior_state *displaced;
1603
1604 if (ptid_equal (inferior_ptid, old_ptid))
1605 inferior_ptid = new_ptid;
1606
1607 if (ptid_equal (singlestep_ptid, old_ptid))
1608 singlestep_ptid = new_ptid;
1609
1610 if (ptid_equal (deferred_step_ptid, old_ptid))
1611 deferred_step_ptid = new_ptid;
1612
1613 for (displaced = displaced_step_inferior_states;
1614 displaced;
1615 displaced = displaced->next)
1616 {
1617 if (ptid_equal (displaced->step_ptid, old_ptid))
1618 displaced->step_ptid = new_ptid;
1619
1620 for (it = displaced->step_request_queue; it; it = it->next)
1621 if (ptid_equal (it->ptid, old_ptid))
1622 it->ptid = new_ptid;
1623 }
1624 }
1625
1626 \f
1627 /* Resuming. */
1628
1629 /* Things to clean up if we QUIT out of resume (). */
1630 static void
1631 resume_cleanups (void *ignore)
1632 {
1633 normal_stop ();
1634 }
1635
1636 static const char schedlock_off[] = "off";
1637 static const char schedlock_on[] = "on";
1638 static const char schedlock_step[] = "step";
1639 static const char *const scheduler_enums[] = {
1640 schedlock_off,
1641 schedlock_on,
1642 schedlock_step,
1643 NULL
1644 };
1645 static const char *scheduler_mode = schedlock_off;
1646 static void
1647 show_scheduler_mode (struct ui_file *file, int from_tty,
1648 struct cmd_list_element *c, const char *value)
1649 {
1650 fprintf_filtered (file,
1651 _("Mode for locking scheduler "
1652 "during execution is \"%s\".\n"),
1653 value);
1654 }
1655
1656 static void
1657 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1658 {
1659 if (!target_can_lock_scheduler)
1660 {
1661 scheduler_mode = schedlock_off;
1662 error (_("Target '%s' cannot support this command."), target_shortname);
1663 }
1664 }
1665
1666 /* True if execution commands resume all threads of all processes by
1667 default; otherwise, resume only threads of the current inferior
1668 process. */
1669 int sched_multi = 0;
1670
1671 /* Try to set up software single stepping over the specified location.
1672 Return 1 if target_resume() should use hardware single step.
1673
1674 GDBARCH the current gdbarch.
1675 PC the location to step over. */
1676
1677 static int
1678 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1679 {
1680 int hw_step = 1;
1681
1682 if (execution_direction == EXEC_FORWARD
1683 && gdbarch_software_single_step_p (gdbarch)
1684 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1685 {
1686 hw_step = 0;
1687 /* Do not pull these breakpoints until after a `wait' in
1688 `wait_for_inferior'. */
1689 singlestep_breakpoints_inserted_p = 1;
1690 singlestep_ptid = inferior_ptid;
1691 singlestep_pc = pc;
1692 }
1693 return hw_step;
1694 }
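/* Editorial note: the sketch below is illustrative only and not part
   of this file's logic.  It shows how a caller is expected to consume
   maybe_software_singlestep's return value; the helper name and its
   existence are assumptions for the example, while resume (further
   below) is the real consumer.  */
#if 0
static void
example_single_step_current_thread (struct gdbarch *gdbarch,
                                    struct regcache *regcache)
{
  CORE_ADDR pc = regcache_read_pc (regcache);
  /* Returns 1 if the target should hardware single-step (no software
     single-step support, reverse execution, or no breakpoints could
     be placed); returns 0 if software single-step breakpoints were
     inserted, in which case we simply continue into them.  */
  int hw_step = maybe_software_singlestep (gdbarch, pc);

  target_resume (inferior_ptid, hw_step, GDB_SIGNAL_0);
}
#endif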
1695
1696 /* Return a ptid representing the set of threads that we will proceed,
1697 from the perspective of the user/frontend.  We may actually resume
1698 fewer threads at first, e.g., if a thread is stopped at a
1699 breakpoint that needs stepping-off, but that should not be visible
1700 to the user/frontend, and neither should the frontend/user be
1701 allowed to proceed any of the threads that happen to be stopped for
1702 internal run control handling, if a previous command wanted them
1703 resumed. */
1704
1705 ptid_t
1706 user_visible_resume_ptid (int step)
1707 {
1708 /* By default, resume all threads of all processes. */
1709 ptid_t resume_ptid = RESUME_ALL;
1710
1711 /* Maybe resume only all threads of the current process. */
1712 if (!sched_multi && target_supports_multi_process ())
1713 {
1714 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1715 }
1716
1717 /* Maybe resume a single thread after all. */
1718 if (non_stop)
1719 {
1720 /* With non-stop mode on, threads are always handled
1721 individually. */
1722 resume_ptid = inferior_ptid;
1723 }
1724 else if ((scheduler_mode == schedlock_on)
1725 || (scheduler_mode == schedlock_step
1726 && (step || singlestep_breakpoints_inserted_p)))
1727 {
1728 /* User-settable 'scheduler' mode requires solo thread resume. */
1729 resume_ptid = inferior_ptid;
1730 }
1731
1732 return resume_ptid;
1733 }
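/* Editorial note: illustrative sketch only; the helper below is
   hypothetical.  It shows how the user-visible set computed above is
   typically handed to the target; resume (below) is the real code
   path and may narrow the set further for internal reasons.  */
#if 0
static void
example_resume_user_visible_set (int step, enum gdb_signal sig)
{
  /* Which threads does the user/frontend expect to run?  This honors
     non-stop, "set scheduler-locking" and "set schedule-multiple".  */
  ptid_t resume_ptid = user_visible_resume_ptid (step);

  /* Resume exactly that set.  */
  target_resume (resume_ptid, step, sig);
}
#endif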
1734
1735 /* Resume the inferior, but allow a QUIT. This is useful if the user
1736 wants to interrupt some lengthy single-stepping operation
1737 (for child processes, the SIGINT goes to the inferior, and so
1738 we get a SIGINT random_signal, but for remote debugging and perhaps
1739 other targets, that's not true).
1740
1741 STEP nonzero if we should step (zero to continue instead).
1742 SIG is the signal to give the inferior (zero for none). */
1743 void
1744 resume (int step, enum gdb_signal sig)
1745 {
1746 int should_resume = 1;
1747 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1748 struct regcache *regcache = get_current_regcache ();
1749 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1750 struct thread_info *tp = inferior_thread ();
1751 CORE_ADDR pc = regcache_read_pc (regcache);
1752 struct address_space *aspace = get_regcache_aspace (regcache);
1753
1754 QUIT;
1755
1756 if (current_inferior ()->waiting_for_vfork_done)
1757 {
1758 /* Don't try to single-step a vfork parent that is waiting for
1759 the child to get out of the shared memory region (by exec'ing
1760 or exiting). This is particularly important on software
1761 single-step archs, as the child process would trip on the
1762 software single step breakpoint inserted for the parent
1763 process. Since the parent will not actually execute any
1764 instruction until the child is out of the shared region (such
1765 are vfork's semantics), it is safe to simply continue it.
1766 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1767 the parent, and tell it to `keep_going', which automatically
1768 sets it stepping again. */
1769 if (debug_infrun)
1770 fprintf_unfiltered (gdb_stdlog,
1771 "infrun: resume : clear step\n");
1772 step = 0;
1773 }
1774
1775 if (debug_infrun)
1776 fprintf_unfiltered (gdb_stdlog,
1777 "infrun: resume (step=%d, signal=%d), "
1778 "trap_expected=%d, current thread [%s] at %s\n",
1779 step, sig, tp->control.trap_expected,
1780 target_pid_to_str (inferior_ptid),
1781 paddress (gdbarch, pc));
1782
1783 /* Normally, by the time we reach `resume', the breakpoints are either
1784 removed or inserted, as appropriate. The exception is if we're sitting
1785 at a permanent breakpoint; we need to step over it, but permanent
1786 breakpoints can't be removed. So we have to test for it here. */
1787 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1788 {
1789 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1790 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1791 else
1792 error (_("\
1793 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1794 how to step past a permanent breakpoint on this architecture. Try using\n\
1795 a command like `return' or `jump' to continue execution."));
1796 }
1797
1798 /* If we have a breakpoint to step over, make sure to do a single
1799 step only. Same if we have software watchpoints. */
1800 if (tp->control.trap_expected || bpstat_should_step ())
1801 tp->control.may_range_step = 0;
1802
1803 /* If enabled, step over breakpoints by executing a copy of the
1804 instruction at a different address.
1805
1806 We can't use displaced stepping when we have a signal to deliver;
1807 the comments for displaced_step_prepare explain why. The
1808 comments in the handle_inferior event for dealing with 'random
1809 signals' explain what we do instead.
1810
1811 We also can't use displaced stepping while waiting for a vfork_done
1812 event: displaced stepping would trip up the vfork child in the same
1813 way a software single-step breakpoint would. */
1814 if (use_displaced_stepping (gdbarch)
1815 && (tp->control.trap_expected
1816 || (step && gdbarch_software_single_step_p (gdbarch)))
1817 && sig == GDB_SIGNAL_0
1818 && !current_inferior ()->waiting_for_vfork_done)
1819 {
1820 struct displaced_step_inferior_state *displaced;
1821
1822 if (!displaced_step_prepare (inferior_ptid))
1823 {
1824 /* Got placed in displaced stepping queue. Will be resumed
1825 later when all the currently queued displaced stepping
1826 requests finish. The thread is not executing at this point,
1827 and the call to set_executing will be made later. But we
1828 need to call set_running here, since from the frontend's point of
1829 view, the thread is running. */
1830 set_running (inferior_ptid, 1);
1831 discard_cleanups (old_cleanups);
1832 return;
1833 }
1834
1835 /* Update pc to reflect the new address from which we will execute
1836 instructions due to displaced stepping. */
1837 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1838
1839 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1840 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1841 displaced->step_closure);
1842 }
1843
1844 /* Do we need to do it the hard way, w/temp breakpoints? */
1845 else if (step)
1846 step = maybe_software_singlestep (gdbarch, pc);
1847
1848 /* Currently, our software single-step implementation leads to different
1849 results than hardware single-stepping in one situation: when stepping
1850 into delivering a signal which has an associated signal handler,
1851 hardware single-step will stop at the first instruction of the handler,
1852 while software single-step will simply skip execution of the handler.
1853
1854 For now, this difference in behavior is accepted since there is no
1855 easy way to actually implement single-stepping into a signal handler
1856 without kernel support.
1857
1858 However, there is one scenario where this difference leads to follow-on
1859 problems: if we're stepping off a breakpoint by removing all breakpoints
1860 and then single-stepping. In this case, the software single-step
1861 behavior means that even if there is a *breakpoint* in the signal
1862 handler, GDB still would not stop.
1863
1864 Fortunately, we can at least fix this particular issue. We detect
1865 here the case where we are about to deliver a signal while software
1866 single-stepping with breakpoints removed. In this situation, we
1867 revert the decisions to remove all breakpoints and insert single-
1868 step breakpoints, and instead we install a step-resume breakpoint
1869 at the current address, deliver the signal without stepping, and
1870 once we arrive back at the step-resume breakpoint, actually step
1871 over the breakpoint we originally wanted to step over. */
1872 if (singlestep_breakpoints_inserted_p
1873 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1874 {
1875 /* If we have nested signals or a pending signal is delivered
1876 immediately after a handler returns, we might already have
1877 a step-resume breakpoint set on the earlier handler. We cannot
1878 set another step-resume breakpoint; just continue on until the
1879 original breakpoint is hit. */
1880 if (tp->control.step_resume_breakpoint == NULL)
1881 {
1882 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1883 tp->step_after_step_resume_breakpoint = 1;
1884 }
1885
1886 remove_single_step_breakpoints ();
1887 singlestep_breakpoints_inserted_p = 0;
1888
1889 insert_breakpoints ();
1890 tp->control.trap_expected = 0;
1891 }
1892
1893 if (should_resume)
1894 {
1895 ptid_t resume_ptid;
1896
1897 /* If STEP is set, it's a request to use hardware stepping
1898 facilities. But in that case, we should never
1899 use singlestep breakpoint. */
1900 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1901
1902 /* Decide the set of threads to ask the target to resume. Start
1903 by assuming everything will be resumed, then narrow the set
1904 by applying increasingly restrictive conditions. */
1905 resume_ptid = user_visible_resume_ptid (step);
1906
1907 /* Maybe resume a single thread after all. */
1908 if (singlestep_breakpoints_inserted_p
1909 && stepping_past_singlestep_breakpoint)
1910 {
1911 /* The situation here is as follows. In thread T1 we wanted to
1912 single-step. Lacking hardware single-stepping we've
1913 set breakpoint at the PC of the next instruction -- call it
1914 P. After resuming, we've hit that breakpoint in thread T2.
1915 Now we've removed the original breakpoint, inserted a breakpoint
1916 at P+1, and are trying to step to advance T2 past the breakpoint.
1917 We need to step only T2: if T1 is allowed to run freely, it can
1918 run past P, and if other threads are allowed to run, they can hit
1919 the breakpoint at P+1.  Nested hits of single-step breakpoints are
1920 not something we'd want -- that's complicated to support, and has
1921 no value. */
1922 resume_ptid = inferior_ptid;
1923 }
1924 else if ((step || singlestep_breakpoints_inserted_p)
1925 && tp->control.trap_expected)
1926 {
1927 /* We're allowing a thread to run past a breakpoint it has
1928 hit, by single-stepping the thread with the breakpoint
1929 removed. In which case, we need to single-step only this
1930 thread, and keep others stopped, as they can miss this
1931 breakpoint if allowed to run.
1932
1933 The current code actually removes all breakpoints when
1934 doing this, not just the one being stepped over, so if we
1935 let other threads run, we can actually miss any
1936 breakpoint, not just the one at PC. */
1937 resume_ptid = inferior_ptid;
1938 }
1939
1940 if (gdbarch_cannot_step_breakpoint (gdbarch))
1941 {
1942 /* Most targets can step a breakpoint instruction, thus
1943 executing it normally. But if this one cannot, just
1944 continue and we will hit it anyway. */
1945 if (step && breakpoint_inserted_here_p (aspace, pc))
1946 step = 0;
1947 }
1948
1949 if (debug_displaced
1950 && use_displaced_stepping (gdbarch)
1951 && tp->control.trap_expected)
1952 {
1953 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1954 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1955 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1956 gdb_byte buf[4];
1957
1958 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1959 paddress (resume_gdbarch, actual_pc));
1960 read_memory (actual_pc, buf, sizeof (buf));
1961 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1962 }
1963
1964 if (tp->control.may_range_step)
1965 {
1966 /* If we're resuming a thread with the PC out of the step
1967 range, then we're doing some nested/finer run control
1968 operation, like stepping the thread out of the dynamic
1969 linker or the displaced stepping scratch pad. We
1970 shouldn't have allowed a range step then. */
1971 gdb_assert (pc_in_thread_step_range (pc, tp));
1972 }
1973
1974 /* Install inferior's terminal modes. */
1975 target_terminal_inferior ();
1976
1977 /* Avoid confusing the next resume, if the next stop/resume
1978 happens to apply to another thread. */
1979 tp->suspend.stop_signal = GDB_SIGNAL_0;
1980
1981 /* Advise target which signals may be handled silently. If we have
1982 removed breakpoints because we are stepping over one (which can
1983 happen only if we are not using displaced stepping), we need to
1984 receive all signals to avoid accidentally skipping a breakpoint
1985 during execution of a signal handler. */
1986 if ((step || singlestep_breakpoints_inserted_p)
1987 && tp->control.trap_expected
1988 && !use_displaced_stepping (gdbarch))
1989 target_pass_signals (0, NULL);
1990 else
1991 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
1992
1993 target_resume (resume_ptid, step, sig);
1994 }
1995
1996 discard_cleanups (old_cleanups);
1997 }
1998 \f
1999 /* Proceeding. */
2000
2001 /* Clear out all variables saying what to do when inferior is continued.
2002 First do this, then set the ones you want, then call `proceed'. */
2003
2004 static void
2005 clear_proceed_status_thread (struct thread_info *tp)
2006 {
2007 if (debug_infrun)
2008 fprintf_unfiltered (gdb_stdlog,
2009 "infrun: clear_proceed_status_thread (%s)\n",
2010 target_pid_to_str (tp->ptid));
2011
2012 tp->control.trap_expected = 0;
2013 tp->control.step_range_start = 0;
2014 tp->control.step_range_end = 0;
2015 tp->control.may_range_step = 0;
2016 tp->control.step_frame_id = null_frame_id;
2017 tp->control.step_stack_frame_id = null_frame_id;
2018 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2019 tp->stop_requested = 0;
2020
2021 tp->control.stop_step = 0;
2022
2023 tp->control.proceed_to_finish = 0;
2024
2025 /* Discard any remaining commands or status from previous stop. */
2026 bpstat_clear (&tp->control.stop_bpstat);
2027 }
2028
2029 static int
2030 clear_proceed_status_callback (struct thread_info *tp, void *data)
2031 {
2032 if (is_exited (tp->ptid))
2033 return 0;
2034
2035 clear_proceed_status_thread (tp);
2036 return 0;
2037 }
2038
2039 void
2040 clear_proceed_status (void)
2041 {
2042 if (!non_stop)
2043 {
2044 /* In all-stop mode, delete the per-thread status of all
2045 threads.  Even if inferior_ptid is null_ptid, there may be
2046 threads on the list.  E.g., we may be launching a new
2047 process while selecting the executable. */
2048 iterate_over_threads (clear_proceed_status_callback, NULL);
2049 }
2050
2051 if (!ptid_equal (inferior_ptid, null_ptid))
2052 {
2053 struct inferior *inferior;
2054
2055 if (non_stop)
2056 {
2057 /* If in non-stop mode, only delete the per-thread status of
2058 the current thread. */
2059 clear_proceed_status_thread (inferior_thread ());
2060 }
2061
2062 inferior = current_inferior ();
2063 inferior->control.stop_soon = NO_STOP_QUIETLY;
2064 }
2065
2066 stop_after_trap = 0;
2067
2068 observer_notify_about_to_proceed ();
2069
2070 if (stop_registers)
2071 {
2072 regcache_xfree (stop_registers);
2073 stop_registers = NULL;
2074 }
2075 }
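/* Editorial note: illustrative sketch only.  This spells out the
   calling protocol documented above clear_proceed_status ("first do
   this, then set the ones you want, then call `proceed'").  The
   helper and its range arguments are made up for the example; real
   callers live in the execution commands, not in this file.  */
#if 0
static void
example_step_in_range (CORE_ADDR range_start, CORE_ADDR range_end)
{
  struct thread_info *tp;

  /* Wipe all per-thread "what to do on continue" state first.  */
  clear_proceed_status ();

  /* Then set only the bits this particular command cares about (a
     real stepping command sets more than this).  */
  tp = inferior_thread ();
  tp->control.step_range_start = range_start;
  tp->control.step_range_end = range_end;

  /* Finally resume from where we stopped, single-stepping.  */
  proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 1);
}
#endif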
2076
2077 /* Check the current thread against the thread that reported the most recent
2078 event. If a step-over is required return TRUE and set the current thread
2079 to the old thread. Otherwise return FALSE.
2080
2081 This should be suitable for any targets that support threads. */
2082
2083 static int
2084 prepare_to_proceed (int step)
2085 {
2086 ptid_t wait_ptid;
2087 struct target_waitstatus wait_status;
2088 int schedlock_enabled;
2089
2090 /* With non-stop mode on, threads are always handled individually. */
2091 gdb_assert (! non_stop);
2092
2093 /* Get the last target status returned by target_wait(). */
2094 get_last_target_status (&wait_ptid, &wait_status);
2095
2096 /* Make sure we were stopped at a breakpoint. */
2097 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2098 || (wait_status.value.sig != GDB_SIGNAL_TRAP
2099 && wait_status.value.sig != GDB_SIGNAL_ILL
2100 && wait_status.value.sig != GDB_SIGNAL_SEGV
2101 && wait_status.value.sig != GDB_SIGNAL_EMT))
2102 {
2103 return 0;
2104 }
2105
2106 schedlock_enabled = (scheduler_mode == schedlock_on
2107 || (scheduler_mode == schedlock_step
2108 && step));
2109
2110 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2111 if (schedlock_enabled)
2112 return 0;
2113
2114 /* Don't switch over if we're about to resume a process other
2115 than WAIT_PTID's, and schedule-multiple is off. */
2116 if (!sched_multi
2117 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2118 return 0;
2119
2120 /* Check whether we have switched away from WAIT_PTID's thread. */
2121 if (!ptid_equal (wait_ptid, minus_one_ptid)
2122 && !ptid_equal (inferior_ptid, wait_ptid))
2123 {
2124 struct regcache *regcache = get_thread_regcache (wait_ptid);
2125
2126 if (breakpoint_here_p (get_regcache_aspace (regcache),
2127 regcache_read_pc (regcache)))
2128 {
2129 /* If stepping, remember current thread to switch back to. */
2130 if (step)
2131 deferred_step_ptid = inferior_ptid;
2132
2133 /* Switch back to the WAIT_PTID thread. */
2134 switch_to_thread (wait_ptid);
2135
2136 if (debug_infrun)
2137 fprintf_unfiltered (gdb_stdlog,
2138 "infrun: prepare_to_proceed (step=%d), "
2139 "switched to [%s]\n",
2140 step, target_pid_to_str (inferior_ptid));
2141
2142 /* We return 1 to indicate that there is a breakpoint here,
2143 so we need to step over it before continuing to avoid
2144 hitting it straight away. */
2145 return 1;
2146 }
2147 }
2148
2149 return 0;
2150 }
2151
2152 /* Basic routine for continuing the program in various fashions.
2153
2154 ADDR is the address to resume at, or -1 for resume where stopped.
2155 SIGGNAL is the signal to give it, or 0 for none,
2156 or -1 for act according to how it stopped.
2157 STEP is nonzero if we should trap after one instruction.
2158 -1 means return after that and print nothing.
2159 You should probably set various step_... variables
2160 before calling here, if you are stepping.
2161
2162 You should call clear_proceed_status before calling proceed. */
2163
2164 void
2165 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2166 {
2167 struct regcache *regcache;
2168 struct gdbarch *gdbarch;
2169 struct thread_info *tp;
2170 CORE_ADDR pc;
2171 struct address_space *aspace;
2172 /* GDB may force the inferior to step for various reasons. */
2173 int force_step = 0;
2174
2175 /* If we're stopped at a fork/vfork, follow the branch set by the
2176 "set follow-fork-mode" command; otherwise, we'll just proceed
2177 resuming the current thread. */
2178 if (!follow_fork ())
2179 {
2180 /* The target for some reason decided not to resume. */
2181 normal_stop ();
2182 if (target_can_async_p ())
2183 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2184 return;
2185 }
2186
2187 /* We'll update this if & when we switch to a new thread. */
2188 previous_inferior_ptid = inferior_ptid;
2189
2190 regcache = get_current_regcache ();
2191 gdbarch = get_regcache_arch (regcache);
2192 aspace = get_regcache_aspace (regcache);
2193 pc = regcache_read_pc (regcache);
2194
2195 if (step > 0)
2196 step_start_function = find_pc_function (pc);
2197 if (step < 0)
2198 stop_after_trap = 1;
2199
2200 if (addr == (CORE_ADDR) -1)
2201 {
2202 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2203 && execution_direction != EXEC_REVERSE)
2204 /* There is a breakpoint at the address we will resume at;
2205 step one instruction before inserting breakpoints so that
2206 we do not stop right away (and report a second hit at this
2207 breakpoint).
2208
2209 Note, we don't do this in reverse, because we won't
2210 actually be executing the breakpoint insn anyway.
2211 We'll be (un-)executing the previous instruction. */
2212
2213 force_step = 1;
2214 else if (gdbarch_single_step_through_delay_p (gdbarch)
2215 && gdbarch_single_step_through_delay (gdbarch,
2216 get_current_frame ()))
2217 /* We stepped onto an instruction that needs to be stepped
2218 again before re-inserting the breakpoint, do so. */
2219 force_step = 1;
2220 }
2221 else
2222 {
2223 regcache_write_pc (regcache, addr);
2224 }
2225
2226 if (debug_infrun)
2227 fprintf_unfiltered (gdb_stdlog,
2228 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2229 paddress (gdbarch, addr), siggnal, step);
2230
2231 if (non_stop)
2232 /* In non-stop, each thread is handled individually. The context
2233 must already be set to the right thread here. */
2234 ;
2235 else
2236 {
2237 /* In a multi-threaded task we may select another thread and
2238 then continue or step.
2239
2240 But if the old thread was stopped at a breakpoint, it will
2241 immediately cause another breakpoint stop without any
2242 execution (i.e. it will report a breakpoint hit incorrectly).
2243 So we must step over it first.
2244
2245 prepare_to_proceed checks the current thread against the
2246 thread that reported the most recent event. If a step-over
2247 is required it returns TRUE and sets the current thread to
2248 the old thread. */
2249 if (prepare_to_proceed (step))
2250 force_step = 1;
2251 }
2252
2253 /* prepare_to_proceed may change the current thread. */
2254 tp = inferior_thread ();
2255
2256 if (force_step)
2257 {
2258 tp->control.trap_expected = 1;
2259 /* If displaced stepping is enabled, we can step over the
2260 breakpoint without hitting it, so leave all breakpoints
2261 inserted. Otherwise we need to disable all breakpoints, step
2262 one instruction, and then re-add them when that step is
2263 finished. */
2264 if (!use_displaced_stepping (gdbarch))
2265 remove_breakpoints ();
2266 }
2267
2268 /* We can insert breakpoints if we're not trying to step over one,
2269 or if we are stepping over one but we're using displaced stepping
2270 to do so. */
2271 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2272 insert_breakpoints ();
2273
2274 if (!non_stop)
2275 {
2276 /* Pass the last stop signal to the thread we're resuming,
2277 irrespective of whether the current thread is the thread that
2278 got the last event or not. This was historically GDB's
2279 behaviour before keeping a stop_signal per thread. */
2280
2281 struct thread_info *last_thread;
2282 ptid_t last_ptid;
2283 struct target_waitstatus last_status;
2284
2285 get_last_target_status (&last_ptid, &last_status);
2286 if (!ptid_equal (inferior_ptid, last_ptid)
2287 && !ptid_equal (last_ptid, null_ptid)
2288 && !ptid_equal (last_ptid, minus_one_ptid))
2289 {
2290 last_thread = find_thread_ptid (last_ptid);
2291 if (last_thread)
2292 {
2293 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2294 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2295 }
2296 }
2297 }
2298
2299 if (siggnal != GDB_SIGNAL_DEFAULT)
2300 tp->suspend.stop_signal = siggnal;
2301 /* If this signal should not be seen by program,
2302 give it zero. Used for debugging signals. */
2303 else if (!signal_program[tp->suspend.stop_signal])
2304 tp->suspend.stop_signal = GDB_SIGNAL_0;
2305
2306 annotate_starting ();
2307
2308 /* Make sure that output from GDB appears before output from the
2309 inferior. */
2310 gdb_flush (gdb_stdout);
2311
2312 /* Refresh prev_pc value just prior to resuming. This used to be
2313 done in stop_stepping, however, setting prev_pc there did not handle
2314 scenarios such as inferior function calls or returning from
2315 a function via the return command. In those cases, the prev_pc
2316 value was not set properly for subsequent commands. The prev_pc value
2317 is used to initialize the starting line number in the ecs. With an
2318 invalid value, the gdb next command ends up stopping at the position
2319 represented by the next line table entry past our start position.
2320 On platforms that generate one line table entry per line, this
2321 is not a problem. However, on the ia64, the compiler generates
2322 extraneous line table entries that do not increase the line number.
2323 When we issue the gdb next command on the ia64 after an inferior call
2324 or a return command, we often end up a few instructions forward, still
2325 within the original line we started in.
2326
2327 An attempt was made to refresh the prev_pc at the same time the
2328 execution_control_state is initialized (for instance, just before
2329 waiting for an inferior event). But this approach did not work
2330 because of platforms that use ptrace, where the pc register cannot
2331 be read unless the inferior is stopped. At that point, we are not
2332 guaranteed the inferior is stopped and so the regcache_read_pc() call
2333 can fail. Setting the prev_pc value here ensures the value is updated
2334 correctly when the inferior is stopped. */
2335 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2336
2337 /* Fill in with reasonable starting values. */
2338 init_thread_stepping_state (tp);
2339
2340 /* Reset to normal state. */
2341 init_infwait_state ();
2342
2343 /* Resume inferior. */
2344 resume (force_step || step || bpstat_should_step (),
2345 tp->suspend.stop_signal);
2346
2347 /* Wait for it to stop (if not standalone)
2348 and in any case decode why it stopped, and act accordingly. */
2349 /* Do this only if we are not using the event loop, or if the target
2350 does not support asynchronous execution. */
2351 if (!target_can_async_p ())
2352 {
2353 wait_for_inferior ();
2354 normal_stop ();
2355 }
2356 }
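/* Editorial note: illustrative examples only, not call sites.  They
   spell out the ADDR/SIGGNAL/STEP conventions documented above
   proceed; each call stands on its own and would be preceded by
   clear_proceed_status in a real caller.  */
#if 0
static void
example_proceed_variants (CORE_ADDR addr)
{
  /* "continue"-style: resume where we stopped, acting on the signal
     the inferior stopped with, no single step.  */
  proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);

  /* Resume at ADDR instead, delivering no signal.  */
  proceed (addr, GDB_SIGNAL_0, 0);

  /* Trap again after one instruction.  */
  proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 1);
}
#endif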
2357 \f
2358
2359 /* Start remote-debugging of a machine over a serial link. */
2360
2361 void
2362 start_remote (int from_tty)
2363 {
2364 struct inferior *inferior;
2365
2366 inferior = current_inferior ();
2367 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2368
2369 /* Always go on waiting for the target, regardless of the mode. */
2370 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2371 indicate to wait_for_inferior that a target should timeout if
2372 nothing is returned (instead of just blocking). Because of this,
2373 targets expecting an immediate response need to, internally, set
2374 things up so that the target_wait() is forced to eventually
2375 timeout. */
2376 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2377 differentiate to its caller what the state of the target is after
2378 the initial open has been performed. Here we're assuming that
2379 the target has stopped. It should be possible to eventually have
2380 target_open() return to the caller an indication that the target
2381 is currently running and GDB state should be set to the same as
2382 for an async run. */
2383 wait_for_inferior ();
2384
2385 /* Now that the inferior has stopped, do any bookkeeping like
2386 loading shared libraries. We want to do this before normal_stop,
2387 so that the displayed frame is up to date. */
2388 post_create_inferior (&current_target, from_tty);
2389
2390 normal_stop ();
2391 }
2392
2393 /* Initialize static vars when a new inferior begins. */
2394
2395 void
2396 init_wait_for_inferior (void)
2397 {
2398 /* These are meaningless until the first time through wait_for_inferior. */
2399
2400 breakpoint_init_inferior (inf_starting);
2401
2402 clear_proceed_status ();
2403
2404 stepping_past_singlestep_breakpoint = 0;
2405 deferred_step_ptid = null_ptid;
2406
2407 target_last_wait_ptid = minus_one_ptid;
2408
2409 previous_inferior_ptid = inferior_ptid;
2410 init_infwait_state ();
2411
2412 /* Discard any skipped inlined frames. */
2413 clear_inline_frame_state (minus_one_ptid);
2414 }
2415
2416 \f
2417 /* This enum encodes possible reasons for doing a target_wait, so that
2418 wfi can call target_wait in one place. (Ultimately the call will be
2419 moved out of the infinite loop entirely.) */
2420
2421 enum infwait_states
2422 {
2423 infwait_normal_state,
2424 infwait_thread_hop_state,
2425 infwait_step_watch_state,
2426 infwait_nonstep_watch_state
2427 };
2428
2429 /* The PTID we'll do a target_wait on. */
2430 ptid_t waiton_ptid;
2431
2432 /* Current inferior wait state. */
2433 static enum infwait_states infwait_state;
2434
2435 /* Data to be passed around while handling an event. This data is
2436 discarded between events. */
2437 struct execution_control_state
2438 {
2439 ptid_t ptid;
2440 /* The thread that got the event, if this was a thread event; NULL
2441 otherwise. */
2442 struct thread_info *event_thread;
2443
2444 struct target_waitstatus ws;
2445 int random_signal;
2446 int stop_func_filled_in;
2447 CORE_ADDR stop_func_start;
2448 CORE_ADDR stop_func_end;
2449 const char *stop_func_name;
2450 int wait_some_more;
2451 };
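/* Editorial note: illustrative sketch only.  The fragment below shows
   the typical life cycle of one execution_control_state, which is the
   pattern wait_for_inferior and fetch_inferior_event (further below)
   implement in full.  */
#if 0
  {
    struct execution_control_state ecss;
    struct execution_control_state *ecs = &ecss;

    /* Fresh, zeroed state for each event.  */
    memset (ecs, 0, sizeof (*ecs));

    /* Pull one event from the target and decode it.  */
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
    handle_inferior_event (ecs);

    /* If the event did not finish the current execution command,
       handle_inferior_event sets wait_some_more and the caller goes
       around again with a new, zeroed state.  */
    if (ecs->wait_some_more)
      ;
  }
#endif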
2452
2453 static void handle_inferior_event (struct execution_control_state *ecs);
2454
2455 static void handle_step_into_function (struct gdbarch *gdbarch,
2456 struct execution_control_state *ecs);
2457 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2458 struct execution_control_state *ecs);
2459 static void check_exception_resume (struct execution_control_state *,
2460 struct frame_info *);
2461
2462 static void stop_stepping (struct execution_control_state *ecs);
2463 static void prepare_to_wait (struct execution_control_state *ecs);
2464 static void keep_going (struct execution_control_state *ecs);
2465
2466 /* Callback for iterate over threads. If the thread is stopped, but
2467 the user/frontend doesn't know about that yet, go through
2468 normal_stop, as if the thread had just stopped now. ARG points at
2469 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2470 ptid_is_pid(PTID) is true, applies to all threads of the process
2471 pointed at by PTID.  Otherwise, apply only to the thread pointed at
2472 by PTID. */
2473
2474 static int
2475 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2476 {
2477 ptid_t ptid = * (ptid_t *) arg;
2478
2479 if ((ptid_equal (info->ptid, ptid)
2480 || ptid_equal (minus_one_ptid, ptid)
2481 || (ptid_is_pid (ptid)
2482 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2483 && is_running (info->ptid)
2484 && !is_executing (info->ptid))
2485 {
2486 struct cleanup *old_chain;
2487 struct execution_control_state ecss;
2488 struct execution_control_state *ecs = &ecss;
2489
2490 memset (ecs, 0, sizeof (*ecs));
2491
2492 old_chain = make_cleanup_restore_current_thread ();
2493
2494 /* Go through handle_inferior_event/normal_stop, so we always
2495 have consistent output as if the stop event had been
2496 reported. */
2497 ecs->ptid = info->ptid;
2498 ecs->event_thread = find_thread_ptid (info->ptid);
2499 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2500 ecs->ws.value.sig = GDB_SIGNAL_0;
2501
2502 handle_inferior_event (ecs);
2503
2504 if (!ecs->wait_some_more)
2505 {
2506 struct thread_info *tp;
2507
2508 normal_stop ();
2509
2510 /* Finish off the continuations. */
2511 tp = inferior_thread ();
2512 do_all_intermediate_continuations_thread (tp, 1);
2513 do_all_continuations_thread (tp, 1);
2514 }
2515
2516 do_cleanups (old_chain);
2517 }
2518
2519 return 0;
2520 }
2521
2522 /* This function is attached as a "thread_stop_requested" observer.
2523 Cleanup local state that assumed the PTID was to be resumed, and
2524 report the stop to the frontend. */
2525
2526 static void
2527 infrun_thread_stop_requested (ptid_t ptid)
2528 {
2529 struct displaced_step_inferior_state *displaced;
2530
2531 /* PTID was requested to stop. Remove it from the displaced
2532 stepping queue, so we don't try to resume it automatically. */
2533
2534 for (displaced = displaced_step_inferior_states;
2535 displaced;
2536 displaced = displaced->next)
2537 {
2538 struct displaced_step_request *it, **prev_next_p;
2539
2540 it = displaced->step_request_queue;
2541 prev_next_p = &displaced->step_request_queue;
2542 while (it)
2543 {
2544 if (ptid_match (it->ptid, ptid))
2545 {
2546 *prev_next_p = it->next;
2547 it->next = NULL;
2548 xfree (it);
2549 }
2550 else
2551 {
2552 prev_next_p = &it->next;
2553 }
2554
2555 it = *prev_next_p;
2556 }
2557 }
2558
2559 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2560 }
2561
2562 static void
2563 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2564 {
2565 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2566 nullify_last_target_wait_ptid ();
2567 }
2568
2569 /* Callback for iterate_over_threads. */
2570
2571 static int
2572 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2573 {
2574 if (is_exited (info->ptid))
2575 return 0;
2576
2577 delete_step_resume_breakpoint (info);
2578 delete_exception_resume_breakpoint (info);
2579 return 0;
2580 }
2581
2582 /* In all-stop, delete the step resume breakpoint of any thread that
2583 had one. In non-stop, delete the step resume breakpoint of the
2584 thread that just stopped. */
2585
2586 static void
2587 delete_step_thread_step_resume_breakpoint (void)
2588 {
2589 if (!target_has_execution
2590 || ptid_equal (inferior_ptid, null_ptid))
2591 /* If the inferior has exited, we have already deleted the step
2592 resume breakpoints out of GDB's lists. */
2593 return;
2594
2595 if (non_stop)
2596 {
2597 /* If in non-stop mode, only delete the step-resume or
2598 longjmp-resume breakpoint of the thread that just stopped
2599 stepping. */
2600 struct thread_info *tp = inferior_thread ();
2601
2602 delete_step_resume_breakpoint (tp);
2603 delete_exception_resume_breakpoint (tp);
2604 }
2605 else
2606 /* In all-stop mode, delete all step-resume and longjmp-resume
2607 breakpoints of any thread that had them. */
2608 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2609 }
2610
2611 /* A cleanup wrapper. */
2612
2613 static void
2614 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2615 {
2616 delete_step_thread_step_resume_breakpoint ();
2617 }
2618
2619 /* Pretty print the results of target_wait, for debugging purposes. */
2620
2621 static void
2622 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2623 const struct target_waitstatus *ws)
2624 {
2625 char *status_string = target_waitstatus_to_string (ws);
2626 struct ui_file *tmp_stream = mem_fileopen ();
2627 char *text;
2628
2629 /* The text is split over several lines because it was getting too long.
2630 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2631 output as a unit; we want only one timestamp printed if debug_timestamp
2632 is set. */
2633
2634 fprintf_unfiltered (tmp_stream,
2635 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2636 if (PIDGET (waiton_ptid) != -1)
2637 fprintf_unfiltered (tmp_stream,
2638 " [%s]", target_pid_to_str (waiton_ptid));
2639 fprintf_unfiltered (tmp_stream, ", status) =\n");
2640 fprintf_unfiltered (tmp_stream,
2641 "infrun: %d [%s],\n",
2642 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2643 fprintf_unfiltered (tmp_stream,
2644 "infrun: %s\n",
2645 status_string);
2646
2647 text = ui_file_xstrdup (tmp_stream, NULL);
2648
2649 /* This uses %s in part to handle %'s in the text, but also to avoid
2650 a gcc error: the format attribute requires a string literal. */
2651 fprintf_unfiltered (gdb_stdlog, "%s", text);
2652
2653 xfree (status_string);
2654 xfree (text);
2655 ui_file_delete (tmp_stream);
2656 }
2657
2658 /* Prepare and stabilize the inferior for detaching it. E.g.,
2659 detaching while a thread is displaced stepping is a recipe for
2660 crashing it, as nothing would readjust the PC out of the scratch
2661 pad. */
2662
2663 void
2664 prepare_for_detach (void)
2665 {
2666 struct inferior *inf = current_inferior ();
2667 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2668 struct cleanup *old_chain_1;
2669 struct displaced_step_inferior_state *displaced;
2670
2671 displaced = get_displaced_stepping_state (inf->pid);
2672
2673 /* Is any thread of this process displaced stepping? If not,
2674 there's nothing else to do. */
2675 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2676 return;
2677
2678 if (debug_infrun)
2679 fprintf_unfiltered (gdb_stdlog,
2680 "displaced-stepping in-process while detaching");
2681
2682 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2683 inf->detaching = 1;
2684
2685 while (!ptid_equal (displaced->step_ptid, null_ptid))
2686 {
2687 struct cleanup *old_chain_2;
2688 struct execution_control_state ecss;
2689 struct execution_control_state *ecs;
2690
2691 ecs = &ecss;
2692 memset (ecs, 0, sizeof (*ecs));
2693
2694 overlay_cache_invalid = 1;
2695
2696 if (deprecated_target_wait_hook)
2697 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2698 else
2699 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2700
2701 if (debug_infrun)
2702 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2703
2704 /* If an error happens while handling the event, propagate GDB's
2705 knowledge of the executing state to the frontend/user running
2706 state. */
2707 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2708 &minus_one_ptid);
2709
2710 /* Now figure out what to do with the result. */
2711 handle_inferior_event (ecs);
2712
2713 /* No error, don't finish the state yet. */
2714 discard_cleanups (old_chain_2);
2715
2716 /* Breakpoints and watchpoints are not installed on the target
2717 at this point, and signals are passed directly to the
2718 inferior, so this must mean the process is gone. */
2719 if (!ecs->wait_some_more)
2720 {
2721 discard_cleanups (old_chain_1);
2722 error (_("Program exited while detaching"));
2723 }
2724 }
2725
2726 discard_cleanups (old_chain_1);
2727 }
2728
2729 /* Wait for control to return from inferior to debugger.
2730
2731 If inferior gets a signal, we may decide to start it up again
2732 instead of returning. That is why there is a loop in this function.
2733 When this function actually returns it means the inferior
2734 should be left stopped and GDB should read more commands. */
2735
2736 void
2737 wait_for_inferior (void)
2738 {
2739 struct cleanup *old_cleanups;
2740
2741 if (debug_infrun)
2742 fprintf_unfiltered
2743 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2744
2745 old_cleanups =
2746 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2747
2748 while (1)
2749 {
2750 struct execution_control_state ecss;
2751 struct execution_control_state *ecs = &ecss;
2752 struct cleanup *old_chain;
2753
2754 memset (ecs, 0, sizeof (*ecs));
2755
2756 overlay_cache_invalid = 1;
2757
2758 if (deprecated_target_wait_hook)
2759 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2760 else
2761 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2762
2763 if (debug_infrun)
2764 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2765
2766 /* If an error happens while handling the event, propagate GDB's
2767 knowledge of the executing state to the frontend/user running
2768 state. */
2769 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2770
2771 /* Now figure out what to do with the result. */
2772 handle_inferior_event (ecs);
2773
2774 /* No error, don't finish the state yet. */
2775 discard_cleanups (old_chain);
2776
2777 if (!ecs->wait_some_more)
2778 break;
2779 }
2780
2781 do_cleanups (old_cleanups);
2782 }
2783
2784 /* Asynchronous version of wait_for_inferior. It is called by the
2785 event loop whenever a change of state is detected on the file
2786 descriptor corresponding to the target. It can be called more than
2787 once to complete a single execution command. In such cases we need
2788 to keep the state in a global variable ECSS. If it is the last time
2789 that this function is called for a single execution command, then
2790 report to the user that the inferior has stopped, and do the
2791 necessary cleanups. */
2792
2793 void
2794 fetch_inferior_event (void *client_data)
2795 {
2796 struct execution_control_state ecss;
2797 struct execution_control_state *ecs = &ecss;
2798 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2799 struct cleanup *ts_old_chain;
2800 int was_sync = sync_execution;
2801 int cmd_done = 0;
2802
2803 memset (ecs, 0, sizeof (*ecs));
2804
2805 /* We're handling a live event, so make sure we're doing live
2806 debugging. If we're looking at traceframes while the target is
2807 running, we're going to need to get back to that mode after
2808 handling the event. */
2809 if (non_stop)
2810 {
2811 make_cleanup_restore_current_traceframe ();
2812 set_current_traceframe (-1);
2813 }
2814
2815 if (non_stop)
2816 /* In non-stop mode, the user/frontend should not notice a thread
2817 switch due to internal events. Make sure we reverse to the
2818 user selected thread and frame after handling the event and
2819 running any breakpoint commands. */
2820 make_cleanup_restore_current_thread ();
2821
2822 overlay_cache_invalid = 1;
2823
2824 make_cleanup_restore_integer (&execution_direction);
2825 execution_direction = target_execution_direction ();
2826
2827 if (deprecated_target_wait_hook)
2828 ecs->ptid =
2829 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2830 else
2831 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2832
2833 if (debug_infrun)
2834 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2835
2836 /* If an error happens while handling the event, propagate GDB's
2837 knowledge of the executing state to the frontend/user running
2838 state. */
2839 if (!non_stop)
2840 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2841 else
2842 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2843
2844 /* Registered after make_cleanup_restore_current_thread above, so it
2845 runs first, while the thread that threw the exception is still current. */
2846 make_bpstat_clear_actions_cleanup ();
2847
2848 /* Now figure out what to do with the result. */
2849 handle_inferior_event (ecs);
2850
2851 if (!ecs->wait_some_more)
2852 {
2853 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2854
2855 delete_step_thread_step_resume_breakpoint ();
2856
2857 /* We may not find an inferior if this was a process exit. */
2858 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2859 normal_stop ();
2860
2861 if (target_has_execution
2862 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2863 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2864 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2865 && ecs->event_thread->step_multi
2866 && ecs->event_thread->control.stop_step)
2867 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2868 else
2869 {
2870 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2871 cmd_done = 1;
2872 }
2873 }
2874
2875 /* No error, don't finish the thread states yet. */
2876 discard_cleanups (ts_old_chain);
2877
2878 /* Revert thread and frame. */
2879 do_cleanups (old_chain);
2880
2881 /* If the inferior was in sync execution mode, and now isn't,
2882 restore the prompt (a synchronous execution command has finished,
2883 and we're ready for input). */
2884 if (interpreter_async && was_sync && !sync_execution)
2885 display_gdb_prompt (0);
2886
2887 if (cmd_done
2888 && !was_sync
2889 && exec_done_display_p
2890 && (ptid_equal (inferior_ptid, null_ptid)
2891 || !is_running (inferior_ptid)))
2892 printf_unfiltered (_("completed.\n"));
2893 }
2894
2895 /* Record the frame and location we're currently stepping through. */
2896 void
2897 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2898 {
2899 struct thread_info *tp = inferior_thread ();
2900
2901 tp->control.step_frame_id = get_frame_id (frame);
2902 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2903
2904 tp->current_symtab = sal.symtab;
2905 tp->current_line = sal.line;
2906 }
2907
2908 /* Clear context switchable stepping state. */
2909
2910 void
2911 init_thread_stepping_state (struct thread_info *tss)
2912 {
2913 tss->stepping_over_breakpoint = 0;
2914 tss->step_after_step_resume_breakpoint = 0;
2915 }
2916
2917 /* Return the cached copy of the last pid/waitstatus returned by
2918 target_wait()/deprecated_target_wait_hook(). The data is actually
2919 cached by handle_inferior_event(), which gets called immediately
2920 after target_wait()/deprecated_target_wait_hook(). */
2921
2922 void
2923 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2924 {
2925 *ptidp = target_last_wait_ptid;
2926 *status = target_last_waitstatus;
2927 }
2928
2929 void
2930 nullify_last_target_wait_ptid (void)
2931 {
2932 target_last_wait_ptid = minus_one_ptid;
2933 }
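/* Editorial note: illustrative sketch only, in the style of
   prepare_to_proceed above, showing how callers consume the cached
   ptid/waitstatus pair.  */
#if 0
  {
    ptid_t last_ptid;
    struct target_waitstatus last_status;

    get_last_target_status (&last_ptid, &last_status);
    if (last_status.kind == TARGET_WAITKIND_STOPPED
        && last_status.value.sig == GDB_SIGNAL_TRAP)
      {
        /* The most recent event was a trap reported for LAST_PTID;
           prepare_to_proceed, for instance, then checks whether that
           thread is still sitting on a breakpoint.  */
      }
  }
#endif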
2934
2935 /* Switch thread contexts. */
2936
2937 static void
2938 context_switch (ptid_t ptid)
2939 {
2940 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2941 {
2942 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2943 target_pid_to_str (inferior_ptid));
2944 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2945 target_pid_to_str (ptid));
2946 }
2947
2948 switch_to_thread (ptid);
2949 }
2950
2951 static void
2952 adjust_pc_after_break (struct execution_control_state *ecs)
2953 {
2954 struct regcache *regcache;
2955 struct gdbarch *gdbarch;
2956 struct address_space *aspace;
2957 CORE_ADDR breakpoint_pc;
2958
2959 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2960 we aren't, just return.
2961
2962 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2963 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2964 implemented by software breakpoints should be handled through the normal
2965 breakpoint layer.
2966
2967 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2968 different signals (SIGILL or SIGEMT for instance), but it is less
2969 clear where the PC is pointing afterwards. It may not match
2970 gdbarch_decr_pc_after_break. I don't know any specific target that
2971 generates these signals at breakpoints (the code has been in GDB since at
2972 least 1992) so I can not guess how to handle them here.
2973
2974 In earlier versions of GDB, a target with
2975 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2976 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2977 target with both of these set in GDB history, and it seems unlikely to be
2978 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2979
2980 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2981 return;
2982
2983 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
2984 return;
2985
2986 /* In reverse execution, when a breakpoint is hit, the instruction
2987 under it has already been de-executed. The reported PC always
2988 points at the breakpoint address, so adjusting it further would
2989 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2990 architecture:
2991
2992 B1 0x08000000 : INSN1
2993 B2 0x08000001 : INSN2
2994 0x08000002 : INSN3
2995 PC -> 0x08000003 : INSN4
2996
2997 Say you're stopped at 0x08000003 as above. Reverse continuing
2998 from that point should hit B2 as below. Reading the PC when the
2999 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3000 been de-executed already.
3001
3002 B1 0x08000000 : INSN1
3003 B2 PC -> 0x08000001 : INSN2
3004 0x08000002 : INSN3
3005 0x08000003 : INSN4
3006
3007 We can't apply the same logic as for forward execution, because
3008 we would wrongly adjust the PC to 0x08000000, since there's a
3009 breakpoint at PC - 1. We'd then report a hit on B1, although
3010 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3011 behaviour. */
3012 if (execution_direction == EXEC_REVERSE)
3013 return;
3014
3015 /* If this target does not decrement the PC after breakpoints, then
3016 we have nothing to do. */
3017 regcache = get_thread_regcache (ecs->ptid);
3018 gdbarch = get_regcache_arch (regcache);
3019 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
3020 return;
3021
3022 aspace = get_regcache_aspace (regcache);
3023
3024 /* Find the location where (if we've hit a breakpoint) the
3025 breakpoint would be. */
3026 breakpoint_pc = regcache_read_pc (regcache)
3027 - gdbarch_decr_pc_after_break (gdbarch);
3028
3029 /* Check whether there actually is a software breakpoint inserted at
3030 that location.
3031
3032 If in non-stop mode, a race condition is possible where we've
3033 removed a breakpoint, but stop events for that breakpoint were
3034 already queued and arrive later. To suppress those spurious
3035 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3036 and retire them after a number of stop events are reported. */
3037 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3038 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3039 {
3040 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3041
3042 if (RECORD_IS_USED)
3043 record_full_gdb_operation_disable_set ();
3044
3045 /* When using hardware single-step, a SIGTRAP is reported for both
3046 a completed single-step and a software breakpoint. Need to
3047 differentiate between the two, as the latter needs adjusting
3048 but the former does not.
3049
3050 The SIGTRAP can be due to a completed hardware single-step only if
3051 - we didn't insert software single-step breakpoints
3052 - the thread to be examined is still the current thread
3053 - this thread is currently being stepped
3054
3055 If any of these events did not occur, we must have stopped due
3056 to hitting a software breakpoint, and have to back up to the
3057 breakpoint address.
3058
3059 As a special case, we could have hardware single-stepped a
3060 software breakpoint. In this case (prev_pc == breakpoint_pc),
3061 we also need to back up to the breakpoint address. */
3062
3063 if (singlestep_breakpoints_inserted_p
3064 || !ptid_equal (ecs->ptid, inferior_ptid)
3065 || !currently_stepping (ecs->event_thread)
3066 || ecs->event_thread->prev_pc == breakpoint_pc)
3067 regcache_write_pc (regcache, breakpoint_pc);
3068
3069 do_cleanups (old_cleanups);
3070 }
3071 }
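/* Editorial note: worked example with assumed values, illustrative
   only.  On an architecture where gdbarch_decr_pc_after_break ()
   returns 1 (x86-like), hitting a software breakpoint planted at
   0x08000000 leaves the reported PC at 0x08000001.  */
#if 0
  {
    CORE_ADDR reported_pc = 0x08000001;  /* PC as reported with the SIGTRAP.  */
    int decr_pc = 1;                     /* gdbarch_decr_pc_after_break ().  */
    CORE_ADDR breakpoint_pc = reported_pc - decr_pc;  /* 0x08000000.  */

    /* If a software breakpoint really is inserted at BREAKPOINT_PC and
       this was not a completed hardware single-step, the code above
       rewinds the PC to BREAKPOINT_PC, so the stop is reported at the
       breakpoint's address.  */
  }
#endif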
3072
3073 static void
3074 init_infwait_state (void)
3075 {
3076 waiton_ptid = pid_to_ptid (-1);
3077 infwait_state = infwait_normal_state;
3078 }
3079
3080 static int
3081 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3082 {
3083 for (frame = get_prev_frame (frame);
3084 frame != NULL;
3085 frame = get_prev_frame (frame))
3086 {
3087 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3088 return 1;
3089 if (get_frame_type (frame) != INLINE_FRAME)
3090 break;
3091 }
3092
3093 return 0;
3094 }
3095
3096 /* Auxiliary function that handles syscall entry/return events.
3097 It returns 1 if the inferior should keep going (and GDB
3098 should ignore the event), or 0 if the event deserves to be
3099 processed. */
3100
3101 static int
3102 handle_syscall_event (struct execution_control_state *ecs)
3103 {
3104 struct regcache *regcache;
3105 int syscall_number;
3106
3107 if (!ptid_equal (ecs->ptid, inferior_ptid))
3108 context_switch (ecs->ptid);
3109
3110 regcache = get_thread_regcache (ecs->ptid);
3111 syscall_number = ecs->ws.value.syscall_number;
3112 stop_pc = regcache_read_pc (regcache);
3113
3114 if (catch_syscall_enabled () > 0
3115 && catching_syscall_number (syscall_number) > 0)
3116 {
3117 enum bpstat_signal_value sval;
3118
3119 if (debug_infrun)
3120 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3121 syscall_number);
3122
3123 ecs->event_thread->control.stop_bpstat
3124 = bpstat_stop_status (get_regcache_aspace (regcache),
3125 stop_pc, ecs->ptid, &ecs->ws);
3126
3127 sval = bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3128 ecs->random_signal = sval == BPSTAT_SIGNAL_NO;
3129
3130 if (!ecs->random_signal)
3131 {
3132 /* Catchpoint hit. */
3133 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3134 return 0;
3135 }
3136 }
3137
3138 /* If no catchpoint triggered for this, then keep going. */
3139 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3140 keep_going (ecs);
3141 return 1;
3142 }
3143
3144 /* Clear the supplied execution_control_state's stop_func_* fields. */
3145
3146 static void
3147 clear_stop_func (struct execution_control_state *ecs)
3148 {
3149 ecs->stop_func_filled_in = 0;
3150 ecs->stop_func_start = 0;
3151 ecs->stop_func_end = 0;
3152 ecs->stop_func_name = NULL;
3153 }
3154
3155 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3156
3157 static void
3158 fill_in_stop_func (struct gdbarch *gdbarch,
3159 struct execution_control_state *ecs)
3160 {
3161 if (!ecs->stop_func_filled_in)
3162 {
3163 /* Don't care about return value; stop_func_start and stop_func_name
3164 will both be 0 if it doesn't work. */
3165 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3166 &ecs->stop_func_start, &ecs->stop_func_end);
3167 ecs->stop_func_start
3168 += gdbarch_deprecated_function_start_offset (gdbarch);
3169
3170 ecs->stop_func_filled_in = 1;
3171 }
3172 }
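/* Editorial note: illustrative sketch only.  The bounds check below is
   an example consumer of the lazily filled stop_func_* fields, not a
   quotation of an actual call site.  */
#if 0
  {
    fill_in_stop_func (gdbarch, ecs);

    if (ecs->stop_func_name != NULL
        && stop_pc >= ecs->stop_func_start
        && stop_pc < ecs->stop_func_end)
      {
        /* The stop PC lies within the function named
           ECS->stop_func_name.  */
      }
  }
#endif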
3173
3174 /* Given an execution control state that has been freshly filled in
3175 by an event from the inferior, figure out what it means and take
3176 appropriate action. */
3177
3178 static void
3179 handle_inferior_event (struct execution_control_state *ecs)
3180 {
3181 struct frame_info *frame;
3182 struct gdbarch *gdbarch;
3183 int stopped_by_watchpoint;
3184 int stepped_after_stopped_by_watchpoint = 0;
3185 struct symtab_and_line stop_pc_sal;
3186 enum stop_kind stop_soon;
3187
3188 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3189 {
3190 /* We had an event in the inferior, but we are not interested in
3191 handling it at this level. The lower layers have already
3192 done what needs to be done, if anything.
3193
3194 One of the possible circumstances for this is when the
3195 inferior produces output for the console. The inferior has
3196 not stopped, and we are ignoring the event. Another possible
3197 circumstance is any event which the lower level knows will be
3198 reported multiple times without an intervening resume. */
3199 if (debug_infrun)
3200 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3201 prepare_to_wait (ecs);
3202 return;
3203 }
3204
3205 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3206 && target_can_async_p () && !sync_execution)
3207 {
3208 /* There were no unwaited-for children left in the target, but,
3209 we're not synchronously waiting for events either. Just
3210 ignore. Otherwise, if we were running a synchronous
3211 execution command, we need to cancel it and give the user
3212 back the terminal. */
3213 if (debug_infrun)
3214 fprintf_unfiltered (gdb_stdlog,
3215 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3216 prepare_to_wait (ecs);
3217 return;
3218 }
3219
3220 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3221 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3222 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3223 {
3224 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3225
3226 gdb_assert (inf);
3227 stop_soon = inf->control.stop_soon;
3228 }
3229 else
3230 stop_soon = NO_STOP_QUIETLY;
3231
3232 /* Cache the last pid/waitstatus. */
3233 target_last_wait_ptid = ecs->ptid;
3234 target_last_waitstatus = ecs->ws;
3235
3236 /* Always clear state belonging to the previous time we stopped. */
3237 stop_stack_dummy = STOP_NONE;
3238
3239 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3240 {
3241 /* No unwaited-for children left. IOW, all resumed children
3242 have exited. */
3243 if (debug_infrun)
3244 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3245
3246 stop_print_frame = 0;
3247 stop_stepping (ecs);
3248 return;
3249 }
3250
3251 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3252 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3253 {
3254 ecs->event_thread = find_thread_ptid (ecs->ptid);
3255 /* If it's a new thread, add it to the thread database. */
3256 if (ecs->event_thread == NULL)
3257 ecs->event_thread = add_thread (ecs->ptid);
3258
3259 /* Disable range stepping. If the next step request could use a
3260 range, it will end up re-enabled then. */
3261 ecs->event_thread->control.may_range_step = 0;
3262 }
3263
3264 /* Dependent on valid ECS->EVENT_THREAD. */
3265 adjust_pc_after_break (ecs);
3266
3267 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3268 reinit_frame_cache ();
3269
3270 breakpoint_retire_moribund ();
3271
3272 /* First, distinguish signals caused by the debugger from signals
3273 that have to do with the program's own actions. Note that
3274 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3275 on the operating system version. Here we detect when a SIGILL or
3276 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3277 something similar for SIGSEGV, since a SIGSEGV will be generated
3278 when we're trying to execute a breakpoint instruction on a
3279 non-executable stack. This happens for call dummy breakpoints
3280 for architectures like SPARC that place call dummies on the
3281 stack. */
3282 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3283 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3284 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3285 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3286 {
3287 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3288
3289 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3290 regcache_read_pc (regcache)))
3291 {
3292 if (debug_infrun)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "infrun: Treating signal as SIGTRAP\n");
3295 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3296 }
3297 }
3298
3299 /* Mark the non-executing threads accordingly. In all-stop, all
3300 threads of all processes are stopped when we get any event
3301 reported. In non-stop mode, only the event thread stops. If
3302 we're handling a process exit in non-stop mode, there's nothing
3303 to do, as threads of the dead process are gone, and threads of
3304 any other process were left running. */
3305 if (!non_stop)
3306 set_executing (minus_one_ptid, 0);
3307 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3308 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3309 set_executing (ecs->ptid, 0);
3310
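/* Act on whatever special wait state the previous event handling
   left us in.  */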
3311 switch (infwait_state)
3312 {
3313 case infwait_thread_hop_state:
3314 if (debug_infrun)
3315 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3316 break;
3317
3318 case infwait_normal_state:
3319 if (debug_infrun)
3320 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3321 break;
3322
3323 case infwait_step_watch_state:
3324 if (debug_infrun)
3325 fprintf_unfiltered (gdb_stdlog,
3326 "infrun: infwait_step_watch_state\n");
3327
3328 stepped_after_stopped_by_watchpoint = 1;
3329 break;
3330
3331 case infwait_nonstep_watch_state:
3332 if (debug_infrun)
3333 fprintf_unfiltered (gdb_stdlog,
3334 "infrun: infwait_nonstep_watch_state\n");
3335 insert_breakpoints ();
3336
3337 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3338 handle things like signals arriving and other things happening
3339 in combination correctly? */
3340 stepped_after_stopped_by_watchpoint = 1;
3341 break;
3342
3343 default:
3344 internal_error (__FILE__, __LINE__, _("bad switch"));
3345 }
3346
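/* Back to the default wait state: no special handling pending, and
   no particular thread being waited on.  */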
3347 infwait_state = infwait_normal_state;
3348 waiton_ptid = pid_to_ptid (-1);
3349
3350 switch (ecs->ws.kind)
3351 {
3352 case TARGET_WAITKIND_LOADED:
3353 if (debug_infrun)
3354 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3355 /* Ignore this gracefully during startup of the inferior, as it
3356 might be the shell which has just loaded some objects; otherwise,
3357 add the symbols for the newly loaded objects. Also ignore at
3358 the beginning of an attach or remote session; we will query
3359 the full list of libraries once the connection is
3360 established. */
3361 if (stop_soon == NO_STOP_QUIETLY)
3362 {
3363 struct regcache *regcache;
3364 enum bpstat_signal_value sval;
3365
3366 if (!ptid_equal (ecs->ptid, inferior_ptid))
3367 context_switch (ecs->ptid);
3368 regcache = get_thread_regcache (ecs->ptid);
3369
3370 handle_solib_event ();
3371
3372 ecs->event_thread->control.stop_bpstat
3373 = bpstat_stop_status (get_regcache_aspace (regcache),
3374 stop_pc, ecs->ptid, &ecs->ws);
3375
3376 sval
3377 = bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3378 ecs->random_signal = sval == BPSTAT_SIGNAL_NO;
3379
3380 if (!ecs->random_signal)
3381 {
3382 /* A catchpoint triggered. */
3383 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3384 goto process_event_stop_test;
3385 }
3386
3387 /* If requested, stop when the dynamic linker notifies
3388 gdb of events. This allows the user to get control
3389 and place breakpoints in initializer routines for
3390 dynamically loaded objects (among other things). */
3391 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3392 if (stop_on_solib_events)
3393 {
3394 /* Make sure we print "Stopped due to solib-event" in
3395 normal_stop. */
3396 stop_print_frame = 1;
3397
3398 stop_stepping (ecs);
3399 return;
3400 }
3401 }
3402
3403 /* If we are skipping through a shell, or through shared library
3404 loading that we aren't interested in, resume the program. If
3405 we're running the program normally, also resume. But stop if
3406 we're attaching or setting up a remote connection. */
3407 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3408 {
3409 if (!ptid_equal (ecs->ptid, inferior_ptid))
3410 context_switch (ecs->ptid);
3411
3412 /* Loading of shared libraries might have changed breakpoint
3413 addresses. Make sure new breakpoints are inserted. */
3414 if (stop_soon == NO_STOP_QUIETLY
3415 && !breakpoints_always_inserted_mode ())
3416 insert_breakpoints ();
3417 resume (0, GDB_SIGNAL_0);
3418 prepare_to_wait (ecs);
3419 return;
3420 }
3421
3422 break;
3423
3424 case TARGET_WAITKIND_SPURIOUS:
3425 if (debug_infrun)
3426 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3427 if (!ptid_equal (ecs->ptid, inferior_ptid))
3428 context_switch (ecs->ptid);
3429 resume (0, GDB_SIGNAL_0);
3430 prepare_to_wait (ecs);
3431 return;
3432
3433 case TARGET_WAITKIND_EXITED:
3434 case TARGET_WAITKIND_SIGNALLED:
3435 if (debug_infrun)
3436 {
3437 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3438 fprintf_unfiltered (gdb_stdlog,
3439 "infrun: TARGET_WAITKIND_EXITED\n");
3440 else
3441 fprintf_unfiltered (gdb_stdlog,
3442 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3443 }
3444
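/* Make the inferior that reported the exit the current one, so that
   the exit-code bookkeeping and mourning below apply to it.  */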
3445 inferior_ptid = ecs->ptid;
3446 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3447 set_current_program_space (current_inferior ()->pspace);
3448 handle_vfork_child_exec_or_exit (0);
3449 target_terminal_ours (); /* Must do this before mourn anyway. */
3450
3451 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3452 {
3453 /* Record the exit code in the convenience variable $_exitcode, so
3454 that the user can inspect this again later. */
3455 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3456 (LONGEST) ecs->ws.value.integer);
3457
3458 /* Also record this in the inferior itself. */
3459 current_inferior ()->has_exit_code = 1;
3460 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3461
3462 print_exited_reason (ecs->ws.value.integer);
3463 }
3464 else
3465 print_signal_exited_reason (ecs->ws.value.sig);
3466
3467 gdb_flush (gdb_stdout);
3468 target_mourn_inferior ();
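/* The inferior is gone, so forget about any single-step breakpoints
   that were inserted in it.  */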
3469 singlestep_breakpoints_inserted_p = 0;
3470 cancel_single_step_breakpoints ();
3471 stop_print_frame = 0;
3472 stop_stepping (ecs);
3473 return;
3474
3475 /* The following are the only cases in which we keep going;
3476 the above cases end in a continue or goto. */
3477 case TARGET_WAITKIND_FORKED:
3478 case TARGET_WAITKIND_VFORKED:
3479 if (debug_infrun)
3480 {
3481 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3482 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3483 else
3484 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3485 }
3486
3487 /* Check whether the inferior is displaced stepping. */
3488 {
3489 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3490 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3491 struct displaced_step_inferior_state *displaced
3492 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3493
3494 /* If displaced stepping is in use, and it is thread ecs->ptid
3495 that is doing the displaced step, finish that step now. */
3496 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3497 {
3498 struct inferior *parent_inf
3499 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3500 struct regcache *child_regcache;
3501 CORE_ADDR parent_pc;
3502
3503 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3504 indicating that the displaced stepping of syscall instruction
3505 has been done. Perform cleanup for parent process here. Note
3506 that this operation also cleans up the child process for vfork,
3507 because their pages are shared. */
3508 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3509
3510 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3511 {
3512 /* Restore scratch pad for child process. */
3513 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3514 }
3515
3516 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3517 the child's PC is also within the scratchpad. Set the child's PC
3518 to the parent's PC value, which has already been fixed up.
3519 FIXME: we use the parent's aspace here, although we're touching
3520 the child, because the child hasn't been added to the inferior
3521 list yet at this point. */
3522
3523 child_regcache
3524 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3525 gdbarch,
3526 parent_inf->aspace);
3527 /* Read PC value of parent process. */
3528 parent_pc = regcache_read_pc (regcache);
3529
3530 if (debug_displaced)
3531 fprintf_unfiltered (gdb_stdlog,
3532 "displaced: write child pc from %s to %s\n",
3533 paddress (gdbarch,
3534 regcache_read_pc (child_regcache)),
3535 paddress (gdbarch, parent_pc));
3536
3537 regcache_write_pc (child_regcache, parent_pc);
3538 }
3539 }
3540
3541 if (!ptid_equal (ecs->ptid, inferior_ptid))
3542 context_switch (ecs->ptid);
3543
3544 /* Immediately detach breakpoints from the child before there's
3545 any chance of letting the user delete breakpoints from the
3546 breakpoint lists. If we don't do this early, it's easy to
3547 leave leftover traps in the child, viz: "break foo; catch
3548 fork; c; <fork>; del; c; <child calls foo>". We only follow
3549 the fork on the last `continue', and by that time the
3550 breakpoint at "foo" is long gone from the breakpoint table.
3551 If we vforked, then we don't need to unpatch here, since both
3552 parent and child are sharing the same memory pages; we'll
3553 need to unpatch at follow/detach time instead to be certain
3554 that new breakpoints added between catchpoint hit time and
3555 vfork follow are detached. */
3556 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3557 {
3558 /* This won't actually modify the breakpoint list, but will
3559 physically remove the breakpoints from the child. */
3560 detach_breakpoints (ecs->ws.value.related_pid);
3561 }
3562
3563 if (singlestep_breakpoints_inserted_p)
3564 {
3565 /* Pull the single step breakpoints out of the target. */
3566 remove_single_step_breakpoints ();
3567 singlestep_breakpoints_inserted_p = 0;
3568 }
3569
3570 /* In case the event is caught by a catchpoint, remember that
3571 the event is to be followed at the next resume of the thread,
3572 and not immediately. */
3573 ecs->event_thread->pending_follow = ecs->ws;
3574
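/* Compute the stop PC and the bpstat for this event, so we can tell
   whether a fork/vfork catchpoint caused the stop.  */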
3575 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3576
3577 ecs->event_thread->control.stop_bpstat
3578 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3579 stop_pc, ecs->ptid, &ecs->ws);
3580
3581 /* Note that we're interested in knowing whether the bpstat
3582 actually causes a stop, not just whether it may explain the signal.
3583 Software watchpoints, for example, always appear in the
3584 bpstat. */
3585 ecs->random_signal
3586 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3587
3588 /* If no catchpoint triggered for this, then keep going. */
3589 if (ecs->random_signal)
3590 {
3591 ptid_t parent;
3592 ptid_t child;
3593 int should_resume;
3594 int follow_child
3595 = (follow_fork_mode_string == follow_fork_mode_child);
3596
3597 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3598
3599 should_resume = follow_fork ();
3600
3601 parent = ecs->ptid;
3602 child = ecs->ws.value.related_pid;
3603
3604 /* In non-stop mode, also resume the other branch. */
3605 if (non_stop && !detach_fork)
3606 {
3607 if (follow_child)
3608 switch_to_thread (parent);
3609 else
3610 switch_to_thread (child);
3611
3612 ecs->event_thread = inferior_thread ();
3613 ecs->ptid = inferior_ptid;
3614 keep_going (ecs);
3615 }
3616
3617 if (follow_child)
3618 switch_to_thread (child);
3619 else
3620 switch_to_thread (parent);
3621
3622 ecs->event_thread = inferior_thread ();
3623 ecs->ptid = inferior_ptid;
3624
3625 if (should_resume)
3626 keep_going (ecs);
3627 else
3628 stop_stepping (ecs);
3629 return;
3630 }
3631 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3632 goto process_event_stop_test;
3633
3634 case TARGET_WAITKIND_VFORK_DONE:
3635 /* Done with the shared memory region. Re-insert breakpoints in
3636 the parent, and keep going. */
3637
3638 if (debug_infrun)
3639 fprintf_unfiltered (gdb_stdlog,
3640 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3641
3642 if (!ptid_equal (ecs->ptid, inferior_ptid))
3643 context_switch (ecs->ptid);
3644
3645 current_inferior ()->waiting_for_vfork_done = 0;
3646 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3647 /* This also takes care of reinserting breakpoints in the
3648 previously locked inferior. */
3649 keep_going (ecs);
3650 return;
3651
3652 case TARGET_WAITKIND_EXECD:
3653 if (debug_infrun)
3654 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3655
3656 if (!ptid_equal (ecs->ptid, inferior_ptid))
3657 context_switch (ecs->ptid);
3658
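/* The old program image is gone, so discard any record of software
   single-step breakpoints inserted in it.  */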
3659 singlestep_breakpoints_inserted_p = 0;
3660 cancel_single_step_breakpoints ();
3661
3662 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3663
3664 /* Do whatever is necessary to the parent branch of the vfork. */
3665 handle_vfork_child_exec_or_exit (1);
3666
3667 /* This causes the eventpoints and symbol table to be reset.
3668 Must do this now, before trying to determine whether to
3669 stop. */
3670 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3671
3672 ecs->event_thread->control.stop_bpstat
3673 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3674 stop_pc, ecs->ptid, &ecs->ws);
3675 ecs->random_signal
3676 = (bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
3677 == BPSTAT_SIGNAL_NO);
3678
3679 /* Note that this may be referenced from inside
3680 bpstat_stop_status above, through inferior_has_execd. */
3681 xfree (ecs->ws.value.execd_pathname);
3682 ecs->ws.value.execd_pathname = NULL;
3683
3684 /* If no catchpoint triggered for this, then keep going. */
3685 if (ecs->random_signal)
3686 {
3687 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3688 keep_going (ecs);
3689 return;
3690 }
3691 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3692 goto process_event_stop_test;
3693
3694 /* Be careful not to try to gather much state about a thread
3695 that's in a syscall. It's frequently a losing proposition. */
3696 case TARGET_WAITKIND_SYSCALL_ENTRY:
3697 if (debug_infrun)
3698 fprintf_unfiltered (gdb_stdlog,
3699 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3700 /* Getting the current syscall number. */
3701 if (handle_syscall_event (ecs) != 0)
3702 return;
3703 goto process_event_stop_test;
3704
3705 /* Before examining the threads further, step this thread to
3706 get it entirely out of the syscall. (We get notice of the
3707 event when the thread is just on the verge of exiting a
3708 syscall. Stepping one instruction seems to get it back
3709 into user code.) */
3710 case TARGET_WAITKIND_SYSCALL_RETURN:
3711 if (debug_infrun)
3712 fprintf_unfiltered (gdb_stdlog,
3713 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3714 if (handle_syscall_event (ecs) != 0)
3715 return;
3716 goto process_event_stop_test;
3717
3718 case TARGET_WAITKIND_STOPPED:
3719 if (debug_infrun)
3720 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3721 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3722 break;
3723
3724 case TARGET_WAITKIND_NO_HISTORY:
3725 if (debug_infrun)
3726 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3727 /* Reverse execution: target ran out of history info. */
3728
3729 /* Pull the single step breakpoints out of the target. */
3730 if (singlestep_breakpoints_inserted_p)
3731 {
3732 if (!ptid_equal (ecs->ptid, inferior_ptid))
3733 context_switch (ecs->ptid);
3734 remove_single_step_breakpoints ();
3735 singlestep_breakpoints_inserted_p = 0;
3736 }
3737 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3738 print_no_history_reason ();
3739 stop_stepping (ecs);
3740 return;
3741 }
3742
3743 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3744 {
3745 /* Do we need to clean up the state of a thread that has
3746 completed a displaced single-step? (Doing so usually affects
3747 the PC, so do it here, before we set stop_pc.) */
3748 displaced_step_fixup (ecs->ptid,
3749 ecs->event_thread->suspend.stop_signal);
3750
3751 /* If we either finished a single-step or hit a breakpoint, but
3752 the user wanted this thread to be stopped, pretend we got a
3753 SIG0 (generic unsignaled stop). */
3754
3755 if (ecs->event_thread->stop_requested
3756 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3757 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3758 }
3759
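/* Record where the event thread stopped; the stop tests below all
   key off STOP_PC.  */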
3760 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3761
3762 if (debug_infrun)
3763 {
3764 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3765 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3766 struct cleanup *old_chain = save_inferior_ptid ();
3767
3768 inferior_ptid = ecs->ptid;
3769
3770 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3771 paddress (gdbarch, stop_pc));
3772 if (target_stopped_by_watchpoint ())
3773 {
3774 CORE_ADDR addr;
3775
3776 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3777
3778 if (target_stopped_data_address (&current_target, &addr))
3779 fprintf_unfiltered (gdb_stdlog,
3780 "infrun: stopped data address = %s\n",
3781 paddress (gdbarch, addr));
3782 else
3783 fprintf_unfiltered (gdb_stdlog,
3784 "infrun: (no data address available)\n");
3785 }
3786
3787 do_cleanups (old_chain);
3788 }
3789
3790 if (stepping_past_singlestep_breakpoint)
3791 {
3792 gdb_assert (singlestep_breakpoints_inserted_p);
3793 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3794 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3795
3796 stepping_past_singlestep_breakpoint = 0;
3797
3798 /* We've either finished single-stepping past the single-step
3799 breakpoint, or stopped for some other reason. It would be nice if
3800 we could tell, but we can't reliably. */
3801 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3802 {
3803 if (debug_infrun)
3804 fprintf_unfiltered (gdb_stdlog,
3805 "infrun: stepping_past_"
3806 "singlestep_breakpoint\n");
3807 /* Pull the single step breakpoints out of the target. */
3808 if (!ptid_equal (ecs->ptid, inferior_ptid))
3809 context_switch (ecs->ptid);
3810 remove_single_step_breakpoints ();
3811 singlestep_breakpoints_inserted_p = 0;
3812
3813 ecs->random_signal = 0;
3814 ecs->event_thread->control.trap_expected = 0;
3815
3816 context_switch (saved_singlestep_ptid);
3817 if (deprecated_context_hook)
3818 deprecated_context_hook (pid_to_thread_id (saved_singlestep_ptid));
3819
3820 resume (1, GDB_SIGNAL_0);
3821 prepare_to_wait (ecs);
3822 return;
3823 }
3824 }
3825
3826 if (!ptid_equal (deferred_step_ptid, null_ptid))
3827 {
3828 /* In non-stop mode, there's never a deferred_step_ptid set. */
3829 gdb_assert (!non_stop);
3830
3831 /* If we stopped for some other reason than single-stepping, ignore
3832 the fact that we were supposed to switch back. */
3833 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3834 {
3835 if (debug_infrun)
3836 fprintf_unfiltered (gdb_stdlog,
3837 "infrun: handling deferred step\n");
3838
3839 /* Pull the single step breakpoints out of the target. */
3840 if (singlestep_breakpoints_inserted_p)
3841 {
3842 if (!ptid_equal (ecs->ptid, inferior_ptid))
3843 context_switch (ecs->ptid);
3844 remove_single_step_breakpoints ();
3845 singlestep_breakpoints_inserted_p = 0;
3846 }
3847
3848 ecs->event_thread->control.trap_expected = 0;
3849
3850 context_switch (deferred_step_ptid);
3851 deferred_step_ptid = null_ptid;
3852 /* Suppress spurious "Switching to ..." message. */
3853 previous_inferior_ptid = inferior_ptid;
3854
3855 resume (1, GDB_SIGNAL_0);
3856 prepare_to_wait (ecs);
3857 return;
3858 }
3859
3860 deferred_step_ptid = null_ptid;
3861 }
3862
3863 /* See if a thread hit a thread-specific breakpoint that was meant for
3864 another thread. If so, then step that thread past the breakpoint,
3865 and continue it. */
3866
3867 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3868 {
3869 int thread_hop_needed = 0;
3870 struct address_space *aspace =
3871 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3872
3873 /* Check if a regular breakpoint has been hit before checking
3874 for a potential single step breakpoint. Otherwise, GDB will
3875 not see this breakpoint hit when stepping onto breakpoints. */
3876 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3877 {
3878 ecs->random_signal = 0;
3879 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3880 thread_hop_needed = 1;
3881 }
3882 else if (singlestep_breakpoints_inserted_p)
3883 {
3884 /* We have not context switched yet, so this should be true
3885 no matter which thread hit the singlestep breakpoint. */
3886 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3887 if (debug_infrun)
3888 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3889 "trap for %s\n",
3890 target_pid_to_str (ecs->ptid));
3891
3892 ecs->random_signal = 0;
3893 /* The call to in_thread_list is necessary because PTIDs sometimes
3894 change when we go from single-threaded to multi-threaded. If
3895 the singlestep_ptid is still in the list, assume that it is
3896 really different from ecs->ptid. */
3897 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3898 && in_thread_list (singlestep_ptid))
3899 {
3900 /* If the PC of the thread we were trying to single-step
3901 has changed, discard this event (which we were going
3902 to ignore anyway), and pretend we saw that thread
3903 trap. This prevents us from continuously moving the
3904 single-step breakpoint forward, one instruction at a
3905 time. If the PC has changed, then the thread we were
3906 trying to single-step has trapped or been signalled,
3907 but the event has not been reported to GDB yet.
3908
3909 There might be some cases where this loses signal
3910 information, if a signal has arrived at exactly the
3911 same time that the PC changed, but this is the best
3912 we can do with the information available. Perhaps we
3913 should arrange to report all events for all threads
3914 when they stop, or to re-poll the remote looking for
3915 this particular thread (i.e. temporarily enable
3916 schedlock). */
3917
3918 CORE_ADDR new_singlestep_pc
3919 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3920
3921 if (new_singlestep_pc != singlestep_pc)
3922 {
3923 enum gdb_signal stop_signal;
3924
3925 if (debug_infrun)
3926 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3927 " but expected thread advanced also\n");
3928
3929 /* The current context still belongs to
3930 singlestep_ptid. Don't swap here, since that's
3931 the context we want to use. Just fudge our
3932 state and continue. */
3933 stop_signal = ecs->event_thread->suspend.stop_signal;
3934 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3935 ecs->ptid = singlestep_ptid;
3936 ecs->event_thread = find_thread_ptid (ecs->ptid);
3937 ecs->event_thread->suspend.stop_signal = stop_signal;
3938 stop_pc = new_singlestep_pc;
3939 }
3940 else
3941 {
3942 if (debug_infrun)
3943 fprintf_unfiltered (gdb_stdlog,
3944 "infrun: unexpected thread\n");
3945
3946 thread_hop_needed = 1;
3947 stepping_past_singlestep_breakpoint = 1;
3948 saved_singlestep_ptid = singlestep_ptid;
3949 }
3950 }
3951 }
3952
3953 if (thread_hop_needed)
3954 {
3955 struct regcache *thread_regcache;
3956 int remove_status = 0;
3957
3958 if (debug_infrun)
3959 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3960
3961 /* Switch context before touching inferior memory; the
3962 previous thread may have exited. */
3963 if (!ptid_equal (inferior_ptid, ecs->ptid))
3964 context_switch (ecs->ptid);
3965
3966 /* Saw a breakpoint, but it was hit by the wrong thread.
3967 Just continue. */
3968
3969 if (singlestep_breakpoints_inserted_p)
3970 {
3971 /* Pull the single step breakpoints out of the target. */
3972 remove_single_step_breakpoints ();
3973 singlestep_breakpoints_inserted_p = 0;
3974 }
3975
3976 /* If the arch can displace step, don't remove the
3977 breakpoints. */
3978 thread_regcache = get_thread_regcache (ecs->ptid);
3979 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3980 remove_status = remove_breakpoints ();
3981
3982 /* Did we fail to remove breakpoints? If so, try
3983 to set the PC past the bp. (There's at least
3984 one situation in which we can fail to remove
3985 the bp's: On HP-UX's that use ttrace, we can't
3986 change the address space of a vforking child
3987 process until the child exits (well, okay, not
3988 then either :-) or execs.) */
3989 if (remove_status != 0)
3990 error (_("Cannot step over breakpoint hit in wrong thread"));
3991 else
3992 { /* Single step */
3993 if (!non_stop)
3994 {
3995 /* Only need to require the next event from this
3996 thread in all-stop mode. */
3997 waiton_ptid = ecs->ptid;
3998 infwait_state = infwait_thread_hop_state;
3999 }
4000
4001 ecs->event_thread->stepping_over_breakpoint = 1;
4002 keep_going (ecs);
4003 return;
4004 }
4005 }
4006 else if (singlestep_breakpoints_inserted_p)
4007 {
4008 ecs->random_signal = 0;
4009 }
4010 }
4011 else
4012 ecs->random_signal = 1;
4013
4014 /* See if something interesting happened to the non-current thread. If
4015 so, then switch to that thread. */
4016 if (!ptid_equal (ecs->ptid, inferior_ptid))
4017 {
4018 if (debug_infrun)
4019 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4020
4021 context_switch (ecs->ptid);
4022
4023 if (deprecated_context_hook)
4024 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4025 }
4026
4027 /* At this point, get hold of the now-current thread's frame. */
4028 frame = get_current_frame ();
4029 gdbarch = get_frame_arch (frame);
4030
4031 if (singlestep_breakpoints_inserted_p)
4032 {
4033 /* Pull the single step breakpoints out of the target. */
4034 remove_single_step_breakpoints ();
4035 singlestep_breakpoints_inserted_p = 0;
4036 }
4037
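/* Unless we just completed the extra single-step needed to finish a
   watchpoint-triggering access, ask the target whether a watchpoint
   fired.  */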
4038 if (stepped_after_stopped_by_watchpoint)
4039 stopped_by_watchpoint = 0;
4040 else
4041 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4042
4043 /* If necessary, step over this watchpoint. We'll be back to display
4044 it in a moment. */
4045 if (stopped_by_watchpoint
4046 && (target_have_steppable_watchpoint
4047 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4048 {
4049 /* At this point, we are stopped at an instruction which has
4050 attempted to write to a piece of memory under control of
4051 a watchpoint. The instruction hasn't actually executed
4052 yet. If we were to evaluate the watchpoint expression
4053 now, we would get the old value, and therefore no change
4054 would seem to have occurred.
4055
4056 In order to make watchpoints work `right', we really need
4057 to complete the memory write, and then evaluate the
4058 watchpoint expression. We do this by single-stepping the
4059 target.
4060
4061 It may not be necessary to disable the watchpoint to step over
4062 it. For example, the PA can (with some kernel cooperation)
4063 single step over a watchpoint without disabling the watchpoint.
4064
4065 It is far more common to need to disable a watchpoint to step
4066 the inferior over it. If we have non-steppable watchpoints,
4067 we must disable the current watchpoint; it's simplest to
4068 disable all watchpoints and breakpoints. */
4069 int hw_step = 1;
4070
4071 if (!target_have_steppable_watchpoint)
4072 {
4073 remove_breakpoints ();
4074 /* See comment in resume why we need to stop bypassing signals
4075 while breakpoints have been removed. */
4076 target_pass_signals (0, NULL);
4077 }
4078 /* Single step */
4079 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4080 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4081 waiton_ptid = ecs->ptid;
4082 if (target_have_steppable_watchpoint)
4083 infwait_state = infwait_step_watch_state;
4084 else
4085 infwait_state = infwait_nonstep_watch_state;
4086 prepare_to_wait (ecs);
4087 return;
4088 }
4089
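/* Clear per-stop state before running the stop tests below.  */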
4090 clear_stop_func (ecs);
4091 ecs->event_thread->stepping_over_breakpoint = 0;
4092 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4093 ecs->event_thread->control.stop_step = 0;
4094 stop_print_frame = 1;
4095 ecs->random_signal = 0;
4096 stopped_by_random_signal = 0;
4097
4098 /* Hide inlined functions starting here, unless we just performed stepi or
4099 nexti. After stepi and nexti, always show the innermost frame (not any
4100 inline function call sites). */
4101 if (ecs->event_thread->control.step_range_end != 1)
4102 {
4103 struct address_space *aspace =
4104 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4105
4106 /* skip_inline_frames is expensive, so we avoid it if we can
4107 determine that the address is one where functions cannot have
4108 been inlined. This improves performance with inferiors that
4109 load a lot of shared libraries, because the solib event
4110 breakpoint is defined as the address of a function (i.e. not
4111 inline). Note that we have to check the previous PC as well
4112 as the current one to catch cases when we have just
4113 single-stepped off a breakpoint prior to reinstating it.
4114 Note that we're assuming that the code we single-step to is
4115 not inline, but that's not definitive: there's nothing
4116 preventing the event breakpoint function from containing
4117 inlined code, and the single-step ending up there. If the
4118 user had set a breakpoint on that inlined code, the missing
4119 skip_inline_frames call would break things. Fortunately
4120 that's an extremely unlikely scenario. */
4121 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4122 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4123 && ecs->event_thread->control.trap_expected
4124 && pc_at_non_inline_function (aspace,
4125 ecs->event_thread->prev_pc,
4126 &ecs->ws)))
4127 {
4128 skip_inline_frames (ecs->ptid);
4129
4130 /* Re-fetch current thread's frame in case that invalidated
4131 the frame cache. */
4132 frame = get_current_frame ();
4133 gdbarch = get_frame_arch (frame);
4134 }
4135 }
4136
4137 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4138 && ecs->event_thread->control.trap_expected
4139 && gdbarch_single_step_through_delay_p (gdbarch)
4140 && currently_stepping (ecs->event_thread))
4141 {
4142 /* We're trying to step off a breakpoint. Turns out that we're
4143 also on an instruction that needs to be stepped multiple
4144 times before it's been fully executed. E.g., architectures
4145 with a delay slot. It needs to be stepped twice, once for
4146 the instruction and once for the delay slot. */
4147 int step_through_delay
4148 = gdbarch_single_step_through_delay (gdbarch, frame);
4149
4150 if (debug_infrun && step_through_delay)
4151 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4152 if (ecs->event_thread->control.step_range_end == 0
4153 && step_through_delay)
4154 {
4155 /* The user issued a continue when stopped at a breakpoint.
4156 Set up for another trap and get out of here. */
4157 ecs->event_thread->stepping_over_breakpoint = 1;
4158 keep_going (ecs);
4159 return;
4160 }
4161 else if (step_through_delay)
4162 {
4163 /* The user issued a step when stopped at a breakpoint.
4164 Maybe we should stop, maybe we should not - the delay
4165 slot *might* correspond to a line of source. In any
4166 case, don't decide that here, just set
4167 ecs->stepping_over_breakpoint, making sure we
4168 single-step again before breakpoints are re-inserted. */
4169 ecs->event_thread->stepping_over_breakpoint = 1;
4170 }
4171 }
4172
4173 /* Look at the cause of the stop, and decide what to do.
4174 The alternatives are:
4175 1) stop_stepping and return; to really stop and return to the debugger,
4176 2) keep_going and return to start up again
4177 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4178 3) set ecs->random_signal to 1, and the decision between 1 and 2
4179 will be made according to the signal handling tables. */
4180
4181 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4182 && stop_after_trap)
4183 {
4184 if (debug_infrun)
4185 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4186 stop_print_frame = 0;
4187 stop_stepping (ecs);
4188 return;
4189 }
4190
4191 /* This originates from start_remote(), start_inferior() and the
4192 shared library hook functions. */
4193 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4194 {
4195 if (debug_infrun)
4196 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4197 stop_stepping (ecs);
4198 return;
4199 }
4200
4201 /* This originates from attach_command(). We need to overwrite
4202 the stop_signal here, because some kernels don't ignore a
4203 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4204 See more comments in inferior.h. On the other hand, if we
4205 get a non-SIGSTOP, report it to the user - assume the backend
4206 will handle the SIGSTOP if it should show up later.
4207
4208 Also consider that the attach is complete when we see a
4209 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4210 target extended-remote report it instead of a SIGSTOP
4211 (e.g. gdbserver). We already rely on SIGTRAP being our
4212 signal, so this is no exception.
4213
4214 Also consider that the attach is complete when we see a
4215 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4216 the target to stop all threads of the inferior, in case the
4217 low level attach operation doesn't stop them implicitly. If
4218 they weren't stopped implicitly, then the stub will report a
4219 GDB_SIGNAL_0, meaning: stopped for no particular reason
4220 other than GDB's request. */
4221 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4222 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4223 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4224 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4225 {
4226 stop_stepping (ecs);
4227 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4228 return;
4229 }
4230
4231 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4232 handles this event. */
4233 ecs->event_thread->control.stop_bpstat
4234 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4235 stop_pc, ecs->ptid, &ecs->ws);
4236
4237 /* The following is done in case a breakpoint condition
4238 called a function. */
4239 stop_print_frame = 1;
4240
4241 /* This is where we handle "moribund" watchpoints. Unlike
4242 software breakpoints traps, hardware watchpoint traps are
4243 always distinguishable from random traps. If no high-level
4244 watchpoint is associated with the reported stop data address
4245 anymore, then the bpstat does not explain the signal ---
4246 simply make sure to ignore it if `stopped_by_watchpoint' is
4247 set. */
4248
4249 if (debug_infrun
4250 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4251 && (bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4252 == BPSTAT_SIGNAL_NO)
4253 && stopped_by_watchpoint)
4254 fprintf_unfiltered (gdb_stdlog,
4255 "infrun: no user watchpoint explains "
4256 "watchpoint SIGTRAP, ignoring\n");
4257
4258 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4259 at one stage in the past included checks for an inferior
4260 function call's call dummy's return breakpoint. The original
4261 comment, that went with the test, read:
4262
4263 ``End of a stack dummy. Some systems (e.g. Sony news) give
4264 another signal besides SIGTRAP, so check here as well as
4265 above.''
4266
4267 If someone ever tries to get call dummies on a
4268 non-executable stack to work (where the target would stop
4269 with something like a SIGSEGV), then those tests might need
4270 to be re-instated. Given, however, that the tests were only
4271 enabled when momentary breakpoints were not being used, I
4272 suspect that it won't be the case.
4273
4274 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4275 be necessary for call dummies on a non-executable stack on
4276 SPARC. */
4277
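/* Decide whether this SIGTRAP was expected -- a breakpoint or
   watchpoint hit, a step over a breakpoint, or a single-step within
   the stepping range -- or a random signal from the program.  */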
4278 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4279 ecs->random_signal
4280 = !((bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4281 != BPSTAT_SIGNAL_NO)
4282 || stopped_by_watchpoint
4283 || ecs->event_thread->control.trap_expected
4284 || (ecs->event_thread->control.step_range_end
4285 && (ecs->event_thread->control.step_resume_breakpoint
4286 == NULL)));
4287 else
4288 {
4289 enum bpstat_signal_value sval;
4290
4291 sval = bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
4292 ecs->random_signal = (sval == BPSTAT_SIGNAL_NO);
4293
4294 if (sval == BPSTAT_SIGNAL_HIDE)
4295 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
4296 }
4297
4298 process_event_stop_test:
4299
4300 /* Re-fetch current thread's frame in case we did a
4301 "goto process_event_stop_test" above. */
4302 frame = get_current_frame ();
4303 gdbarch = get_frame_arch (frame);
4304
4305 /* For the program's own signals, act according to
4306 the signal handling tables. */
4307
4308 if (ecs->random_signal)
4309 {
4310 /* Signal not for debugging purposes. */
4311 int printed = 0;
4312 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4313
4314 if (debug_infrun)
4315 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4316 ecs->event_thread->suspend.stop_signal);
4317
4318 stopped_by_random_signal = 1;
4319
4320 if (signal_print[ecs->event_thread->suspend.stop_signal])
4321 {
4322 printed = 1;
4323 target_terminal_ours_for_output ();
4324 print_signal_received_reason
4325 (ecs->event_thread->suspend.stop_signal);
4326 }
4327 /* Always stop on signals if we're either just gaining control
4328 of the program, or the user explicitly requested this thread
4329 to remain stopped. */
4330 if (stop_soon != NO_STOP_QUIETLY
4331 || ecs->event_thread->stop_requested
4332 || (!inf->detaching
4333 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4334 {
4335 stop_stepping (ecs);
4336 return;
4337 }
4338 /* If not going to stop, give terminal back
4339 if we took it away. */
4340 else if (printed)
4341 target_terminal_inferior ();
4342
4343 /* Clear the signal if it should not be passed. */
4344 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4345 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4346
4347 if (ecs->event_thread->prev_pc == stop_pc
4348 && ecs->event_thread->control.trap_expected
4349 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4350 {
4351 /* We were just starting a new sequence, attempting to
4352 single-step off of a breakpoint and expecting a SIGTRAP.
4353 Instead this signal arrives. This signal will take us out
4354 of the stepping range so GDB needs to remember to, when
4355 the signal handler returns, resume stepping off that
4356 breakpoint. */
4357 /* To simplify things, "continue" is forced to use the same
4358 code paths as single-step - set a breakpoint at the
4359 signal return address and then, once hit, step off that
4360 breakpoint. */
4361 if (debug_infrun)
4362 fprintf_unfiltered (gdb_stdlog,
4363 "infrun: signal arrived while stepping over "
4364 "breakpoint\n");
4365
4366 insert_hp_step_resume_breakpoint_at_frame (frame);
4367 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4368 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4369 ecs->event_thread->control.trap_expected = 0;
4370 keep_going (ecs);
4371 return;
4372 }
4373
4374 if (ecs->event_thread->control.step_range_end != 0
4375 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4376 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4377 && frame_id_eq (get_stack_frame_id (frame),
4378 ecs->event_thread->control.step_stack_frame_id)
4379 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4380 {
4381 /* The inferior is about to take a signal that will take it
4382 out of the single step range. Set a breakpoint at the
4383 current PC (which is presumably where the signal handler
4384 will eventually return) and then allow the inferior to
4385 run free.
4386
4387 Note that this is only needed for a signal delivered
4388 while in the single-step range. Nested signals aren't a
4389 problem as they eventually all return. */
4390 if (debug_infrun)
4391 fprintf_unfiltered (gdb_stdlog,
4392 "infrun: signal may take us out of "
4393 "single-step range\n");
4394
4395 insert_hp_step_resume_breakpoint_at_frame (frame);
4396 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4397 ecs->event_thread->control.trap_expected = 0;
4398 keep_going (ecs);
4399 return;
4400 }
4401
4402 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4403 when either there's a nested signal, or when there's a
4404 pending signal enabled just as the signal handler returns
4405 (leaving the inferior at the step-resume-breakpoint without
4406 actually executing it). Either way continue until the
4407 breakpoint is really hit. */
4408 }
4409 else
4410 {
4411 /* Handle cases caused by hitting a breakpoint. */
4412
4413 CORE_ADDR jmp_buf_pc;
4414 struct bpstat_what what;
4415
4416 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4417
4418 if (what.call_dummy)
4419 {
4420 stop_stack_dummy = what.call_dummy;
4421 }
4422
4423 /* If we hit an internal event that triggers symbol changes, the
4424 current frame will be invalidated within bpstat_what (e.g.,
4425 if we hit an internal solib event). Re-fetch it. */
4426 frame = get_current_frame ();
4427 gdbarch = get_frame_arch (frame);
4428
4429 switch (what.main_action)
4430 {
4431 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4432 /* If we hit the breakpoint at longjmp while stepping, we
4433 install a momentary breakpoint at the target of the
4434 jmp_buf. */
4435
4436 if (debug_infrun)
4437 fprintf_unfiltered (gdb_stdlog,
4438 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4439
4440 ecs->event_thread->stepping_over_breakpoint = 1;
4441
4442 if (what.is_longjmp)
4443 {
4444 struct value *arg_value;
4445
4446 /* If we set the longjmp breakpoint via a SystemTap
4447 probe, then use it to extract the arguments. The
4448 destination PC is the third argument to the
4449 probe. */
4450 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4451 if (arg_value)
4452 jmp_buf_pc = value_as_address (arg_value);
4453 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4454 || !gdbarch_get_longjmp_target (gdbarch,
4455 frame, &jmp_buf_pc))
4456 {
4457 if (debug_infrun)
4458 fprintf_unfiltered (gdb_stdlog,
4459 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4460 "(!gdbarch_get_longjmp_target)\n");
4461 keep_going (ecs);
4462 return;
4463 }
4464
4465 /* Insert a breakpoint at resume address. */
4466 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4467 }
4468 else
4469 check_exception_resume (ecs, frame);
4470 keep_going (ecs);
4471 return;
4472
4473 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4474 {
4475 struct frame_info *init_frame;
4476
4477 /* There are several cases to consider.
4478
4479 1. The initiating frame no longer exists. In this case
4480 we must stop, because the exception or longjmp has gone
4481 too far.
4482
4483 2. The initiating frame exists, and is the same as the
4484 current frame. We stop, because the exception or
4485 longjmp has been caught.
4486
4487 3. The initiating frame exists and is different from
4488 the current frame. This means the exception or longjmp
4489 has been caught beneath the initiating frame, so keep
4490 going.
4491
4492 4. The longjmp breakpoint has been placed just to protect
4493 against stale dummy frames, and the user is not interested
4494 in stopping around longjmps. */
4495
4496 if (debug_infrun)
4497 fprintf_unfiltered (gdb_stdlog,
4498 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4499
4500 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4501 != NULL);
4502 delete_exception_resume_breakpoint (ecs->event_thread);
4503
4504 if (what.is_longjmp)
4505 {
4506 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4507
4508 if (!frame_id_p (ecs->event_thread->initiating_frame))
4509 {
4510 /* Case 4. */
4511 keep_going (ecs);
4512 return;
4513 }
4514 }
4515
4516 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4517
4518 if (init_frame)
4519 {
4520 struct frame_id current_id
4521 = get_frame_id (get_current_frame ());
4522 if (frame_id_eq (current_id,
4523 ecs->event_thread->initiating_frame))
4524 {
4525 /* Case 2. Fall through. */
4526 }
4527 else
4528 {
4529 /* Case 3. */
4530 keep_going (ecs);
4531 return;
4532 }
4533 }
4534
4535 /* For Cases 1 and 2, remove the step-resume breakpoint,
4536 if it exists. */
4537 delete_step_resume_breakpoint (ecs->event_thread);
4538
4539 ecs->event_thread->control.stop_step = 1;
4540 print_end_stepping_range_reason ();
4541 stop_stepping (ecs);
4542 }
4543 return;
4544
4545 case BPSTAT_WHAT_SINGLE:
4546 if (debug_infrun)
4547 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4548 ecs->event_thread->stepping_over_breakpoint = 1;
4549 /* Still need to check other stuff, at least the case where
4550 we are stepping and step out of the right range. */
4551 break;
4552
4553 case BPSTAT_WHAT_STEP_RESUME:
4554 if (debug_infrun)
4555 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4556
4557 delete_step_resume_breakpoint (ecs->event_thread);
4558 if (ecs->event_thread->control.proceed_to_finish
4559 && execution_direction == EXEC_REVERSE)
4560 {
4561 struct thread_info *tp = ecs->event_thread;
4562
4563 /* We are finishing a function in reverse, and just hit
4564 the step-resume breakpoint at the start address of
4565 the function, and we're almost there -- just need to
4566 back up by one more single-step, which should take us
4567 back to the function call. */
4568 tp->control.step_range_start = tp->control.step_range_end = 1;
4569 keep_going (ecs);
4570 return;
4571 }
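/* We need the stop function's bounds to recognize the reverse
   step-over-call case below.  */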
4572 fill_in_stop_func (gdbarch, ecs);
4573 if (stop_pc == ecs->stop_func_start
4574 && execution_direction == EXEC_REVERSE)
4575 {
4576 /* We are stepping over a function call in reverse, and
4577 just hit the step-resume breakpoint at the start
4578 address of the function. Go back to single-stepping,
4579 which should take us back to the function call. */
4580 ecs->event_thread->stepping_over_breakpoint = 1;
4581 keep_going (ecs);
4582 return;
4583 }
4584 break;
4585
4586 case BPSTAT_WHAT_STOP_NOISY:
4587 if (debug_infrun)
4588 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4589 stop_print_frame = 1;
4590
4591 /* We are about to nuke the step_resume_breakpoint via the
4592 cleanup chain, so no need to worry about it here. */
4593
4594 stop_stepping (ecs);
4595 return;
4596
4597 case BPSTAT_WHAT_STOP_SILENT:
4598 if (debug_infrun)
4599 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4600 stop_print_frame = 0;
4601
4602 /* We are about to nuke the step_resume_breakpoint via the
4603 cleanup chain, so no need to worry about it here. */
4604
4605 stop_stepping (ecs);
4606 return;
4607
4608 case BPSTAT_WHAT_HP_STEP_RESUME:
4609 if (debug_infrun)
4610 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4611
4612 delete_step_resume_breakpoint (ecs->event_thread);
4613 if (ecs->event_thread->step_after_step_resume_breakpoint)
4614 {
4615 /* Back when the step-resume breakpoint was inserted, we
4616 were trying to single-step off a breakpoint. Go back
4617 to doing that. */
4618 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4619 ecs->event_thread->stepping_over_breakpoint = 1;
4620 keep_going (ecs);
4621 return;
4622 }
4623 break;
4624
4625 case BPSTAT_WHAT_KEEP_CHECKING:
4626 break;
4627 }
4628 }
4629
4630 /* We come here if we hit a breakpoint but should not
4631 stop for it. Possibly we also were stepping
4632 and should stop for that. So fall through and
4633 test for stepping. But, if not stepping,
4634 do not stop. */
4635
4636 /* In all-stop mode, if we're currently stepping but have stopped in
4637 some other thread, we need to switch back to the stepped thread. */
4638 if (!non_stop)
4639 {
4640 struct thread_info *tp;
4641
4642 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4643 ecs->event_thread);
4644 if (tp)
4645 {
4646 /* However, if the current thread is blocked on some internal
4647 breakpoint, and we simply need to step over that breakpoint
4648 to get it going again, do that first. */
4649 if ((ecs->event_thread->control.trap_expected
4650 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
4651 || ecs->event_thread->stepping_over_breakpoint)
4652 {
4653 keep_going (ecs);
4654 return;
4655 }
4656
4657 /* If the stepping thread exited, then don't try to switch
4658 back and resume it, which could fail in several different
4659 ways depending on the target. Instead, just keep going.
4660
4661 We can find a stepping dead thread in the thread list in
4662 two cases:
4663
4664 - The target supports thread exit events, and when the
4665 target tries to delete the thread from the thread list,
4666 inferior_ptid pointed at the exiting thread. In such
4667 case, calling delete_thread does not really remove the
4668 thread from the list; instead, the thread is left listed,
4669 with 'exited' state.
4670
4671 - The target's debug interface does not support thread
4672 exit events, and so we have no idea whatsoever if the
4673 previously stepping thread is still alive. For that
4674 reason, we need to synchronously query the target
4675 now. */
4676 if (is_exited (tp->ptid)
4677 || !target_thread_alive (tp->ptid))
4678 {
4679 if (debug_infrun)
4680 fprintf_unfiltered (gdb_stdlog,
4681 "infrun: not switching back to "
4682 "stepped thread, it has vanished\n");
4683
4684 delete_thread (tp->ptid);
4685 keep_going (ecs);
4686 return;
4687 }
4688
4689 /* Otherwise, we no longer expect a trap in the current thread.
4690 Clear the trap_expected flag before switching back -- this is
4691 what keep_going would do as well, if we called it. */
4692 ecs->event_thread->control.trap_expected = 0;
4693
4694 if (debug_infrun)
4695 fprintf_unfiltered (gdb_stdlog,
4696 "infrun: switching back to stepped thread\n");
4697
4698 ecs->event_thread = tp;
4699 ecs->ptid = tp->ptid;
4700 context_switch (ecs->ptid);
4701 keep_going (ecs);
4702 return;
4703 }
4704 }
4705
4706 if (ecs->event_thread->control.step_resume_breakpoint)
4707 {
4708 if (debug_infrun)
4709 fprintf_unfiltered (gdb_stdlog,
4710 "infrun: step-resume breakpoint is inserted\n");
4711
4712 /* Having a step-resume breakpoint overrides anything
4713 else having to do with stepping commands until
4714 that breakpoint is reached. */
4715 keep_going (ecs);
4716 return;
4717 }
4718
4719 if (ecs->event_thread->control.step_range_end == 0)
4720 {
4721 if (debug_infrun)
4722 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4723 /* Likewise if we aren't even stepping. */
4724 keep_going (ecs);
4725 return;
4726 }
4727
4728 /* Re-fetch current thread's frame in case the code above caused
4729 the frame cache to be re-initialized, making our FRAME variable
4730 a dangling pointer. */
4731 frame = get_current_frame ();
4732 gdbarch = get_frame_arch (frame);
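/* Likewise refresh the stop function's bounds, which several of the
   checks below rely on.  */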
4733 fill_in_stop_func (gdbarch, ecs);
4734
4735 /* If stepping through a line, keep going if still within it.
4736
4737 Note that step_range_end is the address of the first instruction
4738 beyond the step range, and NOT the address of the last instruction
4739 within it!
4740
4741 Note also that during reverse execution, we may be stepping
4742 through a function epilogue and therefore must detect when
4743 the current-frame changes in the middle of a line. */
4744
4745 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4746 && (execution_direction != EXEC_REVERSE
4747 || frame_id_eq (get_frame_id (frame),
4748 ecs->event_thread->control.step_frame_id)))
4749 {
4750 if (debug_infrun)
4751 fprintf_unfiltered
4752 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4753 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4754 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4755
4756 /* Tentatively re-enable range stepping; `resume' disables it if
4757 necessary (e.g., if we're stepping over a breakpoint or we
4758 have software watchpoints). */
4759 ecs->event_thread->control.may_range_step = 1;
4760
4761 /* When stepping backward, stop at beginning of line range
4762 (unless it's the function entry point, in which case
4763 keep going back to the call point). */
4764 if (stop_pc == ecs->event_thread->control.step_range_start
4765 && stop_pc != ecs->stop_func_start
4766 && execution_direction == EXEC_REVERSE)
4767 {
4768 ecs->event_thread->control.stop_step = 1;
4769 print_end_stepping_range_reason ();
4770 stop_stepping (ecs);
4771 }
4772 else
4773 keep_going (ecs);
4774
4775 return;
4776 }
4777
4778 /* We stepped out of the stepping range. */
4779
4780 /* If we are stepping at the source level and entered the runtime
4781 loader dynamic symbol resolution code...
4782
4783 EXEC_FORWARD: we keep on single stepping until we exit the run
4784 time loader code and reach the callee's address.
4785
4786 EXEC_REVERSE: we've already executed the callee (backward), and
4787 the runtime loader code is handled just like any other
4788 undebuggable function call. Now we need only keep stepping
4789 backward through the trampoline code, and that's handled further
4790 down, so there is nothing for us to do here. */
4791
4792 if (execution_direction != EXEC_REVERSE
4793 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4794 && in_solib_dynsym_resolve_code (stop_pc))
4795 {
4796 CORE_ADDR pc_after_resolver =
4797 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4798
4799 if (debug_infrun)
4800 fprintf_unfiltered (gdb_stdlog,
4801 "infrun: stepped into dynsym resolve code\n");
4802
4803 if (pc_after_resolver)
4804 {
4805 /* Set up a step-resume breakpoint at the address
4806 indicated by SKIP_SOLIB_RESOLVER. */
4807 struct symtab_and_line sr_sal;
4808
4809 init_sal (&sr_sal);
4810 sr_sal.pc = pc_after_resolver;
4811 sr_sal.pspace = get_frame_program_space (frame);
4812
4813 insert_step_resume_breakpoint_at_sal (gdbarch,
4814 sr_sal, null_frame_id);
4815 }
4816
4817 keep_going (ecs);
4818 return;
4819 }
4820
4821 if (ecs->event_thread->control.step_range_end != 1
4822 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4823 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4824 && get_frame_type (frame) == SIGTRAMP_FRAME)
4825 {
4826 if (debug_infrun)
4827 fprintf_unfiltered (gdb_stdlog,
4828 "infrun: stepped into signal trampoline\n");
4829 /* The inferior, while doing a "step" or "next", has ended up in
4830 a signal trampoline (either by a signal being delivered or by
4831 the signal handler returning). Just single-step until the
4832 inferior leaves the trampoline (either by calling the handler
4833 or returning). */
4834 keep_going (ecs);
4835 return;
4836 }
4837
4838 /* If we're in the return path from a shared library trampoline,
4839 we want to proceed through the trampoline when stepping. */
4840 /* macro/2012-04-25: This needs to come before the subroutine
4841 call check below as on some targets return trampolines look
4842 like subroutine calls (MIPS16 return thunks). */
4843 if (gdbarch_in_solib_return_trampoline (gdbarch,
4844 stop_pc, ecs->stop_func_name)
4845 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4846 {
4847 /* Determine where this trampoline returns. */
4848 CORE_ADDR real_stop_pc;
4849
4850 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4851
4852 if (debug_infrun)
4853 fprintf_unfiltered (gdb_stdlog,
4854 "infrun: stepped into solib return tramp\n");
4855
4856 /* Only proceed through if we know where it's going. */
4857 if (real_stop_pc)
4858 {
4859 /* And put the step-breakpoint there and go until there. */
4860 struct symtab_and_line sr_sal;
4861
4862 init_sal (&sr_sal); /* initialize to zeroes */
4863 sr_sal.pc = real_stop_pc;
4864 sr_sal.section = find_pc_overlay (sr_sal.pc);
4865 sr_sal.pspace = get_frame_program_space (frame);
4866
4867 /* Do not specify what the fp should be when we stop since
4868 on some machines the prologue is where the new fp value
4869 is established. */
4870 insert_step_resume_breakpoint_at_sal (gdbarch,
4871 sr_sal, null_frame_id);
4872
4873 /* Restart without fiddling with the step ranges or
4874 other state. */
4875 keep_going (ecs);
4876 return;
4877 }
4878 }
4879
4880 /* Check for subroutine calls. The check for the current frame
4881 equalling the step ID is not necessary - the check of the
4882 previous frame's ID is sufficient - but it is a common case and
4883 cheaper than checking the previous frame's ID.
4884
4885 NOTE: frame_id_eq will never report two invalid frame IDs as
4886 being equal, so to get into this block, both the current and
4887 previous frame must have valid frame IDs. */
4888 /* The outer_frame_id check is a heuristic to detect stepping
4889 through startup code. If we step over an instruction which
4890 sets the stack pointer from an invalid value to a valid value,
4891 we may detect that as a subroutine call from the mythical
4892 "outermost" function. This could be fixed by marking
4893 outermost frames as !stack_p,code_p,special_p. Then the
4894 initial outermost frame, before sp was valid, would
4895 have code_addr == &_start. See the comment in frame_id_eq
4896 for more. */
4897 if (!frame_id_eq (get_stack_frame_id (frame),
4898 ecs->event_thread->control.step_stack_frame_id)
4899 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4900 ecs->event_thread->control.step_stack_frame_id)
4901 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4902 outer_frame_id)
4903 || step_start_function != find_pc_function (stop_pc))))
4904 {
4905 CORE_ADDR real_stop_pc;
4906
4907 if (debug_infrun)
4908 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4909
4910 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4911 || ((ecs->event_thread->control.step_range_end == 1)
4912 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4913 ecs->stop_func_start)))
4914 {
4915 /* I presume that step_over_calls is only 0 when we're
4916 supposed to be stepping at the assembly language level
4917 ("stepi"). Just stop. */
4918 /* Also, maybe we just did a "nexti" inside a prolog, so we
4919 thought it was a subroutine call but it was not. Stop as
4920 well. FENN */
4921 /* And this works the same backward as frontward. MVS */
4922 ecs->event_thread->control.stop_step = 1;
4923 print_end_stepping_range_reason ();
4924 stop_stepping (ecs);
4925 return;
4926 }
4927
4928 /* Reverse stepping through solib trampolines. */
4929
4930 if (execution_direction == EXEC_REVERSE
4931 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4932 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4933 || (ecs->stop_func_start == 0
4934 && in_solib_dynsym_resolve_code (stop_pc))))
4935 {
4936 /* Any solib trampoline code can be handled in reverse
4937 by simply continuing to single-step. We have already
4938 executed the solib function (backwards), and a few
4939 steps will take us back through the trampoline to the
4940 caller. */
4941 keep_going (ecs);
4942 return;
4943 }
4944
4945 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4946 {
4947 /* We're doing a "next".
4948
4949 Normal (forward) execution: set a breakpoint at the
4950 callee's return address (the address at which the caller
4951 will resume).
4952
4953 Reverse (backward) execution: set the step-resume
4954 breakpoint at the start of the function that we just
4955 stepped into (backwards), and continue to there. When we
4956 get there, we'll need to single-step back to the caller. */
4957
4958 if (execution_direction == EXEC_REVERSE)
4959 {
4960 /* If we're already at the start of the function, we've either
4961 just stepped backward into a single instruction function,
4962 or stepped back out of a signal handler to the first instruction
4963 of the function. Just keep going, which will single-step back
4964 to the caller. */
4965 if (ecs->stop_func_start != stop_pc)
4966 {
4967 struct symtab_and_line sr_sal;
4968
4969 /* Normal function call return (static or dynamic). */
4970 init_sal (&sr_sal);
4971 sr_sal.pc = ecs->stop_func_start;
4972 sr_sal.pspace = get_frame_program_space (frame);
4973 insert_step_resume_breakpoint_at_sal (gdbarch,
4974 sr_sal, null_frame_id);
4975 }
4976 }
4977 else
4978 insert_step_resume_breakpoint_at_caller (frame);
4979
4980 keep_going (ecs);
4981 return;
4982 }
4983
4984 /* If we are in a function call trampoline (a stub between the
4985 calling routine and the real function), locate the real
4986 function. That's what tells us (a) whether we want to step
4987 into it at all, and (b) what prologue we want to run to the
4988 end of, if we do step into it. */
4989 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4990 if (real_stop_pc == 0)
4991 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4992 if (real_stop_pc != 0)
4993 ecs->stop_func_start = real_stop_pc;
4994
4995 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4996 {
4997 struct symtab_and_line sr_sal;
4998
4999 init_sal (&sr_sal);
5000 sr_sal.pc = ecs->stop_func_start;
5001 sr_sal.pspace = get_frame_program_space (frame);
5002
5003 insert_step_resume_breakpoint_at_sal (gdbarch,
5004 sr_sal, null_frame_id);
5005 keep_going (ecs);
5006 return;
5007 }
5008
5009 /* If we have line number information for the function we are
5010 thinking of stepping into and the function isn't on the skip
5011 list, step into it.
5012
5013 If there are several symtabs at that PC (e.g. with include
5014 files), we just want to know whether *any* of them have line
5015 numbers. find_pc_line handles this. */
5016 {
5017 struct symtab_and_line tmp_sal;
5018
5019 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5020 if (tmp_sal.line != 0
5021 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5022 &tmp_sal))
5023 {
5024 if (execution_direction == EXEC_REVERSE)
5025 handle_step_into_function_backward (gdbarch, ecs);
5026 else
5027 handle_step_into_function (gdbarch, ecs);
5028 return;
5029 }
5030 }
5031
5032 /* If we have no line number and the step-stop-if-no-debug is
5033 set, we stop the step so that the user has a chance to switch
5034 to assembly mode. */
5035 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5036 && step_stop_if_no_debug)
5037 {
5038 ecs->event_thread->control.stop_step = 1;
5039 print_end_stepping_range_reason ();
5040 stop_stepping (ecs);
5041 return;
5042 }
5043
5044 if (execution_direction == EXEC_REVERSE)
5045 {
5046 /* If we're already at the start of the function, we've either just
5047 stepped backward into a single instruction function without line
5048 number info, or stepped back out of a signal handler to the first
5049 instruction of the function without line number info. Just keep
5050 going, which will single-step back to the caller. */
5051 if (ecs->stop_func_start != stop_pc)
5052 {
5053 /* Set a breakpoint at callee's start address.
5054 From there we can step once and be back in the caller. */
5055 struct symtab_and_line sr_sal;
5056
5057 init_sal (&sr_sal);
5058 sr_sal.pc = ecs->stop_func_start;
5059 sr_sal.pspace = get_frame_program_space (frame);
5060 insert_step_resume_breakpoint_at_sal (gdbarch,
5061 sr_sal, null_frame_id);
5062 }
5063 }
5064 else
5065 /* Set a breakpoint at callee's return address (the address
5066 at which the caller will resume). */
5067 insert_step_resume_breakpoint_at_caller (frame);
5068
5069 keep_going (ecs);
5070 return;
5071 }
5072
5073 /* Reverse stepping through solib trampolines. */
5074
5075 if (execution_direction == EXEC_REVERSE
5076 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5077 {
5078 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5079 || (ecs->stop_func_start == 0
5080 && in_solib_dynsym_resolve_code (stop_pc)))
5081 {
5082 /* Any solib trampoline code can be handled in reverse
5083 by simply continuing to single-step. We have already
5084 executed the solib function (backwards), and a few
5085 steps will take us back through the trampoline to the
5086 caller. */
5087 keep_going (ecs);
5088 return;
5089 }
5090 else if (in_solib_dynsym_resolve_code (stop_pc))
5091 {
5092 /* Stepped backward into the solib dynsym resolver.
5093 Set a breakpoint at its start and continue, then
5094 one more step will take us out. */
5095 struct symtab_and_line sr_sal;
5096
5097 init_sal (&sr_sal);
5098 sr_sal.pc = ecs->stop_func_start;
5099 sr_sal.pspace = get_frame_program_space (frame);
5100 insert_step_resume_breakpoint_at_sal (gdbarch,
5101 sr_sal, null_frame_id);
5102 keep_going (ecs);
5103 return;
5104 }
5105 }
5106
5107 stop_pc_sal = find_pc_line (stop_pc, 0);
5108
5109 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5110 the trampoline processing logic, however, there are some trampolines
5111 that have no names, so we should do trampoline handling first. */
5112 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5113 && ecs->stop_func_name == NULL
5114 && stop_pc_sal.line == 0)
5115 {
5116 if (debug_infrun)
5117 fprintf_unfiltered (gdb_stdlog,
5118 "infrun: stepped into undebuggable function\n");
5119
5120 /* The inferior just stepped into, or returned to, an
5121 undebuggable function (where there is no debugging information
5122 and no line number corresponding to the address where the
5123 inferior stopped). Since we want to skip this kind of code,
5124 we keep going until the inferior returns from this
5125 function - unless the user has asked us not to (via
5126 set step-mode) or we no longer know how to get back
5127 to the call site. */
5128 if (step_stop_if_no_debug
5129 || !frame_id_p (frame_unwind_caller_id (frame)))
5130 {
5131 /* If we have no line number and the step-stop-if-no-debug
5132 is set, we stop the step so that the user has a chance to
5133 switch to assembly mode. */
5134 ecs->event_thread->control.stop_step = 1;
5135 print_end_stepping_range_reason ();
5136 stop_stepping (ecs);
5137 return;
5138 }
5139 else
5140 {
5141 /* Set a breakpoint at callee's return address (the address
5142 at which the caller will resume). */
5143 insert_step_resume_breakpoint_at_caller (frame);
5144 keep_going (ecs);
5145 return;
5146 }
5147 }
5148
5149 if (ecs->event_thread->control.step_range_end == 1)
5150 {
5151 /* It is stepi or nexti. We always want to stop stepping after
5152 one instruction. */
5153 if (debug_infrun)
5154 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5155 ecs->event_thread->control.stop_step = 1;
5156 print_end_stepping_range_reason ();
5157 stop_stepping (ecs);
5158 return;
5159 }
5160
5161 if (stop_pc_sal.line == 0)
5162 {
5163 /* We have no line number information. That means to stop
5164 stepping (does this always happen right after one instruction,
5165 when we do "s" in a function with no line numbers,
5166 or can this happen as a result of a return or longjmp?). */
5167 if (debug_infrun)
5168 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5169 ecs->event_thread->control.stop_step = 1;
5170 print_end_stepping_range_reason ();
5171 stop_stepping (ecs);
5172 return;
5173 }
5174
5175 /* Look for "calls" to inlined functions, part one. If the inline
5176 frame machinery detected some skipped call sites, we have entered
5177 a new inline function. */
5178
5179 if (frame_id_eq (get_frame_id (get_current_frame ()),
5180 ecs->event_thread->control.step_frame_id)
5181 && inline_skipped_frames (ecs->ptid))
5182 {
5183 struct symtab_and_line call_sal;
5184
5185 if (debug_infrun)
5186 fprintf_unfiltered (gdb_stdlog,
5187 "infrun: stepped into inlined function\n");
5188
5189 find_frame_sal (get_current_frame (), &call_sal);
5190
5191 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5192 {
5193 /* For "step", we're going to stop. But if the call site
5194 for this inlined function is on the same source line as
5195 we were previously stepping, go down into the function
5196 first. Otherwise stop at the call site. */
5197
5198 if (call_sal.line == ecs->event_thread->current_line
5199 && call_sal.symtab == ecs->event_thread->current_symtab)
5200 step_into_inline_frame (ecs->ptid);
5201
5202 ecs->event_thread->control.stop_step = 1;
5203 print_end_stepping_range_reason ();
5204 stop_stepping (ecs);
5205 return;
5206 }
5207 else
5208 {
5209 /* For "next", we should stop at the call site if it is on a
5210 different source line. Otherwise continue through the
5211 inlined function. */
5212 if (call_sal.line == ecs->event_thread->current_line
5213 && call_sal.symtab == ecs->event_thread->current_symtab)
5214 keep_going (ecs);
5215 else
5216 {
5217 ecs->event_thread->control.stop_step = 1;
5218 print_end_stepping_range_reason ();
5219 stop_stepping (ecs);
5220 }
5221 return;
5222 }
5223 }
5224
5225 /* Look for "calls" to inlined functions, part two. If we are still
5226 in the same real function we were stepping through, but we have
5227 to go further up to find the exact frame ID, we are stepping
5228 through a more inlined call beyond its call site. */
5229
5230 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5231 && !frame_id_eq (get_frame_id (get_current_frame ()),
5232 ecs->event_thread->control.step_frame_id)
5233 && stepped_in_from (get_current_frame (),
5234 ecs->event_thread->control.step_frame_id))
5235 {
5236 if (debug_infrun)
5237 fprintf_unfiltered (gdb_stdlog,
5238 "infrun: stepping through inlined function\n");
5239
5240 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5241 keep_going (ecs);
5242 else
5243 {
5244 ecs->event_thread->control.stop_step = 1;
5245 print_end_stepping_range_reason ();
5246 stop_stepping (ecs);
5247 }
5248 return;
5249 }
5250
5251 if ((stop_pc == stop_pc_sal.pc)
5252 && (ecs->event_thread->current_line != stop_pc_sal.line
5253 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5254 {
5255 /* We are at the start of a different line. So stop. Note that
5256 we don't stop if we step into the middle of a different line.
5257 That is said to make things like for (;;) statements work
5258 better. */
5259 if (debug_infrun)
5260 fprintf_unfiltered (gdb_stdlog,
5261 "infrun: stepped to a different line\n");
5262 ecs->event_thread->control.stop_step = 1;
5263 print_end_stepping_range_reason ();
5264 stop_stepping (ecs);
5265 return;
5266 }
5267
5268 /* We aren't done stepping.
5269
5270 Optimize by setting the stepping range to the line.
5271 (We might not be in the original line, but if we entered a
5272 new line in mid-statement, we continue stepping. This makes
5273 things like for(;;) statements work better.) */
5274
5275 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5276 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5277 ecs->event_thread->control.may_range_step = 1;
5278 set_step_info (frame, stop_pc_sal);
5279
5280 if (debug_infrun)
5281 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5282 keep_going (ecs);
5283 }
5284
5285 /* Is thread TP in the middle of single-stepping? */
5286
5287 static int
5288 currently_stepping (struct thread_info *tp)
5289 {
5290 return ((tp->control.step_range_end
5291 && tp->control.step_resume_breakpoint == NULL)
5292 || tp->control.trap_expected
5293 || bpstat_should_step ());
5294 }
5295
5296 /* Returns true if any thread *but* the one passed in "data" is in the
5297 middle of stepping or of handling a "next". */
5298
5299 static int
5300 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5301 {
5302 if (tp == data)
5303 return 0;
5304
5305 return (tp->control.step_range_end
5306 || tp->control.trap_expected);
5307 }
5308
5309 /* Inferior has stepped into a subroutine call with source code that
5310 we should not step over. Do step to the first line of code in
5311 it. */
5312
5313 static void
5314 handle_step_into_function (struct gdbarch *gdbarch,
5315 struct execution_control_state *ecs)
5316 {
5317 struct symtab *s;
5318 struct symtab_and_line stop_func_sal, sr_sal;
5319
5320 fill_in_stop_func (gdbarch, ecs);
5321
5322 s = find_pc_symtab (stop_pc);
5323 if (s && s->language != language_asm)
5324 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5325 ecs->stop_func_start);
5326
5327 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5328 /* Use the step_resume_break to step until the end of the prologue,
5329 even if that involves jumps (as it seems to on the vax under
5330 4.2). */
5331 /* If the prologue ends in the middle of a source line, continue to
5332 the end of that source line (if it is still within the function).
5333 Otherwise, just go to end of prologue. */
5334 if (stop_func_sal.end
5335 && stop_func_sal.pc != ecs->stop_func_start
5336 && stop_func_sal.end < ecs->stop_func_end)
5337 ecs->stop_func_start = stop_func_sal.end;
5338
5339 /* Architectures which require breakpoint adjustment might not be able
5340 to place a breakpoint at the computed address. If so, the test
5341 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5342 ecs->stop_func_start to an address at which a breakpoint may be
5343 legitimately placed.
5344
5345 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5346 made, GDB will enter an infinite loop when stepping through
5347 optimized code consisting of VLIW instructions which contain
5348 subinstructions corresponding to different source lines. On
5349 FR-V, it's not permitted to place a breakpoint on any but the
5350 first subinstruction of a VLIW instruction. When a breakpoint is
5351 set, GDB will adjust the breakpoint address to the beginning of
5352 the VLIW instruction. Thus, we need to make the corresponding
5353 adjustment here when computing the stop address. */
5354
5355 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5356 {
5357 ecs->stop_func_start
5358 = gdbarch_adjust_breakpoint_address (gdbarch,
5359 ecs->stop_func_start);
5360 }
5361
5362 if (ecs->stop_func_start == stop_pc)
5363 {
5364 /* We are already there: stop now. */
5365 ecs->event_thread->control.stop_step = 1;
5366 print_end_stepping_range_reason ();
5367 stop_stepping (ecs);
5368 return;
5369 }
5370 else
5371 {
5372 /* Put the step-breakpoint there and go until there. */
5373 init_sal (&sr_sal); /* initialize to zeroes */
5374 sr_sal.pc = ecs->stop_func_start;
5375 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5376 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5377
5378 /* Do not specify what the fp should be when we stop since on
5379 some machines the prologue is where the new fp value is
5380 established. */
5381 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5382
5383 /* And make sure stepping stops right away then. */
5384 ecs->event_thread->control.step_range_end
5385 = ecs->event_thread->control.step_range_start;
5386 }
5387 keep_going (ecs);
5388 }
5389
5390 /* Inferior has stepped backward into a subroutine call with source
5391 code that we should not step over. Do step to the beginning of the
5392 last line of code in it. */
5393
5394 static void
5395 handle_step_into_function_backward (struct gdbarch *gdbarch,
5396 struct execution_control_state *ecs)
5397 {
5398 struct symtab *s;
5399 struct symtab_and_line stop_func_sal;
5400
5401 fill_in_stop_func (gdbarch, ecs);
5402
5403 s = find_pc_symtab (stop_pc);
5404 if (s && s->language != language_asm)
5405 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5406 ecs->stop_func_start);
5407
5408 stop_func_sal = find_pc_line (stop_pc, 0);
5409
5410 /* OK, we're just going to keep stepping here. */
5411 if (stop_func_sal.pc == stop_pc)
5412 {
5413 /* We're there already. Just stop stepping now. */
5414 ecs->event_thread->control.stop_step = 1;
5415 print_end_stepping_range_reason ();
5416 stop_stepping (ecs);
5417 }
5418 else
5419 {
5420 /* Else just reset the step range and keep going.
5421 No step-resume breakpoint; they don't work for
5422 epilogues, which can have multiple entry paths. */
5423 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5424 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5425 keep_going (ecs);
5426 }
5427 return;
5428 }
5429
5430 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5431 This is used both to skip over functions and to skip over code. */
5432
5433 static void
5434 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5435 struct symtab_and_line sr_sal,
5436 struct frame_id sr_id,
5437 enum bptype sr_type)
5438 {
5439 /* There should never be more than one step-resume or longjmp-resume
5440 breakpoint per thread, so we should never be setting a new
5441 step_resume_breakpoint when one is already active. */
5442 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5443 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5444
5445 if (debug_infrun)
5446 fprintf_unfiltered (gdb_stdlog,
5447 "infrun: inserting step-resume breakpoint at %s\n",
5448 paddress (gdbarch, sr_sal.pc));
5449
5450 inferior_thread ()->control.step_resume_breakpoint
5451 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5452 }
5453
5454 void
5455 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5456 struct symtab_and_line sr_sal,
5457 struct frame_id sr_id)
5458 {
5459 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5460 sr_sal, sr_id,
5461 bp_step_resume);
5462 }
5463
5464 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5465 This is used to skip a potential signal handler.
5466
5467 This is called with the interrupted function's frame. The signal
5468 handler, when it returns, will resume the interrupted function at
5469 RETURN_FRAME.pc. */
5470
5471 static void
5472 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5473 {
5474 struct symtab_and_line sr_sal;
5475 struct gdbarch *gdbarch;
5476
5477 gdb_assert (return_frame != NULL);
5478 init_sal (&sr_sal); /* initialize to zeros */
5479
5480 gdbarch = get_frame_arch (return_frame);
5481 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5482 sr_sal.section = find_pc_overlay (sr_sal.pc);
5483 sr_sal.pspace = get_frame_program_space (return_frame);
5484
5485 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5486 get_stack_frame_id (return_frame),
5487 bp_hp_step_resume);
5488 }
5489
5490 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5491 is used to skip a function after stepping into it (for "next" or if
5492 the called function has no debugging information).
5493
5494 The current function has almost always been reached by single
5495 stepping a call or return instruction. NEXT_FRAME belongs to the
5496 current function, and the breakpoint will be set at the caller's
5497 resume address.
5498
5499 This is a separate function rather than reusing
5500 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5501 get_prev_frame, which may stop prematurely (see the implementation
5502 of frame_unwind_caller_id for an example). */
5503
5504 static void
5505 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5506 {
5507 struct symtab_and_line sr_sal;
5508 struct gdbarch *gdbarch;
5509
5510 /* We shouldn't have gotten here if we don't know where the call site
5511 is. */
5512 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5513
5514 init_sal (&sr_sal); /* initialize to zeros */
5515
5516 gdbarch = frame_unwind_caller_arch (next_frame);
5517 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5518 frame_unwind_caller_pc (next_frame));
5519 sr_sal.section = find_pc_overlay (sr_sal.pc);
5520 sr_sal.pspace = frame_unwind_program_space (next_frame);
5521
5522 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5523 frame_unwind_caller_id (next_frame));
5524 }
5525
5526 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5527 new breakpoint at the target of a jmp_buf. The handling of
5528 longjmp-resume uses the same mechanisms used for handling
5529 "step-resume" breakpoints. */
5530
5531 static void
5532 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5533 {
5534 /* There should never be more than one longjmp-resume breakpoint per
5535 thread, so we should never be setting a new
5536 longjmp_resume_breakpoint when one is already active. */
5537 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5538
5539 if (debug_infrun)
5540 fprintf_unfiltered (gdb_stdlog,
5541 "infrun: inserting longjmp-resume breakpoint at %s\n",
5542 paddress (gdbarch, pc));
5543
5544 inferior_thread ()->control.exception_resume_breakpoint =
5545 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5546 }
5547
5548 /* Insert an exception resume breakpoint. TP is the thread throwing
5549 the exception. The block B is the block of the unwinder debug hook
5550 function. FRAME is the frame corresponding to the call to this
5551 function. SYM is the symbol of the function argument holding the
5552 target PC of the exception. */
5553
5554 static void
5555 insert_exception_resume_breakpoint (struct thread_info *tp,
5556 struct block *b,
5557 struct frame_info *frame,
5558 struct symbol *sym)
5559 {
5560 volatile struct gdb_exception e;
5561
5562 /* We want to ignore errors here. */
5563 TRY_CATCH (e, RETURN_MASK_ERROR)
5564 {
5565 struct symbol *vsym;
5566 struct value *value;
5567 CORE_ADDR handler;
5568 struct breakpoint *bp;
5569
5570 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5571 value = read_var_value (vsym, frame);
5572 /* If the value was optimized out, revert to the old behavior. */
5573 if (! value_optimized_out (value))
5574 {
5575 handler = value_as_address (value);
5576
5577 if (debug_infrun)
5578 fprintf_unfiltered (gdb_stdlog,
5579 "infrun: exception resume at %lx\n",
5580 (unsigned long) handler);
5581
5582 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5583 handler, bp_exception_resume);
5584
5585 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5586 frame = NULL;
5587
5588 bp->thread = tp->num;
5589 inferior_thread ()->control.exception_resume_breakpoint = bp;
5590 }
5591 }
5592 }
5593
5594 /* A helper for check_exception_resume that sets an
5595 exception-breakpoint based on a SystemTap probe. */
5596
5597 static void
5598 insert_exception_resume_from_probe (struct thread_info *tp,
5599 const struct probe *probe,
5600 struct frame_info *frame)
5601 {
5602 struct value *arg_value;
5603 CORE_ADDR handler;
5604 struct breakpoint *bp;
5605
5606 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5607 if (!arg_value)
5608 return;
5609
5610 handler = value_as_address (arg_value);
5611
5612 if (debug_infrun)
5613 fprintf_unfiltered (gdb_stdlog,
5614 "infrun: exception resume at %s\n",
5615 paddress (get_objfile_arch (probe->objfile),
5616 handler));
5617
5618 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5619 handler, bp_exception_resume);
5620 bp->thread = tp->num;
5621 inferior_thread ()->control.exception_resume_breakpoint = bp;
5622 }
5623
5624 /* This is called when an exception has been intercepted. Check to
5625 see whether the exception's destination is of interest, and if so,
5626 set an exception resume breakpoint there. */
5627
5628 static void
5629 check_exception_resume (struct execution_control_state *ecs,
5630 struct frame_info *frame)
5631 {
5632 volatile struct gdb_exception e;
5633 const struct probe *probe;
5634 struct symbol *func;
5635
5636 /* First see if this exception unwinding breakpoint was set via a
5637 SystemTap probe point. If so, the probe has two arguments: the
5638 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5639 set a breakpoint there. */
5640 probe = find_probe_by_pc (get_frame_pc (frame));
5641 if (probe)
5642 {
5643 insert_exception_resume_from_probe (ecs->event_thread, probe, frame);
5644 return;
5645 }
5646
5647 func = get_frame_function (frame);
5648 if (!func)
5649 return;
5650
5651 TRY_CATCH (e, RETURN_MASK_ERROR)
5652 {
5653 struct block *b;
5654 struct block_iterator iter;
5655 struct symbol *sym;
5656 int argno = 0;
5657
5658 /* The exception breakpoint is a thread-specific breakpoint on
5659 the unwinder's debug hook, declared as:
5660
5661 void _Unwind_DebugHook (void *cfa, void *handler);
5662
5663 The CFA argument indicates the frame to which control is
5664 about to be transferred. HANDLER is the destination PC.
5665
5666 We ignore the CFA and set a temporary breakpoint at HANDLER.
5667 This is not extremely efficient but it avoids issues in gdb
5668 with computing the DWARF CFA, and it also works even in weird
5669 cases such as throwing an exception from inside a signal
5670 handler. */
5671
5672 b = SYMBOL_BLOCK_VALUE (func);
5673 ALL_BLOCK_SYMBOLS (b, iter, sym)
5674 {
5675 if (!SYMBOL_IS_ARGUMENT (sym))
5676 continue;
5677
5678 if (argno == 0)
5679 ++argno;
5680 else
5681 {
5682 insert_exception_resume_breakpoint (ecs->event_thread,
5683 b, frame, sym);
5684 break;
5685 }
5686 }
5687 }
5688 }
5689
5690 static void
5691 stop_stepping (struct execution_control_state *ecs)
5692 {
5693 if (debug_infrun)
5694 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5695
5696 /* Let callers know we don't want to wait for the inferior anymore. */
5697 ecs->wait_some_more = 0;
5698 }
5699
5700 /* This function handles various cases where we need to continue
5701 waiting for the inferior. */
5702 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5703
5704 static void
5705 keep_going (struct execution_control_state *ecs)
5706 {
5707 /* Make sure normal_stop is called if we get a QUIT handled before
5708 reaching resume. */
5709 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5710
5711 /* Save the pc before execution, to compare with pc after stop. */
5712 ecs->event_thread->prev_pc
5713 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5714
5715 /* At this point we are going to keep running the
5716 inferior rather than return to the debugger. */
5717
5718 if (ecs->event_thread->control.trap_expected
5719 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5720 {
5721 /* We took a signal (which we are supposed to pass through to
5722 the inferior, else we'd not get here) and we haven't yet
5723 gotten our trap. Simply continue. */
5724
5725 discard_cleanups (old_cleanups);
5726 resume (currently_stepping (ecs->event_thread),
5727 ecs->event_thread->suspend.stop_signal);
5728 }
5729 else
5730 {
5731 /* Either the trap was not expected, but we are continuing
5732 anyway (the user asked that this signal be passed to the
5733 child)
5734 -- or --
5735 The signal was SIGTRAP, e.g. it was our signal, but we
5736 decided we should resume from it.
5737
5738 We're going to run this baby now!
5739
5740 Note that insert_breakpoints won't try to re-insert
5741 already inserted breakpoints. Therefore, we don't
5742 care if breakpoints were already inserted, or not. */
5743
5744 if (ecs->event_thread->stepping_over_breakpoint)
5745 {
5746 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5747
5748 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5749 /* Since we can't do a displaced step, we have to remove
5750 the breakpoint while we step it. To keep things
5751 simple, we remove them all. */
5752 remove_breakpoints ();
5753 }
5754 else
5755 {
5756 volatile struct gdb_exception e;
5757
5758 /* Stop stepping when inserting breakpoints
5759 has failed. */
5760 TRY_CATCH (e, RETURN_MASK_ERROR)
5761 {
5762 insert_breakpoints ();
5763 }
5764 if (e.reason < 0)
5765 {
5766 exception_print (gdb_stderr, e);
5767 stop_stepping (ecs);
5768 return;
5769 }
5770 }
5771
5772 ecs->event_thread->control.trap_expected
5773 = ecs->event_thread->stepping_over_breakpoint;
5774
5775 /* Do not deliver GDB_SIGNAL_TRAP (except when the user explicitly
5776 specifies that such a signal should be delivered to the
5777 target program).
5778
5779 Typically, this would occur when a user is debugging a
5780 target monitor on a simulator: the target monitor sets a
5781 breakpoint; the simulator encounters this breakpoint and
5782 halts the simulation, handing control to GDB; GDB, noting
5783 that the breakpoint isn't valid, returns control back to the
5784 simulator; the simulator then delivers the hardware
5785 equivalent of a GDB_SIGNAL_TRAP to the program being debugged. */
5786
5787 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5788 && !signal_program[ecs->event_thread->suspend.stop_signal])
5789 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5790
5791 discard_cleanups (old_cleanups);
5792 resume (currently_stepping (ecs->event_thread),
5793 ecs->event_thread->suspend.stop_signal);
5794 }
5795
5796 prepare_to_wait (ecs);
5797 }
5798
5799 /* This function normally comes after a resume, before
5800 handle_inferior_event exits. It takes care of any last bits of
5801 housekeeping, and sets the all-important wait_some_more flag. */
5802
5803 static void
5804 prepare_to_wait (struct execution_control_state *ecs)
5805 {
5806 if (debug_infrun)
5807 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5808
5809 /* This is the old end of the while loop. Let everybody know we
5810 want to wait for the inferior some more and get called again
5811 soon. */
5812 ecs->wait_some_more = 1;
5813 }
5814
5815 /* Several print_*_reason functions to print why the inferior has stopped.
5816 We always print something when the inferior exits, or receives a signal.
5817 The rest of the cases are dealt with later on in normal_stop and
5818 print_it_typical. Ideally there should be a call to one of these
5819 print_*_reason functions from handle_inferior_event each time
5820 stop_stepping is called. */
5821
5822 /* Print why the inferior has stopped.
5823 We are done with a step/next/si/ni command; print why the inferior has
5824 stopped.  Nothing is printed for the CLI; for MI, report the stop reason,
5825 but only if we are not in the middle of doing a "step n" operation for n > 1. */
5826
5827 static void
5828 print_end_stepping_range_reason (void)
5829 {
5830 if ((!inferior_thread ()->step_multi
5831 || !inferior_thread ()->control.stop_step)
5832 && ui_out_is_mi_like_p (current_uiout))
5833 ui_out_field_string (current_uiout, "reason",
5834 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5835 }
5836
5837 /* The inferior was terminated by a signal, print why it stopped. */
5838
5839 static void
5840 print_signal_exited_reason (enum gdb_signal siggnal)
5841 {
5842 struct ui_out *uiout = current_uiout;
5843
5844 annotate_signalled ();
5845 if (ui_out_is_mi_like_p (uiout))
5846 ui_out_field_string
5847 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5848 ui_out_text (uiout, "\nProgram terminated with signal ");
5849 annotate_signal_name ();
5850 ui_out_field_string (uiout, "signal-name",
5851 gdb_signal_to_name (siggnal));
5852 annotate_signal_name_end ();
5853 ui_out_text (uiout, ", ");
5854 annotate_signal_string ();
5855 ui_out_field_string (uiout, "signal-meaning",
5856 gdb_signal_to_string (siggnal));
5857 annotate_signal_string_end ();
5858 ui_out_text (uiout, ".\n");
5859 ui_out_text (uiout, "The program no longer exists.\n");
5860 }
5861
5862 /* The inferior program is finished, print why it stopped. */
5863
5864 static void
5865 print_exited_reason (int exitstatus)
5866 {
5867 struct inferior *inf = current_inferior ();
5868 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5869 struct ui_out *uiout = current_uiout;
5870
5871 annotate_exited (exitstatus);
5872 if (exitstatus)
5873 {
5874 if (ui_out_is_mi_like_p (uiout))
5875 ui_out_field_string (uiout, "reason",
5876 async_reason_lookup (EXEC_ASYNC_EXITED));
5877 ui_out_text (uiout, "[Inferior ");
5878 ui_out_text (uiout, plongest (inf->num));
5879 ui_out_text (uiout, " (");
5880 ui_out_text (uiout, pidstr);
5881 ui_out_text (uiout, ") exited with code ");
5882 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5883 ui_out_text (uiout, "]\n");
5884 }
5885 else
5886 {
5887 if (ui_out_is_mi_like_p (uiout))
5888 ui_out_field_string
5889 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5890 ui_out_text (uiout, "[Inferior ");
5891 ui_out_text (uiout, plongest (inf->num));
5892 ui_out_text (uiout, " (");
5893 ui_out_text (uiout, pidstr);
5894 ui_out_text (uiout, ") exited normally]\n");
5895 }
5896 /* Support the --return-child-result option. */
5897 return_child_result_value = exitstatus;
5898 }
5899
5900 /* Signal received, print why the inferior has stopped. The signal table
5901 tells us to print about it. */
5902
5903 static void
5904 print_signal_received_reason (enum gdb_signal siggnal)
5905 {
5906 struct ui_out *uiout = current_uiout;
5907
5908 annotate_signal ();
5909
5910 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5911 {
5912 struct thread_info *t = inferior_thread ();
5913
5914 ui_out_text (uiout, "\n[");
5915 ui_out_field_string (uiout, "thread-name",
5916 target_pid_to_str (t->ptid));
5917 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5918 ui_out_text (uiout, " stopped");
5919 }
5920 else
5921 {
5922 ui_out_text (uiout, "\nProgram received signal ");
5923 annotate_signal_name ();
5924 if (ui_out_is_mi_like_p (uiout))
5925 ui_out_field_string
5926 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5927 ui_out_field_string (uiout, "signal-name",
5928 gdb_signal_to_name (siggnal));
5929 annotate_signal_name_end ();
5930 ui_out_text (uiout, ", ");
5931 annotate_signal_string ();
5932 ui_out_field_string (uiout, "signal-meaning",
5933 gdb_signal_to_string (siggnal));
5934 annotate_signal_string_end ();
5935 }
5936 ui_out_text (uiout, ".\n");
5937 }
5938
5939 /* Reverse execution: target ran out of history info, print why the inferior
5940 has stopped. */
5941
5942 static void
5943 print_no_history_reason (void)
5944 {
5945 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5946 }
5947
5948 /* Here to return control to GDB when the inferior stops for real.
5949 Print appropriate messages, remove breakpoints, give terminal our modes.
5950
5951 STOP_PRINT_FRAME nonzero means print the executing frame
5952 (pc, function, args, file, line number and line text).
5953 BREAKPOINTS_FAILED nonzero means stop was due to error
5954 attempting to insert breakpoints. */
5955
5956 void
5957 normal_stop (void)
5958 {
5959 struct target_waitstatus last;
5960 ptid_t last_ptid;
5961 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5962
5963 get_last_target_status (&last_ptid, &last);
5964
5965 /* If an exception is thrown from this point on, make sure to
5966 propagate GDB's knowledge of the executing state to the
5967 frontend/user running state. A QUIT is an easy exception to see
5968 here, so do this before any filtered output. */
5969 if (!non_stop)
5970 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5971 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5972 && last.kind != TARGET_WAITKIND_EXITED
5973 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5974 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5975
5976 /* In non-stop mode, we don't want GDB to switch threads behind the
5977 user's back, to avoid races where the user is typing a command to
5978 apply to thread x, but GDB switches to thread y before the user
5979 finishes entering the command. */
5980
5981 /* As with the notification of thread events, we want to delay
5982 notifying the user that we've switched thread context until
5983 the inferior actually stops.
5984
5985 There's no point in saying anything if the inferior has exited.
5986 Note that SIGNALLED here means "exited with a signal", not
5987 "received a signal". */
5988 if (!non_stop
5989 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5990 && target_has_execution
5991 && last.kind != TARGET_WAITKIND_SIGNALLED
5992 && last.kind != TARGET_WAITKIND_EXITED
5993 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5994 {
5995 target_terminal_ours_for_output ();
5996 printf_filtered (_("[Switching to %s]\n"),
5997 target_pid_to_str (inferior_ptid));
5998 annotate_thread_changed ();
5999 previous_inferior_ptid = inferior_ptid;
6000 }
6001
6002 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6003 {
6004 gdb_assert (sync_execution || !target_can_async_p ());
6005
6006 target_terminal_ours_for_output ();
6007 printf_filtered (_("No unwaited-for children left.\n"));
6008 }
6009
6010 if (!breakpoints_always_inserted_mode () && target_has_execution)
6011 {
6012 if (remove_breakpoints ())
6013 {
6014 target_terminal_ours_for_output ();
6015 printf_filtered (_("Cannot remove breakpoints because "
6016 "program is no longer writable.\nFurther "
6017 "execution is probably impossible.\n"));
6018 }
6019 }
6020
6021 /* If an auto-display called a function and that got a signal,
6022 delete that auto-display to avoid an infinite recursion. */
6023
6024 if (stopped_by_random_signal)
6025 disable_current_display ();
6026
6027 /* Don't print a message if in the middle of doing a "step n"
6028 operation for n > 1 */
6029 if (target_has_execution
6030 && last.kind != TARGET_WAITKIND_SIGNALLED
6031 && last.kind != TARGET_WAITKIND_EXITED
6032 && inferior_thread ()->step_multi
6033 && inferior_thread ()->control.stop_step)
6034 goto done;
6035
6036 target_terminal_ours ();
6037 async_enable_stdin ();
6038
6039 /* Set the current source location. This will also happen if we
6040 display the frame below, but the current SAL will be incorrect
6041 during a user hook-stop function. */
6042 if (has_stack_frames () && !stop_stack_dummy)
6043 set_current_sal_from_frame (get_current_frame (), 1);
6044
6045 /* Let the user/frontend see the threads as stopped. */
6046 do_cleanups (old_chain);
6047
6048 /* Look up the hook_stop and run it (CLI internally handles problem
6049 of stop_command's pre-hook not existing). */
6050 if (stop_command)
6051 catch_errors (hook_stop_stub, stop_command,
6052 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6053
6054 if (!has_stack_frames ())
6055 goto done;
6056
6057 if (last.kind == TARGET_WAITKIND_SIGNALLED
6058 || last.kind == TARGET_WAITKIND_EXITED)
6059 goto done;
6060
6061 /* Select innermost stack frame - i.e., current frame is frame 0,
6062 and current location is based on that.
6063 Don't do this on return from a stack dummy routine,
6064 or if the program has exited. */
6065
6066 if (!stop_stack_dummy)
6067 {
6068 select_frame (get_current_frame ());
6069
6070 /* Print current location without a level number, if
6071 we have changed functions or hit a breakpoint.
6072 Print source line if we have one.
6073 bpstat_print() contains the logic deciding in detail
6074 what to print, based on the event(s) that just occurred. */
6075
6076 /* If --batch-silent is enabled then there's no need to print the current
6077 source location, and trying to do so risks causing an error message about
6078 missing source files. */
6079 if (stop_print_frame && !batch_silent)
6080 {
6081 int bpstat_ret;
6082 int source_flag;
6083 int do_frame_printing = 1;
6084 struct thread_info *tp = inferior_thread ();
6085
6086 bpstat_ret = bpstat_print (tp->control.stop_bpstat, last.kind);
6087 switch (bpstat_ret)
6088 {
6089 case PRINT_UNKNOWN:
6090 /* FIXME: cagney/2002-12-01: Given that a frame ID does
6091 (or should) carry around the function and does (or
6092 should) use that when doing a frame comparison. */
6093 if (tp->control.stop_step
6094 && frame_id_eq (tp->control.step_frame_id,
6095 get_frame_id (get_current_frame ()))
6096 && step_start_function == find_pc_function (stop_pc))
6097 source_flag = SRC_LINE; /* Finished step, just
6098 print source line. */
6099 else
6100 source_flag = SRC_AND_LOC; /* Print location and
6101 source line. */
6102 break;
6103 case PRINT_SRC_AND_LOC:
6104 source_flag = SRC_AND_LOC; /* Print location and
6105 source line. */
6106 break;
6107 case PRINT_SRC_ONLY:
6108 source_flag = SRC_LINE;
6109 break;
6110 case PRINT_NOTHING:
6111 source_flag = SRC_LINE; /* something bogus */
6112 do_frame_printing = 0;
6113 break;
6114 default:
6115 internal_error (__FILE__, __LINE__, _("Unknown value."));
6116 }
6117
6118 /* The behavior of this routine with respect to the source
6119 flag is:
6120 SRC_LINE: Print only source line
6121 LOCATION: Print only location
6122 SRC_AND_LOC: Print location and source line. */
6123 if (do_frame_printing)
6124 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
6125
6126 /* Display the auto-display expressions. */
6127 do_displays ();
6128 }
6129 }
6130
6131 /* Save the function value return registers, if we care.
6132 We might be about to restore their previous contents. */
6133 if (inferior_thread ()->control.proceed_to_finish
6134 && execution_direction != EXEC_REVERSE)
6135 {
6136 /* This should not be necessary. */
6137 if (stop_registers)
6138 regcache_xfree (stop_registers);
6139
6140 /* NB: The copy goes through to the target picking up the value of
6141 all the registers. */
6142 stop_registers = regcache_dup (get_current_regcache ());
6143 }
6144
6145 if (stop_stack_dummy == STOP_STACK_DUMMY)
6146 {
6147 /* Pop the empty frame that contains the stack dummy.
6148 This also restores inferior state prior to the call
6149 (struct infcall_suspend_state). */
6150 struct frame_info *frame = get_current_frame ();
6151
6152 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6153 frame_pop (frame);
6154 /* frame_pop() calls reinit_frame_cache as the last thing it
6155 does which means there's currently no selected frame. We
6156 don't need to re-establish a selected frame if the dummy call
6157 returns normally, that will be done by
6158 restore_infcall_control_state. However, we do have to handle
6159 the case where the dummy call is returning after being
6160 stopped (e.g. the dummy call previously hit a breakpoint).
6161 We can't know which case we have so just always re-establish
6162 a selected frame here. */
6163 select_frame (get_current_frame ());
6164 }
6165
6166 done:
6167 annotate_stopped ();
6168
6169 /* Suppress the stop observer if we're in the middle of:
6170
6171 - a step n (n > 1), as there are still more steps to be done.
6172
6173 - a "finish" command, as the observer will be called in
6174 finish_command_continuation, so it can include the inferior
6175 function's return value.
6176
6177 - calling an inferior function, as we pretend the inferior didn't
6178 run at all. The return value of the call is handled by the
6179 expression evaluator, through call_function_by_hand. */
6180
6181 if (!target_has_execution
6182 || last.kind == TARGET_WAITKIND_SIGNALLED
6183 || last.kind == TARGET_WAITKIND_EXITED
6184 || last.kind == TARGET_WAITKIND_NO_RESUMED
6185 || (!(inferior_thread ()->step_multi
6186 && inferior_thread ()->control.stop_step)
6187 && !(inferior_thread ()->control.stop_bpstat
6188 && inferior_thread ()->control.proceed_to_finish)
6189 && !inferior_thread ()->control.in_infcall))
6190 {
6191 if (!ptid_equal (inferior_ptid, null_ptid))
6192 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6193 stop_print_frame);
6194 else
6195 observer_notify_normal_stop (NULL, stop_print_frame);
6196 }
6197
6198 if (target_has_execution)
6199 {
6200 if (last.kind != TARGET_WAITKIND_SIGNALLED
6201 && last.kind != TARGET_WAITKIND_EXITED)
6202 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6203 Delete any breakpoint that is to be deleted at the next stop. */
6204 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6205 }
6206
6207 /* Try to get rid of automatically added inferiors that are no
6208 longer needed. Keeping those around slows down things linearly.
6209 Note that this never removes the current inferior. */
6210 prune_inferiors ();
6211 }
6212
6213 static int
6214 hook_stop_stub (void *cmd)
6215 {
6216 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6217 return (0);
6218 }
6219 \f
6220 int
6221 signal_stop_state (int signo)
6222 {
6223 return signal_stop[signo];
6224 }
6225
6226 int
6227 signal_print_state (int signo)
6228 {
6229 return signal_print[signo];
6230 }
6231
6232 int
6233 signal_pass_state (int signo)
6234 {
6235 return signal_program[signo];
6236 }
6237
6238 static void
6239 signal_cache_update (int signo)
6240 {
6241 if (signo == -1)
6242 {
6243 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6244 signal_cache_update (signo);
6245
6246 return;
6247 }
6248
6249 signal_pass[signo] = (signal_stop[signo] == 0
6250 && signal_print[signo] == 0
6251 && signal_program[signo] == 1
6252 && signal_catch[signo] == 0);
6253 }
6254
6255 int
6256 signal_stop_update (int signo, int state)
6257 {
6258 int ret = signal_stop[signo];
6259
6260 signal_stop[signo] = state;
6261 signal_cache_update (signo);
6262 return ret;
6263 }
6264
6265 int
6266 signal_print_update (int signo, int state)
6267 {
6268 int ret = signal_print[signo];
6269
6270 signal_print[signo] = state;
6271 signal_cache_update (signo);
6272 return ret;
6273 }
6274
6275 int
6276 signal_pass_update (int signo, int state)
6277 {
6278 int ret = signal_program[signo];
6279
6280 signal_program[signo] = state;
6281 signal_cache_update (signo);
6282 return ret;
6283 }
6284
6285 /* Update the global 'signal_catch' from INFO and notify the
6286 target. */
6287
6288 void
6289 signal_catch_update (const unsigned int *info)
6290 {
6291 int i;
6292
6293 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6294 signal_catch[i] = info[i] > 0;
6295 signal_cache_update (-1);
6296 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6297 }
6298
6299 static void
6300 sig_print_header (void)
6301 {
6302 printf_filtered (_("Signal Stop\tPrint\tPass "
6303 "to program\tDescription\n"));
6304 }
6305
6306 static void
6307 sig_print_info (enum gdb_signal oursig)
6308 {
6309 const char *name = gdb_signal_to_name (oursig);
6310 int name_padding = 13 - strlen (name);
6311
6312 if (name_padding <= 0)
6313 name_padding = 0;
6314
6315 printf_filtered ("%s", name);
6316 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6317 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6318 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6319 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6320 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6321 }
6322
6323 /* Specify how various signals in the inferior should be handled. */
6324
6325 static void
6326 handle_command (char *args, int from_tty)
6327 {
6328 char **argv;
6329 int digits, wordlen;
6330 int sigfirst, signum, siglast;
6331 enum gdb_signal oursig;
6332 int allsigs;
6333 int nsigs;
6334 unsigned char *sigs;
6335 struct cleanup *old_chain;
6336
6337 if (args == NULL)
6338 {
6339 error_no_arg (_("signal to handle"));
6340 }
6341
6342 /* Allocate and zero an array of flags for which signals to handle. */
6343
6344 nsigs = (int) GDB_SIGNAL_LAST;
6345 sigs = (unsigned char *) alloca (nsigs);
6346 memset (sigs, 0, nsigs);
6347
6348 /* Break the command line up into args. */
6349
6350 argv = gdb_buildargv (args);
6351 old_chain = make_cleanup_freeargv (argv);
6352
6353 /* Walk through the args, looking for signal oursigs, signal names, and
6354 actions. Signal numbers and signal names may be interspersed with
6355 actions, with the actions being performed for all signals cumulatively
6356 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6357
6358 while (*argv != NULL)
6359 {
6360 wordlen = strlen (*argv);
6361 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6362 {;
6363 }
6364 allsigs = 0;
6365 sigfirst = siglast = -1;
6366
6367 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6368 {
6369 /* Apply action to all signals except those used by the
6370 debugger. Silently skip those. */
6371 allsigs = 1;
6372 sigfirst = 0;
6373 siglast = nsigs - 1;
6374 }
6375 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6376 {
6377 SET_SIGS (nsigs, sigs, signal_stop);
6378 SET_SIGS (nsigs, sigs, signal_print);
6379 }
6380 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6381 {
6382 UNSET_SIGS (nsigs, sigs, signal_program);
6383 }
6384 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6385 {
6386 SET_SIGS (nsigs, sigs, signal_print);
6387 }
6388 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6389 {
6390 SET_SIGS (nsigs, sigs, signal_program);
6391 }
6392 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6393 {
6394 UNSET_SIGS (nsigs, sigs, signal_stop);
6395 }
6396 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6397 {
6398 SET_SIGS (nsigs, sigs, signal_program);
6399 }
6400 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6401 {
6402 UNSET_SIGS (nsigs, sigs, signal_print);
6403 UNSET_SIGS (nsigs, sigs, signal_stop);
6404 }
6405 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6406 {
6407 UNSET_SIGS (nsigs, sigs, signal_program);
6408 }
6409 else if (digits > 0)
6410 {
6411 /* It is numeric. The numeric signal refers to our own
6412 internal signal numbering from target.h, not to host/target
6413 signal number. This is a feature; users really should be
6414 using symbolic names anyway, and the common ones like
6415 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6416
6417 sigfirst = siglast = (int)
6418 gdb_signal_from_command (atoi (*argv));
6419 if ((*argv)[digits] == '-')
6420 {
6421 siglast = (int)
6422 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6423 }
6424 if (sigfirst > siglast)
6425 {
6426 /* The bounds were given in descending order; swap them. */
6427 signum = sigfirst;
6428 sigfirst = siglast;
6429 siglast = signum;
6430 }
6431 }
6432 else
6433 {
6434 oursig = gdb_signal_from_name (*argv);
6435 if (oursig != GDB_SIGNAL_UNKNOWN)
6436 {
6437 sigfirst = siglast = (int) oursig;
6438 }
6439 else
6440 {
6441 /* Not a number and not a recognized flag word => complain. */
6442 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6443 }
6444 }
6445
6446 /* If any signal numbers or symbol names were found, set flags for
6447 which signals to apply actions to. */
6448
6449 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6450 {
6451 switch ((enum gdb_signal) signum)
6452 {
6453 case GDB_SIGNAL_TRAP:
6454 case GDB_SIGNAL_INT:
6455 if (!allsigs && !sigs[signum])
6456 {
6457 if (query (_("%s is used by the debugger.\n\
6458 Are you sure you want to change it? "),
6459 gdb_signal_to_name ((enum gdb_signal) signum)))
6460 {
6461 sigs[signum] = 1;
6462 }
6463 else
6464 {
6465 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6466 gdb_flush (gdb_stdout);
6467 }
6468 }
6469 break;
6470 case GDB_SIGNAL_0:
6471 case GDB_SIGNAL_DEFAULT:
6472 case GDB_SIGNAL_UNKNOWN:
6473 /* Make sure that "all" doesn't print these. */
6474 break;
6475 default:
6476 sigs[signum] = 1;
6477 break;
6478 }
6479 }
6480
6481 argv++;
6482 }
6483
6484 for (signum = 0; signum < nsigs; signum++)
6485 if (sigs[signum])
6486 {
6487 signal_cache_update (-1);
6488 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6489 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6490
6491 if (from_tty)
6492 {
6493 /* Show the results. */
6494 sig_print_header ();
6495 for (; signum < nsigs; signum++)
6496 if (sigs[signum])
6497 sig_print_info (signum);
6498 }
6499
6500 break;
6501 }
6502
6503 do_cleanups (old_chain);
6504 }
6505
6506 /* Complete the "handle" command. */
6507
6508 static VEC (char_ptr) *
6509 handle_completer (struct cmd_list_element *ignore,
6510 const char *text, const char *word)
6511 {
6512 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6513 static const char * const keywords[] =
6514 {
6515 "all",
6516 "stop",
6517 "ignore",
6518 "print",
6519 "pass",
6520 "nostop",
6521 "noignore",
6522 "noprint",
6523 "nopass",
6524 NULL,
6525 };
6526
6527 vec_signals = signal_completer (ignore, text, word);
6528 vec_keywords = complete_on_enum (keywords, word, word);
6529
6530 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6531 VEC_free (char_ptr, vec_signals);
6532 VEC_free (char_ptr, vec_keywords);
6533 return return_val;
6534 }
6535
6536 static void
6537 xdb_handle_command (char *args, int from_tty)
6538 {
6539 char **argv;
6540 struct cleanup *old_chain;
6541
6542 if (args == NULL)
6543 error_no_arg (_("xdb command"));
6544
6545 /* Break the command line up into args. */
6546
6547 argv = gdb_buildargv (args);
6548 old_chain = make_cleanup_freeargv (argv);
6549 if (argv[1] != (char *) NULL)
6550 {
6551 char *argBuf;
6552 int bufLen;
6553
6554 bufLen = strlen (argv[0]) + 20;
6555 argBuf = (char *) xmalloc (bufLen);
6556 if (argBuf)
6557 {
6558 int validFlag = 1;
6559 enum gdb_signal oursig;
6560
6561 oursig = gdb_signal_from_name (argv[0]);
6562 memset (argBuf, 0, bufLen);
6563 if (strcmp (argv[1], "Q") == 0)
6564 sprintf (argBuf, "%s %s", argv[0], "noprint");
6565 else
6566 {
6567 if (strcmp (argv[1], "s") == 0)
6568 {
6569 if (!signal_stop[oursig])
6570 sprintf (argBuf, "%s %s", argv[0], "stop");
6571 else
6572 sprintf (argBuf, "%s %s", argv[0], "nostop");
6573 }
6574 else if (strcmp (argv[1], "i") == 0)
6575 {
6576 if (!signal_program[oursig])
6577 sprintf (argBuf, "%s %s", argv[0], "pass");
6578 else
6579 sprintf (argBuf, "%s %s", argv[0], "nopass");
6580 }
6581 else if (strcmp (argv[1], "r") == 0)
6582 {
6583 if (!signal_print[oursig])
6584 sprintf (argBuf, "%s %s", argv[0], "print");
6585 else
6586 sprintf (argBuf, "%s %s", argv[0], "noprint");
6587 }
6588 else
6589 validFlag = 0;
6590 }
6591 if (validFlag)
6592 handle_command (argBuf, from_tty);
6593 else
6594 printf_filtered (_("Invalid signal handling flag.\n"));
6595 if (argBuf)
6596 xfree (argBuf);
6597 }
6598 }
6599 do_cleanups (old_chain);
6600 }
6601
6602 enum gdb_signal
6603 gdb_signal_from_command (int num)
6604 {
6605 if (num >= 1 && num <= 15)
6606 return (enum gdb_signal) num;
6607 error (_("Only signals 1-15 are valid as numeric signals.\n\
6608 Use \"info signals\" for a list of symbolic signals."));
6609 }
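
/* The numeric shorthand works because the low values of enum gdb_signal
   are laid out to match the traditional Unix numbering.  Illustrative
   (assuming that layout):

       gdb_signal_from_command (2)  == GDB_SIGNAL_INT
       gdb_signal_from_command (9)  == GDB_SIGNAL_KILL
       gdb_signal_from_command (30) => error, suggesting "info signals"  */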
6610
6611 /* Print current contents of the tables set by the handle command.
6612 It is possible we should just be printing signals actually used
6613 by the current target (but for things to work right when switching
6614 targets, all signals should be in the signal tables). */
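
/* For example, "info signals SIGINT" prints the header plus the single
   row for SIGINT, while a plain "info signals" walks every table entry
   except the GDB-internal GDB_SIGNAL_0, GDB_SIGNAL_DEFAULT and
   GDB_SIGNAL_UNKNOWN markers.  */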
6615
6616 static void
6617 signals_info (char *signum_exp, int from_tty)
6618 {
6619 enum gdb_signal oursig;
6620
6621 sig_print_header ();
6622
6623 if (signum_exp)
6624 {
6625 /* First see if this is a symbol name. */
6626 oursig = gdb_signal_from_name (signum_exp);
6627 if (oursig == GDB_SIGNAL_UNKNOWN)
6628 {
6629 /* No, try numeric. */
6630 oursig =
6631 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6632 }
6633 sig_print_info (oursig);
6634 return;
6635 }
6636
6637 printf_filtered ("\n");
6638 /* These ugly casts brought to you by the native VAX compiler. */
6639 for (oursig = GDB_SIGNAL_FIRST;
6640 (int) oursig < (int) GDB_SIGNAL_LAST;
6641 oursig = (enum gdb_signal) ((int) oursig + 1))
6642 {
6643 QUIT;
6644
6645 if (oursig != GDB_SIGNAL_UNKNOWN
6646 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6647 sig_print_info (oursig);
6648 }
6649
6650 printf_filtered (_("\nUse the \"handle\" command "
6651 "to change these tables.\n"));
6652 }
6653
6654 /* Check if it makes sense to read $_siginfo from the current thread
6655 at this point. If not, throw an error. */
6656
6657 static void
6658 validate_siginfo_access (void)
6659 {
6660 /* No current inferior, no siginfo. */
6661 if (ptid_equal (inferior_ptid, null_ptid))
6662 error (_("No thread selected."));
6663
6664 /* Don't try to read from a dead thread. */
6665 if (is_exited (inferior_ptid))
6666 error (_("The current thread has terminated."));
6667
6668 /* ... or from a spinning thread. */
6669 if (is_running (inferior_ptid))
6670 error (_("Selected thread is running."));
6671 }
6672
6673 /* The $_siginfo convenience variable is a bit special. We don't know
6674 for sure the type of the value until we actually have a chance to
6675 fetch the data. The type can change depending on gdbarch, so it is
6676 also dependent on which thread you have selected. We handle this by:
6677
6678 1. making $_siginfo be an internalvar that creates a new value on
6679 access.
6680
6681 2. making the value of $_siginfo be an lval_computed value. */
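
/* For example (illustrative; the field names below follow the Linux
   siginfo_t layout described by gdbarch_get_siginfo_type and may
   differ on other targets):

       Program received signal SIGSEGV, Segmentation fault.
       (gdb) print $_siginfo.si_signo
       $1 = 11
       (gdb) print $_siginfo._sifields._sigfault.si_addr
       $2 = (void *) 0x0

   Reading either expression ends up in siginfo_value_read below, and
   assigning to them ends up in siginfo_value_write.  */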
6682
6683 /* This function implements the lval_computed support for reading a
6684 $_siginfo value. */
6685
6686 static void
6687 siginfo_value_read (struct value *v)
6688 {
6689 LONGEST transferred;
6690
6691 validate_siginfo_access ();
6692
6693 transferred =
6694 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6695 NULL,
6696 value_contents_all_raw (v),
6697 value_offset (v),
6698 TYPE_LENGTH (value_type (v)));
6699
6700 if (transferred != TYPE_LENGTH (value_type (v)))
6701 error (_("Unable to read siginfo"));
6702 }
6703
6704 /* This function implements the lval_computed support for writing a
6705 $_siginfo value. */
6706
6707 static void
6708 siginfo_value_write (struct value *v, struct value *fromval)
6709 {
6710 LONGEST transferred;
6711
6712 validate_siginfo_access ();
6713
6714 transferred = target_write (&current_target,
6715 TARGET_OBJECT_SIGNAL_INFO,
6716 NULL,
6717 value_contents_all_raw (fromval),
6718 value_offset (v),
6719 TYPE_LENGTH (value_type (fromval)));
6720
6721 if (transferred != TYPE_LENGTH (value_type (fromval)))
6722 error (_("Unable to write siginfo"));
6723 }
6724
6725 static const struct lval_funcs siginfo_value_funcs =
6726 {
6727 siginfo_value_read,
6728 siginfo_value_write
6729 };
6730
6731 /* Return a new value with the correct type for the siginfo object of
6732 the current thread using architecture GDBARCH. Return a void value
6733 if there's no object available. */
6734
6735 static struct value *
6736 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6737 void *ignore)
6738 {
6739 if (target_has_stack
6740 && !ptid_equal (inferior_ptid, null_ptid)
6741 && gdbarch_get_siginfo_type_p (gdbarch))
6742 {
6743 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6744
6745 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6746 }
6747
6748 return allocate_value (builtin_type (gdbarch)->builtin_void);
6749 }
6750
6751 \f
6752 /* infcall_suspend_state contains state about the program itself like its
6753 registers and any signal it received when it last stopped.
6754 This state must be restored regardless of how the inferior function call
6755 ends (either successfully, or after it hits a breakpoint or signal)
6756 if the program is to properly continue where it left off. */
6757
6758 struct infcall_suspend_state
6759 {
6760 struct thread_suspend_state thread_suspend;
6761 #if 0 /* Currently unused and empty structures are not valid C. */
6762 struct inferior_suspend_state inferior_suspend;
6763 #endif
6764
6765 /* Other fields: */
6766 CORE_ADDR stop_pc;
6767 struct regcache *registers;
6768
6769 /* Format of SIGINFO_DATA or NULL if it is not present. */
6770 struct gdbarch *siginfo_gdbarch;
6771
6772 /* The inferior format depends on SIGINFO_GDBARCH and its length is
6773 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For any other gdbarch
6774 the content would be invalid. */
6775 gdb_byte *siginfo_data;
6776 };
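
/* A sketch of the intended usage (simplified; the real flow around an
   inferior function call in infcall.c is more involved):

       struct infcall_suspend_state *caller_state
         = save_infcall_suspend_state ();
       struct cleanup *old_chain
         = make_cleanup_restore_infcall_suspend_state (caller_state);

       ... set up and run the call ...

       do_cleanups (old_chain);

   do_cleanups restores the registers, stop_pc, siginfo and the stop
   signal; a caller that wants to keep the post-call state instead uses
   discard_cleanups followed by discard_infcall_suspend_state.  */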
6777
6778 struct infcall_suspend_state *
6779 save_infcall_suspend_state (void)
6780 {
6781 struct infcall_suspend_state *inf_state;
6782 struct thread_info *tp = inferior_thread ();
6783 #if 0
6784 struct inferior *inf = current_inferior ();
6785 #endif
6786 struct regcache *regcache = get_current_regcache ();
6787 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6788 gdb_byte *siginfo_data = NULL;
6789
6790 if (gdbarch_get_siginfo_type_p (gdbarch))
6791 {
6792 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6793 size_t len = TYPE_LENGTH (type);
6794 struct cleanup *back_to;
6795
6796 siginfo_data = xmalloc (len);
6797 back_to = make_cleanup (xfree, siginfo_data);
6798
6799 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6800 siginfo_data, 0, len) == len)
6801 discard_cleanups (back_to);
6802 else
6803 {
6804 /* Errors ignored. */
6805 do_cleanups (back_to);
6806 siginfo_data = NULL;
6807 }
6808 }
6809
6810 inf_state = XZALLOC (struct infcall_suspend_state);
6811
6812 if (siginfo_data)
6813 {
6814 inf_state->siginfo_gdbarch = gdbarch;
6815 inf_state->siginfo_data = siginfo_data;
6816 }
6817
6818 inf_state->thread_suspend = tp->suspend;
6819 #if 0 /* Currently unused and empty structures are not valid C. */
6820 inf_state->inferior_suspend = inf->suspend;
6821 #endif
6822
6823 /* run_inferior_call will not use the signal due to its `proceed' call with
6824 GDB_SIGNAL_0 anyway. */
6825 tp->suspend.stop_signal = GDB_SIGNAL_0;
6826
6827 inf_state->stop_pc = stop_pc;
6828
6829 inf_state->registers = regcache_dup (regcache);
6830
6831 return inf_state;
6832 }
6833
6834 /* Restore inferior session state to INF_STATE. */
6835
6836 void
6837 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6838 {
6839 struct thread_info *tp = inferior_thread ();
6840 #if 0
6841 struct inferior *inf = current_inferior ();
6842 #endif
6843 struct regcache *regcache = get_current_regcache ();
6844 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6845
6846 tp->suspend = inf_state->thread_suspend;
6847 #if 0 /* Currently unused and empty structures are not valid C. */
6848 inf->suspend = inf_state->inferior_suspend;
6849 #endif
6850
6851 stop_pc = inf_state->stop_pc;
6852
6853 if (inf_state->siginfo_gdbarch == gdbarch)
6854 {
6855 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6856
6857 /* Errors ignored. */
6858 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6859 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6860 }
6861
6862 /* The inferior can be gone if the user types "print exit(0)"
6863 (and perhaps other times). */
6864 if (target_has_execution)
6865 /* NB: The register write goes through to the target. */
6866 regcache_cpy (regcache, inf_state->registers);
6867
6868 discard_infcall_suspend_state (inf_state);
6869 }
6870
6871 static void
6872 do_restore_infcall_suspend_state_cleanup (void *state)
6873 {
6874 restore_infcall_suspend_state (state);
6875 }
6876
6877 struct cleanup *
6878 make_cleanup_restore_infcall_suspend_state
6879 (struct infcall_suspend_state *inf_state)
6880 {
6881 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6882 }
6883
6884 void
6885 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6886 {
6887 regcache_xfree (inf_state->registers);
6888 xfree (inf_state->siginfo_data);
6889 xfree (inf_state);
6890 }
6891
6892 struct regcache *
6893 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6894 {
6895 return inf_state->registers;
6896 }
6897
6898 /* infcall_control_state contains state regarding gdb's control of the
6899 inferior itself like stepping control. It also contains session state like
6900 the user's currently selected frame. */
6901
6902 struct infcall_control_state
6903 {
6904 struct thread_control_state thread_control;
6905 struct inferior_control_state inferior_control;
6906
6907 /* Other fields: */
6908 enum stop_stack_kind stop_stack_dummy;
6909 int stopped_by_random_signal;
6910 int stop_after_trap;
6911
6912 /* ID of the selected frame when the inferior function call was made. */
6913 struct frame_id selected_frame_id;
6914 };
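
/* A simplified sketch of how a caller might pair this with the suspend
   state above (details differ in infcall.c):

       struct infcall_control_state *inf_status
         = save_infcall_control_state ();
       struct cleanup *old_chain
         = make_cleanup_restore_infcall_control_state (inf_status);

       ... make the inferior call ...

   On an error path, do_cleanups restores the stepping state, the
   bpstat chain and the selected frame; on success the caller instead
   uses discard_cleanups and discard_infcall_control_state.  */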
6915
6916 /* Save all of the information associated with the inferior<==>gdb
6917 connection. */
6918
6919 struct infcall_control_state *
6920 save_infcall_control_state (void)
6921 {
6922 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6923 struct thread_info *tp = inferior_thread ();
6924 struct inferior *inf = current_inferior ();
6925
6926 inf_status->thread_control = tp->control;
6927 inf_status->inferior_control = inf->control;
6928
6929 tp->control.step_resume_breakpoint = NULL;
6930 tp->control.exception_resume_breakpoint = NULL;
6931
6932 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6933 chain. If caller's caller is walking the chain, they'll be happier if we
6934 hand them back the original chain when restore_infcall_control_state is
6935 called. */
6936 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6937
6938 /* Other fields: */
6939 inf_status->stop_stack_dummy = stop_stack_dummy;
6940 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6941 inf_status->stop_after_trap = stop_after_trap;
6942
6943 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6944
6945 return inf_status;
6946 }
6947
6948 static int
6949 restore_selected_frame (void *args)
6950 {
6951 struct frame_id *fid = (struct frame_id *) args;
6952 struct frame_info *frame;
6953
6954 frame = frame_find_by_id (*fid);
6955
6956 /* If the previously selected frame cannot be found, warn; the caller
6957 will fall back to selecting the innermost (current) frame. */
6958 if (frame == NULL)
6959 {
6960 warning (_("Unable to restore previously selected frame."));
6961 return 0;
6962 }
6963
6964 select_frame (frame);
6965
6966 return (1);
6967 }
6968
6969 /* Restore inferior session state to INF_STATUS. */
6970
6971 void
6972 restore_infcall_control_state (struct infcall_control_state *inf_status)
6973 {
6974 struct thread_info *tp = inferior_thread ();
6975 struct inferior *inf = current_inferior ();
6976
6977 if (tp->control.step_resume_breakpoint)
6978 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6979
6980 if (tp->control.exception_resume_breakpoint)
6981 tp->control.exception_resume_breakpoint->disposition
6982 = disp_del_at_next_stop;
6983
6984 /* Handle the bpstat_copy of the chain. */
6985 bpstat_clear (&tp->control.stop_bpstat);
6986
6987 tp->control = inf_status->thread_control;
6988 inf->control = inf_status->inferior_control;
6989
6990 /* Other fields: */
6991 stop_stack_dummy = inf_status->stop_stack_dummy;
6992 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6993 stop_after_trap = inf_status->stop_after_trap;
6994
6995 if (target_has_stack)
6996 {
6997 /* The point of catch_errors is that if the stack is clobbered,
6998 walking the stack might encounter a garbage pointer and
6999 error() trying to dereference it. */
7000 if (catch_errors
7001 (restore_selected_frame, &inf_status->selected_frame_id,
7002 "Unable to restore previously selected frame:\n",
7003 RETURN_MASK_ERROR) == 0)
7004 /* Error in restoring the selected frame. Select the innermost
7005 frame. */
7006 select_frame (get_current_frame ());
7007 }
7008
7009 xfree (inf_status);
7010 }
7011
7012 static void
7013 do_restore_infcall_control_state_cleanup (void *sts)
7014 {
7015 restore_infcall_control_state (sts);
7016 }
7017
7018 struct cleanup *
7019 make_cleanup_restore_infcall_control_state
7020 (struct infcall_control_state *inf_status)
7021 {
7022 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7023 }
7024
7025 void
7026 discard_infcall_control_state (struct infcall_control_state *inf_status)
7027 {
7028 if (inf_status->thread_control.step_resume_breakpoint)
7029 inf_status->thread_control.step_resume_breakpoint->disposition
7030 = disp_del_at_next_stop;
7031
7032 if (inf_status->thread_control.exception_resume_breakpoint)
7033 inf_status->thread_control.exception_resume_breakpoint->disposition
7034 = disp_del_at_next_stop;
7035
7036 /* See save_infcall_control_state for info on stop_bpstat. */
7037 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7038
7039 xfree (inf_status);
7040 }
7041 \f
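/* Return nonzero if PTID matches FILTER.  FILTER can be the wildcard
   MINUS_ONE_PTID (every ptid matches), a process-wide ptid as built by
   pid_to_ptid (every ptid of that process matches), or a specific ptid
   (only that exact ptid matches).  Illustrative, using the ptid.h
   helpers:

       ptid_match (ptid_build (10, 20, 0), minus_one_ptid)    => 1
       ptid_match (ptid_build (10, 20, 0), pid_to_ptid (10))  => 1
       ptid_match (ptid_build (10, 20, 0), pid_to_ptid (99))  => 0  */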
7042 int
7043 ptid_match (ptid_t ptid, ptid_t filter)
7044 {
7045 if (ptid_equal (filter, minus_one_ptid))
7046 return 1;
7047 if (ptid_is_pid (filter)
7048 && ptid_get_pid (ptid) == ptid_get_pid (filter))
7049 return 1;
7050 else if (ptid_equal (ptid, filter))
7051 return 1;
7052
7053 return 0;
7054 }
7055
7056 /* restore_inferior_ptid() will be used by the cleanup machinery
7057 to restore the inferior_ptid value saved in a call to
7058 save_inferior_ptid(). */
7059
7060 static void
7061 restore_inferior_ptid (void *arg)
7062 {
7063 ptid_t *saved_ptid_ptr = arg;
7064
7065 inferior_ptid = *saved_ptid_ptr;
7066 xfree (arg);
7067 }
7068
7069 /* Save the value of inferior_ptid so that it may be restored by a
7070 later call to do_cleanups(). Returns the struct cleanup pointer
7071 needed for later doing the cleanup. */
7072
7073 struct cleanup *
7074 save_inferior_ptid (void)
7075 {
7076 ptid_t *saved_ptid_ptr;
7077
7078 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7079 *saved_ptid_ptr = inferior_ptid;
7080 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7081 }
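
/* A sketch of the usual pattern (SOME_OTHER_PTID is illustrative):

       struct cleanup *old_chain = save_inferior_ptid ();

       inferior_ptid = some_other_ptid;
       ... operate on that thread ...

       do_cleanups (old_chain);

   after which the original inferior_ptid is back in place.  */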
7082 \f
7083
7084 /* User interface for reverse debugging:
7085 Set exec-direction / show exec-direction commands
7086 (returns an error unless the target can execute in reverse). */
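
/* For example, with a target that can execute in reverse (such as
   process record):

       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) next
       (gdb) set exec-direction forward

   where the "next" executes backwards over the previous line.  Without
   such a target, set_exec_direction_func below rejects the change.  */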
7087
7088 int execution_direction = EXEC_FORWARD;
7089 static const char exec_forward[] = "forward";
7090 static const char exec_reverse[] = "reverse";
7091 static const char *exec_direction = exec_forward;
7092 static const char *const exec_direction_names[] = {
7093 exec_forward,
7094 exec_reverse,
7095 NULL
7096 };
7097
7098 static void
7099 set_exec_direction_func (char *args, int from_tty,
7100 struct cmd_list_element *cmd)
7101 {
7102 if (target_can_execute_reverse)
7103 {
7104 if (!strcmp (exec_direction, exec_forward))
7105 execution_direction = EXEC_FORWARD;
7106 else if (!strcmp (exec_direction, exec_reverse))
7107 execution_direction = EXEC_REVERSE;
7108 }
7109 else
7110 {
7111 exec_direction = exec_forward;
7112 error (_("Target does not support this operation."));
7113 }
7114 }
7115
7116 static void
7117 show_exec_direction_func (struct ui_file *out, int from_tty,
7118 struct cmd_list_element *cmd, const char *value)
7119 {
7120 switch (execution_direction) {
7121 case EXEC_FORWARD:
7122 fprintf_filtered (out, _("Forward.\n"));
7123 break;
7124 case EXEC_REVERSE:
7125 fprintf_filtered (out, _("Reverse.\n"));
7126 break;
7127 default:
7128 internal_error (__FILE__, __LINE__,
7129 _("bogus execution_direction value: %d"),
7130 (int) execution_direction);
7131 }
7132 }
7133
7134 /* User interface for non-stop mode. */
7135
7136 int non_stop = 0;
7137
7138 static void
7139 set_non_stop (char *args, int from_tty,
7140 struct cmd_list_element *c)
7141 {
7142 if (target_has_execution)
7143 {
7144 non_stop_1 = non_stop;
7145 error (_("Cannot change this setting while the inferior is running."));
7146 }
7147
7148 non_stop = non_stop_1;
7149 }
7150
7151 static void
7152 show_non_stop (struct ui_file *file, int from_tty,
7153 struct cmd_list_element *c, const char *value)
7154 {
7155 fprintf_filtered (file,
7156 _("Controlling the inferior in non-stop mode is %s.\n"),
7157 value);
7158 }
7159
7160 static void
7161 show_schedule_multiple (struct ui_file *file, int from_tty,
7162 struct cmd_list_element *c, const char *value)
7163 {
7164 fprintf_filtered (file, _("Resuming the execution of threads "
7165 "of all processes is %s.\n"), value);
7166 }
7167
7168 /* Implementation of `siginfo' variable. */
7169
7170 static const struct internalvar_funcs siginfo_funcs =
7171 {
7172 siginfo_make_value,
7173 NULL,
7174 NULL
7175 };
7176
7177 void
7178 _initialize_infrun (void)
7179 {
7180 int i;
7181 int numsigs;
7182 struct cmd_list_element *c;
7183
7184 add_info ("signals", signals_info, _("\
7185 What debugger does when program gets various signals.\n\
7186 Specify a signal as argument to print info on that signal only."));
7187 add_info_alias ("handle", "signals", 0);
7188
7189 c = add_com ("handle", class_run, handle_command, _("\
7190 Specify how to handle signals.\n\
7191 Usage: handle SIGNAL [ACTIONS]\n\
7192 Args are signals and actions to apply to those signals.\n\
7193 If no actions are specified, the current settings for the specified signals\n\
7194 will be displayed instead.\n\
7195 \n\
7196 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7197 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7198 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7199 The special arg \"all\" is recognized to mean all signals except those\n\
7200 used by the debugger, typically SIGTRAP and SIGINT.\n\
7201 \n\
7202 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7203 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7204 Stop means reenter debugger if this signal happens (implies print).\n\
7205 Print means print a message if this signal happens.\n\
7206 Pass means let program see this signal; otherwise program doesn't know.\n\
7207 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7208 Pass and Stop may be combined.\n\
7209 \n\
7210 Multiple signals may be specified. Signal numbers and signal names\n\
7211 may be interspersed with actions, with the actions being performed for\n\
7212 all signals cumulatively specified."));
7213 set_cmd_completer (c, handle_completer);
7214
7215 if (xdb_commands)
7216 {
7217 add_com ("lz", class_info, signals_info, _("\
7218 What debugger does when program gets various signals.\n\
7219 Specify a signal as argument to print info on that signal only."));
7220 add_com ("z", class_run, xdb_handle_command, _("\
7221 Specify how to handle a signal.\n\
7222 Args are signals and actions to apply to those signals.\n\
7223 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7224 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7225 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7226 The special arg \"all\" is recognized to mean all signals except those\n\
7227 used by the debugger, typically SIGTRAP and SIGINT.\n\
7228 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7229 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7230 nopass), \"Q\" (noprint)\n\
7231 Stop means reenter debugger if this signal happens (implies print).\n\
7232 Print means print a message if this signal happens.\n\
7233 Pass means let program see this signal; otherwise program doesn't know.\n\
7234 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7235 Pass and Stop may be combined."));
7236 }
7237
7238 if (!dbx_commands)
7239 stop_command = add_cmd ("stop", class_obscure,
7240 not_just_help_class_command, _("\
7241 There is no `stop' command, but you can set a hook on `stop'.\n\
7242 This allows you to set a list of commands to be run each time execution\n\
7243 of the program stops."), &cmdlist);
7244
7245 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7246 Set inferior debugging."), _("\
7247 Show inferior debugging."), _("\
7248 When non-zero, inferior specific debugging is enabled."),
7249 NULL,
7250 show_debug_infrun,
7251 &setdebuglist, &showdebuglist);
7252
7253 add_setshow_boolean_cmd ("displaced", class_maintenance,
7254 &debug_displaced, _("\
7255 Set displaced stepping debugging."), _("\
7256 Show displaced stepping debugging."), _("\
7257 When non-zero, displaced stepping specific debugging is enabled."),
7258 NULL,
7259 show_debug_displaced,
7260 &setdebuglist, &showdebuglist);
7261
7262 add_setshow_boolean_cmd ("non-stop", no_class,
7263 &non_stop_1, _("\
7264 Set whether gdb controls the inferior in non-stop mode."), _("\
7265 Show whether gdb controls the inferior in non-stop mode."), _("\
7266 When debugging a multi-threaded program and this setting is\n\
7267 off (the default, also called all-stop mode), when one thread stops\n\
7268 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7269 all other threads in the program while you interact with the thread of\n\
7270 interest. When you continue or step a thread, you can allow the other\n\
7271 threads to run, or have them remain stopped, but while you inspect any\n\
7272 thread's state, all threads stop.\n\
7273 \n\
7274 In non-stop mode, when one thread stops, other threads can continue\n\
7275 to run freely. You'll be able to step each thread independently,\n\
7276 leave it stopped or free to run as needed."),
7277 set_non_stop,
7278 show_non_stop,
7279 &setlist,
7280 &showlist);
7281
7282 numsigs = (int) GDB_SIGNAL_LAST;
7283 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7284 signal_print = (unsigned char *)
7285 xmalloc (sizeof (signal_print[0]) * numsigs);
7286 signal_program = (unsigned char *)
7287 xmalloc (sizeof (signal_program[0]) * numsigs);
7288 signal_catch = (unsigned char *)
7289 xmalloc (sizeof (signal_catch[0]) * numsigs);
7290 signal_pass = (unsigned char *)
7291 xmalloc (sizeof (signal_program[0]) * numsigs);
7292 for (i = 0; i < numsigs; i++)
7293 {
7294 signal_stop[i] = 1;
7295 signal_print[i] = 1;
7296 signal_program[i] = 1;
7297 signal_catch[i] = 0;
7298 }
7299
7300 /* Signals caused by debugger's own actions
7301 should not be given to the program afterwards. */
7302 signal_program[GDB_SIGNAL_TRAP] = 0;
7303 signal_program[GDB_SIGNAL_INT] = 0;
7304
7305 /* Signals that are not errors should not normally enter the debugger. */
7306 signal_stop[GDB_SIGNAL_ALRM] = 0;
7307 signal_print[GDB_SIGNAL_ALRM] = 0;
7308 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7309 signal_print[GDB_SIGNAL_VTALRM] = 0;
7310 signal_stop[GDB_SIGNAL_PROF] = 0;
7311 signal_print[GDB_SIGNAL_PROF] = 0;
7312 signal_stop[GDB_SIGNAL_CHLD] = 0;
7313 signal_print[GDB_SIGNAL_CHLD] = 0;
7314 signal_stop[GDB_SIGNAL_IO] = 0;
7315 signal_print[GDB_SIGNAL_IO] = 0;
7316 signal_stop[GDB_SIGNAL_POLL] = 0;
7317 signal_print[GDB_SIGNAL_POLL] = 0;
7318 signal_stop[GDB_SIGNAL_URG] = 0;
7319 signal_print[GDB_SIGNAL_URG] = 0;
7320 signal_stop[GDB_SIGNAL_WINCH] = 0;
7321 signal_print[GDB_SIGNAL_WINCH] = 0;
7322 signal_stop[GDB_SIGNAL_PRIO] = 0;
7323 signal_print[GDB_SIGNAL_PRIO] = 0;
7324
7325 /* These signals are used internally by user-level thread
7326 implementations. (See signal(5) on Solaris.) Like the above
7327 signals, a healthy program receives and handles them as part of
7328 its normal operation. */
7329 signal_stop[GDB_SIGNAL_LWP] = 0;
7330 signal_print[GDB_SIGNAL_LWP] = 0;
7331 signal_stop[GDB_SIGNAL_WAITING] = 0;
7332 signal_print[GDB_SIGNAL_WAITING] = 0;
7333 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7334 signal_print[GDB_SIGNAL_CANCEL] = 0;
7335
7336 /* Update cached state. */
7337 signal_cache_update (-1);
7338
7339 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7340 &stop_on_solib_events, _("\
7341 Set stopping for shared library events."), _("\
7342 Show stopping for shared library events."), _("\
7343 If nonzero, gdb will give control to the user when the dynamic linker\n\
7344 notifies gdb of shared library events. The most common event of interest\n\
7345 to the user would be loading/unloading of a new library."),
7346 set_stop_on_solib_events,
7347 show_stop_on_solib_events,
7348 &setlist, &showlist);
7349
7350 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7351 follow_fork_mode_kind_names,
7352 &follow_fork_mode_string, _("\
7353 Set debugger response to a program call of fork or vfork."), _("\
7354 Show debugger response to a program call of fork or vfork."), _("\
7355 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7356 parent - the original process is debugged after a fork\n\
7357 child - the new process is debugged after a fork\n\
7358 The unfollowed process will continue to run.\n\
7359 By default, the debugger will follow the parent process."),
7360 NULL,
7361 show_follow_fork_mode_string,
7362 &setlist, &showlist);
7363
7364 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7365 follow_exec_mode_names,
7366 &follow_exec_mode_string, _("\
7367 Set debugger response to a program call of exec."), _("\
7368 Show debugger response to a program call of exec."), _("\
7369 An exec call replaces the program image of a process.\n\
7370 \n\
7371 follow-exec-mode can be:\n\
7372 \n\
7373 new - the debugger creates a new inferior and rebinds the process\n\
7374 to this new inferior. The program the process was running before\n\
7375 the exec call can be restarted afterwards by restarting the original\n\
7376 inferior.\n\
7377 \n\
7378 same - the debugger keeps the process bound to the same inferior.\n\
7379 The new executable image replaces the previous executable loaded in\n\
7380 the inferior. Restarting the inferior after the exec call restarts\n\
7381 the executable the process was running after the exec call.\n\
7382 \n\
7383 By default, the debugger will use the same inferior."),
7384 NULL,
7385 show_follow_exec_mode_string,
7386 &setlist, &showlist);
7387
7388 add_setshow_enum_cmd ("scheduler-locking", class_run,
7389 scheduler_enums, &scheduler_mode, _("\
7390 Set mode for locking scheduler during execution."), _("\
7391 Show mode for locking scheduler during execution."), _("\
7392 off == no locking (threads may preempt at any time)\n\
7393 on == full locking (no thread except the current thread may run)\n\
7394 step == scheduler locked during every single-step operation.\n\
7395 In this mode, no other thread may run during a step command.\n\
7396 Other threads may run while stepping over a function call ('next')."),
7397 set_schedlock_func, /* traps on target vector */
7398 show_scheduler_mode,
7399 &setlist, &showlist);
7400
7401 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7402 Set mode for resuming threads of all processes."), _("\
7403 Show mode for resuming threads of all processes."), _("\
7404 When on, execution commands (such as 'continue' or 'next') resume all\n\
7405 threads of all processes. When off (which is the default), execution\n\
7406 commands only resume the threads of the current process. The set of\n\
7407 threads that are resumed is further refined by the scheduler-locking\n\
7408 mode (see help set scheduler-locking)."),
7409 NULL,
7410 show_schedule_multiple,
7411 &setlist, &showlist);
7412
7413 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7414 Set mode of the step operation."), _("\
7415 Show mode of the step operation."), _("\
7416 When set, doing a step over a function without debug line information\n\
7417 will stop at the first instruction of that function. Otherwise, the\n\
7418 function is skipped and the step command stops at a different source line."),
7419 NULL,
7420 show_step_stop_if_no_debug,
7421 &setlist, &showlist);
7422
7423 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7424 &can_use_displaced_stepping, _("\
7425 Set debugger's willingness to use displaced stepping."), _("\
7426 Show debugger's willingness to use displaced stepping."), _("\
7427 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7428 supported by the target architecture. If off, gdb will not use displaced\n\
7429 stepping to step over breakpoints, even if such is supported by the target\n\
7430 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7431 if the target architecture supports it and non-stop mode is active, but will not\n\
7432 use it in all-stop mode (see help set non-stop)."),
7433 NULL,
7434 show_can_use_displaced_stepping,
7435 &setlist, &showlist);
7436
7437 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7438 &exec_direction, _("Set direction of execution.\n\
7439 Options are 'forward' or 'reverse'."),
7440 _("Show direction of execution (forward/reverse)."),
7441 _("Tells gdb whether to execute forward or backward."),
7442 set_exec_direction_func, show_exec_direction_func,
7443 &setlist, &showlist);
7444
7445 /* Set/show detach-on-fork: user-settable mode. */
7446
7447 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7448 Set whether gdb will detach the child of a fork."), _("\
7449 Show whether gdb will detach the child of a fork."), _("\
7450 Tells gdb whether to detach the child of a fork."),
7451 NULL, NULL, &setlist, &showlist);
7452
7453 /* Set/show disable address space randomization mode. */
7454
7455 add_setshow_boolean_cmd ("disable-randomization", class_support,
7456 &disable_randomization, _("\
7457 Set disabling of debuggee's virtual address space randomization."), _("\
7458 Show disabling of debuggee's virtual address space randomization."), _("\
7459 When this mode is on (which is the default), randomization of the virtual\n\
7460 address space is disabled. Standalone programs run with the randomization\n\
7461 enabled by default on some platforms."),
7462 &set_disable_randomization,
7463 &show_disable_randomization,
7464 &setlist, &showlist);
7465
7466 /* ptid initializations */
7467 inferior_ptid = null_ptid;
7468 target_last_wait_ptid = minus_one_ptid;
7469
7470 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7471 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7472 observer_attach_thread_exit (infrun_thread_thread_exit);
7473 observer_attach_inferior_exit (infrun_inferior_exit);
7474
7475 /* Explicitly create without lookup, since that tries to create a
7476 value with a void typed value, and when we get here, gdbarch
7477 isn't initialized yet. At this point, we're quite sure there
7478 isn't another convenience variable of the same name. */
7479 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7480
7481 add_setshow_boolean_cmd ("observer", no_class,
7482 &observer_mode_1, _("\
7483 Set whether gdb controls the inferior in observer mode."), _("\
7484 Show whether gdb controls the inferior in observer mode."), _("\
7485 In observer mode, GDB can get data from the inferior, but not\n\
7486 affect its execution. Registers and memory may not be changed,\n\
7487 breakpoints may not be set, and the program cannot be interrupted\n\
7488 or signalled."),
7489 set_observer_mode,
7490 show_observer_mode,
7491 &setlist,
7492 &showlist);
7493 }