1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such functions. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
120 }
121
122 int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
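/* Illustrative sketch only -- the real decision is made in the event
   handling code further down in this file, and gdbarch_skip_solib_resolver
   is the gdbarch-level counterpart of the SKIP_SOLIB_RESOLVER macro named
   above -- of what the stepping logic described above amounts to:

     if (in_solib_dynsym_resolve_code (pc))
       {
         CORE_ADDR resolver = gdbarch_skip_solib_resolver (gdbarch, pc);

         if (resolver != 0)
           [plant a step-resume breakpoint at RESOLVER and continue]
         else
           [keep single-stepping until control leaves the linker]
       }

   Either way, the user only sees the "step"/"next" they asked for.  */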
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
180
181 /* "Observer mode" is somewhat like a more extreme version of
182 non-stop, in which all GDB operations that might affect the
183 target's execution have been disabled. */
184
185 static int non_stop_1 = 0;
186
187 int observer_mode = 0;
188 static int observer_mode_1 = 0;
189
190 static void
191 set_observer_mode (char *args, int from_tty,
192 struct cmd_list_element *c)
193 {
194 extern int pagination_enabled;
195
196 if (target_has_execution)
197 {
198 observer_mode_1 = observer_mode;
199 error (_("Cannot change this setting while the inferior is running."));
200 }
201
202 observer_mode = observer_mode_1;
203
204 may_write_registers = !observer_mode;
205 may_write_memory = !observer_mode;
206 may_insert_breakpoints = !observer_mode;
207 may_insert_tracepoints = !observer_mode;
208 /* We can insert fast tracepoints in or out of observer mode,
209 but enable them if we're going into this mode. */
210 if (observer_mode)
211 may_insert_fast_tracepoints = 1;
212 may_stop = !observer_mode;
213 update_target_permissions ();
214
215 /* Going *into* observer mode we must force non-stop, then
216 going out we leave it that way. */
217 if (observer_mode)
218 {
219 target_async_permitted = 1;
220 pagination_enabled = 0;
221 non_stop = non_stop_1 = 1;
222 }
223
224 if (from_tty)
225 printf_filtered (_("Observer mode is now %s.\n"),
226 (observer_mode ? "on" : "off"));
227 }
228
229 static void
230 show_observer_mode (struct ui_file *file, int from_tty,
231 struct cmd_list_element *c, const char *value)
232 {
233 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
234 }
235
236 /* This updates the value of observer mode based on changes in
237 permissions. Note that we are deliberately ignoring the values of
238 may-write-registers and may-write-memory, since the user may have
239 reason to enable these during a session, for instance to turn on a
240 debugging-related global. */
241
242 void
243 update_observer_mode (void)
244 {
245 int newval;
246
247 newval = (!may_insert_breakpoints
248 && !may_insert_tracepoints
249 && may_insert_fast_tracepoints
250 && !may_stop
251 && non_stop);
252
253 /* Let the user know if things change. */
254 if (newval != observer_mode)
255 printf_filtered (_("Observer mode is now %s.\n"),
256 (newval ? "on" : "off"));
257
258 observer_mode = observer_mode_1 = newval;
259 }
260
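/* For reference, a minimal sketch of the equivalence checked above,
   written in terms of the same permission variables the function reads:

     observer_mode  <=>  !may_insert_breakpoints
                         && !may_insert_tracepoints
                         && may_insert_fast_tracepoints
                         && !may_stop
                         && non_stop

   i.e. observer mode is reported as "on" exactly when every
   execution-affecting permission is off, fast tracepoints remain
   allowed, and the target runs in non-stop mode.  */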
261 /* Tables of how to react to signals; the user sets them. */
262
263 static unsigned char *signal_stop;
264 static unsigned char *signal_print;
265 static unsigned char *signal_program;
266
267 #define SET_SIGS(nsigs,sigs,flags) \
268 do { \
269 int signum = (nsigs); \
270 while (signum-- > 0) \
271 if ((sigs)[signum]) \
272 (flags)[signum] = 1; \
273 } while (0)
274
275 #define UNSET_SIGS(nsigs,sigs,flags) \
276 do { \
277 int signum = (nsigs); \
278 while (signum-- > 0) \
279 if ((sigs)[signum]) \
280 (flags)[signum] = 0; \
281 } while (0)
282
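/* A minimal usage sketch (hypothetical NSIGS/SIGS variables; the real
   caller is handle_command, further down in this file, which fills SIGS
   from the arguments of the "handle" command): with SIGS[i] set to 1
   for each signal the user named, "handle <sig> stop print nopass"
   boils down to

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);
     UNSET_SIGS (nsigs, sigs, signal_program);

   i.e. stop and announce those signals, but do not pass them on to the
   program.  */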
283 /* Value to pass to target_resume() to cause all threads to resume */
284
285 #define RESUME_ALL minus_one_ptid
286
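/* For example (a sketch):

     target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);

   resumes every thread of every process, without stepping and without
   delivering a signal; resume () below starts from this default and
   narrows the ptid it actually hands to target_resume.  */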
287 /* Command list pointer for the "stop" placeholder. */
288
289 static struct cmd_list_element *stop_command;
290
291 /* Function inferior was in as of last step command. */
292
293 static struct symbol *step_start_function;
294
295 /* Nonzero if we want to give control to the user when we're notified
296 of shared library events by the dynamic linker. */
297 int stop_on_solib_events;
298 static void
299 show_stop_on_solib_events (struct ui_file *file, int from_tty,
300 struct cmd_list_element *c, const char *value)
301 {
302 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
303 value);
304 }
305
306 /* Nonzero means we are expecting a trace trap
307 and should stop the inferior and return silently when it happens. */
308
309 int stop_after_trap;
310
311 /* Save register contents here when executing a "finish" command or when
312 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
313 Thus this contains the return value from the called function (assuming
314 values are returned in a register). */
315
316 struct regcache *stop_registers;
317
318 /* Nonzero after stop if current stack frame should be printed. */
319
320 static int stop_print_frame;
321
322 /* This is a cached copy of the pid/waitstatus of the last event
323 returned by target_wait()/deprecated_target_wait_hook(). This
324 information is returned by get_last_target_status(). */
325 static ptid_t target_last_wait_ptid;
326 static struct target_waitstatus target_last_waitstatus;
327
328 static void context_switch (ptid_t ptid);
329
330 void init_thread_stepping_state (struct thread_info *tss);
331
332 void init_infwait_state (void);
333
334 static const char follow_fork_mode_child[] = "child";
335 static const char follow_fork_mode_parent[] = "parent";
336
337 static const char *follow_fork_mode_kind_names[] = {
338 follow_fork_mode_child,
339 follow_fork_mode_parent,
340 NULL
341 };
342
343 static const char *follow_fork_mode_string = follow_fork_mode_parent;
344 static void
345 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
346 struct cmd_list_element *c, const char *value)
347 {
348 fprintf_filtered (file, _("\
349 Debugger response to a program call of fork or vfork is \"%s\".\n"),
350 value);
351 }
352 \f
353
354 /* Tell the target to follow the fork we're stopped at. Returns true
355 if the inferior should be resumed; false, if the target for some
356 reason decided it's best not to resume. */
357
358 static int
359 follow_fork (void)
360 {
361 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
362 int should_resume = 1;
363 struct thread_info *tp;
364
365 /* Copy user stepping state to the new inferior thread. FIXME: the
366 followed fork child thread should have a copy of most of the
367 parent thread structure's run control related fields, not just these.
368 Initialized to avoid "may be used uninitialized" warnings from gcc. */
369 struct breakpoint *step_resume_breakpoint = NULL;
370 CORE_ADDR step_range_start = 0;
371 CORE_ADDR step_range_end = 0;
372 struct frame_id step_frame_id = { 0 };
373
374 if (!non_stop)
375 {
376 ptid_t wait_ptid;
377 struct target_waitstatus wait_status;
378
379 /* Get the last target status returned by target_wait(). */
380 get_last_target_status (&wait_ptid, &wait_status);
381
382 /* If not stopped at a fork event, then there's nothing else to
383 do. */
384 if (wait_status.kind != TARGET_WAITKIND_FORKED
385 && wait_status.kind != TARGET_WAITKIND_VFORKED)
386 return 1;
387
388 /* Check if we switched over from WAIT_PTID, since the event was
389 reported. */
390 if (!ptid_equal (wait_ptid, minus_one_ptid)
391 && !ptid_equal (inferior_ptid, wait_ptid))
392 {
393 /* We did. Switch back to WAIT_PTID thread, to tell the
394 target to follow it (in either direction). We'll
395 afterwards refuse to resume, and inform the user what
396 happened. */
397 switch_to_thread (wait_ptid);
398 should_resume = 0;
399 }
400 }
401
402 tp = inferior_thread ();
403
404 /* If there were any forks/vforks that were caught and are now to be
405 followed, then do so now. */
406 switch (tp->pending_follow.kind)
407 {
408 case TARGET_WAITKIND_FORKED:
409 case TARGET_WAITKIND_VFORKED:
410 {
411 ptid_t parent, child;
412
413 /* If the user did a next/step, etc, over a fork call,
414 preserve the stepping state in the fork child. */
415 if (follow_child && should_resume)
416 {
417 step_resume_breakpoint
418 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
419 step_range_start = tp->step_range_start;
420 step_range_end = tp->step_range_end;
421 step_frame_id = tp->step_frame_id;
422
423 /* For now, delete the parent's sr breakpoint; otherwise,
424 parent/child sr breakpoints are considered duplicates,
425 and the child version will not be installed. Remove
426 this when the breakpoints module becomes aware of
427 inferiors and address spaces. */
428 delete_step_resume_breakpoint (tp);
429 tp->step_range_start = 0;
430 tp->step_range_end = 0;
431 tp->step_frame_id = null_frame_id;
432 }
433
434 parent = inferior_ptid;
435 child = tp->pending_follow.value.related_pid;
436
437 /* Tell the target to do whatever is necessary to follow
438 either parent or child. */
439 if (target_follow_fork (follow_child))
440 {
441 /* Target refused to follow, or there's some other reason
442 we shouldn't resume. */
443 should_resume = 0;
444 }
445 else
446 {
447 /* This pending follow fork event is now handled, one way
448 or another. The previously selected thread may be gone
449 from the lists by now, but if it is still around, we need
450 to clear the pending follow request. */
451 tp = find_thread_ptid (parent);
452 if (tp)
453 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
454
455 /* This makes sure we don't try to apply the "Switched
456 over from WAIT_PTID" logic above. */
457 nullify_last_target_wait_ptid ();
458
459 /* If we followed the child, switch to it... */
460 if (follow_child)
461 {
462 switch_to_thread (child);
463
464 /* ... and preserve the stepping state, in case the
465 user was stepping over the fork call. */
466 if (should_resume)
467 {
468 tp = inferior_thread ();
469 tp->step_resume_breakpoint = step_resume_breakpoint;
470 tp->step_range_start = step_range_start;
471 tp->step_range_end = step_range_end;
472 tp->step_frame_id = step_frame_id;
473 }
474 else
475 {
476 /* If we get here, it was because we're trying to
477 resume from a fork catchpoint, but, the user
478 has switched threads away from the thread that
479 forked. In that case, the resume command
480 issued is most likely not applicable to the
481 child, so just warn, and refuse to resume. */
482 warning (_("\
483 Not resuming: switched threads before following fork child.\n"));
484 }
485
486 /* Reset breakpoints in the child as appropriate. */
487 follow_inferior_reset_breakpoints ();
488 }
489 else
490 switch_to_thread (parent);
491 }
492 }
493 break;
494 case TARGET_WAITKIND_SPURIOUS:
495 /* Nothing to follow. */
496 break;
497 default:
498 internal_error (__FILE__, __LINE__,
499 "Unexpected pending_follow.kind %d\n",
500 tp->pending_follow.kind);
501 break;
502 }
503
504 return should_resume;
505 }
506
507 void
508 follow_inferior_reset_breakpoints (void)
509 {
510 struct thread_info *tp = inferior_thread ();
511
512 /* Was there a step_resume breakpoint? (There was if the user
513 did a "next" at the fork() call.) If so, explicitly reset its
514 thread number.
515
516 step_resumes are a form of bp that are made to be per-thread.
517 Since we created the step_resume bp when the parent process
518 was being debugged, and now are switching to the child process,
519 from the breakpoint package's viewpoint, that's a switch of
520 "threads". We must update the bp's notion of which thread
521 it is for, or it'll be ignored when it triggers. */
522
523 if (tp->step_resume_breakpoint)
524 breakpoint_re_set_thread (tp->step_resume_breakpoint);
525
526 /* Reinsert all breakpoints in the child. The user may have set
527 breakpoints after catching the fork, in which case those
528 were never set in the child, but only in the parent. This makes
529 sure the inserted breakpoints match the breakpoint list. */
530
531 breakpoint_re_set ();
532 insert_breakpoints ();
533 }
534
535 /* The child has exited or execed: resume threads of the parent the
536 user wanted to be executing. */
537
538 static int
539 proceed_after_vfork_done (struct thread_info *thread,
540 void *arg)
541 {
542 int pid = * (int *) arg;
543
544 if (ptid_get_pid (thread->ptid) == pid
545 && is_running (thread->ptid)
546 && !is_executing (thread->ptid)
547 && !thread->stop_requested
548 && thread->stop_signal == TARGET_SIGNAL_0)
549 {
550 if (debug_infrun)
551 fprintf_unfiltered (gdb_stdlog,
552 "infrun: resuming vfork parent thread %s\n",
553 target_pid_to_str (thread->ptid));
554
555 switch_to_thread (thread->ptid);
556 clear_proceed_status ();
557 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
558 }
559
560 return 0;
561 }
562
563 /* Called whenever we notice an exec or exit event, to handle
564 detaching or resuming a vfork parent. */
565
566 static void
567 handle_vfork_child_exec_or_exit (int exec)
568 {
569 struct inferior *inf = current_inferior ();
570
571 if (inf->vfork_parent)
572 {
573 int resume_parent = -1;
574
575 /* This exec or exit marks the end of the shared memory region
576 between the parent and the child. If the user wanted to
577 detach from the parent, now is the time. */
578
579 if (inf->vfork_parent->pending_detach)
580 {
581 struct thread_info *tp;
582 struct cleanup *old_chain;
583 struct program_space *pspace;
584 struct address_space *aspace;
585
586 /* follow-fork child, detach-on-fork on */
587
588 old_chain = make_cleanup_restore_current_thread ();
589
590 /* We're letting go of the parent. */
591 tp = any_live_thread_of_process (inf->vfork_parent->pid);
592 switch_to_thread (tp->ptid);
593
594 /* We're about to detach from the parent, which implicitly
595 removes breakpoints from its address space. There's a
596 catch here: we want to reuse the spaces for the child,
597 but parent/child are still sharing the pspace at this
598 point, although the exec in reality makes the kernel give
599 the child a fresh set of new pages. The problem here is
600 that the breakpoints module, being unaware of this, would
601 likely choose the child process to write to the parent
602 address space. Swapping the child temporarily away from
603 the spaces has the desired effect. Yes, this is "sort
604 of" a hack. */
605
606 pspace = inf->pspace;
607 aspace = inf->aspace;
608 inf->aspace = NULL;
609 inf->pspace = NULL;
610
611 if (debug_infrun || info_verbose)
612 {
613 target_terminal_ours ();
614
615 if (exec)
616 fprintf_filtered (gdb_stdlog,
617 "Detaching vfork parent process %d after child exec.\n",
618 inf->vfork_parent->pid);
619 else
620 fprintf_filtered (gdb_stdlog,
621 "Detaching vfork parent process %d after child exit.\n",
622 inf->vfork_parent->pid);
623 }
624
625 target_detach (NULL, 0);
626
627 /* Put it back. */
628 inf->pspace = pspace;
629 inf->aspace = aspace;
630
631 do_cleanups (old_chain);
632 }
633 else if (exec)
634 {
635 /* We're staying attached to the parent, so really give the
636 child a new address space. */
637 inf->pspace = add_program_space (maybe_new_address_space ());
638 inf->aspace = inf->pspace->aspace;
639 inf->removable = 1;
640 set_current_program_space (inf->pspace);
641
642 resume_parent = inf->vfork_parent->pid;
643
644 /* Break the bonds. */
645 inf->vfork_parent->vfork_child = NULL;
646 }
647 else
648 {
649 struct cleanup *old_chain;
650 struct program_space *pspace;
651
652 /* If this is a vfork child exiting, then the pspace and
653 aspaces were shared with the parent. Since we're
654 reporting the process exit, we'll be mourning all that is
655 found in the address space, and switching to null_ptid,
656 preparing to start a new inferior. But, since we don't
657 want to clobber the parent's address/program spaces, we
658 go ahead and create a new one for this exiting
659 inferior. */
660
661 /* Switch to null_ptid, so that clone_program_space doesn't want
662 to read the selected frame of a dead process. */
663 old_chain = save_inferior_ptid ();
664 inferior_ptid = null_ptid;
665
666 /* This inferior is dead, so avoid giving the breakpoints
667 module the option to write through to it (cloning a
668 program space resets breakpoints). */
669 inf->aspace = NULL;
670 inf->pspace = NULL;
671 pspace = add_program_space (maybe_new_address_space ());
672 set_current_program_space (pspace);
673 inf->removable = 1;
674 clone_program_space (pspace, inf->vfork_parent->pspace);
675 inf->pspace = pspace;
676 inf->aspace = pspace->aspace;
677
678 /* Put back inferior_ptid. We'll continue mourning this
679 inferior. */
680 do_cleanups (old_chain);
681
682 resume_parent = inf->vfork_parent->pid;
683 /* Break the bonds. */
684 inf->vfork_parent->vfork_child = NULL;
685 }
686
687 inf->vfork_parent = NULL;
688
689 gdb_assert (current_program_space == inf->pspace);
690
691 if (non_stop && resume_parent != -1)
692 {
693 /* If the user wanted the parent to be running, let it go
694 free now. */
695 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
696
697 if (debug_infrun)
698 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
699 resume_parent);
700
701 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
702
703 do_cleanups (old_chain);
704 }
705 }
706 }
707
708 /* Enum strings for "set|show follow-exec-mode". */
709
710 static const char follow_exec_mode_new[] = "new";
711 static const char follow_exec_mode_same[] = "same";
712 static const char *follow_exec_mode_names[] =
713 {
714 follow_exec_mode_new,
715 follow_exec_mode_same,
716 NULL,
717 };
718
719 static const char *follow_exec_mode_string = follow_exec_mode_same;
720 static void
721 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
722 struct cmd_list_element *c, const char *value)
723 {
724 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
725 }
726
727 /* EXECD_PATHNAME is assumed to be non-NULL. */
728
729 static void
730 follow_exec (ptid_t pid, char *execd_pathname)
731 {
732 struct thread_info *th = inferior_thread ();
733 struct inferior *inf = current_inferior ();
734
735 /* This is an exec event that we actually wish to pay attention to.
736 Refresh our symbol table to the newly exec'd program, remove any
737 momentary bp's, etc.
738
739 If there are breakpoints, they aren't really inserted now,
740 since the exec() transformed our inferior into a fresh set
741 of instructions.
742
743 We want to preserve symbolic breakpoints on the list, since
744 we have hopes that they can be reset after the new a.out's
745 symbol table is read.
746
747 However, any "raw" breakpoints must be removed from the list
748 (e.g., the solib bp's), since their address is probably invalid
749 now.
750
751 And, we DON'T want to call delete_breakpoints() here, since
752 that may write the bp's "shadow contents" (the instruction
753 value that was overwritten with a TRAP instruction). Since
754 we now have a new a.out, those shadow contents aren't valid. */
755
756 mark_breakpoints_out ();
757
758 update_breakpoints_after_exec ();
759
760 /* If there was one, it's gone now. We cannot truly step-to-next
761 statement through an exec(). */
762 th->step_resume_breakpoint = NULL;
763 th->step_range_start = 0;
764 th->step_range_end = 0;
765
766 /* The target reports the exec event to the main thread, even if
767 some other thread does the exec, and even if the main thread was
768 already stopped --- if debugging in non-stop mode, it's possible
769 the user had the main thread held stopped in the previous image
770 --- release it now. This is the same behavior as step-over-exec
771 with scheduler-locking on in all-stop mode. */
772 th->stop_requested = 0;
773
774 /* What is this a.out's name? */
775 printf_unfiltered (_("%s is executing new program: %s\n"),
776 target_pid_to_str (inferior_ptid),
777 execd_pathname);
778
779 /* We've followed the inferior through an exec. Therefore, the
780 inferior has essentially been killed & reborn. */
781
782 gdb_flush (gdb_stdout);
783
784 breakpoint_init_inferior (inf_execd);
785
786 if (gdb_sysroot && *gdb_sysroot)
787 {
788 char *name = alloca (strlen (gdb_sysroot)
789 + strlen (execd_pathname)
790 + 1);
791
792 strcpy (name, gdb_sysroot);
793 strcat (name, execd_pathname);
794 execd_pathname = name;
795 }
796
797 /* Reset the shared library package. This ensures that we get a
798 shlib event when the child reaches "_start", at which point the
799 dld will have had a chance to initialize the child. */
800 /* Also, loading a symbol file below may trigger symbol lookups, and
801 we don't want those to be satisfied by the libraries of the
802 previous incarnation of this process. */
803 no_shared_libraries (NULL, 0);
804
805 if (follow_exec_mode_string == follow_exec_mode_new)
806 {
807 struct program_space *pspace;
808
809 /* The user wants to keep the old inferior and program spaces
810 around. Create a new fresh one, and switch to it. */
811
812 inf = add_inferior (current_inferior ()->pid);
813 pspace = add_program_space (maybe_new_address_space ());
814 inf->pspace = pspace;
815 inf->aspace = pspace->aspace;
816
817 exit_inferior_num_silent (current_inferior ()->num);
818
819 set_current_inferior (inf);
820 set_current_program_space (pspace);
821 }
822
823 gdb_assert (current_program_space == inf->pspace);
824
825 /* That a.out is now the one to use. */
826 exec_file_attach (execd_pathname, 0);
827
828 /* Load the main file's symbols. */
829 symbol_file_add_main (execd_pathname, 0);
830
831 #ifdef SOLIB_CREATE_INFERIOR_HOOK
832 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
833 #else
834 solib_create_inferior_hook (0);
835 #endif
836
837 jit_inferior_created_hook ();
838
839 /* Reinsert all breakpoints. (Those which were symbolic have
840 been reset to the proper address in the new a.out, thanks
841 to symbol_file_command...) */
842 insert_breakpoints ();
843
844 /* The next resume of this inferior should bring it to the shlib
845 startup breakpoints. (If the user had also set bp's on
846 "main" from the old (parent) process, then they'll auto-
847 matically get reset there in the new process.) */
848 }
849
850 /* Non-zero if we are just simulating a single-step. This is needed
851 because we cannot remove the breakpoints in the inferior process
852 until after the `wait' in `wait_for_inferior'. */
853 static int singlestep_breakpoints_inserted_p = 0;
854
855 /* The thread we inserted single-step breakpoints for. */
856 static ptid_t singlestep_ptid;
857
858 /* PC when we started this single-step. */
859 static CORE_ADDR singlestep_pc;
860
861 /* If another thread hit the singlestep breakpoint, we save the original
862 thread here so that we can resume single-stepping it later. */
863 static ptid_t saved_singlestep_ptid;
864 static int stepping_past_singlestep_breakpoint;
865
866 /* If not equal to null_ptid, this means that after stepping over a breakpoint
867 is finished, we need to switch to deferred_step_ptid, and step it.
868
869 The use case is when one thread has hit a breakpoint, and then the user
870 has switched to another thread and issued 'step'. We need to step over
871 breakpoint in the thread which hit the breakpoint, but then continue
872 stepping the thread user has selected. */
873 static ptid_t deferred_step_ptid;
874 \f
875 /* Displaced stepping. */
876
877 /* In non-stop debugging mode, we must take special care to manage
878 breakpoints properly; in particular, the traditional strategy for
879 stepping a thread past a breakpoint it has hit is unsuitable.
880 'Displaced stepping' is a tactic for stepping one thread past a
881 breakpoint it has hit while ensuring that other threads running
882 concurrently will hit the breakpoint as they should.
883
884 The traditional way to step a thread T off a breakpoint in a
885 multi-threaded program in all-stop mode is as follows:
886
887 a0) Initially, all threads are stopped, and breakpoints are not
888 inserted.
889 a1) We single-step T, leaving breakpoints uninserted.
890 a2) We insert breakpoints, and resume all threads.
891
892 In non-stop debugging, however, this strategy is unsuitable: we
893 don't want to have to stop all threads in the system in order to
894 continue or step T past a breakpoint. Instead, we use displaced
895 stepping:
896
897 n0) Initially, T is stopped, other threads are running, and
898 breakpoints are inserted.
899 n1) We copy the instruction "under" the breakpoint to a separate
900 location, outside the main code stream, making any adjustments
901 to the instruction, register, and memory state as directed by
902 T's architecture.
903 n2) We single-step T over the instruction at its new location.
904 n3) We adjust the resulting register and memory state as directed
905 by T's architecture. This includes resetting T's PC to point
906 back into the main instruction stream.
907 n4) We resume T.
908
909 This approach depends on the following gdbarch methods:
910
911 - gdbarch_max_insn_length and gdbarch_displaced_step_location
912 indicate where to copy the instruction, and how much space must
913 be reserved there. We use these in step n1.
914
915 - gdbarch_displaced_step_copy_insn copies an instruction to a new
916 address, and makes any necessary adjustments to the instruction,
917 register contents, and memory. We use this in step n1.
918
919 - gdbarch_displaced_step_fixup adjusts registers and memory after
920 we have successfully single-stepped the instruction, to yield the
921 same effect the instruction would have had if we had executed it
922 at its original address. We use this in step n3.
923
924 - gdbarch_displaced_step_free_closure provides cleanup.
925
926 The gdbarch_displaced_step_copy_insn and
927 gdbarch_displaced_step_fixup functions must be written so that
928 copying an instruction with gdbarch_displaced_step_copy_insn,
929 single-stepping across the copied instruction, and then applying
930 gdbarch_displaced_step_fixup should have the same effects on the
931 thread's memory and registers as stepping the instruction in place
932 would have. Exactly which responsibilities fall to the copy and
933 which fall to the fixup is up to the author of those functions.
934
935 See the comments in gdbarch.sh for details.
936
937 Note that displaced stepping and software single-step cannot
938 currently be used in combination, although with some care I think
939 they could be made to. Software single-step works by placing
940 breakpoints on all possible subsequent instructions; if the
941 displaced instruction is a PC-relative jump, those breakpoints
942 could fall in very strange places --- on pages that aren't
943 executable, or at addresses that are not proper instruction
944 boundaries. (We do generally let other threads run while we wait
945 to hit the software single-step breakpoint, and they might
946 encounter such a corrupted instruction.) One way to work around
947 this would be to have gdbarch_displaced_step_copy_insn fully
948 simulate the effect of PC-relative instructions (and return NULL)
949 on architectures that use software single-stepping.
950
951 In non-stop mode, we can have independent and simultaneous step
952 requests, so more than one thread may need to simultaneously step
953 over a breakpoint. The current implementation assumes there is
954 only one scratch space per process. In this case, we have to
955 serialize access to the scratch space. If thread A wants to step
956 over a breakpoint, but we are currently waiting for some other
957 thread to complete a displaced step, we leave thread A stopped and
958 place it in the displaced_step_request_queue. Whenever a displaced
959 step finishes, we pick the next thread in the queue and start a new
960 displaced step operation on it. See displaced_step_prepare and
961 displaced_step_fixup for details. */
962
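/* As a rough sketch only (the real implementation is
   displaced_step_prepare and displaced_step_fixup, below, and the
   local names here are ad hoc), steps n1-n4 above map onto the
   gdbarch interface like this:

     n1:  copy = gdbarch_displaced_step_location (gdbarch);
          len = gdbarch_max_insn_length (gdbarch);
          read_memory (copy, saved, len);        [save the scratch area]
          closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                      copy, regcache);
          regcache_write_pc (regcache, copy);
     n2:  [single-step the thread and wait for it to stop]
     n3:  gdbarch_displaced_step_fixup (gdbarch, closure, original,
                                        copy, regcache);
          gdbarch_displaced_step_free_closure (gdbarch, closure);
          [restore the saved scratch area contents]
     n4:  [resume the thread normally]

   plus, per the previous paragraph, the per-process queueing that
   serializes access to the single scratch area.  */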
963 struct displaced_step_request
964 {
965 ptid_t ptid;
966 struct displaced_step_request *next;
967 };
968
969 /* Per-inferior displaced stepping state. */
970 struct displaced_step_inferior_state
971 {
972 /* Pointer to next in linked list. */
973 struct displaced_step_inferior_state *next;
974
975 /* The process this displaced step state refers to. */
976 int pid;
977
978 /* A queue of pending displaced stepping requests. One entry per
979 thread that needs to do a displaced step. */
980 struct displaced_step_request *step_request_queue;
981
982 /* If this is not null_ptid, this is the thread carrying out a
983 displaced single-step in process PID. This thread's state will
984 require fixing up once it has completed its step. */
985 ptid_t step_ptid;
986
987 /* The architecture the thread had when we stepped it. */
988 struct gdbarch *step_gdbarch;
989
990 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
991 for post-step cleanup. */
992 struct displaced_step_closure *step_closure;
993
994 /* The address of the original instruction, and the copy we
995 made. */
996 CORE_ADDR step_original, step_copy;
997
998 /* Saved contents of copy area. */
999 gdb_byte *step_saved_copy;
1000 };
1001
1002 /* The list of states of processes presently involved in displaced
1003 stepping. */
1004 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1005
1006 /* Get the displaced stepping state of process PID. */
1007
1008 static struct displaced_step_inferior_state *
1009 get_displaced_stepping_state (int pid)
1010 {
1011 struct displaced_step_inferior_state *state;
1012
1013 for (state = displaced_step_inferior_states;
1014 state != NULL;
1015 state = state->next)
1016 if (state->pid == pid)
1017 return state;
1018
1019 return NULL;
1020 }
1021
1022 /* Add a new displaced stepping state for process PID to the displaced
1023 stepping state list, or return a pointer to the existing entry if
1024 one is already on the list. Never returns NULL. */
1025
1026 static struct displaced_step_inferior_state *
1027 add_displaced_stepping_state (int pid)
1028 {
1029 struct displaced_step_inferior_state *state;
1030
1031 for (state = displaced_step_inferior_states;
1032 state != NULL;
1033 state = state->next)
1034 if (state->pid == pid)
1035 return state;
1036
1037 state = xcalloc (1, sizeof (*state));
1038 state->pid = pid;
1039 state->next = displaced_step_inferior_states;
1040 displaced_step_inferior_states = state;
1041
1042 return state;
1043 }
1044
1045 /* Remove the displaced stepping state of process PID. */
1046
1047 static void
1048 remove_displaced_stepping_state (int pid)
1049 {
1050 struct displaced_step_inferior_state *it, **prev_next_p;
1051
1052 gdb_assert (pid != 0);
1053
1054 it = displaced_step_inferior_states;
1055 prev_next_p = &displaced_step_inferior_states;
1056 while (it)
1057 {
1058 if (it->pid == pid)
1059 {
1060 *prev_next_p = it->next;
1061 xfree (it);
1062 return;
1063 }
1064
1065 prev_next_p = &it->next;
1066 it = *prev_next_p;
1067 }
1068 }
1069
1070 static void
1071 infrun_inferior_exit (struct inferior *inf)
1072 {
1073 remove_displaced_stepping_state (inf->pid);
1074 }
1075
1076 /* Enum strings for "set|show displaced-stepping". */
1077
1078 static const char can_use_displaced_stepping_auto[] = "auto";
1079 static const char can_use_displaced_stepping_on[] = "on";
1080 static const char can_use_displaced_stepping_off[] = "off";
1081 static const char *can_use_displaced_stepping_enum[] =
1082 {
1083 can_use_displaced_stepping_auto,
1084 can_use_displaced_stepping_on,
1085 can_use_displaced_stepping_off,
1086 NULL,
1087 };
1088
1089 /* If ON, and the architecture supports it, GDB will use displaced
1090 stepping to step over breakpoints. If OFF, or if the architecture
1091 doesn't support it, GDB will instead use the traditional
1092 hold-and-step approach. If AUTO (which is the default), GDB will
1093 decide which technique to use to step over breakpoints depending on
1094 which of all-stop or non-stop mode is active --- displaced stepping
1095 in non-stop mode; hold-and-step in all-stop mode. */
1096
1097 static const char *can_use_displaced_stepping =
1098 can_use_displaced_stepping_auto;
1099
1100 static void
1101 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1102 struct cmd_list_element *c,
1103 const char *value)
1104 {
1105 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1106 fprintf_filtered (file, _("\
1107 Debugger's willingness to use displaced stepping to step over \
1108 breakpoints is %s (currently %s).\n"),
1109 value, non_stop ? "on" : "off");
1110 else
1111 fprintf_filtered (file, _("\
1112 Debugger's willingness to use displaced stepping to step over \
1113 breakpoints is %s.\n"), value);
1114 }
1115
1116 /* Return non-zero if displaced stepping can/should be used to step
1117 over breakpoints. */
1118
1119 static int
1120 use_displaced_stepping (struct gdbarch *gdbarch)
1121 {
1122 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1123 && non_stop)
1124 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1125 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1126 && !RECORD_IS_USED);
1127 }
1128
1129 /* Clean out any stray displaced stepping state. */
1130 static void
1131 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1132 {
1133 /* Indicate that there is no cleanup pending. */
1134 displaced->step_ptid = null_ptid;
1135
1136 if (displaced->step_closure)
1137 {
1138 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1139 displaced->step_closure);
1140 displaced->step_closure = NULL;
1141 }
1142 }
1143
1144 static void
1145 displaced_step_clear_cleanup (void *arg)
1146 {
1147 struct displaced_step_inferior_state *state = arg;
1148
1149 displaced_step_clear (state);
1150 }
1151
1152 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1153 void
1154 displaced_step_dump_bytes (struct ui_file *file,
1155 const gdb_byte *buf,
1156 size_t len)
1157 {
1158 int i;
1159
1160 for (i = 0; i < len; i++)
1161 fprintf_unfiltered (file, "%02x ", buf[i]);
1162 fputs_unfiltered ("\n", file);
1163 }
1164
1165 /* Prepare to single-step, using displaced stepping.
1166
1167 Note that we cannot use displaced stepping when we have a signal to
1168 deliver. If we have a signal to deliver and an instruction to step
1169 over, then after the step, there will be no indication from the
1170 target whether the thread entered a signal handler or ignored the
1171 signal and stepped over the instruction successfully --- both cases
1172 result in a simple SIGTRAP. In the first case we mustn't do a
1173 fixup, and in the second case we must --- but we can't tell which.
1174 Comments in the code for 'random signals' in handle_inferior_event
1175 explain how we handle this case instead.
1176
1177 Returns 1 if preparing was successful -- this thread is going to be
1178 stepped now; or 0 if displaced stepping this thread got queued. */
1179 static int
1180 displaced_step_prepare (ptid_t ptid)
1181 {
1182 struct cleanup *old_cleanups, *ignore_cleanups;
1183 struct regcache *regcache = get_thread_regcache (ptid);
1184 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1185 CORE_ADDR original, copy;
1186 ULONGEST len;
1187 struct displaced_step_closure *closure;
1188 struct displaced_step_inferior_state *displaced;
1189
1190 /* We should never reach this function if the architecture does not
1191 support displaced stepping. */
1192 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1193
1194 /* We have to displaced step one thread at a time, as we only have
1195 access to a single scratch space per inferior. */
1196
1197 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1198
1199 if (!ptid_equal (displaced->step_ptid, null_ptid))
1200 {
1201 /* Already waiting for a displaced step to finish. Defer this
1202 request and place it in the queue. */
1203 struct displaced_step_request *req, *new_req;
1204
1205 if (debug_displaced)
1206 fprintf_unfiltered (gdb_stdlog,
1207 "displaced: defering step of %s\n",
1208 target_pid_to_str (ptid));
1209
1210 new_req = xmalloc (sizeof (*new_req));
1211 new_req->ptid = ptid;
1212 new_req->next = NULL;
1213
1214 if (displaced->step_request_queue)
1215 {
1216 for (req = displaced->step_request_queue;
1217 req && req->next;
1218 req = req->next)
1219 ;
1220 req->next = new_req;
1221 }
1222 else
1223 displaced->step_request_queue = new_req;
1224
1225 return 0;
1226 }
1227 else
1228 {
1229 if (debug_displaced)
1230 fprintf_unfiltered (gdb_stdlog,
1231 "displaced: stepping %s now\n",
1232 target_pid_to_str (ptid));
1233 }
1234
1235 displaced_step_clear (displaced);
1236
1237 old_cleanups = save_inferior_ptid ();
1238 inferior_ptid = ptid;
1239
1240 original = regcache_read_pc (regcache);
1241
1242 copy = gdbarch_displaced_step_location (gdbarch);
1243 len = gdbarch_max_insn_length (gdbarch);
1244
1245 /* Save the original contents of the copy area. */
1246 displaced->step_saved_copy = xmalloc (len);
1247 ignore_cleanups = make_cleanup (free_current_contents,
1248 &displaced->step_saved_copy);
1249 read_memory (copy, displaced->step_saved_copy, len);
1250 if (debug_displaced)
1251 {
1252 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1253 paddress (gdbarch, copy));
1254 displaced_step_dump_bytes (gdb_stdlog,
1255 displaced->step_saved_copy,
1256 len);
1257 }
1258
1259 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1260 original, copy, regcache);
1261
1262 /* We don't support the fully-simulated case at present. */
1263 gdb_assert (closure);
1264
1265 /* Save the information we need to fix things up if the step
1266 succeeds. */
1267 displaced->step_ptid = ptid;
1268 displaced->step_gdbarch = gdbarch;
1269 displaced->step_closure = closure;
1270 displaced->step_original = original;
1271 displaced->step_copy = copy;
1272
1273 make_cleanup (displaced_step_clear_cleanup, displaced);
1274
1275 /* Resume execution at the copy. */
1276 regcache_write_pc (regcache, copy);
1277
1278 discard_cleanups (ignore_cleanups);
1279
1280 do_cleanups (old_cleanups);
1281
1282 if (debug_displaced)
1283 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1284 paddress (gdbarch, copy));
1285
1286 return 1;
1287 }
1288
1289 static void
1290 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1291 {
1292 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1293
1294 inferior_ptid = ptid;
1295 write_memory (memaddr, myaddr, len);
1296 do_cleanups (ptid_cleanup);
1297 }
1298
1299 static void
1300 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1301 {
1302 struct cleanup *old_cleanups;
1303 struct displaced_step_inferior_state *displaced
1304 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1305
1306 /* Was any thread of this process doing a displaced step? */
1307 if (displaced == NULL)
1308 return;
1309
1310 /* Was this event for the thread we displaced? */
1311 if (ptid_equal (displaced->step_ptid, null_ptid)
1312 || ! ptid_equal (displaced->step_ptid, event_ptid))
1313 return;
1314
1315 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1316
1317 /* Restore the contents of the copy area. */
1318 {
1319 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1320
1321 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1322 displaced->step_saved_copy, len);
1323 if (debug_displaced)
1324 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1325 paddress (displaced->step_gdbarch,
1326 displaced->step_copy));
1327 }
1328
1329 /* Did the instruction complete successfully? */
1330 if (signal == TARGET_SIGNAL_TRAP)
1331 {
1332 /* Fix up the resulting state. */
1333 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1334 displaced->step_closure,
1335 displaced->step_original,
1336 displaced->step_copy,
1337 get_thread_regcache (displaced->step_ptid));
1338 }
1339 else
1340 {
1341 /* Since the instruction didn't complete, all we can do is
1342 relocate the PC. */
1343 struct regcache *regcache = get_thread_regcache (event_ptid);
1344 CORE_ADDR pc = regcache_read_pc (regcache);
1345
1346 pc = displaced->step_original + (pc - displaced->step_copy);
1347 regcache_write_pc (regcache, pc);
1348 }
1349
1350 do_cleanups (old_cleanups);
1351
1352 displaced->step_ptid = null_ptid;
1353
1354 /* Are there any pending displaced stepping requests? If so, run
1355 one now. Leave the state object around, since we're likely to
1356 need it again soon. */
1357 while (displaced->step_request_queue)
1358 {
1359 struct displaced_step_request *head;
1360 ptid_t ptid;
1361 struct regcache *regcache;
1362 struct gdbarch *gdbarch;
1363 CORE_ADDR actual_pc;
1364 struct address_space *aspace;
1365
1366 head = displaced->step_request_queue;
1367 ptid = head->ptid;
1368 displaced->step_request_queue = head->next;
1369 xfree (head);
1370
1371 context_switch (ptid);
1372
1373 regcache = get_thread_regcache (ptid);
1374 actual_pc = regcache_read_pc (regcache);
1375 aspace = get_regcache_aspace (regcache);
1376
1377 if (breakpoint_here_p (aspace, actual_pc))
1378 {
1379 if (debug_displaced)
1380 fprintf_unfiltered (gdb_stdlog,
1381 "displaced: stepping queued %s now\n",
1382 target_pid_to_str (ptid));
1383
1384 displaced_step_prepare (ptid);
1385
1386 gdbarch = get_regcache_arch (regcache);
1387
1388 if (debug_displaced)
1389 {
1390 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1391 gdb_byte buf[4];
1392
1393 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1394 paddress (gdbarch, actual_pc));
1395 read_memory (actual_pc, buf, sizeof (buf));
1396 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1397 }
1398
1399 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1400 displaced->step_closure))
1401 target_resume (ptid, 1, TARGET_SIGNAL_0);
1402 else
1403 target_resume (ptid, 0, TARGET_SIGNAL_0);
1404
1405 /* Done, we're stepping a thread. */
1406 break;
1407 }
1408 else
1409 {
1410 int step;
1411 struct thread_info *tp = inferior_thread ();
1412
1413 /* The breakpoint we were sitting under has since been
1414 removed. */
1415 tp->trap_expected = 0;
1416
1417 /* Go back to what we were trying to do. */
1418 step = currently_stepping (tp);
1419
1420 if (debug_displaced)
1421 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1422 target_pid_to_str (tp->ptid), step);
1423
1424 target_resume (ptid, step, TARGET_SIGNAL_0);
1425 tp->stop_signal = TARGET_SIGNAL_0;
1426
1427 /* This request was discarded. See if there's any other
1428 thread waiting for its turn. */
1429 }
1430 }
1431 }
1432
1433 /* Update global variables holding ptids to hold NEW_PTID if they were
1434 holding OLD_PTID. */
1435 static void
1436 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1437 {
1438 struct displaced_step_request *it;
1439 struct displaced_step_inferior_state *displaced;
1440
1441 if (ptid_equal (inferior_ptid, old_ptid))
1442 inferior_ptid = new_ptid;
1443
1444 if (ptid_equal (singlestep_ptid, old_ptid))
1445 singlestep_ptid = new_ptid;
1446
1447 if (ptid_equal (deferred_step_ptid, old_ptid))
1448 deferred_step_ptid = new_ptid;
1449
1450 for (displaced = displaced_step_inferior_states;
1451 displaced;
1452 displaced = displaced->next)
1453 {
1454 if (ptid_equal (displaced->step_ptid, old_ptid))
1455 displaced->step_ptid = new_ptid;
1456
1457 for (it = displaced->step_request_queue; it; it = it->next)
1458 if (ptid_equal (it->ptid, old_ptid))
1459 it->ptid = new_ptid;
1460 }
1461 }
1462
1463 \f
1464 /* Resuming. */
1465
1466 /* Things to clean up if we QUIT out of resume (). */
1467 static void
1468 resume_cleanups (void *ignore)
1469 {
1470 normal_stop ();
1471 }
1472
1473 static const char schedlock_off[] = "off";
1474 static const char schedlock_on[] = "on";
1475 static const char schedlock_step[] = "step";
1476 static const char *scheduler_enums[] = {
1477 schedlock_off,
1478 schedlock_on,
1479 schedlock_step,
1480 NULL
1481 };
1482 static const char *scheduler_mode = schedlock_off;
1483 static void
1484 show_scheduler_mode (struct ui_file *file, int from_tty,
1485 struct cmd_list_element *c, const char *value)
1486 {
1487 fprintf_filtered (file, _("\
1488 Mode for locking scheduler during execution is \"%s\".\n"),
1489 value);
1490 }
1491
1492 static void
1493 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1494 {
1495 if (!target_can_lock_scheduler)
1496 {
1497 scheduler_mode = schedlock_off;
1498 error (_("Target '%s' cannot support this command."), target_shortname);
1499 }
1500 }
1501
1502 /* True if execution commands resume all threads of all processes by
1503 default; otherwise, resume only threads of the current inferior
1504 process. */
1505 int sched_multi = 0;
1506
1507 /* Try to set up software single-stepping over the specified location.
1508 Return 1 if target_resume() should use hardware single step.
1509
1510 GDBARCH the current gdbarch.
1511 PC the location to step over. */
1512
1513 static int
1514 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1515 {
1516 int hw_step = 1;
1517
1518 if (execution_direction == EXEC_FORWARD
1519 && gdbarch_software_single_step_p (gdbarch)
1520 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1521 {
1522 hw_step = 0;
1523 /* Do not pull these breakpoints until after a `wait' in
1524 `wait_for_inferior' */
1525 singlestep_breakpoints_inserted_p = 1;
1526 singlestep_ptid = inferior_ptid;
1527 singlestep_pc = pc;
1528 }
1529 return hw_step;
1530 }
1531
1532 /* Resume the inferior, but allow a QUIT. This is useful if the user
1533 wants to interrupt some lengthy single-stepping operation
1534 (for child processes, the SIGINT goes to the inferior, and so
1535 we get a SIGINT random_signal, but for remote debugging and perhaps
1536 other targets, that's not true).
1537
1538 STEP nonzero if we should step (zero to continue instead).
1539 SIG is the signal to give the inferior (zero for none). */
1540 void
1541 resume (int step, enum target_signal sig)
1542 {
1543 int should_resume = 1;
1544 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1545 struct regcache *regcache = get_current_regcache ();
1546 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1547 struct thread_info *tp = inferior_thread ();
1548 CORE_ADDR pc = regcache_read_pc (regcache);
1549 struct address_space *aspace = get_regcache_aspace (regcache);
1550
1551 QUIT;
1552
1553 if (debug_infrun)
1554 fprintf_unfiltered (gdb_stdlog,
1555 "infrun: resume (step=%d, signal=%d), "
1556 "trap_expected=%d\n",
1557 step, sig, tp->trap_expected);
1558
1559 /* Normally, by the time we reach `resume', the breakpoints are either
1560 removed or inserted, as appropriate. The exception is if we're sitting
1561 at a permanent breakpoint; we need to step over it, but permanent
1562 breakpoints can't be removed. So we have to test for it here. */
1563 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1564 {
1565 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1566 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1567 else
1568 error (_("\
1569 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1570 how to step past a permanent breakpoint on this architecture. Try using\n\
1571 a command like `return' or `jump' to continue execution."));
1572 }
1573
1574 /* If enabled, step over breakpoints by executing a copy of the
1575 instruction at a different address.
1576
1577 We can't use displaced stepping when we have a signal to deliver;
1578 the comments for displaced_step_prepare explain why. The
1579 comments in handle_inferior_event for dealing with 'random
1580 signals' explain what we do instead. */
1581 if (use_displaced_stepping (gdbarch)
1582 && (tp->trap_expected
1583 || (step && gdbarch_software_single_step_p (gdbarch)))
1584 && sig == TARGET_SIGNAL_0)
1585 {
1586 struct displaced_step_inferior_state *displaced;
1587
1588 if (!displaced_step_prepare (inferior_ptid))
1589 {
1590 /* Got placed in displaced stepping queue. Will be resumed
1591 later when all the currently queued displaced stepping
1592 requests finish. The thread is not executing at this point,
1593 and the call to set_executing will be made later. But we
1594 need to call set_running here, since from the frontend's point of view,
1595 the thread is running. */
1596 set_running (inferior_ptid, 1);
1597 discard_cleanups (old_cleanups);
1598 return;
1599 }
1600
1601 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1602 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1603 displaced->step_closure);
1604 }
1605
1606 /* Do we need to do it the hard way, w/temp breakpoints? */
1607 else if (step)
1608 step = maybe_software_singlestep (gdbarch, pc);
1609
1610 if (should_resume)
1611 {
1612 ptid_t resume_ptid;
1613
1614 /* If STEP is set, it's a request to use hardware stepping
1615 facilities. But in that case, we should never
1616 use singlestep breakpoint. */
1617 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1618
1619 /* Decide the set of threads to ask the target to resume. Start
1620 by assuming everything will be resumed, then narrow the set
1621 by applying increasingly restrictive conditions. */
1622
1623 /* By default, resume all threads of all processes. */
1624 resume_ptid = RESUME_ALL;
1625
1626 /* Maybe resume only all threads of the current process. */
1627 if (!sched_multi && target_supports_multi_process ())
1628 {
1629 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1630 }
1631
1632 /* Maybe resume a single thread after all. */
1633 if (singlestep_breakpoints_inserted_p
1634 && stepping_past_singlestep_breakpoint)
1635 {
1636 /* The situation here is as follows. In thread T1 we wanted to
1637 single-step. Lacking hardware single-stepping, we've set a
1638 breakpoint at the PC of the next instruction -- call it
1639 P. After resuming, we've hit that breakpoint in thread T2.
1640 Now we've removed the original breakpoint, inserted a breakpoint
1641 at P+1, and are trying to step to advance T2 past the breakpoint.
1642 We need to step only T2; if T1 is allowed to run freely,
1643 it can run past P, and if other threads are allowed to run,
1644 they can hit the breakpoint at P+1, and nested hits of single-step
1645 breakpoints are not something we'd want -- that's complicated
1646 to support, and has no value. */
1647 resume_ptid = inferior_ptid;
1648 }
1649 else if ((step || singlestep_breakpoints_inserted_p)
1650 && tp->trap_expected)
1651 {
1652 /* We're allowing a thread to run past a breakpoint it has
1653 hit, by single-stepping the thread with the breakpoint
1654 removed. In that case, we need to single-step only this
1655 thread, and keep others stopped, as they can miss this
1656 breakpoint if allowed to run.
1657
1658 The current code actually removes all breakpoints when
1659 doing this, not just the one being stepped over, so if we
1660 let other threads run, we can actually miss any
1661 breakpoint, not just the one at PC. */
1662 resume_ptid = inferior_ptid;
1663 }
1664 else if (non_stop)
1665 {
1666 /* With non-stop mode on, threads are always handled
1667 individually. */
1668 resume_ptid = inferior_ptid;
1669 }
1670 else if ((scheduler_mode == schedlock_on)
1671 || (scheduler_mode == schedlock_step
1672 && (step || singlestep_breakpoints_inserted_p)))
1673 {
1674 /* User-settable 'scheduler' mode requires solo thread resume. */
1675 resume_ptid = inferior_ptid;
1676 }
1677
1678 if (gdbarch_cannot_step_breakpoint (gdbarch))
1679 {
1680 /* Most targets can step a breakpoint instruction, thus
1681 executing it normally. But if this one cannot, just
1682 continue and we will hit it anyway. */
1683 if (step && breakpoint_inserted_here_p (aspace, pc))
1684 step = 0;
1685 }
1686
1687 if (debug_displaced
1688 && use_displaced_stepping (gdbarch)
1689 && tp->trap_expected)
1690 {
1691 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1692 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1693 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1694 gdb_byte buf[4];
1695
1696 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1697 paddress (resume_gdbarch, actual_pc));
1698 read_memory (actual_pc, buf, sizeof (buf));
1699 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1700 }
1701
1702 /* Install inferior's terminal modes. */
1703 target_terminal_inferior ();
1704
1705 /* Avoid confusing the next resume, if the next stop/resume
1706 happens to apply to another thread. */
1707 tp->stop_signal = TARGET_SIGNAL_0;
1708
1709 target_resume (resume_ptid, step, sig);
1710 }
1711
1712 discard_cleanups (old_cleanups);
1713 }
1714 \f
1715 /* Proceeding. */
1716
1717 /* Clear out all variables saying what to do when the inferior is continued.
1718 First do this, then set the ones you want, then call `proceed'. */
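/* A minimal sketch of that calling convention (illustrative only; the
   fields set and the exact arguments depend on the command being
   implemented -- compare the fields reset by clear_proceed_status_thread
   below, and "sal" stands for whatever symtab_and_line the caller has):

       clear_proceed_status ();
       tp = inferior_thread ();
       tp->step_range_start = sal.pc;
       tp->step_range_end = sal.end;
       tp->step_over_calls = STEP_OVER_ALL;
       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 1);  */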
1719
1720 static void
1721 clear_proceed_status_thread (struct thread_info *tp)
1722 {
1723 if (debug_infrun)
1724 fprintf_unfiltered (gdb_stdlog,
1725 "infrun: clear_proceed_status_thread (%s)\n",
1726 target_pid_to_str (tp->ptid));
1727
1728 tp->trap_expected = 0;
1729 tp->step_range_start = 0;
1730 tp->step_range_end = 0;
1731 tp->step_frame_id = null_frame_id;
1732 tp->step_stack_frame_id = null_frame_id;
1733 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1734 tp->stop_requested = 0;
1735
1736 tp->stop_step = 0;
1737
1738 tp->proceed_to_finish = 0;
1739
1740 /* Discard any remaining commands or status from previous stop. */
1741 bpstat_clear (&tp->stop_bpstat);
1742 }
1743
1744 static int
1745 clear_proceed_status_callback (struct thread_info *tp, void *data)
1746 {
1747 if (is_exited (tp->ptid))
1748 return 0;
1749
1750 clear_proceed_status_thread (tp);
1751 return 0;
1752 }
1753
1754 void
1755 clear_proceed_status (void)
1756 {
1757 if (!non_stop)
1758 {
1759 /* In all-stop mode, delete the per-thread status of all
1760 threads. Even if inferior_ptid is null_ptid, there may be
1761 threads on the list. E.g., we may be launching a new
1762 process while selecting the executable. */
1763 iterate_over_threads (clear_proceed_status_callback, NULL);
1764 }
1765
1766 if (!ptid_equal (inferior_ptid, null_ptid))
1767 {
1768 struct inferior *inferior;
1769
1770 if (non_stop)
1771 {
1772 /* If in non-stop mode, only delete the per-thread status of
1773 the current thread. */
1774 clear_proceed_status_thread (inferior_thread ());
1775 }
1776
1777 inferior = current_inferior ();
1778 inferior->stop_soon = NO_STOP_QUIETLY;
1779 }
1780
1781 stop_after_trap = 0;
1782
1783 observer_notify_about_to_proceed ();
1784
1785 if (stop_registers)
1786 {
1787 regcache_xfree (stop_registers);
1788 stop_registers = NULL;
1789 }
1790 }
1791
1792 /* Check the current thread against the thread that reported the most recent
1793 event. If a step-over is required, return TRUE and set the current thread
1794 to the old thread. Otherwise return FALSE.
1795
1796 This should be suitable for any targets that support threads. */
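/* For example (an illustrative scenario only): thread 2 reports a
   breakpoint hit, the user selects thread 3 and types "continue".
   Resuming directly would make thread 2 re-report the same breakpoint
   without executing anything, so we switch back to thread 2 and tell
   the caller to step it over the breakpoint first; if the user was
   stepping, deferred_step_ptid remembers the originally selected
   thread. */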
1797
1798 static int
1799 prepare_to_proceed (int step)
1800 {
1801 ptid_t wait_ptid;
1802 struct target_waitstatus wait_status;
1803 int schedlock_enabled;
1804
1805 /* With non-stop mode on, threads are always handled individually. */
1806 gdb_assert (! non_stop);
1807
1808 /* Get the last target status returned by target_wait(). */
1809 get_last_target_status (&wait_ptid, &wait_status);
1810
1811 /* Make sure we were stopped at a breakpoint. */
1812 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1813 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1814 && wait_status.value.sig != TARGET_SIGNAL_ILL
1815 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1816 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1817 {
1818 return 0;
1819 }
1820
1821 schedlock_enabled = (scheduler_mode == schedlock_on
1822 || (scheduler_mode == schedlock_step
1823 && step));
1824
1825 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1826 if (schedlock_enabled)
1827 return 0;
1828
1829 /* Don't switch over if we're about to resume some other process
1830 other than WAIT_PTID's, and schedule-multiple is off. */
1831 if (!sched_multi
1832 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1833 return 0;
1834
1835 /* Switched over from WAIT_PTID. */
1836 if (!ptid_equal (wait_ptid, minus_one_ptid)
1837 && !ptid_equal (inferior_ptid, wait_ptid))
1838 {
1839 struct regcache *regcache = get_thread_regcache (wait_ptid);
1840
1841 if (breakpoint_here_p (get_regcache_aspace (regcache),
1842 regcache_read_pc (regcache)))
1843 {
1844 /* If stepping, remember current thread to switch back to. */
1845 if (step)
1846 deferred_step_ptid = inferior_ptid;
1847
1848 /* Switch back to the WAIT_PTID thread. */
1849 switch_to_thread (wait_ptid);
1850
1851 /* We return 1 to indicate that there is a breakpoint here,
1852 so we need to step over it before continuing to avoid
1853 hitting it straight away. */
1854 return 1;
1855 }
1856 }
1857
1858 return 0;
1859 }
1860
1861 /* Basic routine for continuing the program in various fashions.
1862
1863 ADDR is the address to resume at, or -1 for resume where stopped.
1864 SIGGNAL is the signal to give it, or 0 for none,
1865 or -1 for act according to how it stopped.
1866 STEP is nonzero if we should trap after one instruction.
1867 -1 means return after that and print nothing.
1868 You should probably set various step_... variables
1869 before calling here, if you are stepping.
1870
1871 You should call clear_proceed_status before calling proceed. */
1872
1873 void
1874 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1875 {
1876 struct regcache *regcache;
1877 struct gdbarch *gdbarch;
1878 struct thread_info *tp;
1879 CORE_ADDR pc;
1880 struct address_space *aspace;
1881 int oneproc = 0;
1882
1883 /* If we're stopped at a fork/vfork, follow the branch set by the
1884 "set follow-fork-mode" command; otherwise, we'll just proceed
1885 resuming the current thread. */
1886 if (!follow_fork ())
1887 {
1888 /* The target for some reason decided not to resume. */
1889 normal_stop ();
1890 return;
1891 }
1892
1893 regcache = get_current_regcache ();
1894 gdbarch = get_regcache_arch (regcache);
1895 aspace = get_regcache_aspace (regcache);
1896 pc = regcache_read_pc (regcache);
1897
1898 if (step > 0)
1899 step_start_function = find_pc_function (pc);
1900 if (step < 0)
1901 stop_after_trap = 1;
1902
1903 if (addr == (CORE_ADDR) -1)
1904 {
1905 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1906 && execution_direction != EXEC_REVERSE)
1907 /* There is a breakpoint at the address we will resume at,
1908 step one instruction before inserting breakpoints so that
1909 we do not stop right away (and report a second hit at this
1910 breakpoint).
1911
1912 Note, we don't do this in reverse, because we won't
1913 actually be executing the breakpoint insn anyway.
1914 We'll be (un-)executing the previous instruction. */
1915
1916 oneproc = 1;
1917 else if (gdbarch_single_step_through_delay_p (gdbarch)
1918 && gdbarch_single_step_through_delay (gdbarch,
1919 get_current_frame ()))
1920 /* We stepped onto an instruction that needs to be stepped
1921 again before re-inserting the breakpoint; do so. */
1922 oneproc = 1;
1923 }
1924 else
1925 {
1926 regcache_write_pc (regcache, addr);
1927 }
1928
1929 if (debug_infrun)
1930 fprintf_unfiltered (gdb_stdlog,
1931 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1932 paddress (gdbarch, addr), siggnal, step);
1933
1934 /* We're handling a live event, so make sure we're doing live
1935 debugging. If we're looking at traceframes while the target is
1936 running, we're going to need to get back to that mode after
1937 handling the event. */
1938 if (non_stop)
1939 {
1940 make_cleanup_restore_current_traceframe ();
1941 set_traceframe_number (-1);
1942 }
1943
1944 if (non_stop)
1945 /* In non-stop, each thread is handled individually. The context
1946 must already be set to the right thread here. */
1947 ;
1948 else
1949 {
1950 /* In a multi-threaded task we may select another thread and
1951 then continue or step.
1952
1953 But if the old thread was stopped at a breakpoint, it will
1954 immediately cause another breakpoint stop without any
1955 execution (i.e. it will report a breakpoint hit incorrectly).
1956 So we must step over it first.
1957
1958 prepare_to_proceed checks the current thread against the
1959 thread that reported the most recent event. If a step-over
1960 is required it returns TRUE and sets the current thread to
1961 the old thread. */
1962 if (prepare_to_proceed (step))
1963 oneproc = 1;
1964 }
1965
1966 /* prepare_to_proceed may change the current thread. */
1967 tp = inferior_thread ();
1968
1969 if (oneproc)
1970 {
1971 tp->trap_expected = 1;
1972 /* If displaced stepping is enabled, we can step over the
1973 breakpoint without hitting it, so leave all breakpoints
1974 inserted. Otherwise we need to disable all breakpoints, step
1975 one instruction, and then re-add them when that step is
1976 finished. */
1977 if (!use_displaced_stepping (gdbarch))
1978 remove_breakpoints ();
1979 }
1980
1981 /* We can insert breakpoints if we're not trying to step over one,
1982 or if we are stepping over one but we're using displaced stepping
1983 to do so. */
1984 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1985 insert_breakpoints ();
1986
1987 if (!non_stop)
1988 {
1989 /* Pass the last stop signal to the thread we're resuming,
1990 irrespective of whether the current thread is the thread that
1991 got the last event or not. This was historically GDB's
1992 behaviour before keeping a stop_signal per thread. */
1993
1994 struct thread_info *last_thread;
1995 ptid_t last_ptid;
1996 struct target_waitstatus last_status;
1997
1998 get_last_target_status (&last_ptid, &last_status);
1999 if (!ptid_equal (inferior_ptid, last_ptid)
2000 && !ptid_equal (last_ptid, null_ptid)
2001 && !ptid_equal (last_ptid, minus_one_ptid))
2002 {
2003 last_thread = find_thread_ptid (last_ptid);
2004 if (last_thread)
2005 {
2006 tp->stop_signal = last_thread->stop_signal;
2007 last_thread->stop_signal = TARGET_SIGNAL_0;
2008 }
2009 }
2010 }
2011
2012 if (siggnal != TARGET_SIGNAL_DEFAULT)
2013 tp->stop_signal = siggnal;
2014 /* If this signal should not be seen by the program,
2015 give it zero. Used for debugging signals. */
2016 else if (!signal_program[tp->stop_signal])
2017 tp->stop_signal = TARGET_SIGNAL_0;
2018
2019 annotate_starting ();
2020
2021 /* Make sure that output from GDB appears before output from the
2022 inferior. */
2023 gdb_flush (gdb_stdout);
2024
2025 /* Refresh prev_pc value just prior to resuming. This used to be
2026 done in stop_stepping, however, setting prev_pc there did not handle
2027 scenarios such as inferior function calls or returning from
2028 a function via the return command. In those cases, the prev_pc
2029 value was not set properly for subsequent commands. The prev_pc value
2030 is used to initialize the starting line number in the ecs. With an
2031 invalid value, the gdb next command ends up stopping at the position
2032 represented by the next line table entry past our start position.
2033 On platforms that generate one line table entry per line, this
2034 is not a problem. However, on the ia64, the compiler generates
2035 extraneous line table entries that do not increase the line number.
2036 When we issue the gdb next command on the ia64 after an inferior call
2037 or a return command, we often end up a few instructions forward, still
2038 within the original line we started in.
2039
2040 An attempt was made to refresh the prev_pc at the same time the
2041 execution_control_state is initialized (for instance, just before
2042 waiting for an inferior event). But this approach did not work
2043 because of platforms that use ptrace, where the pc register cannot
2044 be read unless the inferior is stopped. At that point, we are not
2045 guaranteed the inferior is stopped and so the regcache_read_pc() call
2046 can fail. Setting the prev_pc value here ensures the value is updated
2047 correctly when the inferior is stopped. */
2048 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2049
2050 /* Fill in with reasonable starting values. */
2051 init_thread_stepping_state (tp);
2052
2053 /* Reset to normal state. */
2054 init_infwait_state ();
2055
2056 /* Resume inferior. */
2057 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
2058
2059 /* Wait for it to stop (if not standalone)
2060 and in any case decode why it stopped, and act accordingly. */
2061 /* Do this only if we are not using the event loop, or if the target
2062 does not support asynchronous execution. */
2063 if (!target_can_async_p ())
2064 {
2065 wait_for_inferior (0);
2066 normal_stop ();
2067 }
2068 }
2069 \f
2070
2071 /* Start remote-debugging of a machine over a serial link. */
2072
2073 void
2074 start_remote (int from_tty)
2075 {
2076 struct inferior *inferior;
2077
2078 init_wait_for_inferior ();
2079 inferior = current_inferior ();
2080 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2081
2082 /* Always go on waiting for the target, regardless of the mode. */
2083 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2084 indicate to wait_for_inferior that a target should timeout if
2085 nothing is returned (instead of just blocking). Because of this,
2086 targets expecting an immediate response need to, internally, set
2087 things up so that the target_wait() is forced to eventually
2088 timeout. */
2089 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2090 differentiate to its caller what the state of the target is after
2091 the initial open has been performed. Here we're assuming that
2092 the target has stopped. It should be possible to eventually have
2093 target_open() return to the caller an indication that the target
2094 is currently running and GDB state should be set to the same as
2095 for an async run. */
2096 wait_for_inferior (0);
2097
2098 /* Now that the inferior has stopped, do any bookkeeping like
2099 loading shared libraries. We want to do this before normal_stop,
2100 so that the displayed frame is up to date. */
2101 post_create_inferior (&current_target, from_tty);
2102
2103 normal_stop ();
2104 }
2105
2106 /* Initialize static vars when a new inferior begins. */
2107
2108 void
2109 init_wait_for_inferior (void)
2110 {
2111 /* These are meaningless until the first time through wait_for_inferior. */
2112
2113 breakpoint_init_inferior (inf_starting);
2114
2115 clear_proceed_status ();
2116
2117 stepping_past_singlestep_breakpoint = 0;
2118 deferred_step_ptid = null_ptid;
2119
2120 target_last_wait_ptid = minus_one_ptid;
2121
2122 previous_inferior_ptid = null_ptid;
2123 init_infwait_state ();
2124
2125 /* Discard any skipped inlined frames. */
2126 clear_inline_frame_state (minus_one_ptid);
2127 }
2128
2129 \f
2130 /* This enum encodes possible reasons for doing a target_wait, so that
2131 wfi can call target_wait in one place. (Ultimately the call will be
2132 moved out of the infinite loop entirely.) */
2133
2134 enum infwait_states
2135 {
2136 infwait_normal_state,
2137 infwait_thread_hop_state,
2138 infwait_step_watch_state,
2139 infwait_nonstep_watch_state
2140 };
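/* A rough gloss on these states, inferred from how the switch in
   handle_inferior_event below treats them: infwait_normal_state is the
   default; infwait_thread_hop_state means we are waiting for a thread
   to finish hopping over a breakpoint that got in another thread's
   way; the two *_watch_state values mean we single-stepped one
   instruction so that a just-triggered watchpoint's access could
   complete, with the "nonstep" variant additionally needing
   breakpoints re-inserted once the step finishes. */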
2141
2142 /* Why did the inferior stop? Used to print the appropriate messages
2143 to the interface from within handle_inferior_event(). */
2144 enum inferior_stop_reason
2145 {
2146 /* Step, next, nexti, stepi finished. */
2147 END_STEPPING_RANGE,
2148 /* Inferior terminated by signal. */
2149 SIGNAL_EXITED,
2150 /* Inferior exited. */
2151 EXITED,
2152 /* Inferior received signal, and user asked to be notified. */
2153 SIGNAL_RECEIVED,
2154 /* Reverse execution -- target ran out of history info. */
2155 NO_HISTORY
2156 };
2157
2158 /* The PTID we'll do a target_wait on. */
2159 ptid_t waiton_ptid;
2160
2161 /* Current inferior wait state. */
2162 enum infwait_states infwait_state;
2163
2164 /* Data to be passed around while handling an event. This data is
2165 discarded between events. */
2166 struct execution_control_state
2167 {
2168 ptid_t ptid;
2169 /* The thread that got the event, if this was a thread event; NULL
2170 otherwise. */
2171 struct thread_info *event_thread;
2172
2173 struct target_waitstatus ws;
2174 int random_signal;
2175 CORE_ADDR stop_func_start;
2176 CORE_ADDR stop_func_end;
2177 char *stop_func_name;
2178 int new_thread_event;
2179 int wait_some_more;
2180 };
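/* A minimal sketch of the lifecycle of one of these, following the
   pattern used by wait_for_inferior and fetch_inferior_event below:

       struct execution_control_state ecss;
       struct execution_control_state *ecs = &ecss;

       memset (ecs, 0, sizeof (*ecs));
       ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
       handle_inferior_event (ecs);
       if (!ecs->wait_some_more)
         normal_stop ();

   i.e. zero it, fill it in from one target_wait, hand it to
   handle_inferior_event, and then either loop for more events or stop. */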
2181
2182 static void handle_inferior_event (struct execution_control_state *ecs);
2183
2184 static void handle_step_into_function (struct gdbarch *gdbarch,
2185 struct execution_control_state *ecs);
2186 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2187 struct execution_control_state *ecs);
2188 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2189 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2190 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2191 struct symtab_and_line sr_sal,
2192 struct frame_id sr_id);
2193 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2194
2195 static void stop_stepping (struct execution_control_state *ecs);
2196 static void prepare_to_wait (struct execution_control_state *ecs);
2197 static void keep_going (struct execution_control_state *ecs);
2198 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2199 int stop_info);
2200
2201 /* Callback for iterate_over_threads. If the thread is stopped, but
2202 the user/frontend doesn't know about that yet, go through
2203 normal_stop, as if the thread had just stopped now. ARG points at
2204 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2205 ptid_is_pid(PTID) is true, applies to all threads of the process
2206 pointed at by PTID. Otherwise, applies only to the thread pointed
2207 at by PTID. */
2208
2209 static int
2210 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2211 {
2212 ptid_t ptid = * (ptid_t *) arg;
2213
2214 if ((ptid_equal (info->ptid, ptid)
2215 || ptid_equal (minus_one_ptid, ptid)
2216 || (ptid_is_pid (ptid)
2217 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2218 && is_running (info->ptid)
2219 && !is_executing (info->ptid))
2220 {
2221 struct cleanup *old_chain;
2222 struct execution_control_state ecss;
2223 struct execution_control_state *ecs = &ecss;
2224
2225 memset (ecs, 0, sizeof (*ecs));
2226
2227 old_chain = make_cleanup_restore_current_thread ();
2228
2229 switch_to_thread (info->ptid);
2230
2231 /* Go through handle_inferior_event/normal_stop, so we always
2232 have consistent output as if the stop event had been
2233 reported. */
2234 ecs->ptid = info->ptid;
2235 ecs->event_thread = find_thread_ptid (info->ptid);
2236 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2237 ecs->ws.value.sig = TARGET_SIGNAL_0;
2238
2239 handle_inferior_event (ecs);
2240
2241 if (!ecs->wait_some_more)
2242 {
2243 struct thread_info *tp;
2244
2245 normal_stop ();
2246
2247 /* Finish off the continuations. The continuations
2248 themselves are responsible for realising the thread
2249 didn't finish what it was supposed to do. */
2250 tp = inferior_thread ();
2251 do_all_intermediate_continuations_thread (tp);
2252 do_all_continuations_thread (tp);
2253 }
2254
2255 do_cleanups (old_chain);
2256 }
2257
2258 return 0;
2259 }
2260
2261 /* This function is attached as a "thread_stop_requested" observer.
2262 Clean up local state that assumed the PTID was to be resumed, and
2263 report the stop to the frontend. */
2264
2265 static void
2266 infrun_thread_stop_requested (ptid_t ptid)
2267 {
2268 struct displaced_step_inferior_state *displaced;
2269
2270 /* PTID was requested to stop. Remove it from the displaced
2271 stepping queue, so we don't try to resume it automatically. */
2272
2273 for (displaced = displaced_step_inferior_states;
2274 displaced;
2275 displaced = displaced->next)
2276 {
2277 struct displaced_step_request *it, **prev_next_p;
2278
2279 it = displaced->step_request_queue;
2280 prev_next_p = &displaced->step_request_queue;
2281 while (it)
2282 {
2283 if (ptid_match (it->ptid, ptid))
2284 {
2285 *prev_next_p = it->next;
2286 it->next = NULL;
2287 xfree (it);
2288 }
2289 else
2290 {
2291 prev_next_p = &it->next;
2292 }
2293
2294 it = *prev_next_p;
2295 }
2296 }
2297
2298 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2299 }
2300
2301 static void
2302 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2303 {
2304 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2305 nullify_last_target_wait_ptid ();
2306 }
2307
2308 /* Callback for iterate_over_threads. */
2309
2310 static int
2311 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2312 {
2313 if (is_exited (info->ptid))
2314 return 0;
2315
2316 delete_step_resume_breakpoint (info);
2317 return 0;
2318 }
2319
2320 /* In all-stop, delete the step resume breakpoint of any thread that
2321 had one. In non-stop, delete the step resume breakpoint of the
2322 thread that just stopped. */
2323
2324 static void
2325 delete_step_thread_step_resume_breakpoint (void)
2326 {
2327 if (!target_has_execution
2328 || ptid_equal (inferior_ptid, null_ptid))
2329 /* If the inferior has exited, we have already deleted the step
2330 resume breakpoints out of GDB's lists. */
2331 return;
2332
2333 if (non_stop)
2334 {
2335 /* If in non-stop mode, only delete the step-resume or
2336 longjmp-resume breakpoint of the thread that just stopped
2337 stepping. */
2338 struct thread_info *tp = inferior_thread ();
2339
2340 delete_step_resume_breakpoint (tp);
2341 }
2342 else
2343 /* In all-stop mode, delete all step-resume and longjmp-resume
2344 breakpoints of any thread that had them. */
2345 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2346 }
2347
2348 /* A cleanup wrapper. */
2349
2350 static void
2351 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2352 {
2353 delete_step_thread_step_resume_breakpoint ();
2354 }
2355
2356 /* Pretty print the results of target_wait, for debugging purposes. */
2357
2358 static void
2359 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2360 const struct target_waitstatus *ws)
2361 {
2362 char *status_string = target_waitstatus_to_string (ws);
2363 struct ui_file *tmp_stream = mem_fileopen ();
2364 char *text;
2365
2366 /* The text is split over several lines because it was getting too long.
2367 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2368 output as a unit; we want only one timestamp printed if debug_timestamp
2369 is set. */
2370
2371 fprintf_unfiltered (tmp_stream,
2372 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2373 if (PIDGET (waiton_ptid) != -1)
2374 fprintf_unfiltered (tmp_stream,
2375 " [%s]", target_pid_to_str (waiton_ptid));
2376 fprintf_unfiltered (tmp_stream, ", status) =\n");
2377 fprintf_unfiltered (tmp_stream,
2378 "infrun: %d [%s],\n",
2379 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2380 fprintf_unfiltered (tmp_stream,
2381 "infrun: %s\n",
2382 status_string);
2383
2384 text = ui_file_xstrdup (tmp_stream, NULL);
2385
2386 /* This uses %s in part to handle %'s in the text, but also to avoid
2387 a gcc error: the format attribute requires a string literal. */
2388 fprintf_unfiltered (gdb_stdlog, "%s", text);
2389
2390 xfree (status_string);
2391 xfree (text);
2392 ui_file_delete (tmp_stream);
2393 }
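/* With "set debug infrun 1", the calls above combine into output along
   these lines (the PID, thread name and status shown are illustrative
   only):

       infrun: target_wait (-1, status) =
       infrun: 4712 [Thread 0xb7fe8b70 (LWP 4712)],
       infrun: status->kind = stopped, signal = SIGTRAP  */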
2394
2395 /* Prepare and stabilize the inferior for detaching it. E.g.,
2396 detaching while a thread is displaced stepping is a recipe for
2397 crashing it, as nothing would readjust the PC out of the scratch
2398 pad. */
2399
2400 void
2401 prepare_for_detach (void)
2402 {
2403 struct inferior *inf = current_inferior ();
2404 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2405 struct cleanup *old_chain_1;
2406 struct displaced_step_inferior_state *displaced;
2407
2408 displaced = get_displaced_stepping_state (inf->pid);
2409
2410 /* Is any thread of this process displaced stepping? If not,
2411 there's nothing else to do. */
2412 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2413 return;
2414
2415 if (debug_infrun)
2416 fprintf_unfiltered (gdb_stdlog,
2417 "displaced-stepping in-process while detaching");
2418
2419 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2420 inf->detaching = 1;
2421
2422 while (!ptid_equal (displaced->step_ptid, null_ptid))
2423 {
2424 struct cleanup *old_chain_2;
2425 struct execution_control_state ecss;
2426 struct execution_control_state *ecs;
2427
2428 ecs = &ecss;
2429 memset (ecs, 0, sizeof (*ecs));
2430
2431 overlay_cache_invalid = 1;
2432
2433 /* We have to invalidate the registers BEFORE calling
2434 target_wait because they can be loaded from the target while
2435 in target_wait. This makes remote debugging a bit more
2436 efficient for those targets that provide critical registers
2437 as part of their normal status mechanism. */
2438
2439 registers_changed ();
2440
2441 if (deprecated_target_wait_hook)
2442 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2443 else
2444 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2445
2446 if (debug_infrun)
2447 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2448
2449 /* If an error happens while handling the event, propagate GDB's
2450 knowledge of the executing state to the frontend/user running
2451 state. */
2452 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2453
2454 /* In non-stop mode, each thread is handled individually.
2455 Switch early, so the global state is set correctly for this
2456 thread. */
2457 if (non_stop
2458 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2459 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2460 context_switch (ecs->ptid);
2461
2462 /* Now figure out what to do with the result. */
2463 handle_inferior_event (ecs);
2464
2465 /* No error, don't finish the state yet. */
2466 discard_cleanups (old_chain_2);
2467
2468 /* Breakpoints and watchpoints are not installed on the target
2469 at this point, and signals are passed directly to the
2470 inferior, so this must mean the process is gone. */
2471 if (!ecs->wait_some_more)
2472 {
2473 discard_cleanups (old_chain_1);
2474 error (_("Program exited while detaching"));
2475 }
2476 }
2477
2478 discard_cleanups (old_chain_1);
2479 }
2480
2481 /* Wait for control to return from inferior to debugger.
2482
2483 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2484 as if they were SIGTRAP signals. This can be useful during
2485 the startup sequence on some targets such as HP/UX, where
2486 we receive an EXEC event instead of the expected SIGTRAP.
2487
2488 If the inferior gets a signal, we may decide to start it up again
2489 instead of returning. That is why there is a loop in this function.
2490 When this function actually returns it means the inferior
2491 should be left stopped and GDB should read more commands. */
2492
2493 void
2494 wait_for_inferior (int treat_exec_as_sigtrap)
2495 {
2496 struct cleanup *old_cleanups;
2497 struct execution_control_state ecss;
2498 struct execution_control_state *ecs;
2499
2500 if (debug_infrun)
2501 fprintf_unfiltered
2502 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2503 treat_exec_as_sigtrap);
2504
2505 old_cleanups =
2506 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2507
2508 ecs = &ecss;
2509 memset (ecs, 0, sizeof (*ecs));
2510
2511 /* We'll update this if & when we switch to a new thread. */
2512 previous_inferior_ptid = inferior_ptid;
2513
2514 while (1)
2515 {
2516 struct cleanup *old_chain;
2517
2518 /* We have to invalidate the registers BEFORE calling target_wait
2519 because they can be loaded from the target while in target_wait.
2520 This makes remote debugging a bit more efficient for those
2521 targets that provide critical registers as part of their normal
2522 status mechanism. */
2523
2524 overlay_cache_invalid = 1;
2525 registers_changed ();
2526
2527 if (deprecated_target_wait_hook)
2528 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2529 else
2530 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2531
2532 if (debug_infrun)
2533 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2534
2535 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2536 {
2537 xfree (ecs->ws.value.execd_pathname);
2538 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2539 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2540 }
2541
2542 /* If an error happens while handling the event, propagate GDB's
2543 knowledge of the executing state to the frontend/user running
2544 state. */
2545 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2546
2547 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2548 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2549 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2550
2551 /* Now figure out what to do with the result. */
2552 handle_inferior_event (ecs);
2553
2554 /* No error, don't finish the state yet. */
2555 discard_cleanups (old_chain);
2556
2557 if (!ecs->wait_some_more)
2558 break;
2559 }
2560
2561 do_cleanups (old_cleanups);
2562 }
2563
2564 /* Asynchronous version of wait_for_inferior. It is called by the
2565 event loop whenever a change of state is detected on the file
2566 descriptor corresponding to the target. It can be called more than
2567 once to complete a single execution command. In such cases we need
2568 to keep the state in a global variable ECSS. If it is the last time
2569 that this function is called for a single execution command, then
2570 report to the user that the inferior has stopped, and do the
2571 necessary cleanups. */
2572
2573 void
2574 fetch_inferior_event (void *client_data)
2575 {
2576 struct execution_control_state ecss;
2577 struct execution_control_state *ecs = &ecss;
2578 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2579 struct cleanup *ts_old_chain;
2580 int was_sync = sync_execution;
2581
2582 memset (ecs, 0, sizeof (*ecs));
2583
2584 /* We'll update this if & when we switch to a new thread. */
2585 previous_inferior_ptid = inferior_ptid;
2586
2587 if (non_stop)
2588 /* In non-stop mode, the user/frontend should not notice a thread
2589 switch due to internal events. Make sure we revert to the
2590 user-selected thread and frame after handling the event and
2591 running any breakpoint commands. */
2592 make_cleanup_restore_current_thread ();
2593
2594 /* We have to invalidate the registers BEFORE calling target_wait
2595 because they can be loaded from the target while in target_wait.
2596 This makes remote debugging a bit more efficient for those
2597 targets that provide critical registers as part of their normal
2598 status mechanism. */
2599
2600 overlay_cache_invalid = 1;
2601 registers_changed ();
2602
2603 if (deprecated_target_wait_hook)
2604 ecs->ptid =
2605 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2606 else
2607 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2608
2609 if (debug_infrun)
2610 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2611
2612 if (non_stop
2613 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2614 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2615 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2616 /* In non-stop mode, each thread is handled individually. Switch
2617 early, so the global state is set correctly for this
2618 thread. */
2619 context_switch (ecs->ptid);
2620
2621 /* If an error happens while handling the event, propagate GDB's
2622 knowledge of the executing state to the frontend/user running
2623 state. */
2624 if (!non_stop)
2625 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2626 else
2627 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2628
2629 /* Now figure out what to do with the result. */
2630 handle_inferior_event (ecs);
2631
2632 if (!ecs->wait_some_more)
2633 {
2634 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2635
2636 delete_step_thread_step_resume_breakpoint ();
2637
2638 /* We may not find an inferior if this was a process exit. */
2639 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2640 normal_stop ();
2641
2642 if (target_has_execution
2643 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2644 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2645 && ecs->event_thread->step_multi
2646 && ecs->event_thread->stop_step)
2647 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2648 else
2649 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2650 }
2651
2652 /* No error, don't finish the thread states yet. */
2653 discard_cleanups (ts_old_chain);
2654
2655 /* Revert thread and frame. */
2656 do_cleanups (old_chain);
2657
2658 /* If the inferior was in sync execution mode, and now isn't,
2659 restore the prompt. */
2660 if (was_sync && !sync_execution)
2661 display_gdb_prompt (0);
2662 }
2663
2664 /* Record the frame and location we're currently stepping through. */
2665 void
2666 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2667 {
2668 struct thread_info *tp = inferior_thread ();
2669
2670 tp->step_frame_id = get_frame_id (frame);
2671 tp->step_stack_frame_id = get_stack_frame_id (frame);
2672
2673 tp->current_symtab = sal.symtab;
2674 tp->current_line = sal.line;
2675 }
2676
2677 /* Clear context switchable stepping state. */
2678
2679 void
2680 init_thread_stepping_state (struct thread_info *tss)
2681 {
2682 tss->stepping_over_breakpoint = 0;
2683 tss->step_after_step_resume_breakpoint = 0;
2684 tss->stepping_through_solib_after_catch = 0;
2685 tss->stepping_through_solib_catchpoints = NULL;
2686 }
2687
2688 /* Return the cached copy of the last pid/waitstatus returned by
2689 target_wait()/deprecated_target_wait_hook(). The data is actually
2690 cached by handle_inferior_event(), which gets called immediately
2691 after target_wait()/deprecated_target_wait_hook(). */
2692
2693 void
2694 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2695 {
2696 *ptidp = target_last_wait_ptid;
2697 *status = target_last_waitstatus;
2698 }
2699
2700 void
2701 nullify_last_target_wait_ptid (void)
2702 {
2703 target_last_wait_ptid = minus_one_ptid;
2704 }
2705
2706 /* Switch thread contexts. */
2707
2708 static void
2709 context_switch (ptid_t ptid)
2710 {
2711 if (debug_infrun)
2712 {
2713 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2714 target_pid_to_str (inferior_ptid));
2715 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2716 target_pid_to_str (ptid));
2717 }
2718
2719 switch_to_thread (ptid);
2720 }
2721
2722 static void
2723 adjust_pc_after_break (struct execution_control_state *ecs)
2724 {
2725 struct regcache *regcache;
2726 struct gdbarch *gdbarch;
2727 struct address_space *aspace;
2728 CORE_ADDR breakpoint_pc;
2729
2730 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2731 we aren't, just return.
2732
2733 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2734 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2735 implemented by software breakpoints should be handled through the normal
2736 breakpoint layer.
2737
2738 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2739 different signals (SIGILL or SIGEMT for instance), but it is less
2740 clear where the PC is pointing afterwards. It may not match
2741 gdbarch_decr_pc_after_break. I don't know any specific target that
2742 generates these signals at breakpoints (the code has been in GDB since at
2743 least 1992) so I can not guess how to handle them here.
2744
2745 In earlier versions of GDB, a target with
2746 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2747 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2748 target with both of these set in GDB history, and it seems unlikely to be
2749 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2750
2751 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2752 return;
2753
2754 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2755 return;
2756
2757 /* In reverse execution, when a breakpoint is hit, the instruction
2758 under it has already been de-executed. The reported PC always
2759 points at the breakpoint address, so adjusting it further would
2760 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2761 architecture:
2762
2763 B1 0x08000000 : INSN1
2764 B2 0x08000001 : INSN2
2765 0x08000002 : INSN3
2766 PC -> 0x08000003 : INSN4
2767
2768 Say you're stopped at 0x08000003 as above. Reverse continuing
2769 from that point should hit B2 as below. Reading the PC when the
2770 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2771 been de-executed already.
2772
2773 B1 0x08000000 : INSN1
2774 B2 PC -> 0x08000001 : INSN2
2775 0x08000002 : INSN3
2776 0x08000003 : INSN4
2777
2778 We can't apply the same logic as for forward execution, because
2779 we would wrongly adjust the PC to 0x08000000, since there's a
2780 breakpoint at PC - 1. We'd then report a hit on B1, although
2781 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2782 behaviour. */
2783 if (execution_direction == EXEC_REVERSE)
2784 return;
2785
2786 /* If this target does not decrement the PC after breakpoints, then
2787 we have nothing to do. */
2788 regcache = get_thread_regcache (ecs->ptid);
2789 gdbarch = get_regcache_arch (regcache);
2790 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2791 return;
2792
2793 aspace = get_regcache_aspace (regcache);
2794
2795 /* Find the location where (if we've hit a breakpoint) the
2796 breakpoint would be. */
2797 breakpoint_pc = regcache_read_pc (regcache)
2798 - gdbarch_decr_pc_after_break (gdbarch);
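/* For example, on a target whose software breakpoint is a one-byte
   trap instruction and whose gdbarch_decr_pc_after_break is 1 (x86 is
   the classic case), a breakpoint planted at 0x08000000 makes the
   target report SIGTRAP with PC == 0x08000001; BREAKPOINT_PC is then
   0x08000000, which is where we look for an inserted breakpoint below
   and where we rewind the PC to if we find one. */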
2799
2800 /* Check whether there actually is a software breakpoint inserted at
2801 that location.
2802
2803 If in non-stop mode, a race condition is possible where we've
2804 removed a breakpoint, but stop events for that breakpoint were
2805 already queued and arrive later. To suppress those spurious
2806 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2807 and retire them after a number of stop events are reported. */
2808 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2809 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2810 {
2811 struct cleanup *old_cleanups = NULL;
2812
2813 if (RECORD_IS_USED)
2814 old_cleanups = record_gdb_operation_disable_set ();
2815
2816 /* When using hardware single-step, a SIGTRAP is reported for both
2817 a completed single-step and a software breakpoint. Need to
2818 differentiate between the two, as the latter needs adjusting
2819 but the former does not.
2820
2821 The SIGTRAP can be due to a completed hardware single-step only if
2822 - we didn't insert software single-step breakpoints
2823 - the thread to be examined is still the current thread
2824 - this thread is currently being stepped
2825
2826 If any of these events did not occur, we must have stopped due
2827 to hitting a software breakpoint, and have to back up to the
2828 breakpoint address.
2829
2830 As a special case, we could have hardware single-stepped a
2831 software breakpoint. In this case (prev_pc == breakpoint_pc),
2832 we also need to back up to the breakpoint address. */
2833
2834 if (singlestep_breakpoints_inserted_p
2835 || !ptid_equal (ecs->ptid, inferior_ptid)
2836 || !currently_stepping (ecs->event_thread)
2837 || ecs->event_thread->prev_pc == breakpoint_pc)
2838 regcache_write_pc (regcache, breakpoint_pc);
2839
2840 if (RECORD_IS_USED)
2841 do_cleanups (old_cleanups);
2842 }
2843 }
2844
2845 void
2846 init_infwait_state (void)
2847 {
2848 waiton_ptid = pid_to_ptid (-1);
2849 infwait_state = infwait_normal_state;
2850 }
2851
2852 void
2853 error_is_running (void)
2854 {
2855 error (_("\
2856 Cannot execute this command while the selected thread is running."));
2857 }
2858
2859 void
2860 ensure_not_running (void)
2861 {
2862 if (is_running (inferior_ptid))
2863 error_is_running ();
2864 }
2865
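/* Return non-zero if the frame with id STEP_FRAME_ID is an outer frame
   of FRAME, with only inline frames in between -- i.e., FRAME was
   stepped into from that frame via inlined calls. */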
2866 static int
2867 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2868 {
2869 for (frame = get_prev_frame (frame);
2870 frame != NULL;
2871 frame = get_prev_frame (frame))
2872 {
2873 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2874 return 1;
2875 if (get_frame_type (frame) != INLINE_FRAME)
2876 break;
2877 }
2878
2879 return 0;
2880 }
2881
2882 /* Auxiliary function that handles syscall entry/return events.
2883 It returns 1 if the inferior should keep going (and GDB
2884 should ignore the event), or 0 if the event deserves to be
2885 processed. */
2886
2887 static int
2888 handle_syscall_event (struct execution_control_state *ecs)
2889 {
2890 struct regcache *regcache;
2891 struct gdbarch *gdbarch;
2892 int syscall_number;
2893
2894 if (!ptid_equal (ecs->ptid, inferior_ptid))
2895 context_switch (ecs->ptid);
2896
2897 regcache = get_thread_regcache (ecs->ptid);
2898 gdbarch = get_regcache_arch (regcache);
2899 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2900 stop_pc = regcache_read_pc (regcache);
2901
2902 target_last_waitstatus.value.syscall_number = syscall_number;
2903
2904 if (catch_syscall_enabled () > 0
2905 && catching_syscall_number (syscall_number) > 0)
2906 {
2907 if (debug_infrun)
2908 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2909 syscall_number);
2910
2911 ecs->event_thread->stop_bpstat
2912 = bpstat_stop_status (get_regcache_aspace (regcache),
2913 stop_pc, ecs->ptid);
2914 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2915
2916 if (!ecs->random_signal)
2917 {
2918 /* Catchpoint hit. */
2919 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2920 return 0;
2921 }
2922 }
2923
2924 /* If no catchpoint triggered for this, then keep going. */
2925 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2926 keep_going (ecs);
2927 return 1;
2928 }
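/* For instance, after "catch syscall close", a stop at the entry or
   return of close() reaches the code above with catch_syscall_enabled ()
   and catching_syscall_number (syscall_number) both nonzero, so the
   event is reported to the user; any other syscall just falls through
   to keep_going. (A sketch of the user-visible flow only; mapping the
   name "close" to a syscall number is target dependent.) */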
2929
2930 /* Given an execution control state that has been freshly filled in
2931 by an event from the inferior, figure out what it means and take
2932 appropriate action. */
2933
2934 static void
2935 handle_inferior_event (struct execution_control_state *ecs)
2936 {
2937 struct frame_info *frame;
2938 struct gdbarch *gdbarch;
2939 int sw_single_step_trap_p = 0;
2940 int stopped_by_watchpoint;
2941 int stepped_after_stopped_by_watchpoint = 0;
2942 struct symtab_and_line stop_pc_sal;
2943 enum stop_kind stop_soon;
2944
2945 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2946 {
2947 /* We had an event in the inferior, but we are not interested in
2948 handling it at this level. The lower layers have already
2949 done what needs to be done, if anything.
2950
2951 One of the possible circumstances for this is when the
2952 inferior produces output for the console. The inferior has
2953 not stopped, and we are ignoring the event. Another possible
2954 circumstance is any event which the lower level knows will be
2955 reported multiple times without an intervening resume. */
2956 if (debug_infrun)
2957 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2958 prepare_to_wait (ecs);
2959 return;
2960 }
2961
2962 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2963 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2964 {
2965 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2966
2967 gdb_assert (inf);
2968 stop_soon = inf->stop_soon;
2969 }
2970 else
2971 stop_soon = NO_STOP_QUIETLY;
2972
2973 /* Cache the last pid/waitstatus. */
2974 target_last_wait_ptid = ecs->ptid;
2975 target_last_waitstatus = ecs->ws;
2976
2977 /* Always clear state belonging to the previous time we stopped. */
2978 stop_stack_dummy = STOP_NONE;
2979
2980 /* If it's a new process, add it to the thread database. */
2981
2982 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2983 && !ptid_equal (ecs->ptid, minus_one_ptid)
2984 && !in_thread_list (ecs->ptid));
2985
2986 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2987 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2988 add_thread (ecs->ptid);
2989
2990 ecs->event_thread = find_thread_ptid (ecs->ptid);
2991
2992 /* Dependent on valid ECS->EVENT_THREAD. */
2993 adjust_pc_after_break (ecs);
2994
2995 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2996 reinit_frame_cache ();
2997
2998 breakpoint_retire_moribund ();
2999
3000 /* First, distinguish signals caused by the debugger from signals
3001 that have to do with the program's own actions. Note that
3002 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3003 on the operating system version. Here we detect when a SIGILL or
3004 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3005 something similar for SIGSEGV, since a SIGSEGV will be generated
3006 when we're trying to execute a breakpoint instruction on a
3007 non-executable stack. This happens for call dummy breakpoints
3008 for architectures like SPARC that place call dummies on the
3009 stack. */
3010 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3011 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3012 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3013 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3014 {
3015 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3016
3017 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3018 regcache_read_pc (regcache)))
3019 {
3020 if (debug_infrun)
3021 fprintf_unfiltered (gdb_stdlog,
3022 "infrun: Treating signal as SIGTRAP\n");
3023 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3024 }
3025 }
3026
3027 /* Mark the non-executing threads accordingly. In all-stop, all
3028 threads of all processes are stopped when we get any event
3029 reported. In non-stop mode, only the event thread stops. If
3030 we're handling a process exit in non-stop mode, there's nothing
3031 to do, as threads of the dead process are gone, and threads of
3032 any other process were left running. */
3033 if (!non_stop)
3034 set_executing (minus_one_ptid, 0);
3035 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3036 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3037 set_executing (inferior_ptid, 0);
3038
3039 switch (infwait_state)
3040 {
3041 case infwait_thread_hop_state:
3042 if (debug_infrun)
3043 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3044 break;
3045
3046 case infwait_normal_state:
3047 if (debug_infrun)
3048 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3049 break;
3050
3051 case infwait_step_watch_state:
3052 if (debug_infrun)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "infrun: infwait_step_watch_state\n");
3055
3056 stepped_after_stopped_by_watchpoint = 1;
3057 break;
3058
3059 case infwait_nonstep_watch_state:
3060 if (debug_infrun)
3061 fprintf_unfiltered (gdb_stdlog,
3062 "infrun: infwait_nonstep_watch_state\n");
3063 insert_breakpoints ();
3064
3065 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3066 handle things like signals arriving and other things happening
3067 in combination correctly? */
3068 stepped_after_stopped_by_watchpoint = 1;
3069 break;
3070
3071 default:
3072 internal_error (__FILE__, __LINE__, _("bad switch"));
3073 }
3074
3075 infwait_state = infwait_normal_state;
3076 waiton_ptid = pid_to_ptid (-1);
3077
3078 switch (ecs->ws.kind)
3079 {
3080 case TARGET_WAITKIND_LOADED:
3081 if (debug_infrun)
3082 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3083 /* Ignore gracefully during startup of the inferior, as it might
3084 be the shell, which has just loaded some objects; otherwise
3085 add the symbols for the newly loaded objects. Also ignore at
3086 the beginning of an attach or remote session; we will query
3087 the full list of libraries once the connection is
3088 established. */
3089 if (stop_soon == NO_STOP_QUIETLY)
3090 {
3091 /* Check for any newly added shared libraries if we're
3092 supposed to be adding them automatically. Switch
3093 terminal for any messages produced by
3094 breakpoint_re_set. */
3095 target_terminal_ours_for_output ();
3096 /* NOTE: cagney/2003-11-25: Make certain that the target
3097 stack's section table is kept up-to-date. Architectures
3098 (e.g., PPC64) use the section table to perform
3099 operations such as address => section name and hence
3100 require the table to contain all sections (including
3101 those found in shared libraries). */
3102 #ifdef SOLIB_ADD
3103 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3104 #else
3105 solib_add (NULL, 0, &current_target, auto_solib_add);
3106 #endif
3107 target_terminal_inferior ();
3108
3109 /* If requested, stop when the dynamic linker notifies
3110 gdb of events. This allows the user to get control
3111 and place breakpoints in initializer routines for
3112 dynamically loaded objects (among other things). */
3113 if (stop_on_solib_events)
3114 {
3115 /* Make sure we print "Stopped due to solib-event" in
3116 normal_stop. */
3117 stop_print_frame = 1;
3118
3119 stop_stepping (ecs);
3120 return;
3121 }
3122
3123 /* NOTE drow/2007-05-11: This might be a good place to check
3124 for "catch load". */
3125 }
3126
3127 /* If we are skipping through a shell, or through shared library
3128 loading that we aren't interested in, resume the program. If
3129 we're running the program normally, also resume. But stop if
3130 we're attaching or setting up a remote connection. */
3131 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3132 {
3133 /* Loading of shared libraries might have changed breakpoint
3134 addresses. Make sure new breakpoints are inserted. */
3135 if (stop_soon == NO_STOP_QUIETLY
3136 && !breakpoints_always_inserted_mode ())
3137 insert_breakpoints ();
3138 resume (0, TARGET_SIGNAL_0);
3139 prepare_to_wait (ecs);
3140 return;
3141 }
3142
3143 break;
3144
3145 case TARGET_WAITKIND_SPURIOUS:
3146 if (debug_infrun)
3147 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3148 resume (0, TARGET_SIGNAL_0);
3149 prepare_to_wait (ecs);
3150 return;
3151
3152 case TARGET_WAITKIND_EXITED:
3153 if (debug_infrun)
3154 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3155 inferior_ptid = ecs->ptid;
3156 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3157 set_current_program_space (current_inferior ()->pspace);
3158 handle_vfork_child_exec_or_exit (0);
3159 target_terminal_ours (); /* Must do this before mourn anyway */
3160 print_stop_reason (EXITED, ecs->ws.value.integer);
3161
3162 /* Record the exit code in the convenience variable $_exitcode, so
3163 that the user can inspect this again later. */
3164 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3165 (LONGEST) ecs->ws.value.integer);
3166 gdb_flush (gdb_stdout);
3167 target_mourn_inferior ();
3168 singlestep_breakpoints_inserted_p = 0;
3169 cancel_single_step_breakpoints ();
3170 stop_print_frame = 0;
3171 stop_stepping (ecs);
3172 return;
3173
3174 case TARGET_WAITKIND_SIGNALLED:
3175 if (debug_infrun)
3176 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3177 inferior_ptid = ecs->ptid;
3178 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3179 set_current_program_space (current_inferior ()->pspace);
3180 handle_vfork_child_exec_or_exit (0);
3181 stop_print_frame = 0;
3182 target_terminal_ours (); /* Must do this before mourn anyway */
3183
3184 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3185 reach here unless the inferior is dead. However, for years
3186 target_kill() was called here, which hints that fatal signals aren't
3187 really fatal on some systems. If that's true, then some changes
3188 may be needed. */
3189 target_mourn_inferior ();
3190
3191 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3192 singlestep_breakpoints_inserted_p = 0;
3193 cancel_single_step_breakpoints ();
3194 stop_stepping (ecs);
3195 return;
3196
3197 /* The following are the only cases in which we keep going;
3198 the above cases end in a continue or goto. */
3199 case TARGET_WAITKIND_FORKED:
3200 case TARGET_WAITKIND_VFORKED:
3201 if (debug_infrun)
3202 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3203
3204 if (!ptid_equal (ecs->ptid, inferior_ptid))
3205 {
3206 context_switch (ecs->ptid);
3207 reinit_frame_cache ();
3208 }
3209
3210 /* Immediately detach breakpoints from the child before there's
3211 any chance of letting the user delete breakpoints from the
3212 breakpoint lists. If we don't do this early, it's easy to
3213 leave leftover traps in the child, viz: "break foo; catch
3214 fork; c; <fork>; del; c; <child calls foo>". We only follow
3215 the fork on the last `continue', and by that time the
3216 breakpoint at "foo" is long gone from the breakpoint table.
3217 If we vforked, then we don't need to unpatch here, since both
3218 parent and child are sharing the same memory pages; we'll
3219 need to unpatch at follow/detach time instead to be certain
3220 that new breakpoints added between catchpoint hit time and
3221 vfork follow are detached. */
3222 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3223 {
3224 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3225
3226 /* This won't actually modify the breakpoint list, but will
3227 physically remove the breakpoints from the child. */
3228 detach_breakpoints (child_pid);
3229 }
3230
3231 if (singlestep_breakpoints_inserted_p)
3232 {
3233 /* Pull the single step breakpoints out of the target. */
3234 remove_single_step_breakpoints ();
3235 singlestep_breakpoints_inserted_p = 0;
3236 }
3237
3238 /* In case the event is caught by a catchpoint, remember that
3239 the event is to be followed at the next resume of the thread,
3240 and not immediately. */
3241 ecs->event_thread->pending_follow = ecs->ws;
3242
3243 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3244
3245 ecs->event_thread->stop_bpstat
3246 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3247 stop_pc, ecs->ptid);
3248
3249 /* Note that we're interested in knowing whether the bpstat actually
3250 causes a stop, not just whether it may explain the signal.
3251 Software watchpoints, for example, always appear in the
3252 bpstat. */
3253 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3254
3255 /* If no catchpoint triggered for this, then keep going. */
3256 if (ecs->random_signal)
3257 {
3258 ptid_t parent;
3259 ptid_t child;
3260 int should_resume;
3261 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3262
3263 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3264
3265 should_resume = follow_fork ();
3266
3267 parent = ecs->ptid;
3268 child = ecs->ws.value.related_pid;
3269
3270 /* In non-stop mode, also resume the other branch. */
3271 if (non_stop && !detach_fork)
3272 {
3273 if (follow_child)
3274 switch_to_thread (parent);
3275 else
3276 switch_to_thread (child);
3277
3278 ecs->event_thread = inferior_thread ();
3279 ecs->ptid = inferior_ptid;
3280 keep_going (ecs);
3281 }
3282
3283 if (follow_child)
3284 switch_to_thread (child);
3285 else
3286 switch_to_thread (parent);
3287
3288 ecs->event_thread = inferior_thread ();
3289 ecs->ptid = inferior_ptid;
3290
3291 if (should_resume)
3292 keep_going (ecs);
3293 else
3294 stop_stepping (ecs);
3295 return;
3296 }
3297 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3298 goto process_event_stop_test;
3299
3300 case TARGET_WAITKIND_VFORK_DONE:
3301 /* Done with the shared memory region. Re-insert breakpoints in
3302 the parent, and keep going. */
3303
3304 if (debug_infrun)
3305 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3306
3307 if (!ptid_equal (ecs->ptid, inferior_ptid))
3308 context_switch (ecs->ptid);
3309
3310 current_inferior ()->waiting_for_vfork_done = 0;
3311 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3312 /* This also takes care of reinserting breakpoints in the
3313 previously locked inferior. */
3314 keep_going (ecs);
3315 return;
3316
3317 case TARGET_WAITKIND_EXECD:
3318 if (debug_infrun)
3319 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3320
3321 if (!ptid_equal (ecs->ptid, inferior_ptid))
3322 {
3323 context_switch (ecs->ptid);
3324 reinit_frame_cache ();
3325 }
3326
3327 singlestep_breakpoints_inserted_p = 0;
3328 cancel_single_step_breakpoints ();
3329
3330 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3331
3332 /* Do whatever is necessary to the parent branch of the vfork. */
3333 handle_vfork_child_exec_or_exit (1);
3334
3335 /* This causes the eventpoints and symbol table to be reset.
3336 Must do this now, before trying to determine whether to
3337 stop. */
3338 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3339
3340 ecs->event_thread->stop_bpstat
3341 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3342 stop_pc, ecs->ptid);
3343 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3344
3345 /* Note that this may be referenced from inside
3346 bpstat_stop_status above, through inferior_has_execd. */
3347 xfree (ecs->ws.value.execd_pathname);
3348 ecs->ws.value.execd_pathname = NULL;
3349
3350 /* If no catchpoint triggered for this, then keep going. */
3351 if (ecs->random_signal)
3352 {
3353 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3354 keep_going (ecs);
3355 return;
3356 }
3357 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3358 goto process_event_stop_test;
3359
3360 /* Be careful not to try to gather much state about a thread
3361 that's in a syscall. It's frequently a losing proposition. */
3362 case TARGET_WAITKIND_SYSCALL_ENTRY:
3363 if (debug_infrun)
3364 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3365 /* Get the current syscall number.  */
3366 if (handle_syscall_event (ecs) != 0)
3367 return;
3368 goto process_event_stop_test;
3369
3370 /* Before examining the threads further, step this thread to
3371 get it entirely out of the syscall. (We get notice of the
3372 event when the thread is just on the verge of exiting a
3373 syscall. Stepping one instruction seems to get it back
3374 into user code.) */
3375 case TARGET_WAITKIND_SYSCALL_RETURN:
3376 if (debug_infrun)
3377 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3378 if (handle_syscall_event (ecs) != 0)
3379 return;
3380 goto process_event_stop_test;
3381
3382 case TARGET_WAITKIND_STOPPED:
3383 if (debug_infrun)
3384 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3385 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3386 break;
3387
3388 case TARGET_WAITKIND_NO_HISTORY:
3389 /* Reverse execution: target ran out of history info. */
3390 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3391 print_stop_reason (NO_HISTORY, 0);
3392 stop_stepping (ecs);
3393 return;
3394 }
3395
3396 if (ecs->new_thread_event)
3397 {
3398 if (non_stop)
3399 /* Non-stop assumes that the target handles adding new threads
3400 to the thread list. */
3401 internal_error (__FILE__, __LINE__, "\
3402 targets should add new threads to the thread list themselves in non-stop mode.");
3403
3404 /* We may want to consider not doing a resume here in order to
3405 give the user a chance to play with the new thread. It might
3406 be good to make that a user-settable option. */
3407
3408 /* At this point, all threads are stopped (happens automatically
3409 in either the OS or the native code). Therefore we need to
3410 continue all threads in order to make progress. */
3411
3412 if (!ptid_equal (ecs->ptid, inferior_ptid))
3413 context_switch (ecs->ptid);
3414 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3415 prepare_to_wait (ecs);
3416 return;
3417 }
3418
3419 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3420 {
3421 /* Do we need to clean up the state of a thread that has
3422 completed a displaced single-step? (Doing so usually affects
3423 the PC, so do it here, before we set stop_pc.) */
3424 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3425
3426 /* If we either finished a single-step or hit a breakpoint, but
3427 the user wanted this thread to be stopped, pretend we got a
3428 SIG0 (generic unsignaled stop). */
3429
3430 if (ecs->event_thread->stop_requested
3431 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3432 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3433 }
3434
3435 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3436
3437 if (debug_infrun)
3438 {
3439 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3440 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3441 struct cleanup *old_chain = save_inferior_ptid ();
3442
3443 inferior_ptid = ecs->ptid;
3444
3445 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3446 paddress (gdbarch, stop_pc));
3447 if (target_stopped_by_watchpoint ())
3448 {
3449 CORE_ADDR addr;
3450
3451 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3452
3453 if (target_stopped_data_address (&current_target, &addr))
3454 fprintf_unfiltered (gdb_stdlog,
3455 "infrun: stopped data address = %s\n",
3456 paddress (gdbarch, addr));
3457 else
3458 fprintf_unfiltered (gdb_stdlog,
3459 "infrun: (no data address available)\n");
3460 }
3461
3462 do_cleanups (old_chain);
3463 }
3464
3465 if (stepping_past_singlestep_breakpoint)
3466 {
3467 gdb_assert (singlestep_breakpoints_inserted_p);
3468 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3469 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3470
3471 stepping_past_singlestep_breakpoint = 0;
3472
3473 /* We've either finished single-stepping past the single-step
3474 breakpoint, or stopped for some other reason. It would be nice if
3475 we could tell, but we can't reliably. */
3476 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3477 {
3478 if (debug_infrun)
3479 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3480 /* Pull the single step breakpoints out of the target. */
3481 remove_single_step_breakpoints ();
3482 singlestep_breakpoints_inserted_p = 0;
3483
3484 ecs->random_signal = 0;
3485 ecs->event_thread->trap_expected = 0;
3486
3487 context_switch (saved_singlestep_ptid);
3488 if (deprecated_context_hook)
3489 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3490
3491 resume (1, TARGET_SIGNAL_0);
3492 prepare_to_wait (ecs);
3493 return;
3494 }
3495 }
3496
3497 if (!ptid_equal (deferred_step_ptid, null_ptid))
3498 {
3499 /* In non-stop mode, there's never a deferred_step_ptid set. */
3500 gdb_assert (!non_stop);
3501
3502 /* If we stopped for some other reason than single-stepping, ignore
3503 the fact that we were supposed to switch back. */
3504 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3505 {
3506 if (debug_infrun)
3507 fprintf_unfiltered (gdb_stdlog,
3508 "infrun: handling deferred step\n");
3509
3510 /* Pull the single step breakpoints out of the target. */
3511 if (singlestep_breakpoints_inserted_p)
3512 {
3513 remove_single_step_breakpoints ();
3514 singlestep_breakpoints_inserted_p = 0;
3515 }
3516
3517 /* Note: We do not call context_switch at this point, as the
3518 context is already set up for stepping the original thread. */
3519 switch_to_thread (deferred_step_ptid);
3520 deferred_step_ptid = null_ptid;
3521 /* Suppress spurious "Switching to ..." message. */
3522 previous_inferior_ptid = inferior_ptid;
3523
3524 resume (1, TARGET_SIGNAL_0);
3525 prepare_to_wait (ecs);
3526 return;
3527 }
3528
3529 deferred_step_ptid = null_ptid;
3530 }
3531
3532 /* See if a thread hit a thread-specific breakpoint that was meant for
3533 another thread. If so, then step that thread past the breakpoint,
3534 and continue it. */
3535
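/* A minimal illustration of the situation handled below, using the
   standard thread-specific breakpoint syntax (the program details
   are hypothetical):

       (gdb) break foo thread 2
       (gdb) continue

   If thread 1 happens to reach foo first, it still executes the
   planted trap, but breakpoint_thread_match rejects the hit, so the
   code below quietly "hops" thread 1 past the breakpoint and resumes
   instead of reporting a stop to the user.  */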
3536 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3537 {
3538 int thread_hop_needed = 0;
3539 struct address_space *aspace =
3540 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3541
3542 /* Check if a regular breakpoint has been hit before checking
3543 for a potential single step breakpoint. Otherwise, GDB will
3544 not see this breakpoint hit when stepping onto breakpoints. */
3545 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3546 {
3547 ecs->random_signal = 0;
3548 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3549 thread_hop_needed = 1;
3550 }
3551 else if (singlestep_breakpoints_inserted_p)
3552 {
3553 /* We have not context switched yet, so this should be true
3554 no matter which thread hit the singlestep breakpoint. */
3555 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3556 if (debug_infrun)
3557 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3558 "trap for %s\n",
3559 target_pid_to_str (ecs->ptid));
3560
3561 ecs->random_signal = 0;
3562 /* The call to in_thread_list is necessary because PTIDs sometimes
3563 change when we go from single-threaded to multi-threaded. If
3564 the singlestep_ptid is still in the list, assume that it is
3565 really different from ecs->ptid. */
3566 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3567 && in_thread_list (singlestep_ptid))
3568 {
3569 /* If the PC of the thread we were trying to single-step
3570 has changed, discard this event (which we were going
3571 to ignore anyway), and pretend we saw that thread
3572 trap. This prevents us continuously moving the
3573 single-step breakpoint forward, one instruction at a
3574 time. If the PC has changed, then the thread we were
3575 trying to single-step has trapped or been signalled,
3576 but the event has not been reported to GDB yet.
3577
3578 There might be some cases where this loses signal
3579 information, if a signal has arrived at exactly the
3580 same time that the PC changed, but this is the best
3581 we can do with the information available. Perhaps we
3582 should arrange to report all events for all threads
3583 when they stop, or to re-poll the remote looking for
3584 this particular thread (i.e. temporarily enable
3585 schedlock). */
3586
3587 CORE_ADDR new_singlestep_pc
3588 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3589
3590 if (new_singlestep_pc != singlestep_pc)
3591 {
3592 enum target_signal stop_signal;
3593
3594 if (debug_infrun)
3595 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3596 " but expected thread advanced also\n");
3597
3598 /* The current context still belongs to
3599 singlestep_ptid. Don't swap here, since that's
3600 the context we want to use. Just fudge our
3601 state and continue. */
3602 stop_signal = ecs->event_thread->stop_signal;
3603 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3604 ecs->ptid = singlestep_ptid;
3605 ecs->event_thread = find_thread_ptid (ecs->ptid);
3606 ecs->event_thread->stop_signal = stop_signal;
3607 stop_pc = new_singlestep_pc;
3608 }
3609 else
3610 {
3611 if (debug_infrun)
3612 fprintf_unfiltered (gdb_stdlog,
3613 "infrun: unexpected thread\n");
3614
3615 thread_hop_needed = 1;
3616 stepping_past_singlestep_breakpoint = 1;
3617 saved_singlestep_ptid = singlestep_ptid;
3618 }
3619 }
3620 }
3621
3622 if (thread_hop_needed)
3623 {
3624 struct regcache *thread_regcache;
3625 int remove_status = 0;
3626
3627 if (debug_infrun)
3628 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3629
3630 /* Switch context before touching inferior memory, the
3631 previous thread may have exited. */
3632 if (!ptid_equal (inferior_ptid, ecs->ptid))
3633 context_switch (ecs->ptid);
3634
3635 /* Saw a breakpoint, but it was hit by the wrong thread.
3636 Just continue. */
3637
3638 if (singlestep_breakpoints_inserted_p)
3639 {
3640 /* Pull the single step breakpoints out of the target. */
3641 remove_single_step_breakpoints ();
3642 singlestep_breakpoints_inserted_p = 0;
3643 }
3644
3645 /* If the arch can displace step, don't remove the
3646 breakpoints. */
3647 thread_regcache = get_thread_regcache (ecs->ptid);
3648 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3649 remove_status = remove_breakpoints ();
3650
3651 /* Did we fail to remove breakpoints? If so, try
3652 to set the PC past the bp. (There's at least
3653 one situation in which we can fail to remove
3654 the bp's: On HP-UX's that use ttrace, we can't
3655 change the address space of a vforking child
3656 process until the child exits (well, okay, not
3657 then either :-) or execs. */
3658 if (remove_status != 0)
3659 error (_("Cannot step over breakpoint hit in wrong thread"));
3660 else
3661 { /* Single step */
3662 if (!non_stop)
3663 {
3664 /* Only need to require the next event from this
3665 thread in all-stop mode. */
3666 waiton_ptid = ecs->ptid;
3667 infwait_state = infwait_thread_hop_state;
3668 }
3669
3670 ecs->event_thread->stepping_over_breakpoint = 1;
3671 keep_going (ecs);
3672 return;
3673 }
3674 }
3675 else if (singlestep_breakpoints_inserted_p)
3676 {
3677 sw_single_step_trap_p = 1;
3678 ecs->random_signal = 0;
3679 }
3680 }
3681 else
3682 ecs->random_signal = 1;
3683
3684 /* See if something interesting happened to the non-current thread. If
3685 so, then switch to that thread. */
3686 if (!ptid_equal (ecs->ptid, inferior_ptid))
3687 {
3688 if (debug_infrun)
3689 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3690
3691 context_switch (ecs->ptid);
3692
3693 if (deprecated_context_hook)
3694 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3695 }
3696
3697 /* At this point, get hold of the now-current thread's frame. */
3698 frame = get_current_frame ();
3699 gdbarch = get_frame_arch (frame);
3700
3701 if (singlestep_breakpoints_inserted_p)
3702 {
3703 /* Pull the single step breakpoints out of the target. */
3704 remove_single_step_breakpoints ();
3705 singlestep_breakpoints_inserted_p = 0;
3706 }
3707
3708 if (stepped_after_stopped_by_watchpoint)
3709 stopped_by_watchpoint = 0;
3710 else
3711 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3712
3713 /* If necessary, step over this watchpoint. We'll be back to display
3714 it in a moment. */
3715 if (stopped_by_watchpoint
3716 && (target_have_steppable_watchpoint
3717 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3718 {
3719 /* At this point, we are stopped at an instruction which has
3720 attempted to write to a piece of memory under control of
3721 a watchpoint. The instruction hasn't actually executed
3722 yet. If we were to evaluate the watchpoint expression
3723 now, we would get the old value, and therefore no change
3724 would seem to have occurred.
3725
3726 In order to make watchpoints work `right', we really need
3727 to complete the memory write, and then evaluate the
3728 watchpoint expression. We do this by single-stepping the
3729 target.
3730
3731 It may not be necessary to disable the watchpoint to step over
3732 it. For example, the PA can (with some kernel cooperation)
3733 single step over a watchpoint without disabling the watchpoint.
3734
3735 It is far more common to need to disable a watchpoint to step
3736 the inferior over it. If we have non-steppable watchpoints,
3737 we must disable the current watchpoint; it's simplest to
3738 disable all watchpoints and breakpoints. */
3739 int hw_step = 1;
3740
3741 if (!target_have_steppable_watchpoint)
3742 remove_breakpoints ();
3743 /* Single step */
3744 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3745 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3746 waiton_ptid = ecs->ptid;
3747 if (target_have_steppable_watchpoint)
3748 infwait_state = infwait_step_watch_state;
3749 else
3750 infwait_state = infwait_nonstep_watch_state;
3751 prepare_to_wait (ecs);
3752 return;
3753 }
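/* A hedged sketch of why the extra single-step above is needed on
   targets with non-steppable watchpoints.  Suppose the user has done
   "watch counter" and the inferior is about to execute the store
   (the names here are hypothetical):

       int counter;           <- the watched location
       ...
       counter = 5;           <- the trap is reported before the store
                                 has actually taken effect

   Evaluating the watchpoint expression at the trap would still read
   the old value, so no change would be seen; stepping that one
   instruction first lets the write complete, and the new value (5)
   is visible when the expression is re-checked.  */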
3754
3755 ecs->stop_func_start = 0;
3756 ecs->stop_func_end = 0;
3757 ecs->stop_func_name = 0;
3758 /* Don't care about return value; stop_func_start and stop_func_name
3759 will both be 0 if it doesn't work. */
3760 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3761 &ecs->stop_func_start, &ecs->stop_func_end);
3762 ecs->stop_func_start
3763 += gdbarch_deprecated_function_start_offset (gdbarch);
3764 ecs->event_thread->stepping_over_breakpoint = 0;
3765 bpstat_clear (&ecs->event_thread->stop_bpstat);
3766 ecs->event_thread->stop_step = 0;
3767 stop_print_frame = 1;
3768 ecs->random_signal = 0;
3769 stopped_by_random_signal = 0;
3770
3771 /* Hide inlined functions starting here, unless we just performed stepi or
3772 nexti. After stepi and nexti, always show the innermost frame (not any
3773 inline function call sites). */
3774 if (ecs->event_thread->step_range_end != 1)
3775 skip_inline_frames (ecs->ptid);
3776
3777 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3778 && ecs->event_thread->trap_expected
3779 && gdbarch_single_step_through_delay_p (gdbarch)
3780 && currently_stepping (ecs->event_thread))
3781 {
3782 /* We're trying to step off a breakpoint. Turns out that we're
3783 also on an instruction that needs to be stepped multiple
3784 times before it has been fully executed.  E.g., architectures
3785 with a delay slot. It needs to be stepped twice, once for
3786 the instruction and once for the delay slot. */
3787 int step_through_delay
3788 = gdbarch_single_step_through_delay (gdbarch, frame);
3789
3790 if (debug_infrun && step_through_delay)
3791 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3792 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3793 {
3794 /* The user issued a continue when stopped at a breakpoint.
3795 Set up for another trap and get out of here. */
3796 ecs->event_thread->stepping_over_breakpoint = 1;
3797 keep_going (ecs);
3798 return;
3799 }
3800 else if (step_through_delay)
3801 {
3802 /* The user issued a step when stopped at a breakpoint.
3803 Maybe we should stop, maybe we should not - the delay
3804 slot *might* correspond to a line of source. In any
3805 case, don't decide that here, just set
3806 ecs->stepping_over_breakpoint, making sure we
3807 single-step again before breakpoints are re-inserted. */
3808 ecs->event_thread->stepping_over_breakpoint = 1;
3809 }
3810 }
3811
3812 /* Look at the cause of the stop, and decide what to do.
3813 The alternatives are:
3814 1) stop_stepping and return; to really stop and return to the debugger,
3815 2) keep_going and return to start up again
3816 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3817 3) set ecs->random_signal to 1, and the decision between 1 and 2
3818 will be made according to the signal handling tables. */
3819
3820 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3821 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3822 || stop_soon == STOP_QUIETLY_REMOTE)
3823 {
3824 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3825 {
3826 if (debug_infrun)
3827 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3828 stop_print_frame = 0;
3829 stop_stepping (ecs);
3830 return;
3831 }
3832
3833 /* This originates from start_remote(), start_inferior() and the
3834 shared library hook functions. */
3835 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3836 {
3837 if (debug_infrun)
3838 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3839 stop_stepping (ecs);
3840 return;
3841 }
3842
3843 /* This originates from attach_command(). We need to overwrite
3844 the stop_signal here, because some kernels don't ignore a
3845 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3846 See more comments in inferior.h. On the other hand, if we
3847 get a non-SIGSTOP, report it to the user - assume the backend
3848 will handle the SIGSTOP if it should show up later.
3849
3850 Also consider that the attach is complete when we see a
3851 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3852 target extended-remote report it instead of a SIGSTOP
3853 (e.g. gdbserver). We already rely on SIGTRAP being our
3854 signal, so this is no exception.
3855
3856 Also consider that the attach is complete when we see a
3857 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3858 the target to stop all threads of the inferior, in case the
3859 low level attach operation doesn't stop them implicitly. If
3860 they weren't stopped implicitly, then the stub will report a
3861 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3862 other than GDB's request. */
3863 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3864 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3865 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3866 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3867 {
3868 stop_stepping (ecs);
3869 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3870 return;
3871 }
3872
3873 /* See if there is a breakpoint at the current PC. */
3874 ecs->event_thread->stop_bpstat
3875 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3876 stop_pc, ecs->ptid);
3877
3878 /* The following is needed in case a breakpoint condition
3879 called a function. */
3880 stop_print_frame = 1;
3881
3882 /* This is where we handle "moribund" watchpoints. Unlike
3883 software breakpoint traps, hardware watchpoint traps are
3884 always distinguishable from random traps. If no high-level
3885 watchpoint is associated with the reported stop data address
3886 anymore, then the bpstat does not explain the signal ---
3887 simply make sure to ignore it if `stopped_by_watchpoint' is
3888 set. */
3889
3890 if (debug_infrun
3891 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3892 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3893 && stopped_by_watchpoint)
3894 fprintf_unfiltered (gdb_stdlog, "\
3895 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3896
3897 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3898 at one stage in the past included checks for an inferior
3899 function call's call dummy's return breakpoint. The original
3900 comment, that went with the test, read:
3901
3902 ``End of a stack dummy. Some systems (e.g. Sony news) give
3903 another signal besides SIGTRAP, so check here as well as
3904 above.''
3905
3906 If someone ever tries to get call dummies on a
3907 non-executable stack to work (where the target would stop
3908 with something like a SIGSEGV), then those tests might need
3909 to be re-instated. Given, however, that the tests were only
3910 enabled when momentary breakpoints were not being used, I
3911 suspect that it won't be the case.
3912
3913 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3914 be necessary for call dummies on a non-executable stack on
3915 SPARC. */
3916
3917 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3918 ecs->random_signal
3919 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3920 || stopped_by_watchpoint
3921 || ecs->event_thread->trap_expected
3922 || (ecs->event_thread->step_range_end
3923 && ecs->event_thread->step_resume_breakpoint == NULL));
3924 else
3925 {
3926 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3927 if (!ecs->random_signal)
3928 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3929 }
3930 }
3931
3932 /* When we reach this point, we've pretty much decided
3933 that the reason for stopping must've been a random
3934 (unexpected) signal. */
3935
3936 else
3937 ecs->random_signal = 1;
3938
3939 process_event_stop_test:
3940
3941 /* Re-fetch current thread's frame in case we did a
3942 "goto process_event_stop_test" above. */
3943 frame = get_current_frame ();
3944 gdbarch = get_frame_arch (frame);
3945
3946 /* For the program's own signals, act according to
3947 the signal handling tables. */
3948
3949 if (ecs->random_signal)
3950 {
3951 /* Signal not for debugging purposes. */
3952 int printed = 0;
3953 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3954
3955 if (debug_infrun)
3956 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3957 ecs->event_thread->stop_signal);
3958
3959 stopped_by_random_signal = 1;
3960
3961 if (signal_print[ecs->event_thread->stop_signal])
3962 {
3963 printed = 1;
3964 target_terminal_ours_for_output ();
3965 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3966 }
3967 /* Always stop on signals if we're either just gaining control
3968 of the program, or the user explicitly requested this thread
3969 to remain stopped. */
3970 if (stop_soon != NO_STOP_QUIETLY
3971 || ecs->event_thread->stop_requested
3972 || (!inf->detaching
3973 && signal_stop_state (ecs->event_thread->stop_signal)))
3974 {
3975 stop_stepping (ecs);
3976 return;
3977 }
3978 /* If not going to stop, give terminal back
3979 if we took it away. */
3980 else if (printed)
3981 target_terminal_inferior ();
3982
3983 /* Clear the signal if it should not be passed. */
3984 if (signal_program[ecs->event_thread->stop_signal] == 0)
3985 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3986
3987 if (ecs->event_thread->prev_pc == stop_pc
3988 && ecs->event_thread->trap_expected
3989 && ecs->event_thread->step_resume_breakpoint == NULL)
3990 {
3991 /* We were just starting a new sequence, attempting to
3992 single-step off of a breakpoint and expecting a SIGTRAP.
3993 Instead this signal arrives. This signal will take us out
3994 of the stepping range so GDB needs to remember to, when
3995 the signal handler returns, resume stepping off that
3996 breakpoint. */
3997 /* To simplify things, "continue" is forced to use the same
3998 code paths as single-step - set a breakpoint at the
3999 signal return address and then, once hit, step off that
4000 breakpoint. */
4001 if (debug_infrun)
4002 fprintf_unfiltered (gdb_stdlog,
4003 "infrun: signal arrived while stepping over "
4004 "breakpoint\n");
4005
4006 insert_step_resume_breakpoint_at_frame (frame);
4007 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4008 keep_going (ecs);
4009 return;
4010 }
4011
4012 if (ecs->event_thread->step_range_end != 0
4013 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
4014 && (ecs->event_thread->step_range_start <= stop_pc
4015 && stop_pc < ecs->event_thread->step_range_end)
4016 && frame_id_eq (get_stack_frame_id (frame),
4017 ecs->event_thread->step_stack_frame_id)
4018 && ecs->event_thread->step_resume_breakpoint == NULL)
4019 {
4020 /* The inferior is about to take a signal that will take it
4021 out of the single step range. Set a breakpoint at the
4022 current PC (which is presumably where the signal handler
4023 will eventually return) and then allow the inferior to
4024 run free.
4025
4026 Note that this is only needed for a signal delivered
4027 while in the single-step range. Nested signals aren't a
4028 problem as they eventually all return. */
4029 if (debug_infrun)
4030 fprintf_unfiltered (gdb_stdlog,
4031 "infrun: signal may take us out of "
4032 "single-step range\n");
4033
4034 insert_step_resume_breakpoint_at_frame (frame);
4035 keep_going (ecs);
4036 return;
4037 }
4038
4039 /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4040 when either there's a nested signal, or when there's a
4041 pending signal enabled just as the signal handler returns
4042 (leaving the inferior at the step-resume-breakpoint without
4043 actually executing it). Either way continue until the
4044 breakpoint is really hit. */
4045 keep_going (ecs);
4046 return;
4047 }
4048
4049 /* Handle cases caused by hitting a breakpoint. */
4050 {
4051 CORE_ADDR jmp_buf_pc;
4052 struct bpstat_what what;
4053
4054 what = bpstat_what (ecs->event_thread->stop_bpstat);
4055
4056 if (what.call_dummy)
4057 {
4058 stop_stack_dummy = what.call_dummy;
4059 }
4060
4061 /* If we hit an internal event that triggers symbol changes, the
4062 current frame will be invalidated within bpstat_what (e.g., if
4063 we hit an internal solib event). Re-fetch it. */
4064 frame = get_current_frame ();
4065 gdbarch = get_frame_arch (frame);
4066
4067 switch (what.main_action)
4068 {
4069 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4070 /* If we hit the breakpoint at longjmp while stepping, we
4071 install a momentary breakpoint at the target of the
4072 jmp_buf. */
4073
4074 if (debug_infrun)
4075 fprintf_unfiltered (gdb_stdlog,
4076 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4077
4078 ecs->event_thread->stepping_over_breakpoint = 1;
4079
4080 if (!gdbarch_get_longjmp_target_p (gdbarch)
4081 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
4082 {
4083 if (debug_infrun)
4084 fprintf_unfiltered (gdb_stdlog, "\
4085 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4086 keep_going (ecs);
4087 return;
4088 }
4089
4090 /* We're going to replace the current step-resume breakpoint
4091 with a longjmp-resume breakpoint. */
4092 delete_step_resume_breakpoint (ecs->event_thread);
4093
4094 /* Insert a breakpoint at resume address. */
4095 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4096
4097 keep_going (ecs);
4098 return;
4099
4100 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4101 if (debug_infrun)
4102 fprintf_unfiltered (gdb_stdlog,
4103 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4104
4105 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4106 delete_step_resume_breakpoint (ecs->event_thread);
4107
4108 ecs->event_thread->stop_step = 1;
4109 print_stop_reason (END_STEPPING_RANGE, 0);
4110 stop_stepping (ecs);
4111 return;
4112
4113 case BPSTAT_WHAT_SINGLE:
4114 if (debug_infrun)
4115 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4116 ecs->event_thread->stepping_over_breakpoint = 1;
4117 /* Still need to check other stuff, at least the case
4118 where we are stepping and step out of the right range. */
4119 break;
4120
4121 case BPSTAT_WHAT_STOP_NOISY:
4122 if (debug_infrun)
4123 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4124 stop_print_frame = 1;
4125
4126 /* We are about to nuke the step_resume_breakpoint via the
4127 cleanup chain, so no need to worry about it here. */
4128
4129 stop_stepping (ecs);
4130 return;
4131
4132 case BPSTAT_WHAT_STOP_SILENT:
4133 if (debug_infrun)
4134 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4135 stop_print_frame = 0;
4136
4137 /* We are about to nuke the step_resume_breakpoint via the
4138 cleanup chain, so no need to worry about it here. */
4139
4140 stop_stepping (ecs);
4141 return;
4142
4143 case BPSTAT_WHAT_STEP_RESUME:
4144 if (debug_infrun)
4145 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4146
4147 delete_step_resume_breakpoint (ecs->event_thread);
4148 if (ecs->event_thread->step_after_step_resume_breakpoint)
4149 {
4150 /* Back when the step-resume breakpoint was inserted, we
4151 were trying to single-step off a breakpoint. Go back
4152 to doing that. */
4153 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4154 ecs->event_thread->stepping_over_breakpoint = 1;
4155 keep_going (ecs);
4156 return;
4157 }
4158 if (stop_pc == ecs->stop_func_start
4159 && execution_direction == EXEC_REVERSE)
4160 {
4161 /* We are stepping over a function call in reverse, and
4162 just hit the step-resume breakpoint at the start
4163 address of the function. Go back to single-stepping,
4164 which should take us back to the function call. */
4165 ecs->event_thread->stepping_over_breakpoint = 1;
4166 keep_going (ecs);
4167 return;
4168 }
4169 break;
4170
4171 case BPSTAT_WHAT_KEEP_CHECKING:
4172 break;
4173 }
4174 }
4175
4176 /* We come here if we hit a breakpoint but should not
4177 stop for it. Possibly we also were stepping
4178 and should stop for that. So fall through and
4179 test for stepping. But, if not stepping,
4180 do not stop. */
4181
4182 /* In all-stop mode, if we're currently stepping but have stopped in
4183 some other thread, we need to switch back to the stepped thread. */
4184 if (!non_stop)
4185 {
4186 struct thread_info *tp;
4187
4188 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4189 ecs->event_thread);
4190 if (tp)
4191 {
4192 /* However, if the current thread is blocked on some internal
4193 breakpoint, and we simply need to step over that breakpoint
4194 to get it going again, do that first. */
4195 if ((ecs->event_thread->trap_expected
4196 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4197 || ecs->event_thread->stepping_over_breakpoint)
4198 {
4199 keep_going (ecs);
4200 return;
4201 }
4202
4203 /* If the stepping thread exited, then don't try to switch
4204 back and resume it, which could fail in several different
4205 ways depending on the target. Instead, just keep going.
4206
4207 We can find a stepping dead thread in the thread list in
4208 two cases:
4209
4210 - The target supports thread exit events, and when the
4211 target tries to delete the thread from the thread list,
4212 inferior_ptid pointed at the exiting thread. In such
4213 case, calling delete_thread does not really remove the
4214 thread from the list; instead, the thread is left listed,
4215 with 'exited' state.
4216
4217 - The target's debug interface does not support thread
4218 exit events, and so we have no idea whatsoever if the
4219 previously stepping thread is still alive. For that
4220 reason, we need to synchronously query the target
4221 now. */
4222 if (is_exited (tp->ptid)
4223 || !target_thread_alive (tp->ptid))
4224 {
4225 if (debug_infrun)
4226 fprintf_unfiltered (gdb_stdlog, "\
4227 infrun: not switching back to stepped thread, it has vanished\n");
4228
4229 delete_thread (tp->ptid);
4230 keep_going (ecs);
4231 return;
4232 }
4233
4234 /* Otherwise, we no longer expect a trap in the current thread.
4235 Clear the trap_expected flag before switching back -- this is
4236 what keep_going would do as well, if we called it. */
4237 ecs->event_thread->trap_expected = 0;
4238
4239 if (debug_infrun)
4240 fprintf_unfiltered (gdb_stdlog,
4241 "infrun: switching back to stepped thread\n");
4242
4243 ecs->event_thread = tp;
4244 ecs->ptid = tp->ptid;
4245 context_switch (ecs->ptid);
4246 keep_going (ecs);
4247 return;
4248 }
4249 }
4250
4251 /* Are we stepping to get the inferior out of the dynamic linker's
4252 hook (and possibly the dld itself) after catching a shlib
4253 event? */
4254 if (ecs->event_thread->stepping_through_solib_after_catch)
4255 {
4256 #if defined(SOLIB_ADD)
4257 /* Have we reached our destination? If not, keep going. */
4258 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4259 {
4260 if (debug_infrun)
4261 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4262 ecs->event_thread->stepping_over_breakpoint = 1;
4263 keep_going (ecs);
4264 return;
4265 }
4266 #endif
4267 if (debug_infrun)
4268 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4269 /* Else, stop and report the catchpoint(s) whose triggering
4270 caused us to begin stepping. */
4271 ecs->event_thread->stepping_through_solib_after_catch = 0;
4272 bpstat_clear (&ecs->event_thread->stop_bpstat);
4273 ecs->event_thread->stop_bpstat
4274 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4275 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4276 stop_print_frame = 1;
4277 stop_stepping (ecs);
4278 return;
4279 }
4280
4281 if (ecs->event_thread->step_resume_breakpoint)
4282 {
4283 if (debug_infrun)
4284 fprintf_unfiltered (gdb_stdlog,
4285 "infrun: step-resume breakpoint is inserted\n");
4286
4287 /* Having a step-resume breakpoint overrides anything
4288 else having to do with stepping commands until
4289 that breakpoint is reached. */
4290 keep_going (ecs);
4291 return;
4292 }
4293
4294 if (ecs->event_thread->step_range_end == 0)
4295 {
4296 if (debug_infrun)
4297 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4298 /* Likewise if we aren't even stepping. */
4299 keep_going (ecs);
4300 return;
4301 }
4302
4303 /* Re-fetch current thread's frame in case the code above caused
4304 the frame cache to be re-initialized, making our FRAME variable
4305 a dangling pointer. */
4306 frame = get_current_frame ();
4307 gdbarch = get_frame_arch (frame);
4308
4309 /* If stepping through a line, keep going if still within it.
4310
4311 Note that step_range_end is the address of the first instruction
4312 beyond the step range, and NOT the address of the last instruction
4313 within it!
4314
4315 Note also that during reverse execution, we may be stepping
4316 through a function epilogue and therefore must detect when
4317 the current-frame changes in the middle of a line. */
4318
4319 if (stop_pc >= ecs->event_thread->step_range_start
4320 && stop_pc < ecs->event_thread->step_range_end
4321 && (execution_direction != EXEC_REVERSE
4322 || frame_id_eq (get_frame_id (frame),
4323 ecs->event_thread->step_frame_id)))
4324 {
4325 if (debug_infrun)
4326 fprintf_unfiltered
4327 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4328 paddress (gdbarch, ecs->event_thread->step_range_start),
4329 paddress (gdbarch, ecs->event_thread->step_range_end));
4330
4331 /* When stepping backward, stop at beginning of line range
4332 (unless it's the function entry point, in which case
4333 keep going back to the call point). */
4334 if (stop_pc == ecs->event_thread->step_range_start
4335 && stop_pc != ecs->stop_func_start
4336 && execution_direction == EXEC_REVERSE)
4337 {
4338 ecs->event_thread->stop_step = 1;
4339 print_stop_reason (END_STEPPING_RANGE, 0);
4340 stop_stepping (ecs);
4341 }
4342 else
4343 keep_going (ecs);
4344
4345 return;
4346 }
4347
4348 /* We stepped out of the stepping range. */
4349
4350 /* If we are stepping at the source level and entered the runtime
4351 loader dynamic symbol resolution code...
4352
4353 EXEC_FORWARD: we keep on single stepping until we exit the run
4354 time loader code and reach the callee's address.
4355
4356 EXEC_REVERSE: we've already executed the callee (backward), and
4357 the runtime loader code is handled just like any other
4358 undebuggable function call. Now we need only keep stepping
4359 backward through the trampoline code, and that's handled further
4360 down, so there is nothing for us to do here. */
4361
4362 if (execution_direction != EXEC_REVERSE
4363 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4364 && in_solib_dynsym_resolve_code (stop_pc))
4365 {
4366 CORE_ADDR pc_after_resolver =
4367 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4368
4369 if (debug_infrun)
4370 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4371
4372 if (pc_after_resolver)
4373 {
4374 /* Set up a step-resume breakpoint at the address
4375 indicated by SKIP_SOLIB_RESOLVER. */
4376 struct symtab_and_line sr_sal;
4377
4378 init_sal (&sr_sal);
4379 sr_sal.pc = pc_after_resolver;
4380 sr_sal.pspace = get_frame_program_space (frame);
4381
4382 insert_step_resume_breakpoint_at_sal (gdbarch,
4383 sr_sal, null_frame_id);
4384 }
4385
4386 keep_going (ecs);
4387 return;
4388 }
4389
4390 if (ecs->event_thread->step_range_end != 1
4391 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4392 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4393 && get_frame_type (frame) == SIGTRAMP_FRAME)
4394 {
4395 if (debug_infrun)
4396 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4397 /* The inferior, while doing a "step" or "next", has ended up in
4398 a signal trampoline (either by a signal being delivered or by
4399 the signal handler returning). Just single-step until the
4400 inferior leaves the trampoline (either by calling the handler
4401 or returning). */
4402 keep_going (ecs);
4403 return;
4404 }
4405
4406 /* Check for subroutine calls. The check for the current frame
4407 equalling the step ID is not necessary - the check of the
4408 previous frame's ID is sufficient - but it is a common case and
4409 cheaper than checking the previous frame's ID.
4410
4411 NOTE: frame_id_eq will never report two invalid frame IDs as
4412 being equal, so to get into this block, both the current and
4413 previous frame must have valid frame IDs. */
4414 /* The outer_frame_id check is a heuristic to detect stepping
4415 through startup code. If we step over an instruction which
4416 sets the stack pointer from an invalid value to a valid value,
4417 we may detect that as a subroutine call from the mythical
4418 "outermost" function. This could be fixed by marking
4419 outermost frames as !stack_p,code_p,special_p. Then the
4420 initial outermost frame, before sp was valid, would
4421 have code_addr == &_start. See the comment in frame_id_eq
4422 for more. */
4423 if (!frame_id_eq (get_stack_frame_id (frame),
4424 ecs->event_thread->step_stack_frame_id)
4425 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4426 ecs->event_thread->step_stack_frame_id)
4427 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4428 outer_frame_id)
4429 || step_start_function != find_pc_function (stop_pc))))
4430 {
4431 CORE_ADDR real_stop_pc;
4432
4433 if (debug_infrun)
4434 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4435
4436 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4437 || ((ecs->event_thread->step_range_end == 1)
4438 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4439 ecs->stop_func_start)))
4440 {
4441 /* I presume that step_over_calls is only 0 when we're
4442 supposed to be stepping at the assembly language level
4443 ("stepi"). Just stop. */
4444 /* Also, maybe we just did a "nexti" inside a prologue, so we
4445 thought it was a subroutine call but it was not. Stop as
4446 well. FENN */
4447 /* And this works the same backward as frontward. MVS */
4448 ecs->event_thread->stop_step = 1;
4449 print_stop_reason (END_STEPPING_RANGE, 0);
4450 stop_stepping (ecs);
4451 return;
4452 }
4453
4454 /* Reverse stepping through solib trampolines. */
4455
4456 if (execution_direction == EXEC_REVERSE
4457 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4458 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4459 || (ecs->stop_func_start == 0
4460 && in_solib_dynsym_resolve_code (stop_pc))))
4461 {
4462 /* Any solib trampoline code can be handled in reverse
4463 by simply continuing to single-step. We have already
4464 executed the solib function (backwards), and a few
4465 steps will take us back through the trampoline to the
4466 caller. */
4467 keep_going (ecs);
4468 return;
4469 }
4470
4471 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4472 {
4473 /* We're doing a "next".
4474
4475 Normal (forward) execution: set a breakpoint at the
4476 callee's return address (the address at which the caller
4477 will resume).
4478
4479 Reverse (backward) execution. set the step-resume
4480 breakpoint at the start of the function that we just
4481 stepped into (backwards), and continue to there. When we
4482 get there, we'll need to single-step back to the caller. */
4483
4484 if (execution_direction == EXEC_REVERSE)
4485 {
4486 struct symtab_and_line sr_sal;
4487
4488 /* Normal function call return (static or dynamic). */
4489 init_sal (&sr_sal);
4490 sr_sal.pc = ecs->stop_func_start;
4491 sr_sal.pspace = get_frame_program_space (frame);
4492 insert_step_resume_breakpoint_at_sal (gdbarch,
4493 sr_sal, null_frame_id);
4494 }
4495 else
4496 insert_step_resume_breakpoint_at_caller (frame);
4497
4498 keep_going (ecs);
4499 return;
4500 }
4501
4502 /* If we are in a function call trampoline (a stub between the
4503 calling routine and the real function), locate the real
4504 function. That's what tells us (a) whether we want to step
4505 into it at all, and (b) what prologue we want to run to the
4506 end of, if we do step into it. */
4507 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4508 if (real_stop_pc == 0)
4509 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4510 if (real_stop_pc != 0)
4511 ecs->stop_func_start = real_stop_pc;
4512
4513 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4514 {
4515 struct symtab_and_line sr_sal;
4516
4517 init_sal (&sr_sal);
4518 sr_sal.pc = ecs->stop_func_start;
4519 sr_sal.pspace = get_frame_program_space (frame);
4520
4521 insert_step_resume_breakpoint_at_sal (gdbarch,
4522 sr_sal, null_frame_id);
4523 keep_going (ecs);
4524 return;
4525 }
4526
4527 /* If we have line number information for the function we are
4528 thinking of stepping into, step into it.
4529
4530 If there are several symtabs at that PC (e.g. with include
4531 files), just want to know whether *any* of them have line
4532 numbers. find_pc_line handles this. */
4533 {
4534 struct symtab_and_line tmp_sal;
4535
4536 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4537 tmp_sal.pspace = get_frame_program_space (frame);
4538 if (tmp_sal.line != 0)
4539 {
4540 if (execution_direction == EXEC_REVERSE)
4541 handle_step_into_function_backward (gdbarch, ecs);
4542 else
4543 handle_step_into_function (gdbarch, ecs);
4544 return;
4545 }
4546 }
4547
4548 /* If we have no line number and the step-stop-if-no-debug is
4549 set, we stop the step so that the user has a chance to switch
4550 to assembly mode. */
4551 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4552 && step_stop_if_no_debug)
4553 {
4554 ecs->event_thread->stop_step = 1;
4555 print_stop_reason (END_STEPPING_RANGE, 0);
4556 stop_stepping (ecs);
4557 return;
4558 }
4559
4560 if (execution_direction == EXEC_REVERSE)
4561 {
4562 /* Set a breakpoint at callee's start address.
4563 From there we can step once and be back in the caller. */
4564 struct symtab_and_line sr_sal;
4565
4566 init_sal (&sr_sal);
4567 sr_sal.pc = ecs->stop_func_start;
4568 sr_sal.pspace = get_frame_program_space (frame);
4569 insert_step_resume_breakpoint_at_sal (gdbarch,
4570 sr_sal, null_frame_id);
4571 }
4572 else
4573 /* Set a breakpoint at callee's return address (the address
4574 at which the caller will resume). */
4575 insert_step_resume_breakpoint_at_caller (frame);
4576
4577 keep_going (ecs);
4578 return;
4579 }
4580
4581 /* Reverse stepping through solib trampolines. */
4582
4583 if (execution_direction == EXEC_REVERSE
4584 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4585 {
4586 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4587 || (ecs->stop_func_start == 0
4588 && in_solib_dynsym_resolve_code (stop_pc)))
4589 {
4590 /* Any solib trampoline code can be handled in reverse
4591 by simply continuing to single-step. We have already
4592 executed the solib function (backwards), and a few
4593 steps will take us back through the trampoline to the
4594 caller. */
4595 keep_going (ecs);
4596 return;
4597 }
4598 else if (in_solib_dynsym_resolve_code (stop_pc))
4599 {
4600 /* Stepped backward into the solib dynsym resolver.
4601 Set a breakpoint at its start and continue, then
4602 one more step will take us out. */
4603 struct symtab_and_line sr_sal;
4604
4605 init_sal (&sr_sal);
4606 sr_sal.pc = ecs->stop_func_start;
4607 sr_sal.pspace = get_frame_program_space (frame);
4608 insert_step_resume_breakpoint_at_sal (gdbarch,
4609 sr_sal, null_frame_id);
4610 keep_going (ecs);
4611 return;
4612 }
4613 }
4614
4615 /* If we're in the return path from a shared library trampoline,
4616 we want to proceed through the trampoline when stepping. */
4617 if (gdbarch_in_solib_return_trampoline (gdbarch,
4618 stop_pc, ecs->stop_func_name))
4619 {
4620 /* Determine where this trampoline returns. */
4621 CORE_ADDR real_stop_pc;
4622
4623 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4624
4625 if (debug_infrun)
4626 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4627
4628 /* Only proceed through if we know where it's going. */
4629 if (real_stop_pc)
4630 {
4631 /* And put the step-breakpoint there and go until there. */
4632 struct symtab_and_line sr_sal;
4633
4634 init_sal (&sr_sal); /* initialize to zeroes */
4635 sr_sal.pc = real_stop_pc;
4636 sr_sal.section = find_pc_overlay (sr_sal.pc);
4637 sr_sal.pspace = get_frame_program_space (frame);
4638
4639 /* Do not specify what the fp should be when we stop since
4640 on some machines the prologue is where the new fp value
4641 is established. */
4642 insert_step_resume_breakpoint_at_sal (gdbarch,
4643 sr_sal, null_frame_id);
4644
4645 /* Restart without fiddling with the step ranges or
4646 other state. */
4647 keep_going (ecs);
4648 return;
4649 }
4650 }
4651
4652 stop_pc_sal = find_pc_line (stop_pc, 0);
4653
4654 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4655 the trampoline processing logic, however, there are some trampolines
4656 that have no names, so we should do trampoline handling first. */
4657 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4658 && ecs->stop_func_name == NULL
4659 && stop_pc_sal.line == 0)
4660 {
4661 if (debug_infrun)
4662 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4663
4664 /* The inferior just stepped into, or returned to, an
4665 undebuggable function (where there is no debugging information
4666 and no line number corresponding to the address where the
4667 inferior stopped). Since we want to skip this kind of code,
4668 we keep going until the inferior returns from this
4669 function - unless the user has asked us not to (via
4670 set step-mode) or we no longer know how to get back
4671 to the call site. */
4672 if (step_stop_if_no_debug
4673 || !frame_id_p (frame_unwind_caller_id (frame)))
4674 {
4675 /* If we have no line number and the step-stop-if-no-debug
4676 is set, we stop the step so that the user has a chance to
4677 switch to assembly mode. */
4678 ecs->event_thread->stop_step = 1;
4679 print_stop_reason (END_STEPPING_RANGE, 0);
4680 stop_stepping (ecs);
4681 return;
4682 }
4683 else
4684 {
4685 /* Set a breakpoint at callee's return address (the address
4686 at which the caller will resume). */
4687 insert_step_resume_breakpoint_at_caller (frame);
4688 keep_going (ecs);
4689 return;
4690 }
4691 }
4692
4693 if (ecs->event_thread->step_range_end == 1)
4694 {
4695 /* It is stepi or nexti. We always want to stop stepping after
4696 one instruction. */
4697 if (debug_infrun)
4698 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4699 ecs->event_thread->stop_step = 1;
4700 print_stop_reason (END_STEPPING_RANGE, 0);
4701 stop_stepping (ecs);
4702 return;
4703 }
4704
4705 if (stop_pc_sal.line == 0)
4706 {
4707 /* We have no line number information. That means to stop
4708 stepping (does this always happen right after one instruction,
4709 when we do "s" in a function with no line numbers,
4710 or can this happen as a result of a return or longjmp?). */
4711 if (debug_infrun)
4712 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4713 ecs->event_thread->stop_step = 1;
4714 print_stop_reason (END_STEPPING_RANGE, 0);
4715 stop_stepping (ecs);
4716 return;
4717 }
4718
4719 /* Look for "calls" to inlined functions, part one. If the inline
4720 frame machinery detected some skipped call sites, we have entered
4721 a new inline function. */
4722
4723 if (frame_id_eq (get_frame_id (get_current_frame ()),
4724 ecs->event_thread->step_frame_id)
4725 && inline_skipped_frames (ecs->ptid))
4726 {
4727 struct symtab_and_line call_sal;
4728
4729 if (debug_infrun)
4730 fprintf_unfiltered (gdb_stdlog,
4731 "infrun: stepped into inlined function\n");
4732
4733 find_frame_sal (get_current_frame (), &call_sal);
4734
4735 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4736 {
4737 /* For "step", we're going to stop. But if the call site
4738 for this inlined function is on the same source line as
4739 we were previously stepping, go down into the function
4740 first. Otherwise stop at the call site. */
4741
4742 if (call_sal.line == ecs->event_thread->current_line
4743 && call_sal.symtab == ecs->event_thread->current_symtab)
4744 step_into_inline_frame (ecs->ptid);
4745
4746 ecs->event_thread->stop_step = 1;
4747 print_stop_reason (END_STEPPING_RANGE, 0);
4748 stop_stepping (ecs);
4749 return;
4750 }
4751 else
4752 {
4753 /* For "next", we should stop at the call site if it is on a
4754 different source line. Otherwise continue through the
4755 inlined function. */
4756 if (call_sal.line == ecs->event_thread->current_line
4757 && call_sal.symtab == ecs->event_thread->current_symtab)
4758 keep_going (ecs);
4759 else
4760 {
4761 ecs->event_thread->stop_step = 1;
4762 print_stop_reason (END_STEPPING_RANGE, 0);
4763 stop_stepping (ecs);
4764 }
4765 return;
4766 }
4767 }
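/* A hedged source-level example of the two behaviors implemented
   above (the function and variable names are hypothetical):

       static inline int sq (int x) { return x * x; }
       ...
       y = sq (v);            <- call site of the inlined function

   For "step", GDB stops at the call site, except that if the call
   site is on the very line being stepped it first descends into sq
   and stops there.  For "next", GDB continues through sq when the
   call site is on the line being stepped, and otherwise stops at the
   call site.  */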
4768
4769 /* Look for "calls" to inlined functions, part two. If we are still
4770 in the same real function we were stepping through, but we have
4771 to go further up to find the exact frame ID, we are stepping
4772 through a more inlined call beyond its call site. */
4773
4774 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4775 && !frame_id_eq (get_frame_id (get_current_frame ()),
4776 ecs->event_thread->step_frame_id)
4777 && stepped_in_from (get_current_frame (),
4778 ecs->event_thread->step_frame_id))
4779 {
4780 if (debug_infrun)
4781 fprintf_unfiltered (gdb_stdlog,
4782 "infrun: stepping through inlined function\n");
4783
4784 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4785 keep_going (ecs);
4786 else
4787 {
4788 ecs->event_thread->stop_step = 1;
4789 print_stop_reason (END_STEPPING_RANGE, 0);
4790 stop_stepping (ecs);
4791 }
4792 return;
4793 }
4794
4795 if ((stop_pc == stop_pc_sal.pc)
4796 && (ecs->event_thread->current_line != stop_pc_sal.line
4797 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4798 {
4799 /* We are at the start of a different line. So stop. Note that
4800 we don't stop if we step into the middle of a different line.
4801 That is said to make things like for (;;) statements work
4802 better. */
4803 if (debug_infrun)
4804 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4805 ecs->event_thread->stop_step = 1;
4806 print_stop_reason (END_STEPPING_RANGE, 0);
4807 stop_stepping (ecs);
4808 return;
4809 }
4810
4811 /* We aren't done stepping.
4812
4813 Optimize by setting the stepping range to the line.
4814 (We might not be in the original line, but if we entered a
4815 new line in mid-statement, we continue stepping. This makes
4816 things like for(;;) statements work better.) */
4817
4818 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4819 ecs->event_thread->step_range_end = stop_pc_sal.end;
4820 set_step_info (frame, stop_pc_sal);
4821
4822 if (debug_infrun)
4823 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4824 keep_going (ecs);
4825 }
4826
4827 /* Is thread TP in the middle of single-stepping? */
4828
4829 static int
4830 currently_stepping (struct thread_info *tp)
4831 {
4832 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4833 || tp->trap_expected
4834 || tp->stepping_through_solib_after_catch
4835 || bpstat_should_step ());
4836 }
4837
4838 /* Returns true if any thread *but* the one passed in "data" is in the
4839 middle of stepping or of handling a "next". */
4840
4841 static int
4842 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4843 {
4844 if (tp == data)
4845 return 0;
4846
4847 return (tp->step_range_end
4848 || tp->trap_expected
4849 || tp->stepping_through_solib_after_catch);
4850 }
4851
4852 /* Inferior has stepped into a subroutine call with source code that
4853 we should not step over. Do step to the first line of code in
4854 it. */
4855
4856 static void
4857 handle_step_into_function (struct gdbarch *gdbarch,
4858 struct execution_control_state *ecs)
4859 {
4860 struct symtab *s;
4861 struct symtab_and_line stop_func_sal, sr_sal;
4862
4863 s = find_pc_symtab (stop_pc);
4864 if (s && s->language != language_asm)
4865 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4866 ecs->stop_func_start);
4867
4868 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4869 /* Use the step_resume_break to step until the end of the prologue,
4870 even if that involves jumps (as it seems to on the vax under
4871 4.2). */
4872 /* If the prologue ends in the middle of a source line, continue to
4873 the end of that source line (if it is still within the function).
4874 Otherwise, just go to the end of the prologue. */
4875 if (stop_func_sal.end
4876 && stop_func_sal.pc != ecs->stop_func_start
4877 && stop_func_sal.end < ecs->stop_func_end)
4878 ecs->stop_func_start = stop_func_sal.end;
4879
4880 /* Architectures which require breakpoint adjustment might not be able
4881 to place a breakpoint at the computed address. If so, the test
4882 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4883 ecs->stop_func_start to an address at which a breakpoint may be
4884 legitimately placed.
4885
4886 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4887 made, GDB will enter an infinite loop when stepping through
4888 optimized code consisting of VLIW instructions which contain
4889 subinstructions corresponding to different source lines. On
4890 FR-V, it's not permitted to place a breakpoint on any but the
4891 first subinstruction of a VLIW instruction. When a breakpoint is
4892 set, GDB will adjust the breakpoint address to the beginning of
4893 the VLIW instruction. Thus, we need to make the corresponding
4894 adjustment here when computing the stop address. */
4895
4896 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4897 {
4898 ecs->stop_func_start
4899 = gdbarch_adjust_breakpoint_address (gdbarch,
4900 ecs->stop_func_start);
4901 }
4902
4903 if (ecs->stop_func_start == stop_pc)
4904 {
4905 /* We are already there: stop now. */
4906 ecs->event_thread->stop_step = 1;
4907 print_stop_reason (END_STEPPING_RANGE, 0);
4908 stop_stepping (ecs);
4909 return;
4910 }
4911 else
4912 {
4913 /* Put the step-breakpoint there and go until there. */
4914 init_sal (&sr_sal); /* initialize to zeroes */
4915 sr_sal.pc = ecs->stop_func_start;
4916 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4917 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4918
4919 /* Do not specify what the fp should be when we stop since on
4920 some machines the prologue is where the new fp value is
4921 established. */
4922 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4923
4924 /* And make sure stepping stops right away then. */
4925 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4926 }
4927 keep_going (ecs);
4928 }
4929
4930 /* Inferior has stepped backward into a subroutine call with source
4931 code that we should not step over. Do step to the beginning of the
4932 last line of code in it. */
4933
4934 static void
4935 handle_step_into_function_backward (struct gdbarch *gdbarch,
4936 struct execution_control_state *ecs)
4937 {
4938 struct symtab *s;
4939 struct symtab_and_line stop_func_sal;
4940
4941 s = find_pc_symtab (stop_pc);
4942 if (s && s->language != language_asm)
4943 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4944 ecs->stop_func_start);
4945
4946 stop_func_sal = find_pc_line (stop_pc, 0);
4947
4948 /* OK, we're just going to keep stepping here. */
4949 if (stop_func_sal.pc == stop_pc)
4950 {
4951 /* We're there already. Just stop stepping now. */
4952 ecs->event_thread->stop_step = 1;
4953 print_stop_reason (END_STEPPING_RANGE, 0);
4954 stop_stepping (ecs);
4955 }
4956 else
4957 {
4958 /* Else just reset the step range and keep going.
4959 No step-resume breakpoint, they don't work for
4960 epilogues, which can have multiple entry paths. */
4961 ecs->event_thread->step_range_start = stop_func_sal.pc;
4962 ecs->event_thread->step_range_end = stop_func_sal.end;
4963 keep_going (ecs);
4964 }
4965 return;
4966 }
4967
4968 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4969 This is used both to skip over functions and to skip over code. */
4970
4971 static void
4972 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4973 struct symtab_and_line sr_sal,
4974 struct frame_id sr_id)
4975 {
4976 /* There should never be more than one step-resume or longjmp-resume
4977 breakpoint per thread, so we should never be setting a new
4978 step_resume_breakpoint when one is already active. */
4979 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4980
4981 if (debug_infrun)
4982 fprintf_unfiltered (gdb_stdlog,
4983 "infrun: inserting step-resume breakpoint at %s\n",
4984 paddress (gdbarch, sr_sal.pc));
4985
4986 inferior_thread ()->step_resume_breakpoint
4987 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4988 }
4989
4990 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4991 to skip a potential signal handler.
4992
4993 This is called with the interrupted function's frame. The signal
4994 handler, when it returns, will resume the interrupted function at
4995 RETURN_FRAME.pc. */
4996
4997 static void
4998 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4999 {
5000 struct symtab_and_line sr_sal;
5001 struct gdbarch *gdbarch;
5002
5003 gdb_assert (return_frame != NULL);
5004 init_sal (&sr_sal); /* initialize to zeros */
5005
5006 gdbarch = get_frame_arch (return_frame);
5007 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5008 sr_sal.section = find_pc_overlay (sr_sal.pc);
5009 sr_sal.pspace = get_frame_program_space (return_frame);
5010
5011 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5012 get_stack_frame_id (return_frame));
5013 }
5014
5015 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
5016 puts a breakpoint at the previous frame's PC. This is used to
5017 skip a function after stepping into it (for "next" or if the called
5018 function has no debugging information).
5019
5020 The current function has almost always been reached by single
5021 stepping a call or return instruction. NEXT_FRAME belongs to the
5022 current function, and the breakpoint will be set at the caller's
5023 resume address.
5024
5025 This is a separate function rather than reusing
5026 insert_step_resume_breakpoint_at_frame in order to avoid
5027 get_prev_frame, which may stop prematurely (see the implementation
5028 of frame_unwind_caller_id for an example). */
5029
5030 static void
5031 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5032 {
5033 struct symtab_and_line sr_sal;
5034 struct gdbarch *gdbarch;
5035
5036 /* We shouldn't have gotten here if we don't know where the call site
5037 is. */
5038 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5039
5040 init_sal (&sr_sal); /* initialize to zeros */
5041
5042 gdbarch = frame_unwind_caller_arch (next_frame);
5043 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5044 frame_unwind_caller_pc (next_frame));
5045 sr_sal.section = find_pc_overlay (sr_sal.pc);
5046 sr_sal.pspace = frame_unwind_program_space (next_frame);
5047
5048 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5049 frame_unwind_caller_id (next_frame));
5050 }
5051
5052 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5053 new breakpoint at the target of a jmp_buf. The handling of
5054 longjmp-resume uses the same mechanisms used for handling
5055 "step-resume" breakpoints. */
5056
5057 static void
5058 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5059 {
5060 /* There should never be more than one step-resume or longjmp-resume
5061 breakpoint per thread, so we should never be setting a new
5062 longjmp_resume_breakpoint when one is already active. */
5063 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5064
5065 if (debug_infrun)
5066 fprintf_unfiltered (gdb_stdlog,
5067 "infrun: inserting longjmp-resume breakpoint at %s\n",
5068 paddress (gdbarch, pc));
5069
5070 inferior_thread ()->step_resume_breakpoint =
5071 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5072 }
5073
5074 static void
5075 stop_stepping (struct execution_control_state *ecs)
5076 {
5077 if (debug_infrun)
5078 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5079
5080 /* Let callers know we don't want to wait for the inferior anymore. */
5081 ecs->wait_some_more = 0;
5082 }
5083
5084 /* This function handles various cases where we need to continue
5085 waiting for the inferior. */
5086 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5087
5088 static void
5089 keep_going (struct execution_control_state *ecs)
5090 {
5091 /* Make sure normal_stop is called if we get a QUIT handled before
5092 reaching resume. */
5093 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5094
5095 /* Save the pc before execution, to compare with pc after stop. */
5096 ecs->event_thread->prev_pc
5097 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5098
5099 /* Since we are not stopping here, we should keep running the
5100 inferior and not return to the debugger. */
5101
5102 if (ecs->event_thread->trap_expected
5103 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5104 {
5105 /* We took a signal (which we are supposed to pass through to
5106 the inferior, else we'd not get here) and we haven't yet
5107 gotten our trap. Simply continue. */
5108
5109 discard_cleanups (old_cleanups);
5110 resume (currently_stepping (ecs->event_thread),
5111 ecs->event_thread->stop_signal);
5112 }
5113 else
5114 {
5115 /* Either the trap was not expected, but we are continuing
5116 anyway (the user asked that this signal be passed to the
5117 child)
5118 -- or --
5119 The signal was SIGTRAP, i.e. it was our signal, but we
5120 decided we should resume from it.
5121
5122 We're going to run this baby now!
5123
5124 Note that insert_breakpoints won't try to re-insert
5125 already inserted breakpoints. Therefore, we don't
5126 care if breakpoints were already inserted, or not. */
5127
5128 if (ecs->event_thread->stepping_over_breakpoint)
5129 {
5130 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5131
5132 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5133 /* Since we can't do a displaced step, we have to remove
5134 the breakpoint while we step it. To keep things
5135 simple, we remove them all. */
5136 remove_breakpoints ();
5137 }
5138 else
5139 {
5140 struct gdb_exception e;
5141
5142 /* Stop stepping when inserting breakpoints
5143 has failed. */
5144 TRY_CATCH (e, RETURN_MASK_ERROR)
5145 {
5146 insert_breakpoints ();
5147 }
5148 if (e.reason < 0)
5149 {
5150 exception_print (gdb_stderr, e);
5151 stop_stepping (ecs);
5152 return;
5153 }
5154 }
5155
5156 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5157
5158 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5159 specifies that such a signal should be delivered to the
5160 target program).
5161
5162 Typically, this would occur when a user is debugging a
5163 target monitor on a simulator: the target monitor sets a
5164 breakpoint; the simulator encounters this breakpoint and
5165 halts the simulation, handing control to GDB; GDB, noting
5166 that the breakpoint isn't valid, returns control back to the
5167 simulator; the simulator then delivers the hardware
5168 equivalent of a SIGNAL_TRAP to the program being debugged. */
5169
5170 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5171 && !signal_program[ecs->event_thread->stop_signal])
5172 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5173
5174 discard_cleanups (old_cleanups);
5175 resume (currently_stepping (ecs->event_thread),
5176 ecs->event_thread->stop_signal);
5177 }
5178
5179 prepare_to_wait (ecs);
5180 }
5181
5182 /* This function normally comes after a resume, before
5183 handle_inferior_event exits. It takes care of any last bits of
5184 housekeeping, and sets the all-important wait_some_more flag. */
5185
5186 static void
5187 prepare_to_wait (struct execution_control_state *ecs)
5188 {
5189 if (debug_infrun)
5190 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5191
5192 /* This is the old end of the while loop. Let everybody know we
5193 want to wait for the inferior some more and get called again
5194 soon. */
5195 ecs->wait_some_more = 1;
5196 }
5197
5198 /* Print why the inferior has stopped. We always print something when
5199 the inferior exits, or receives a signal. The rest of the cases are
5200 dealt with later on in normal_stop() and print_it_typical(). Ideally
5201 there should be a call to this function from handle_inferior_event()
5202 each time stop_stepping() is called. */
5203 static void
5204 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5205 {
5206 switch (stop_reason)
5207 {
5208 case END_STEPPING_RANGE:
5209 /* We are done with a step/next/si/ni command. */
5210 /* For now print nothing. */
5211 /* Print a message only if not in the middle of doing a "step n"
5212 operation for n > 1 */
5213 if (!inferior_thread ()->step_multi
5214 || !inferior_thread ()->stop_step)
5215 if (ui_out_is_mi_like_p (uiout))
5216 ui_out_field_string
5217 (uiout, "reason",
5218 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5219 break;
5220 case SIGNAL_EXITED:
5221 /* The inferior was terminated by a signal. */
5222 annotate_signalled ();
5223 if (ui_out_is_mi_like_p (uiout))
5224 ui_out_field_string
5225 (uiout, "reason",
5226 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5227 ui_out_text (uiout, "\nProgram terminated with signal ");
5228 annotate_signal_name ();
5229 ui_out_field_string (uiout, "signal-name",
5230 target_signal_to_name (stop_info));
5231 annotate_signal_name_end ();
5232 ui_out_text (uiout, ", ");
5233 annotate_signal_string ();
5234 ui_out_field_string (uiout, "signal-meaning",
5235 target_signal_to_string (stop_info));
5236 annotate_signal_string_end ();
5237 ui_out_text (uiout, ".\n");
5238 ui_out_text (uiout, "The program no longer exists.\n");
5239 break;
5240 case EXITED:
5241 /* The inferior program is finished. */
5242 annotate_exited (stop_info);
5243 if (stop_info)
5244 {
5245 if (ui_out_is_mi_like_p (uiout))
5246 ui_out_field_string (uiout, "reason",
5247 async_reason_lookup (EXEC_ASYNC_EXITED));
5248 ui_out_text (uiout, "\nProgram exited with code ");
5249 ui_out_field_fmt (uiout, "exit-code", "0%o",
5250 (unsigned int) stop_info);
5251 ui_out_text (uiout, ".\n");
5252 }
5253 else
5254 {
5255 if (ui_out_is_mi_like_p (uiout))
5256 ui_out_field_string
5257 (uiout, "reason",
5258 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5259 ui_out_text (uiout, "\nProgram exited normally.\n");
5260 }
5261 /* Support the --return-child-result option. */
5262 return_child_result_value = stop_info;
5263 break;
5264 case SIGNAL_RECEIVED:
5265 /* Signal received. The signal table tells us to print about
5266 it. */
5267 annotate_signal ();
5268
5269 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5270 {
5271 struct thread_info *t = inferior_thread ();
5272
5273 ui_out_text (uiout, "\n[");
5274 ui_out_field_string (uiout, "thread-name",
5275 target_pid_to_str (t->ptid));
5276 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5277 ui_out_text (uiout, " stopped");
5278 }
5279 else
5280 {
5281 ui_out_text (uiout, "\nProgram received signal ");
5282 annotate_signal_name ();
5283 if (ui_out_is_mi_like_p (uiout))
5284 ui_out_field_string
5285 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5286 ui_out_field_string (uiout, "signal-name",
5287 target_signal_to_name (stop_info));
5288 annotate_signal_name_end ();
5289 ui_out_text (uiout, ", ");
5290 annotate_signal_string ();
5291 ui_out_field_string (uiout, "signal-meaning",
5292 target_signal_to_string (stop_info));
5293 annotate_signal_string_end ();
5294 }
5295 ui_out_text (uiout, ".\n");
5296 break;
5297 case NO_HISTORY:
5298 /* Reverse execution: target ran out of history info. */
5299 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5300 break;
5301 default:
5302 internal_error (__FILE__, __LINE__,
5303 _("print_stop_reason: unrecognized enum value"));
5304 break;
5305 }
5306 }
5307 \f
5308
5309 /* Here to return control to GDB when the inferior stops for real.
5310 Print appropriate messages, remove breakpoints, give terminal our modes.
5311
5312 The global stop_print_frame, when nonzero, means print the executing
5313 frame (pc, function, args, file, line number and line text). */
5316
5317 void
5318 normal_stop (void)
5319 {
5320 struct target_waitstatus last;
5321 ptid_t last_ptid;
5322 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5323
5324 get_last_target_status (&last_ptid, &last);
5325
5326 /* If an exception is thrown from this point on, make sure to
5327 propagate GDB's knowledge of the executing state to the
5328 frontend/user running state. A QUIT is an easy exception to see
5329 here, so do this before any filtered output. */
5330 if (!non_stop)
5331 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5332 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5333 && last.kind != TARGET_WAITKIND_EXITED)
5334 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5335
5336 /* In non-stop mode, we don't want GDB to switch threads behind the
5337 user's back, to avoid races where the user is typing a command to
5338 apply to thread x, but GDB switches to thread y before the user
5339 finishes entering the command. */
5340
5341 /* As with the notification of thread events, we want to delay
5342 notifying the user that we've switched thread context until
5343 the inferior actually stops.
5344
5345 There's no point in saying anything if the inferior has exited.
5346 Note that SIGNALLED here means "exited with a signal", not
5347 "received a signal". */
5348 if (!non_stop
5349 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5350 && target_has_execution
5351 && last.kind != TARGET_WAITKIND_SIGNALLED
5352 && last.kind != TARGET_WAITKIND_EXITED)
5353 {
5354 target_terminal_ours_for_output ();
5355 printf_filtered (_("[Switching to %s]\n"),
5356 target_pid_to_str (inferior_ptid));
5357 annotate_thread_changed ();
5358 previous_inferior_ptid = inferior_ptid;
5359 }
5360
5361 if (!breakpoints_always_inserted_mode () && target_has_execution)
5362 {
5363 if (remove_breakpoints ())
5364 {
5365 target_terminal_ours_for_output ();
5366 printf_filtered (_("\
5367 Cannot remove breakpoints because program is no longer writable.\n\
5368 Further execution is probably impossible.\n"));
5369 }
5370 }
5371
5372 /* If an auto-display called a function and that got a signal,
5373 delete that auto-display to avoid an infinite recursion. */
5374
5375 if (stopped_by_random_signal)
5376 disable_current_display ();
5377
5378 /* Don't print a message if in the middle of doing a "step n"
5379 operation for n > 1 */
5380 if (target_has_execution
5381 && last.kind != TARGET_WAITKIND_SIGNALLED
5382 && last.kind != TARGET_WAITKIND_EXITED
5383 && inferior_thread ()->step_multi
5384 && inferior_thread ()->stop_step)
5385 goto done;
5386
5387 target_terminal_ours ();
5388
5389 /* Set the current source location. This will also happen if we
5390 display the frame below, but the current SAL will be incorrect
5391 during a user hook-stop function. */
5392 if (has_stack_frames () && !stop_stack_dummy)
5393 set_current_sal_from_frame (get_current_frame (), 1);
5394
5395 /* Let the user/frontend see the threads as stopped. */
5396 do_cleanups (old_chain);
5397
5398 /* Look up the hook_stop and run it (CLI internally handles problem
5399 of stop_command's pre-hook not existing). */
5400 if (stop_command)
5401 catch_errors (hook_stop_stub, stop_command,
5402 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5403
5404 if (!has_stack_frames ())
5405 goto done;
5406
5407 if (last.kind == TARGET_WAITKIND_SIGNALLED
5408 || last.kind == TARGET_WAITKIND_EXITED)
5409 goto done;
5410
5411 /* Select innermost stack frame - i.e., current frame is frame 0,
5412 and current location is based on that.
5413 Don't do this on return from a stack dummy routine,
5414 or if the program has exited. */
5415
5416 if (!stop_stack_dummy)
5417 {
5418 select_frame (get_current_frame ());
5419
5420 /* Print current location without a level number, if
5421 we have changed functions or hit a breakpoint.
5422 Print source line if we have one.
5423 bpstat_print() contains the logic deciding in detail
5424 what to print, based on the event(s) that just occurred. */
5425
5426 /* If --batch-silent is enabled then there's no need to print the current
5427 source location, and trying to do so risks causing an error message
5428 about missing source files. */
5429 if (stop_print_frame && !batch_silent)
5430 {
5431 int bpstat_ret;
5432 int source_flag;
5433 int do_frame_printing = 1;
5434 struct thread_info *tp = inferior_thread ();
5435
5436 bpstat_ret = bpstat_print (tp->stop_bpstat);
5437 switch (bpstat_ret)
5438 {
5439 case PRINT_UNKNOWN:
5440 /* If we had hit a shared library event breakpoint,
5441 bpstat_print would print out this message. If we hit
5442 an OS-level shared library event, do the same
5443 thing. */
5444 if (last.kind == TARGET_WAITKIND_LOADED)
5445 {
5446 printf_filtered (_("Stopped due to shared library event\n"));
5447 source_flag = SRC_LINE; /* something bogus */
5448 do_frame_printing = 0;
5449 break;
5450 }
5451
5452 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5453 (or should) carry around the function and does (or
5454 should) use that when doing a frame comparison. */
5455 if (tp->stop_step
5456 && frame_id_eq (tp->step_frame_id,
5457 get_frame_id (get_current_frame ()))
5458 && step_start_function == find_pc_function (stop_pc))
5459 source_flag = SRC_LINE; /* finished step, just print source line */
5460 else
5461 source_flag = SRC_AND_LOC; /* print location and source line */
5462 break;
5463 case PRINT_SRC_AND_LOC:
5464 source_flag = SRC_AND_LOC; /* print location and source line */
5465 break;
5466 case PRINT_SRC_ONLY:
5467 source_flag = SRC_LINE;
5468 break;
5469 case PRINT_NOTHING:
5470 source_flag = SRC_LINE; /* something bogus */
5471 do_frame_printing = 0;
5472 break;
5473 default:
5474 internal_error (__FILE__, __LINE__, _("Unknown value."));
5475 }
5476
5477 /* The behavior of this routine with respect to the source
5478 flag is:
5479 SRC_LINE: Print only source line
5480 LOCATION: Print only location
5481 SRC_AND_LOC: Print location and source line */
5482 if (do_frame_printing)
5483 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5484
5485 /* Display the auto-display expressions. */
5486 do_displays ();
5487 }
5488 }
5489
5490 /* Save the function value return registers, if we care.
5491 We might be about to restore their previous contents. */
5492 if (inferior_thread ()->proceed_to_finish)
5493 {
5494 /* This should not be necessary. */
5495 if (stop_registers)
5496 regcache_xfree (stop_registers);
5497
5498 /* NB: The copy goes through to the target picking up the value of
5499 all the registers. */
5500 stop_registers = regcache_dup (get_current_regcache ());
5501 }
5502
5503 if (stop_stack_dummy == STOP_STACK_DUMMY)
5504 {
5505 /* Pop the empty frame that contains the stack dummy.
5506 This also restores inferior state prior to the call
5507 (struct inferior_thread_state). */
5508 struct frame_info *frame = get_current_frame ();
5509
5510 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5511 frame_pop (frame);
5512 /* frame_pop() calls reinit_frame_cache as the last thing it does
5513 which means there's currently no selected frame. We don't need
5514 to re-establish a selected frame if the dummy call returns normally,
5515 that will be done by restore_inferior_status. However, we do have
5516 to handle the case where the dummy call is returning after being
5517 stopped (e.g. the dummy call previously hit a breakpoint). We
5518 can't know which case we have so just always re-establish a
5519 selected frame here. */
5520 select_frame (get_current_frame ());
5521 }
5522
5523 done:
5524 annotate_stopped ();
5525
5526 /* Suppress the stop observer if we're in the middle of:
5527
5528 - a step n (n > 1), as there are still more steps to be done.
5529
5530 - a "finish" command, as the observer will be called in
5531 finish_command_continuation, so it can include the inferior
5532 function's return value.
5533
5534 - calling an inferior function, as we pretend the inferior didn't
5535 run at all. The return value of the call is handled by the
5536 expression evaluator, through call_function_by_hand. */
5537
5538 if (!target_has_execution
5539 || last.kind == TARGET_WAITKIND_SIGNALLED
5540 || last.kind == TARGET_WAITKIND_EXITED
5541 || (!inferior_thread ()->step_multi
5542 && !(inferior_thread ()->stop_bpstat
5543 && inferior_thread ()->proceed_to_finish)
5544 && !inferior_thread ()->in_infcall))
5545 {
5546 if (!ptid_equal (inferior_ptid, null_ptid))
5547 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5548 stop_print_frame);
5549 else
5550 observer_notify_normal_stop (NULL, stop_print_frame);
5551 }
5552
5553 if (target_has_execution)
5554 {
5555 if (last.kind != TARGET_WAITKIND_SIGNALLED
5556 && last.kind != TARGET_WAITKIND_EXITED)
5557 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5558 Delete any breakpoint that is to be deleted at the next stop. */
5559 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5560 }
5561
5562 /* Try to get rid of automatically added inferiors that are no
5563 longer needed. Keeping those around slows down things linearly.
5564 Note that this never removes the current inferior. */
5565 prune_inferiors ();
5566 }
5567
5568 static int
5569 hook_stop_stub (void *cmd)
5570 {
5571 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5572 return (0);
5573 }
5574 \f
5575 int
5576 signal_stop_state (int signo)
5577 {
5578 return signal_stop[signo];
5579 }
5580
5581 int
5582 signal_print_state (int signo)
5583 {
5584 return signal_print[signo];
5585 }
5586
5587 int
5588 signal_pass_state (int signo)
5589 {
5590 return signal_program[signo];
5591 }
5592
5593 int
5594 signal_stop_update (int signo, int state)
5595 {
5596 int ret = signal_stop[signo];
5597
5598 signal_stop[signo] = state;
5599 return ret;
5600 }
5601
5602 int
5603 signal_print_update (int signo, int state)
5604 {
5605 int ret = signal_print[signo];
5606
5607 signal_print[signo] = state;
5608 return ret;
5609 }
5610
5611 int
5612 signal_pass_update (int signo, int state)
5613 {
5614 int ret = signal_program[signo];
5615
5616 signal_program[signo] = state;
5617 return ret;
5618 }
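/* Illustrative sketch (not part of GDB): temporarily changing a
   signal's "pass" disposition and restoring it afterwards with the
   *_update accessors above.  This is only an assumed usage pattern,
   not code called anywhere in this file.  */
#if 0
{
  /* Stop passing SIGINT to the inferior for a while.  */
  int old_pass = signal_pass_update (TARGET_SIGNAL_INT, 0);

  /* ... resume the inferior with SIGINT swallowed by GDB ... */

  /* Restore whatever the user had configured before.  */
  signal_pass_update (TARGET_SIGNAL_INT, old_pass);
}
#endif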
5619
5620 static void
5621 sig_print_header (void)
5622 {
5623 printf_filtered (_("\
5624 Signal Stop\tPrint\tPass to program\tDescription\n"));
5625 }
5626
5627 static void
5628 sig_print_info (enum target_signal oursig)
5629 {
5630 const char *name = target_signal_to_name (oursig);
5631 int name_padding = 13 - strlen (name);
5632
5633 if (name_padding <= 0)
5634 name_padding = 0;
5635
5636 printf_filtered ("%s", name);
5637 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5638 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5639 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5640 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5641 printf_filtered ("%s\n", target_signal_to_string (oursig));
5642 }
5643
5644 /* Specify how various signals in the inferior should be handled. */
5645
5646 static void
5647 handle_command (char *args, int from_tty)
5648 {
5649 char **argv;
5650 int digits, wordlen;
5651 int sigfirst, signum, siglast;
5652 enum target_signal oursig;
5653 int allsigs;
5654 int nsigs;
5655 unsigned char *sigs;
5656 struct cleanup *old_chain;
5657
5658 if (args == NULL)
5659 {
5660 error_no_arg (_("signal to handle"));
5661 }
5662
5663 /* Allocate and zero an array of flags for which signals to handle. */
5664
5665 nsigs = (int) TARGET_SIGNAL_LAST;
5666 sigs = (unsigned char *) alloca (nsigs);
5667 memset (sigs, 0, nsigs);
5668
5669 /* Break the command line up into args. */
5670
5671 argv = gdb_buildargv (args);
5672 old_chain = make_cleanup_freeargv (argv);
5673
5674 /* Walk through the args, looking for signal oursigs, signal names, and
5675 actions. Signal numbers and signal names may be interspersed with
5676 actions, with the actions being performed for all signals cumulatively
5677 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5678
5679 while (*argv != NULL)
5680 {
5681 wordlen = strlen (*argv);
5682 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5683 {;
5684 }
5685 allsigs = 0;
5686 sigfirst = siglast = -1;
5687
5688 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5689 {
5690 /* Apply action to all signals except those used by the
5691 debugger. Silently skip those. */
5692 allsigs = 1;
5693 sigfirst = 0;
5694 siglast = nsigs - 1;
5695 }
5696 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5697 {
5698 SET_SIGS (nsigs, sigs, signal_stop);
5699 SET_SIGS (nsigs, sigs, signal_print);
5700 }
5701 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5702 {
5703 UNSET_SIGS (nsigs, sigs, signal_program);
5704 }
5705 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5706 {
5707 SET_SIGS (nsigs, sigs, signal_print);
5708 }
5709 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5710 {
5711 SET_SIGS (nsigs, sigs, signal_program);
5712 }
5713 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5714 {
5715 UNSET_SIGS (nsigs, sigs, signal_stop);
5716 }
5717 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5718 {
5719 SET_SIGS (nsigs, sigs, signal_program);
5720 }
5721 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5722 {
5723 UNSET_SIGS (nsigs, sigs, signal_print);
5724 UNSET_SIGS (nsigs, sigs, signal_stop);
5725 }
5726 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5727 {
5728 UNSET_SIGS (nsigs, sigs, signal_program);
5729 }
5730 else if (digits > 0)
5731 {
5732 /* It is numeric. The numeric signal refers to our own
5733 internal signal numbering from target.h, not to host/target
5734 signal number. This is a feature; users really should be
5735 using symbolic names anyway, and the common ones like
5736 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5737
5738 sigfirst = siglast = (int)
5739 target_signal_from_command (atoi (*argv));
5740 if ((*argv)[digits] == '-')
5741 {
5742 siglast = (int)
5743 target_signal_from_command (atoi ((*argv) + digits + 1));
5744 }
5745 if (sigfirst > siglast)
5746 {
5747 /* Bet he didn't figure we'd think of this case... */
5748 signum = sigfirst;
5749 sigfirst = siglast;
5750 siglast = signum;
5751 }
5752 }
5753 else
5754 {
5755 oursig = target_signal_from_name (*argv);
5756 if (oursig != TARGET_SIGNAL_UNKNOWN)
5757 {
5758 sigfirst = siglast = (int) oursig;
5759 }
5760 else
5761 {
5762 /* Not a number and not a recognized flag word => complain. */
5763 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5764 }
5765 }
5766
5767 /* If any signal numbers or symbol names were found, set flags for
5768 which signals to apply actions to. */
5769
5770 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5771 {
5772 switch ((enum target_signal) signum)
5773 {
5774 case TARGET_SIGNAL_TRAP:
5775 case TARGET_SIGNAL_INT:
5776 if (!allsigs && !sigs[signum])
5777 {
5778 if (query (_("%s is used by the debugger.\n\
5779 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5780 {
5781 sigs[signum] = 1;
5782 }
5783 else
5784 {
5785 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5786 gdb_flush (gdb_stdout);
5787 }
5788 }
5789 break;
5790 case TARGET_SIGNAL_0:
5791 case TARGET_SIGNAL_DEFAULT:
5792 case TARGET_SIGNAL_UNKNOWN:
5793 /* Make sure that "all" doesn't print these. */
5794 break;
5795 default:
5796 sigs[signum] = 1;
5797 break;
5798 }
5799 }
5800
5801 argv++;
5802 }
5803
5804 for (signum = 0; signum < nsigs; signum++)
5805 if (sigs[signum])
5806 {
5807 target_notice_signals (inferior_ptid);
5808
5809 if (from_tty)
5810 {
5811 /* Show the results. */
5812 sig_print_header ();
5813 for (; signum < nsigs; signum++)
5814 if (sigs[signum])
5815 sig_print_info (signum);
5816 }
5817
5818 break;
5819 }
5820
5821 do_cleanups (old_chain);
5822 }
5823
5824 static void
5825 xdb_handle_command (char *args, int from_tty)
5826 {
5827 char **argv;
5828 struct cleanup *old_chain;
5829
5830 if (args == NULL)
5831 error_no_arg (_("xdb command"));
5832
5833 /* Break the command line up into args. */
5834
5835 argv = gdb_buildargv (args);
5836 old_chain = make_cleanup_freeargv (argv);
5837 if (argv[1] != (char *) NULL)
5838 {
5839 char *argBuf;
5840 int bufLen;
5841
5842 bufLen = strlen (argv[0]) + 20;
5843 argBuf = (char *) xmalloc (bufLen);
5844 if (argBuf)
5845 {
5846 int validFlag = 1;
5847 enum target_signal oursig;
5848
5849 oursig = target_signal_from_name (argv[0]);
5850 memset (argBuf, 0, bufLen);
5851 if (strcmp (argv[1], "Q") == 0)
5852 sprintf (argBuf, "%s %s", argv[0], "noprint");
5853 else
5854 {
5855 if (strcmp (argv[1], "s") == 0)
5856 {
5857 if (!signal_stop[oursig])
5858 sprintf (argBuf, "%s %s", argv[0], "stop");
5859 else
5860 sprintf (argBuf, "%s %s", argv[0], "nostop");
5861 }
5862 else if (strcmp (argv[1], "i") == 0)
5863 {
5864 if (!signal_program[oursig])
5865 sprintf (argBuf, "%s %s", argv[0], "pass");
5866 else
5867 sprintf (argBuf, "%s %s", argv[0], "nopass");
5868 }
5869 else if (strcmp (argv[1], "r") == 0)
5870 {
5871 if (!signal_print[oursig])
5872 sprintf (argBuf, "%s %s", argv[0], "print");
5873 else
5874 sprintf (argBuf, "%s %s", argv[0], "noprint");
5875 }
5876 else
5877 validFlag = 0;
5878 }
5879 if (validFlag)
5880 handle_command (argBuf, from_tty);
5881 else
5882 printf_filtered (_("Invalid signal handling flag.\n"));
5883 if (argBuf)
5884 xfree (argBuf);
5885 }
5886 }
5887 do_cleanups (old_chain);
5888 }
5889
5890 /* Print current contents of the tables set by the handle command.
5891 It is possible we should just be printing signals actually used
5892 by the current target (but for things to work right when switching
5893 targets, all signals should be in the signal tables). */
5894
5895 static void
5896 signals_info (char *signum_exp, int from_tty)
5897 {
5898 enum target_signal oursig;
5899
5900 sig_print_header ();
5901
5902 if (signum_exp)
5903 {
5904 /* First see if this is a symbol name. */
5905 oursig = target_signal_from_name (signum_exp);
5906 if (oursig == TARGET_SIGNAL_UNKNOWN)
5907 {
5908 /* No, try numeric. */
5909 oursig =
5910 target_signal_from_command (parse_and_eval_long (signum_exp));
5911 }
5912 sig_print_info (oursig);
5913 return;
5914 }
5915
5916 printf_filtered ("\n");
5917 /* These ugly casts brought to you by the native VAX compiler. */
5918 for (oursig = TARGET_SIGNAL_FIRST;
5919 (int) oursig < (int) TARGET_SIGNAL_LAST;
5920 oursig = (enum target_signal) ((int) oursig + 1))
5921 {
5922 QUIT;
5923
5924 if (oursig != TARGET_SIGNAL_UNKNOWN
5925 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5926 sig_print_info (oursig);
5927 }
5928
5929 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5930 }
5931
5932 /* The $_siginfo convenience variable is a bit special. We don't know
5933 for sure the type of the value until we actually have a chance to
5934 fetch the data. The type can change depending on gdbarch, so it is
5935 also dependent on which thread you have selected. GDB handles this by:
5936
5937 1. making $_siginfo be an internalvar that creates a new value on
5938 access.
5939
5940 2. making the value of $_siginfo be an lval_computed value. */
5941
5942 /* This function implements the lval_computed support for reading a
5943 $_siginfo value. */
5944
5945 static void
5946 siginfo_value_read (struct value *v)
5947 {
5948 LONGEST transferred;
5949
5950 transferred =
5951 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5952 NULL,
5953 value_contents_all_raw (v),
5954 value_offset (v),
5955 TYPE_LENGTH (value_type (v)));
5956
5957 if (transferred != TYPE_LENGTH (value_type (v)))
5958 error (_("Unable to read siginfo"));
5959 }
5960
5961 /* This function implements the lval_computed support for writing a
5962 $_siginfo value. */
5963
5964 static void
5965 siginfo_value_write (struct value *v, struct value *fromval)
5966 {
5967 LONGEST transferred;
5968
5969 transferred = target_write (&current_target,
5970 TARGET_OBJECT_SIGNAL_INFO,
5971 NULL,
5972 value_contents_all_raw (fromval),
5973 value_offset (v),
5974 TYPE_LENGTH (value_type (fromval)));
5975
5976 if (transferred != TYPE_LENGTH (value_type (fromval)))
5977 error (_("Unable to write siginfo"));
5978 }
5979
5980 static struct lval_funcs siginfo_value_funcs =
5981 {
5982 siginfo_value_read,
5983 siginfo_value_write
5984 };
5985
5986 /* Return a new value with the correct type for the siginfo object of
5987 the current thread using architecture GDBARCH. Return a void value
5988 if there's no object available. */
5989
5990 static struct value *
5991 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5992 {
5993 if (target_has_stack
5994 && !ptid_equal (inferior_ptid, null_ptid)
5995 && gdbarch_get_siginfo_type_p (gdbarch))
5996 {
5997 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5998
5999 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6000 }
6001
6002 return allocate_value (builtin_type (gdbarch)->builtin_void);
6003 }
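/* Illustrative sketch (not shown in this fragment): siginfo_make_value
   is meant to be registered as a lazily computed internal variable, so
   that "$_siginfo" builds a fresh lval_computed value on each access.
   The registration below assumes a create_internalvar_type_lazy
   function with this signature; it would normally be done from an
   _initialize_* routine.  Disabled here.  */
#if 0
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
#endif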
6004
6005 \f
6006 /* Inferior thread state.
6007 These are details related to the inferior itself, and don't include
6008 things like what frame the user had selected or what gdb was doing
6009 with the target at the time.
6010 For inferior function calls these are things we want to restore
6011 regardless of whether the function call successfully completes
6012 or the dummy frame has to be manually popped. */
6013
6014 struct inferior_thread_state
6015 {
6016 enum target_signal stop_signal;
6017 CORE_ADDR stop_pc;
6018 struct regcache *registers;
6019 };
6020
6021 struct inferior_thread_state *
6022 save_inferior_thread_state (void)
6023 {
6024 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
6025 struct thread_info *tp = inferior_thread ();
6026
6027 inf_state->stop_signal = tp->stop_signal;
6028 inf_state->stop_pc = stop_pc;
6029
6030 inf_state->registers = regcache_dup (get_current_regcache ());
6031
6032 return inf_state;
6033 }
6034
6035 /* Restore inferior session state to INF_STATE. */
6036
6037 void
6038 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6039 {
6040 struct thread_info *tp = inferior_thread ();
6041
6042 tp->stop_signal = inf_state->stop_signal;
6043 stop_pc = inf_state->stop_pc;
6044
6045 /* The inferior can be gone if the user types "print exit(0)"
6046 (and perhaps other times). */
6047 if (target_has_execution)
6048 /* NB: The register write goes through to the target. */
6049 regcache_cpy (get_current_regcache (), inf_state->registers);
6050 regcache_xfree (inf_state->registers);
6051 xfree (inf_state);
6052 }
6053
6054 static void
6055 do_restore_inferior_thread_state_cleanup (void *state)
6056 {
6057 restore_inferior_thread_state (state);
6058 }
6059
6060 struct cleanup *
6061 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6062 {
6063 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6064 }
6065
6066 void
6067 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6068 {
6069 regcache_xfree (inf_state->registers);
6070 xfree (inf_state);
6071 }
6072
6073 struct regcache *
6074 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6075 {
6076 return inf_state->registers;
6077 }
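/* Illustrative sketch (not part of GDB): the intended save/restore
   protocol for the inferior_thread_state functions above, as used
   around an inferior function call.  This is a simplified assumption;
   the real usage lives in the infcall code.  */
#if 0
{
  struct inferior_thread_state *saved = save_inferior_thread_state ();
  struct cleanup *chain
    = make_cleanup_restore_inferior_thread_state (saved);

  /* ... set up and run the dummy call; an error unwinds through
     CHAIN, restoring the registers, stop_pc and stop signal ... */

  /* On the normal path, restore (and free) the saved state as well.
     A caller that instead wants to keep the current state would call
     discard_cleanups (chain) and discard_inferior_thread_state (saved).  */
  do_cleanups (chain);
}
#endif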
6078
6079 /* Session related state for inferior function calls.
6080 These are the additional bits of state that need to be restored
6081 when an inferior function call successfully completes. */
6082
6083 struct inferior_status
6084 {
6085 bpstat stop_bpstat;
6086 int stop_step;
6087 enum stop_stack_kind stop_stack_dummy;
6088 int stopped_by_random_signal;
6089 int stepping_over_breakpoint;
6090 CORE_ADDR step_range_start;
6091 CORE_ADDR step_range_end;
6092 struct frame_id step_frame_id;
6093 struct frame_id step_stack_frame_id;
6094 enum step_over_calls_kind step_over_calls;
6095 CORE_ADDR step_resume_break_address;
6096 int stop_after_trap;
6097 int stop_soon;
6098
6099 /* ID of the selected frame at the time the inferior function call was made. */
6100 struct frame_id selected_frame_id;
6101
6102 int proceed_to_finish;
6103 int in_infcall;
6104 };
6105
6106 /* Save all of the information associated with the inferior<==>gdb
6107 connection. */
6108
6109 struct inferior_status *
6110 save_inferior_status (void)
6111 {
6112 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6113 struct thread_info *tp = inferior_thread ();
6114 struct inferior *inf = current_inferior ();
6115
6116 inf_status->stop_step = tp->stop_step;
6117 inf_status->stop_stack_dummy = stop_stack_dummy;
6118 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6119 inf_status->stepping_over_breakpoint = tp->trap_expected;
6120 inf_status->step_range_start = tp->step_range_start;
6121 inf_status->step_range_end = tp->step_range_end;
6122 inf_status->step_frame_id = tp->step_frame_id;
6123 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6124 inf_status->step_over_calls = tp->step_over_calls;
6125 inf_status->stop_after_trap = stop_after_trap;
6126 inf_status->stop_soon = inf->stop_soon;
6127 /* Save original bpstat chain here; replace it with copy of chain.
6128 If caller's caller is walking the chain, they'll be happier if we
6129 hand them back the original chain when restore_inferior_status is
6130 called. */
6131 inf_status->stop_bpstat = tp->stop_bpstat;
6132 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6133 inf_status->proceed_to_finish = tp->proceed_to_finish;
6134 inf_status->in_infcall = tp->in_infcall;
6135
6136 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6137
6138 return inf_status;
6139 }
6140
6141 static int
6142 restore_selected_frame (void *args)
6143 {
6144 struct frame_id *fid = (struct frame_id *) args;
6145 struct frame_info *frame;
6146
6147 frame = frame_find_by_id (*fid);
6148
6149 /* If the previously selected frame can no longer be found, warn and
6150 let the caller fall back to the innermost frame. */
6151 if (frame == NULL)
6152 {
6153 warning (_("Unable to restore previously selected frame."));
6154 return 0;
6155 }
6156
6157 select_frame (frame);
6158
6159 return (1);
6160 }
6161
6162 /* Restore inferior session state to INF_STATUS. */
6163
6164 void
6165 restore_inferior_status (struct inferior_status *inf_status)
6166 {
6167 struct thread_info *tp = inferior_thread ();
6168 struct inferior *inf = current_inferior ();
6169
6170 tp->stop_step = inf_status->stop_step;
6171 stop_stack_dummy = inf_status->stop_stack_dummy;
6172 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6173 tp->trap_expected = inf_status->stepping_over_breakpoint;
6174 tp->step_range_start = inf_status->step_range_start;
6175 tp->step_range_end = inf_status->step_range_end;
6176 tp->step_frame_id = inf_status->step_frame_id;
6177 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6178 tp->step_over_calls = inf_status->step_over_calls;
6179 stop_after_trap = inf_status->stop_after_trap;
6180 inf->stop_soon = inf_status->stop_soon;
6181 bpstat_clear (&tp->stop_bpstat);
6182 tp->stop_bpstat = inf_status->stop_bpstat;
6183 inf_status->stop_bpstat = NULL;
6184 tp->proceed_to_finish = inf_status->proceed_to_finish;
6185 tp->in_infcall = inf_status->in_infcall;
6186
6187 if (target_has_stack)
6188 {
6189 /* The point of catch_errors is that if the stack is clobbered,
6190 walking the stack might encounter a garbage pointer and
6191 error() trying to dereference it. */
6192 if (catch_errors
6193 (restore_selected_frame, &inf_status->selected_frame_id,
6194 "Unable to restore previously selected frame:\n",
6195 RETURN_MASK_ERROR) == 0)
6196 /* Error in restoring the selected frame. Select the innermost
6197 frame. */
6198 select_frame (get_current_frame ());
6199 }
6200
6201 xfree (inf_status);
6202 }
6203
6204 static void
6205 do_restore_inferior_status_cleanup (void *sts)
6206 {
6207 restore_inferior_status (sts);
6208 }
6209
6210 struct cleanup *
6211 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6212 {
6213 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6214 }
6215
6216 void
6217 discard_inferior_status (struct inferior_status *inf_status)
6218 {
6219 /* See save_inferior_status for info on stop_bpstat. */
6220 bpstat_clear (&inf_status->stop_bpstat);
6221 xfree (inf_status);
6222 }
6223 \f
6224 int
6225 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6226 {
6227 struct target_waitstatus last;
6228 ptid_t last_ptid;
6229
6230 get_last_target_status (&last_ptid, &last);
6231
6232 if (last.kind != TARGET_WAITKIND_FORKED)
6233 return 0;
6234
6235 if (!ptid_equal (last_ptid, pid))
6236 return 0;
6237
6238 *child_pid = last.value.related_pid;
6239 return 1;
6240 }
6241
6242 int
6243 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6244 {
6245 struct target_waitstatus last;
6246 ptid_t last_ptid;
6247
6248 get_last_target_status (&last_ptid, &last);
6249
6250 if (last.kind != TARGET_WAITKIND_VFORKED)
6251 return 0;
6252
6253 if (!ptid_equal (last_ptid, pid))
6254 return 0;
6255
6256 *child_pid = last.value.related_pid;
6257 return 1;
6258 }
6259
6260 int
6261 inferior_has_execd (ptid_t pid, char **execd_pathname)
6262 {
6263 struct target_waitstatus last;
6264 ptid_t last_ptid;
6265
6266 get_last_target_status (&last_ptid, &last);
6267
6268 if (last.kind != TARGET_WAITKIND_EXECD)
6269 return 0;
6270
6271 if (!ptid_equal (last_ptid, pid))
6272 return 0;
6273
6274 *execd_pathname = xstrdup (last.value.execd_pathname);
6275 return 1;
6276 }
6277
6278 int
6279 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6280 {
6281 struct target_waitstatus last;
6282 ptid_t last_ptid;
6283
6284 get_last_target_status (&last_ptid, &last);
6285
6286 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6287 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6288 return 0;
6289
6290 if (!ptid_equal (last_ptid, pid))
6291 return 0;
6292
6293 *syscall_number = last.value.syscall_number;
6294 return 1;
6295 }
6296
6297 /* Oft used ptids */
6298 ptid_t null_ptid;
6299 ptid_t minus_one_ptid;
6300
6301 /* Create a ptid given the necessary PID, LWP, and TID components. */
6302
6303 ptid_t
6304 ptid_build (int pid, long lwp, long tid)
6305 {
6306 ptid_t ptid;
6307
6308 ptid.pid = pid;
6309 ptid.lwp = lwp;
6310 ptid.tid = tid;
6311 return ptid;
6312 }
6313
6314 /* Create a ptid from just a pid. */
6315
6316 ptid_t
6317 pid_to_ptid (int pid)
6318 {
6319 return ptid_build (pid, 0, 0);
6320 }
6321
6322 /* Fetch the pid (process id) component from a ptid. */
6323
6324 int
6325 ptid_get_pid (ptid_t ptid)
6326 {
6327 return ptid.pid;
6328 }
6329
6330 /* Fetch the lwp (lightweight process) component from a ptid. */
6331
6332 long
6333 ptid_get_lwp (ptid_t ptid)
6334 {
6335 return ptid.lwp;
6336 }
6337
6338 /* Fetch the tid (thread id) component from a ptid. */
6339
6340 long
6341 ptid_get_tid (ptid_t ptid)
6342 {
6343 return ptid.tid;
6344 }
6345
6346 /* ptid_equal() is used to test equality of two ptids. */
6347
6348 int
6349 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6350 {
6351 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6352 && ptid1.tid == ptid2.tid);
6353 }
6354
6355 /* Returns true if PTID represents a process. */
6356
6357 int
6358 ptid_is_pid (ptid_t ptid)
6359 {
6360 if (ptid_equal (minus_one_ptid, ptid))
6361 return 0;
6362 if (ptid_equal (null_ptid, ptid))
6363 return 0;
6364
6365 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6366 }
6367
6368 int
6369 ptid_match (ptid_t ptid, ptid_t filter)
6370 {
6371 /* Since both parameters have the same type, prevent easy mistakes
6372 from happening. */
6373 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6374 && !ptid_equal (ptid, null_ptid));
6375
6376 if (ptid_equal (filter, minus_one_ptid))
6377 return 1;
6378 if (ptid_is_pid (filter)
6379 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6380 return 1;
6381 else if (ptid_equal (ptid, filter))
6382 return 1;
6383
6384 return 0;
6385 }
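/* Illustrative sketch (not part of GDB): how the ptid constructors,
   accessors and ptid_match filter semantics above fit together.  The
   pid/lwp numbers are made up for the example.  */
#if 0
{
  ptid_t thread = ptid_build (1234, 5678, 0);	/* LWP 5678 of process 1234.  */
  ptid_t process = pid_to_ptid (1234);		/* The process as a whole.  */

  /* A process-wide filter matches every thread of that process...  */
  gdb_assert (ptid_match (thread, process));
  /* ...and minus_one_ptid matches anything.  */
  gdb_assert (ptid_match (thread, minus_one_ptid));
  /* The accessors recover the individual components.  */
  gdb_assert (ptid_get_pid (thread) == 1234 && ptid_get_lwp (thread) == 5678);
}
#endif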
6386
6387 /* restore_inferior_ptid() will be used by the cleanup machinery
6388 to restore the inferior_ptid value saved in a call to
6389 save_inferior_ptid(). */
6390
6391 static void
6392 restore_inferior_ptid (void *arg)
6393 {
6394 ptid_t *saved_ptid_ptr = arg;
6395
6396 inferior_ptid = *saved_ptid_ptr;
6397 xfree (arg);
6398 }
6399
6400 /* Save the value of inferior_ptid so that it may be restored by a
6401 later call to do_cleanups(). Returns the struct cleanup pointer
6402 needed for later doing the cleanup. */
6403
6404 struct cleanup *
6405 save_inferior_ptid (void)
6406 {
6407 ptid_t *saved_ptid_ptr;
6408
6409 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6410 *saved_ptid_ptr = inferior_ptid;
6411 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6412 }
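/* Illustrative sketch (not part of GDB): the usual pattern for
   temporarily switching inferior_ptid with the cleanup above.
   some_other_ptid is a hypothetical stand-in for whatever thread the
   caller needs to operate on.  */
#if 0
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = some_other_ptid;	/* Hypothetical target thread.  */
  /* ... per-thread work goes here ... */

  /* Restores inferior_ptid and frees the saved copy.  */
  do_cleanups (old_chain);
}
#endif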
6413 \f
6414
6415 /* User interface for reverse debugging:
6416 Set exec-direction / show exec-direction commands
6417 (returns error unless target implements to_set_exec_direction method). */
6418
6419 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6420 static const char exec_forward[] = "forward";
6421 static const char exec_reverse[] = "reverse";
6422 static const char *exec_direction = exec_forward;
6423 static const char *exec_direction_names[] = {
6424 exec_forward,
6425 exec_reverse,
6426 NULL
6427 };
6428
6429 static void
6430 set_exec_direction_func (char *args, int from_tty,
6431 struct cmd_list_element *cmd)
6432 {
6433 if (target_can_execute_reverse)
6434 {
6435 if (!strcmp (exec_direction, exec_forward))
6436 execution_direction = EXEC_FORWARD;
6437 else if (!strcmp (exec_direction, exec_reverse))
6438 execution_direction = EXEC_REVERSE;
6439 }
6440 }
6441
6442 static void
6443 show_exec_direction_func (struct ui_file *out, int from_tty,
6444 struct cmd_list_element *cmd, const char *value)
6445 {
6446 switch (execution_direction) {
6447 case EXEC_FORWARD:
6448 fprintf_filtered (out, _("Forward.\n"));
6449 break;
6450 case EXEC_REVERSE:
6451 fprintf_filtered (out, _("Reverse.\n"));
6452 break;
6453 case EXEC_ERROR:
6454 default:
6455 fprintf_filtered (out,
6456 _("Forward (target `%s' does not support exec-direction).\n"),
6457 target_shortname);
6458 break;
6459 }
6460 }
6461
6462 /* User interface for non-stop mode. */
6463
6464 int non_stop = 0;
6465
6466 static void
6467 set_non_stop (char *args, int from_tty,
6468 struct cmd_list_element *c)
6469 {
6470 if (target_has_execution)
6471 {
6472 non_stop_1 = non_stop;
6473 error (_("Cannot change this setting while the inferior is running."));
6474 }
6475
6476 non_stop = non_stop_1;
6477 }
6478
6479 static void
6480 show_non_stop (struct ui_file *file, int from_tty,
6481 struct cmd_list_element *c, const char *value)
6482 {
6483 fprintf_filtered (file,
6484 _("Controlling the inferior in non-stop mode is %s.\n"),
6485 value);
6486 }
6487
6488 static void
6489 show_schedule_multiple (struct ui_file *file, int from_tty,
6490 struct cmd_list_element *c, const char *value)
6491 {
6492 fprintf_filtered (file, _("\
6493 Resuming the execution of threads of all processes is %s.\n"), value);
6494 }
6495
6496 void
6497 _initialize_infrun (void)
6498 {
6499 int i;
6500 int numsigs;
6501
6502 add_info ("signals", signals_info, _("\
6503 What debugger does when program gets various signals.\n\
6504 Specify a signal as argument to print info on that signal only."));
6505 add_info_alias ("handle", "signals", 0);
6506
6507 add_com ("handle", class_run, handle_command, _("\
6508 Specify how to handle a signal.\n\
6509 Args are signals and actions to apply to those signals.\n\
6510 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6511 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6512 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6513 The special arg \"all\" is recognized to mean all signals except those\n\
6514 used by the debugger, typically SIGTRAP and SIGINT.\n\
6515 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6516 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6517 Stop means reenter debugger if this signal happens (implies print).\n\
6518 Print means print a message if this signal happens.\n\
6519 Pass means let program see this signal; otherwise program doesn't know.\n\
6520 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6521 Pass and Stop may be combined."));
6522 if (xdb_commands)
6523 {
6524 add_com ("lz", class_info, signals_info, _("\
6525 What debugger does when program gets various signals.\n\
6526 Specify a signal as argument to print info on that signal only."));
6527 add_com ("z", class_run, xdb_handle_command, _("\
6528 Specify how to handle a signal.\n\
6529 Args are signals and actions to apply to those signals.\n\
6530 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6531 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6532 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6533 The special arg \"all\" is recognized to mean all signals except those\n\
6534 used by the debugger, typically SIGTRAP and SIGINT.\n\
6535 Recognized actions include \"s\" (toggles between stop and nostop),\n\
6536 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6537 nopass), \"Q\" (noprint)\n\
6538 Stop means reenter debugger if this signal happens (implies print).\n\
6539 Print means print a message if this signal happens.\n\
6540 Pass means let program see this signal; otherwise program doesn't know.\n\
6541 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6542 Pass and Stop may be combined."));
6543 }
6544
6545 if (!dbx_commands)
6546 stop_command = add_cmd ("stop", class_obscure,
6547 not_just_help_class_command, _("\
6548 There is no `stop' command, but you can set a hook on `stop'.\n\
6549 This allows you to set a list of commands to be run each time execution\n\
6550 of the program stops."), &cmdlist);
6551
6552 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6553 Set inferior debugging."), _("\
6554 Show inferior debugging."), _("\
6555 When non-zero, inferior specific debugging is enabled."),
6556 NULL,
6557 show_debug_infrun,
6558 &setdebuglist, &showdebuglist);
6559
6560 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6561 Set displaced stepping debugging."), _("\
6562 Show displaced stepping debugging."), _("\
6563 When non-zero, displaced stepping specific debugging is enabled."),
6564 NULL,
6565 show_debug_displaced,
6566 &setdebuglist, &showdebuglist);
6567
6568 add_setshow_boolean_cmd ("non-stop", no_class,
6569 &non_stop_1, _("\
6570 Set whether gdb controls the inferior in non-stop mode."), _("\
6571 Show whether gdb controls the inferior in non-stop mode."), _("\
6572 When debugging a multi-threaded program and this setting is\n\
6573 off (the default, also called all-stop mode), when one thread stops\n\
6574 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6575 all other threads in the program while you interact with the thread of\n\
6576 interest. When you continue or step a thread, you can allow the other\n\
6577 threads to run, or have them remain stopped, but while you inspect any\n\
6578 thread's state, all threads stop.\n\
6579 \n\
6580 In non-stop mode, when one thread stops, other threads can continue\n\
6581 to run freely. You'll be able to step each thread independently,\n\
6582 leave it stopped or free to run as needed."),
6583 set_non_stop,
6584 show_non_stop,
6585 &setlist,
6586 &showlist);
6587
  numsigs = (int) TARGET_SIGNAL_LAST;
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
  signal_print = (unsigned char *)
    xmalloc (sizeof (signal_print[0]) * numsigs);
  signal_program = (unsigned char *)
    xmalloc (sizeof (signal_program[0]) * numsigs);
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
    }

  /* Signals caused by debugger's own actions
     should not be given to the program afterwards. */
  signal_program[TARGET_SIGNAL_TRAP] = 0;
  signal_program[TARGET_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger. */
  signal_stop[TARGET_SIGNAL_ALRM] = 0;
  signal_print[TARGET_SIGNAL_ALRM] = 0;
  signal_stop[TARGET_SIGNAL_VTALRM] = 0;
  signal_print[TARGET_SIGNAL_VTALRM] = 0;
  signal_stop[TARGET_SIGNAL_PROF] = 0;
  signal_print[TARGET_SIGNAL_PROF] = 0;
  signal_stop[TARGET_SIGNAL_CHLD] = 0;
  signal_print[TARGET_SIGNAL_CHLD] = 0;
  signal_stop[TARGET_SIGNAL_IO] = 0;
  signal_print[TARGET_SIGNAL_IO] = 0;
  signal_stop[TARGET_SIGNAL_POLL] = 0;
  signal_print[TARGET_SIGNAL_POLL] = 0;
  signal_stop[TARGET_SIGNAL_URG] = 0;
  signal_print[TARGET_SIGNAL_URG] = 0;
  signal_stop[TARGET_SIGNAL_WINCH] = 0;
  signal_print[TARGET_SIGNAL_WINCH] = 0;

  /* These signals are used internally by user-level thread
     implementations. (See signal(5) on Solaris.) Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation. */
  signal_stop[TARGET_SIGNAL_LWP] = 0;
  signal_print[TARGET_SIGNAL_LWP] = 0;
  signal_stop[TARGET_SIGNAL_WAITING] = 0;
  signal_print[TARGET_SIGNAL_WAITING] = 0;
  signal_stop[TARGET_SIGNAL_CANCEL] = 0;
  signal_print[TARGET_SIGNAL_CANCEL] = 0;

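  /* For reference, the defaults installed above correspond to what a
     user could request interactively with the standard "handle"
     command, for example (comment only):

         (gdb) handle SIGALRM nostop noprint pass
         (gdb) handle SIGCHLD nostop noprint pass

     i.e. signal_stop, signal_print and signal_program map to the
     handle command's stop, print and pass flags.  */
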
  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events. The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            NULL,
                            show_stop_on_solib_events,
                            &setlist, &showlist);

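  /* Illustrative (comment only): "set stop-on-solib-events 1" makes
     GDB return control to the user at every shared library load or
     unload reported by the dynamic linker, which is handy for placing
     breakpoints in libraries that are loaded with dlopen.  */
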
  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
  parent - the original process is debugged after a fork\n\
  child  - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

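  /* Illustrative (comment only): to debug the child of a fork while
     keeping the parent under GDB's control as well, a typical
     combination is

         (gdb) set follow-fork-mode child
         (gdb) set detach-on-fork off

     ("detach-on-fork" is registered further down in this function).  */
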
  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

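  /* Illustrative (comment only):

         (gdb) set follow-exec-mode new

     keeps the pre-exec program around as a separate inferior, so that
     "run" on the original inferior restarts the old executable, as
     described in the help text above.  */
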
  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
        In this mode, no other thread may run during a step command.\n\
        Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func,	/* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

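  /* Illustrative (comment only):

         (gdb) set scheduler-locking step

     lets "step" proceed without other threads running, while "next"
     over a function call may still let them run, exactly as the help
     text above describes.  */
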
  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

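  /* Illustrative (comment only): with more than one inferior attached,
     "set schedule-multiple on" makes "continue" resume the threads of
     all of them instead of only the current process's threads.  */
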
  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

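  /* Illustrative (comment only): "set step-mode on" makes "step" stop
     at the first instruction of a function that has no line number
     information instead of skipping over it.  */
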
  add_setshow_enum_cmd ("displaced-stepping", class_run,
                        can_use_displaced_stepping_enum,
                        &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                        NULL,
                        show_can_use_displaced_stepping,
                        &setlist, &showlist);

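  /* Illustrative (comment only): "set displaced-stepping on" asks GDB
     to step over breakpoints out of line whenever the architecture
     supports it, and "show displaced-stepping" reports the current
     choice, including the "auto" default described above.  */
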
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

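  /* Illustrative (comment only): "set exec-direction reverse" is
     normally combined with a record/replay target, e.g.

         (gdb) record
         (gdb) set exec-direction reverse
         (gdb) continue

     after which execution commands move backwards through the
     recorded history.  */
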
  /* Set/show detach-on-fork: user-settable mode. */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* ptid initializations */
  null_ptid = ptid_build (0, 0, 0);
  minus_one_ptid = ptid_build (-1, 0, 0);
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet. At this point, we're quite sure there
     isn't another convenience variable of the same name. */
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);

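  /* Illustrative (comment only): the lazy convenience variable
     registered above is what makes

         (gdb) print $_siginfo

     work on targets that expose extra signal information; its value is
     only computed when the variable is actually referenced.  */
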
  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution. Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);
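
  /* Illustrative (comment only): "set observer on" puts GDB into the
     read-only mode described above, which is mainly useful when
     monitoring a live program that must not be perturbed.  */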
}